language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
walkccc__LeetCode
|
solutions/496. Next Greater Element I/496.py
|
{
"start": 0,
"end": 351
}
|
class ____:
    def nextGreaterElement(self, nums1: list[int], nums2: list[int]) -> list[int]:
        """For each value in nums1, return the first strictly greater value
        appearing to its right in nums2, or -1 when no such value exists."""
        next_greater: dict[int, int] = {}
        pending: list[int] = []  # monotonically decreasing stack of unresolved values
        for value in nums2:
            # Every pending value smaller than `value` has just found its answer.
            while pending and pending[-1] < value:
                next_greater[pending.pop()] = value
            pending.append(value)
        # Values never popped have no greater element; default to -1.
        return [next_greater.get(value, -1) for value in nums1]
|
Solution
|
python
|
jazzband__django-pipeline
|
tests/tests/test_storage.py
|
{
"start": 896,
"end": 1065
}
|
class ____(DummyCompiler):
    """Dummy compiler that claims responsibility for ``.css`` files."""

    output_extension = "css"

    def match_file(self, path):
        """Return True when *path* names a CSS source file."""
        suffix = "." + self.output_extension  # ".css"
        return path.endswith(suffix)
|
DummyCSSCompiler
|
python
|
sphinx-doc__sphinx
|
sphinx/ext/imgmath.py
|
{
"start": 1088,
"end": 1426
}
|
class ____(SphinxError):
    """Error raised when an external math-rendering command fails."""

    category = 'Math extension error'

    def __init__(
        self, msg: str, stderr: str | None = None, stdout: str | None = None
    ) -> None:
        # Append any captured process output (stderr first, then stdout)
        # so the failure is diagnosable from the exception message alone.
        for label, stream in (('stderr', stderr), ('stdout', stdout)):
            if stream:
                msg += f'\n[{label}]\n{stream}'
        super().__init__(msg)
|
MathExtError
|
python
|
pandas-dev__pandas
|
pandas/tests/io/formats/test_to_latex.py
|
{
"start": 11573,
"end": 20775
}
|
class ____:
    """Tests for ``DataFrame.to_latex`` caption/label/short-caption handling.

    Covers the plain ``table/tabular`` environment as well as ``longtable``
    (GH 25436).  Expected outputs are exact LaTeX strings, dedented via the
    module-level ``_dedent`` helper.
    """

    @pytest.fixture
    def caption_table(self):
        """Caption for table/tabular LaTeX environment."""
        return "a table in a \\texttt{table/tabular} environment"

    @pytest.fixture
    def short_caption(self):
        """Short caption for testing \\caption[short_caption]{full_caption}."""
        return "a table"

    @pytest.fixture
    def label_table(self):
        """Label for table/tabular LaTeX environment."""
        return "tab:table_tabular"

    @pytest.fixture
    def caption_longtable(self):
        """Caption for longtable LaTeX environment."""
        return "a table in a \\texttt{longtable} environment"

    @pytest.fixture
    def label_longtable(self):
        """Label for longtable LaTeX environment."""
        return "tab:longtable"

    def test_to_latex_caption_only(self, df_short, caption_table):
        # GH 25436
        result = df_short.to_latex(caption=caption_table)
        expected = _dedent(
            r"""
            \begin{table}
            \caption{a table in a \texttt{table/tabular} environment}
            \begin{tabular}{lrl}
            \toprule
             & a & b \\
            \midrule
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \bottomrule
            \end{tabular}
            \end{table}
            """
        )
        assert result == expected

    def test_to_latex_label_only(self, df_short, label_table):
        # GH 25436
        result = df_short.to_latex(label=label_table)
        expected = _dedent(
            r"""
            \begin{table}
            \label{tab:table_tabular}
            \begin{tabular}{lrl}
            \toprule
             & a & b \\
            \midrule
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \bottomrule
            \end{tabular}
            \end{table}
            """
        )
        assert result == expected

    def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
        # GH 25436
        result = df_short.to_latex(caption=caption_table, label=label_table)
        expected = _dedent(
            r"""
            \begin{table}
            \caption{a table in a \texttt{table/tabular} environment}
            \label{tab:table_tabular}
            \begin{tabular}{lrl}
            \toprule
             & a & b \\
            \midrule
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \bottomrule
            \end{tabular}
            \end{table}
            """
        )
        assert result == expected

    def test_to_latex_caption_and_shortcaption(
        self,
        df_short,
        caption_table,
        short_caption,
    ):
        # A (full, short) tuple renders as \caption[short]{full}.
        result = df_short.to_latex(caption=(caption_table, short_caption))
        expected = _dedent(
            r"""
            \begin{table}
            \caption[a table]{a table in a \texttt{table/tabular} environment}
            \begin{tabular}{lrl}
            \toprule
             & a & b \\
            \midrule
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \bottomrule
            \end{tabular}
            \end{table}
            """
        )
        assert result == expected

    def test_to_latex_caption_and_shortcaption_list_is_ok(self, df_short):
        # A 2-element list must behave exactly like a 2-tuple.
        caption = ("Long-long-caption", "Short")
        result_tuple = df_short.to_latex(caption=caption)
        result_list = df_short.to_latex(caption=list(caption))
        assert result_tuple == result_list

    def test_to_latex_caption_shortcaption_and_label(
        self,
        df_short,
        caption_table,
        short_caption,
        label_table,
    ):
        # test when the short_caption is provided alongside caption and label
        result = df_short.to_latex(
            caption=(caption_table, short_caption),
            label=label_table,
        )
        expected = _dedent(
            r"""
            \begin{table}
            \caption[a table]{a table in a \texttt{table/tabular} environment}
            \label{tab:table_tabular}
            \begin{tabular}{lrl}
            \toprule
             & a & b \\
            \midrule
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \bottomrule
            \end{tabular}
            \end{table}
            """
        )
        assert result == expected

    @pytest.mark.parametrize(
        "bad_caption",
        [
            ("full_caption", "short_caption", "extra_string"),
            ("full_caption", "short_caption", 1),
            ("full_caption", "short_caption", None),
            ("full_caption",),
            (None,),
        ],
    )
    def test_to_latex_bad_caption_raises(self, bad_caption):
        # test that wrong number of params is raised
        df = DataFrame({"a": [1]})
        msg = "`caption` must be either a string or 2-tuple of strings"
        with pytest.raises(ValueError, match=msg):
            df.to_latex(caption=bad_caption)

    def test_to_latex_two_chars_caption(self, df_short):
        # test that two chars caption is handled correctly
        # it must not be unpacked into long_caption, short_caption.
        result = df_short.to_latex(caption="xy")
        expected = _dedent(
            r"""
            \begin{table}
            \caption{xy}
            \begin{tabular}{lrl}
            \toprule
             & a & b \\
            \midrule
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \bottomrule
            \end{tabular}
            \end{table}
            """
        )
        assert result == expected

    def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
        # GH 25436
        # test when no caption and no label is provided
        # is performed by test_to_latex_longtable()
        result = df_short.to_latex(longtable=True, caption=caption_longtable)
        expected = _dedent(
            r"""
            \begin{longtable}{lrl}
            \caption{a table in a \texttt{longtable} environment} \\
            \toprule
             & a & b \\
            \midrule
            \endfirsthead
            \caption[]{a table in a \texttt{longtable} environment} \\
            \toprule
             & a & b \\
            \midrule
            \endhead
            \midrule
            \multicolumn{3}{r}{Continued on next page} \\
            \midrule
            \endfoot
            \bottomrule
            \endlastfoot
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \end{longtable}
            """
        )
        assert result == expected

    def test_to_latex_longtable_label_only(self, df_short, label_longtable):
        # GH 25436
        result = df_short.to_latex(longtable=True, label=label_longtable)
        expected = _dedent(
            r"""
            \begin{longtable}{lrl}
            \label{tab:longtable} \\
            \toprule
             & a & b \\
            \midrule
            \endfirsthead
            \toprule
             & a & b \\
            \midrule
            \endhead
            \midrule
            \multicolumn{3}{r}{Continued on next page} \\
            \midrule
            \endfoot
            \bottomrule
            \endlastfoot
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \end{longtable}
            """
        )
        assert result == expected

    def test_to_latex_longtable_caption_and_label(
        self,
        df_short,
        caption_longtable,
        label_longtable,
    ):
        # GH 25436
        result = df_short.to_latex(
            longtable=True,
            caption=caption_longtable,
            label=label_longtable,
        )
        expected = _dedent(
            r"""
            \begin{longtable}{lrl}
            \caption{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
            \toprule
             & a & b \\
            \midrule
            \endfirsthead
            \caption[]{a table in a \texttt{longtable} environment} \\
            \toprule
             & a & b \\
            \midrule
            \endhead
            \midrule
            \multicolumn{3}{r}{Continued on next page} \\
            \midrule
            \endfoot
            \bottomrule
            \endlastfoot
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \end{longtable}
            """
        )
        assert result == expected

    def test_to_latex_longtable_caption_shortcaption_and_label(
        self,
        df_short,
        caption_longtable,
        short_caption,
        label_longtable,
    ):
        # test when the caption, the short_caption and the label are provided
        result = df_short.to_latex(
            longtable=True,
            caption=(caption_longtable, short_caption),
            label=label_longtable,
        )
        expected = _dedent(
            r"""
            \begin{longtable}{lrl}
            \caption[a table]{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
            \toprule
             & a & b \\
            \midrule
            \endfirsthead
            \caption[]{a table in a \texttt{longtable} environment} \\
            \toprule
             & a & b \\
            \midrule
            \endhead
            \midrule
            \multicolumn{3}{r}{Continued on next page} \\
            \midrule
            \endfoot
            \bottomrule
            \endlastfoot
            0 & 1 & b1 \\
            1 & 2 & b2 \\
            \end{longtable}
            """
        )
        assert result == expected
|
TestToLatexCaptionLabel
|
python
|
fastai__fastai
|
fastai/callback/tracker.py
|
{
"start": 836,
"end": 2473
}
|
class ____(Callback):
    "A `Callback` that keeps track of the best value in `monitor`."

    order, remove_on_fetch, _only_train_loop = 60, True, True

    def __init__(self,
        monitor='valid_loss',  # value (usually loss or metric) being monitored.
        comp=None,  # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0.,  # minimum delta between the last monitor value and the best monitor value.
        reset_on_fit=True  # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        if comp is None:
            # Losses/errors improve downwards; everything else upwards.
            lower_is_better = 'loss' in monitor or 'error' in monitor
            comp = np.less if lower_is_better else np.greater
        if comp == np.less:
            # Flip the sign so `val - min_delta` always shifts the candidate
            # in the unfavourable direction, whichever way "better" points.
            min_delta = -min_delta
        self.monitor = monitor
        self.comp = comp
        self.min_delta = min_delta
        self.reset_on_fit = reset_on_fit
        self.best = None

    def before_fit(self):
        "Prepare the monitored value"
        # Do not track during lr_find or gather_preds runs.
        self.run = not (hasattr(self, "lr_finder") or hasattr(self, "gather_preds"))
        if self.reset_on_fit or self.best is None:
            self.best = float('inf') if self.comp == np.less else -float('inf')
        metric_names = list(self.recorder.metric_names[1:])
        assert self.monitor in metric_names
        self.idx = metric_names.index(self.monitor)

    def after_epoch(self):
        "Compare the last value to the best up to now"
        val = self.recorder.values[-1][self.idx]
        if self.comp(val - self.min_delta, self.best):
            self.best, self.new_best = val, True
        else:
            self.new_best = False

    def after_fit(self):
        self.run = True
# %% ../../nbs/17_callback.tracker.ipynb 19
|
TrackerCallback
|
python
|
gevent__gevent
|
src/greentest/3.14/test_urllib2_localnet.py
|
{
"start": 8149,
"end": 9501
}
|
class ____(http.server.BaseHTTPRequestHandler):
    """A 'fake proxy' request handler: every authenticated GET receives a
    canned 'zombie invasion' page.  Its real purpose is to exercise the
    digest-authentication machinery in the tests.
    """

    def __init__(self, digest_auth_handler, *args, **kwargs):
        # Must be assigned before the base __init__, which dispatches to
        # do_GET() while handling the request.
        self.digest_auth_handler = digest_auth_handler
        http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def log_message(self, format, *args):
        # Per-request logging is silenced; re-enable the write below to debug.
        # sys.stderr.write(format % args)
        pass

    def do_GET(self):
        parsed = urllib.parse.urlparse(self.path, "http")
        self.short_path = parsed.path
        if self.digest_auth_handler.handle_request(self):
            self.send_response(200, "OK")
            self.send_header("Content-Type", "text/html")
            self.end_headers()
            greeting = "You've reached %s!<BR>" % self.path
            self.wfile.write(greeting.encode("ascii"))
            self.wfile.write(b"Our apologies, but our server is down due to "
                             b"a sudden zombie invasion.")
# Test cases
|
FakeProxyHandler
|
python
|
kubernetes-client__python
|
kubernetes/client/api/version_api.py
|
{
"start": 543,
"end": 5113
}
|
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client so the API object is
        # usable without explicit wiring.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_code(self, **kwargs):  # noqa: E501
        """get_code  # noqa: E501

        get the version information for this server  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_code(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: VersionInfo
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: callers get the deserialized body only, not
        # the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.get_code_with_http_info(**kwargs)  # noqa: E501

    def get_code_with_http_info(self, **kwargs):  # noqa: E501
        """get_code  # noqa: E501

        get the version information for this server  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_code_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(VersionInfo, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # At this point locals() captures just `self` and `kwargs`.
        local_var_params = locals()

        all_params = [
        ]
        # Only the generic transport-control kwargs are accepted here; the
        # endpoint itself takes no parameters.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown kwargs, then flatten the recognised ones into
        # local_var_params for the lookups below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_code" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/version/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='VersionInfo',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
|
VersionApi
|
python
|
spyder-ide__spyder
|
spyder/plugins/run/widgets.py
|
{
"start": 5138,
"end": 19310
}
|
class ____(BaseRunConfigDialog):
    """Run execution parameters edition dialog."""

    def __init__(
        self,
        parent,
        executor_name,
        executor_params: Dict[
            Tuple[str, str], SupportedExecutionRunConfiguration
        ],
        param_names: Dict[Tuple[str, str], List[str]],
        extensions: Optional[List[str]] = None,
        contexts: Optional[Dict[str, List[str]]] = None,
        current_params: Optional[ExtendedRunExecutionParameters] = None,
        extension: Optional[str] = None,
        context: Optional[str] = None,
        new_config: bool = False
    ):
        super().__init__(parent, True)
        self.executor_name = executor_name
        self.executor_params = executor_params
        self.param_names = param_names
        self.current_params = current_params
        self.extensions = extensions or []
        self.contexts = contexts or {}
        self.extension = extension
        self.context = context
        self.new_config = new_config

        # Display name for the configuration being edited (None until known).
        self.parameters_name = None
        if current_params is not None:
            self.parameters_name = (
                _("Default")
                if current_params["default"]
                else current_params["name"]
            )

        self.current_widget = None
        self.status = RunDialogStatus.Close
        # Set by accept(); returned by get_configuration().
        self.saved_conf = None

    # ---- Public methods
    # -------------------------------------------------------------------------
    def setup(self):
        """Build the dialog UI and restore state from current_params."""
        # --- Configuration name
        if self.new_config:
            params_name_text = _("Save configuration as:")
        else:
            params_name_text = _("Configuration name:")

        params_name_label = QLabel(params_name_text)
        self.store_params_text = QLineEdit(self)
        self.store_params_text.setMinimumWidth(250)
        store_params_layout = QHBoxLayout()
        store_params_layout.addWidget(params_name_label)
        store_params_layout.addWidget(self.store_params_text)
        store_params_layout.addStretch(1)

        # This action needs to be added before setting an icon for it so that
        # it doesn't show up in the line edit (despite being set as not visible
        # below). That's probably a Qt bug.
        status_action = QAction(self)
        self.store_params_text.addAction(
            status_action, QLineEdit.TrailingPosition
        )
        self.store_params_text.status_action = status_action
        status_action.setIcon(ima.icon("error"))
        status_action.setVisible(False)

        # This is necessary to fix the style of the tooltip shown inside the
        # lineedit
        store_params_css = qstylizer.style.StyleSheet()
        store_params_css["QLineEdit QToolTip"].setValues(
            padding="1px 2px",
        )
        self.store_params_text.setStyleSheet(store_params_css.toString())

        # --- Extension and context widgets
        ext_combo_label = QLabel(_("File extension:"))
        context_combo_label = QLabel(_("Run context:"))

        self.extension_combo = SpyderComboBox(self)
        self.extension_combo.addItems(self.extensions)
        self.extension_combo.currentIndexChanged.connect(
            self.extension_changed)

        self.context_combo = SpyderComboBox(self)
        self.context_combo.currentIndexChanged.connect(self.context_changed)

        self.extension_combo.setMinimumWidth(150)
        self.context_combo.setMinimumWidth(150)

        ext_context_g_layout = QGridLayout()
        ext_context_g_layout.addWidget(ext_combo_label, 0, 0)
        ext_context_g_layout.addWidget(self.extension_combo, 0, 1)
        ext_context_g_layout.addWidget(context_combo_label, 1, 0)
        ext_context_g_layout.addWidget(self.context_combo, 1, 1)

        ext_context_layout = QHBoxLayout()
        ext_context_layout.addLayout(ext_context_g_layout)
        ext_context_layout.addStretch(1)

        # --- Runner settings
        # Holds the per-(extension, context) configuration widget; repopulated
        # by context_changed().
        self.stack = QStackedWidget(self)

        # --- Working directory settings
        self.wdir_group = QGroupBox(_("Working directory settings"))
        wdir_layout = QVBoxLayout(self.wdir_group)
        wdir_layout.setContentsMargins(
            3 * AppStyle.MarginSize,
            3 * AppStyle.MarginSize,
            3 * AppStyle.MarginSize,
            AppStyle.MarginSize if MAC else 0,
        )

        self.file_dir_radio = QRadioButton(FILE_DIR)
        wdir_layout.addWidget(self.file_dir_radio)

        self.cwd_radio = QRadioButton(CW_DIR)
        wdir_layout.addWidget(self.cwd_radio)

        self.fixed_dir_radio = QRadioButton(FIXED_DIR)
        self.wd_edit = QLineEdit(self)
        # The custom path field is only editable when "fixed dir" is chosen.
        self.fixed_dir_radio.toggled.connect(self.wd_edit.setEnabled)
        self.wd_edit.setEnabled(False)
        browse_btn = QPushButton(ima.icon('DirOpenIcon'), '', self)
        browse_btn.setToolTip(_("Select directory"))
        browse_btn.clicked.connect(self.select_directory)
        browse_btn.setIconSize(
            QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
        )

        fixed_dir_layout = QHBoxLayout()
        fixed_dir_layout.addWidget(self.fixed_dir_radio)
        fixed_dir_layout.addWidget(self.wd_edit)
        fixed_dir_layout.addWidget(browse_btn)
        wdir_layout.addLayout(fixed_dir_layout)

        # --- Final layout
        layout = self.add_widgets(
            store_params_layout,
            4 * AppStyle.MarginSize,
            ext_context_layout,
            (3 if MAC else 4) * AppStyle.MarginSize,
            self.stack,
            self.wdir_group,
            (-2 if MAC else 1) * AppStyle.MarginSize,
        )
        layout.addStretch()
        layout.setContentsMargins(*((AppStyle.InnerContentPadding,) * 4))
        self.add_button_box(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)

        # --- Settings
        self.setWindowTitle(
            _("New run configuration for: {}").format(self.executor_name)
        )
        self.layout().setSizeConstraint(QLayout.SetFixedSize)

        extension_index = 0
        if self.extension is not None:
            extension_index = self.extensions.index(self.extension)
            self.extension_combo.setEnabled(False)
        self.extension_combo.setCurrentIndex(extension_index)

        # This is necessary because extension_changed is not triggered
        # automatically for this extension_index.
        if extension_index == 0:
            self.extension_changed(extension_index)

        if self.context is not None:
            self.context_combo.setEnabled(False)

        if self.parameters_name:
            self.store_params_text.setText(self.parameters_name)

            # Don't allow to change name for default or already saved params.
            if self.current_params["default"] or not self.new_config:
                self.store_params_text.setEnabled(False)

        # --- Stylesheet
        self.setStyleSheet(self._stylesheet)

    def extension_changed(self, index: int):
        """Repopulate the context combo for the newly selected extension."""
        if index < 0:
            return

        self.selected_extension = self.extension_combo.itemText(index)
        contexts = self.contexts[self.selected_extension]

        self.context_combo.clear()
        self.context_combo.addItems(contexts)
        # Reset to -1 first so setting the real index always emits the
        # currentIndexChanged signal (and thus context_changed).
        self.context_combo.setCurrentIndex(-1)

        context_index = 0
        if self.context is not None:
            context_index = contexts.index(self.context)
        self.context_combo.setCurrentIndex(context_index)

    def context_changed(self, index: int, reset: bool = False):
        """Rebuild the executor configuration widget for the new context.

        When *reset* is True the widget is restored to its default
        configuration instead of the currently stored parameters.
        """
        if index < 0:
            return

        # Clear the QStackWidget contents
        self.current_widget = None
        while self.stack.count() > 0:
            widget = self.stack.widget(0)
            self.stack.removeWidget(widget)

        self.selected_context = self.context_combo.itemText(index)

        executor_conf_metadata = self.executor_params[
            (self.selected_extension, self.selected_context)]

        requires_cwd = executor_conf_metadata['requires_cwd']
        ConfigWidget = (executor_conf_metadata['configuration_widget'] or
                        RunExecutorConfigurationGroup)

        if executor_conf_metadata['configuration_widget'] is None:
            self.stack.setEnabled(False)
        else:
            self.stack.setEnabled(True)
        self.wdir_group.setEnabled(requires_cwd)

        self.current_widget = ConfigWidget(
            self, self.selected_context, self.selected_extension, {})
        self.stack.addWidget(self.current_widget)

        working_dir_params = WorkingDirOpts(
            source=WorkingDirSource.ConfigurationDirectory,
            path=None)
        exec_params = RunExecutionParameters(
            working_dir=working_dir_params,
            executor_params=None)

        default_params = self.current_widget.get_default_configuration()

        if self.current_params:
            params = self.current_params['params']
            working_dir_params = params['working_dir']
            exec_params = params

        params_set = (
            default_params
            if reset
            else (exec_params["executor_params"] or default_params)
        )

        # Only apply stored params if their schema matches the widget's
        # defaults (guards against stale configurations).
        if params_set.keys() == default_params.keys():
            self.current_widget.set_configuration(params_set)

        source = working_dir_params['source']
        path = working_dir_params['path']

        if source == WorkingDirSource.ConfigurationDirectory:
            self.file_dir_radio.setChecked(True)
            self.cwd_radio.setChecked(False)
            self.fixed_dir_radio.setChecked(False)
            self.wd_edit.setText('')
        elif source == WorkingDirSource.CurrentDirectory:
            self.file_dir_radio.setChecked(False)
            self.cwd_radio.setChecked(True)
            self.fixed_dir_radio.setChecked(False)
            self.wd_edit.setText('')
        elif source == WorkingDirSource.CustomDirectory:
            self.file_dir_radio.setChecked(False)
            self.cwd_radio.setChecked(False)
            self.fixed_dir_radio.setChecked(True)
            self.wd_edit.setText(path)

        # Nothing is editable for this context, so there is nothing to save.
        if not self.stack.isEnabled() and not self.wdir_group.isEnabled():
            ok_btn = self.bbox.button(QDialogButtonBox.Ok)
            ok_btn.setEnabled(False)

        self.adjustSize()

    def select_directory(self):
        """Select directory"""
        basedir = str(self.wd_edit.text())
        if not osp.isdir(basedir):
            basedir = getcwd_or_home()
        directory = getexistingdirectory(self, _("Select directory"), basedir)
        if directory:
            self.wd_edit.setText(directory)
            self.dir = directory

    def reset_btn_clicked(self):
        """Restore the current context's configuration to its defaults."""
        index = self.context_combo.currentIndex()
        self.context_changed(index, reset=True)

    def run_btn_clicked(self):
        """Mark the dialog result as 'run'."""
        self.status |= RunDialogStatus.Run

    def ok_btn_clicked(self):
        """Mark the dialog result as 'save'."""
        self.status |= RunDialogStatus.Save

    def get_configuration(
        self
    ) -> Tuple[str, str, ExtendedRunExecutionParameters]:
        """Return the (extension, context, params) saved by accept()."""
        return self.saved_conf

    # ---- Qt methods
    # -------------------------------------------------------------------------
    def accept(self) -> None:
        """Validate the dialog contents and store them in saved_conf."""
        self.status |= RunDialogStatus.Save
        widget_conf = self.current_widget.get_configuration()

        self.store_params_text.status_action.setVisible(False)

        path = None
        source = None
        if self.file_dir_radio.isChecked():
            source = WorkingDirSource.ConfigurationDirectory
        elif self.cwd_radio.isChecked():
            source = WorkingDirSource.CurrentDirectory
        else:
            source = WorkingDirSource.CustomDirectory
            path = self.wd_edit.text()

        cwd_opts = WorkingDirOpts(source=source, path=path)

        exec_params = RunExecutionParameters(
            working_dir=cwd_opts, executor_params=widget_conf)

        if self.current_params:
            uuid = self.current_params['uuid']
        else:
            uuid = str(uuid4())

        # Validate name only for new configurations
        name = self.store_params_text.text()
        if self.new_config:
            if name == '':
                # Abort: show the error icon/tooltip instead of closing.
                self.store_params_text.status_action.setVisible(True)
                self.store_params_text.status_action.setToolTip(
                    '\n'.join(textwrap.wrap(EMPTY_NAME, 50))
                )
                return
            else:
                extension = self.extension_combo.lineEdit().text()
                context = self.context_combo.lineEdit().text()
                current_names = self.param_names[(extension, context)]
                if name in current_names:
                    self.store_params_text.status_action.setVisible(True)
                    self.store_params_text.status_action.setToolTip(
                        '\n'.join(textwrap.wrap(REPEATED_NAME, 50))
                    )
                    return

        # Check if params are app default ones.
        # Fixes spyder-ide/spyder#22649
        if self.current_params is None:
            # The user is trying to create new params, so this is not a
            # default.
            is_default = False
        else:
            if self.current_params["default"]:
                # Default params
                is_default = True
            else:
                # User created params
                is_default = False

        ext_exec_params = ExtendedRunExecutionParameters(
            uuid=uuid,
            name=name,
            params=exec_params,
            file_uuid=None,
            default=is_default,
        )
        self.saved_conf = (self.selected_extension, self.selected_context,
                           ext_exec_params)

        super().accept()

    # ---- Private methods
    # -------------------------------------------------------------------------
    @property
    def _stylesheet(self):
        # This avoids the extra bottom margin added by the config dialog since
        # this widget is one of its children
        self._css.QGroupBox.setValues(
            marginBottom='0px',
        )
        return self._css.toString()
|
ExecutionParametersDialog
|
python
|
numba__numba
|
numba/tests/test_sysinfo.py
|
{
"start": 2489,
"end": 4271
}
|
class ____(TestCase):
    """Check numba's OS-specific sysinfo when psutil is (mock-)available."""

    # Synthetic values returned by the mocked psutil.
    mem_total = 2 * 1024 ** 2  # 2_097_152
    mem_available = 1024 ** 2  # 1_048_576
    cpus_list = [1, 2]

    def setUp(self):
        """Replace numba's psutil binding with a mock exposing known values."""
        super(TestSysInfoWithPsutil, self).setUp()
        # Remember the real import flag so tearDown can restore it.
        self.psutil_orig_state = nsi._psutil_import
        # Mocking psutil
        nsi._psutil_import = True
        nsi.psutil = NonCallableMock()
        vm = nsi.psutil.virtual_memory.return_value
        vm.total = self.mem_total
        vm.available = self.mem_available
        if platform.system() in ('Linux', 'Windows',):
            # cpu_affiniy only available on Linux and Windows
            proc = nsi.psutil.Process.return_value
            proc.cpu_affinity.return_value = self.cpus_list
        else:
            nsi.psutil.Process.return_value = None
        # Collected once here; the tests below only inspect this dict.
        self.info = nsi.get_os_spec_info(platform.system())

    def tearDown(self):
        """Restore the real psutil import state."""
        super(TestSysInfoWithPsutil, self).tearDown()
        nsi._psutil_import = self.psutil_orig_state

    def test_has_all_data(self):
        """Memory keys must be present and integer-valued."""
        keys = (nsi._mem_total, nsi._mem_available)
        for k in keys:
            with self.subTest(k=k):
                self.assertIn(k, self.info.keys())
                self.assertIsInstance(self.info[k], int)

    def test_has_correct_values(self):
        """Values must round-trip from the mocked psutil unchanged."""
        self.assertEqual(self.info[nsi._mem_total], self.mem_total)
        self.assertEqual(self.info[nsi._mem_available], self.mem_available)

    @skipUnless(platform.system() in ('Linux', 'Windows'),
                "CPUs allowed info only available on Linux and Windows")
    def test_cpus_list(self):
        """CPU affinity is reported as a count plus a space-joined list."""
        self.assertEqual(self.info[nsi._cpus_allowed], len(self.cpus_list))
        self.assertEqual(self.info[nsi._cpus_list],
                         ' '.join(str(n) for n in self.cpus_list))
|
TestSysInfoWithPsutil
|
python
|
encode__django-rest-framework
|
rest_framework/test.py
|
{
"start": 13890,
"end": 14823
}
|
class ____(testcases.SimpleTestCase):
    """
    Isolate URL patterns on a per-TestCase basis. For example,

    class ATestCase(URLPatternsTestCase):
        urlpatterns = [...]

        def test_something(self):
            ...

    class AnotherTestCase(URLPatternsTestCase):
        urlpatterns = [...]

        def test_something_else(self):
            ...
    """
    @classmethod
    def setUpClass(cls):
        # Get the module of the TestCase subclass
        cls._module = import_module(cls.__module__)
        # Point ROOT_URLCONF at that module for the duration of the class.
        cls._override = override_settings(ROOT_URLCONF=cls.__module__)

        # Keep any urlpatterns the module already defines so they can be
        # restored afterwards (presumably by cleanup_url_patterns — confirm).
        if hasattr(cls._module, 'urlpatterns'):
            cls._module_urlpatterns = cls._module.urlpatterns

        # Install this TestCase's urlpatterns on the module.
        cls._module.urlpatterns = cls.urlpatterns

        cls._override.enable()
        # Class cleanups run LIFO: url patterns are cleaned up first, then
        # the settings override is disabled.
        cls.addClassCleanup(cls._override.disable)
        cls.addClassCleanup(cleanup_url_patterns, cls)

        super().setUpClass()
|
URLPatternsTestCase
|
python
|
django-import-export__django-import-export
|
import_export/tmp_storages.py
|
{
"start": 1710,
"end": 2869
}
|
class ____(BaseStorage):
    """Temporary import storage that persists data through Django's
    file-storage framework, so it also works with remote backends.
    """

    # Django storage backend instance, set by _configure_storage().
    _storage = None

    def __init__(self, **kwargs):
        """Configure the backend and media folder.

        Fix: the original called ``super().__init__(**kwargs)`` twice — once
        at the top and once after injecting ``read_mode``.  The first call
        was redundant (the second repeats it with the same kwargs plus
        ``read_mode``), so only the final call is kept.
        """
        self._configure_storage()
        # Folder inside the storage backend where temp files are kept.
        self.MEDIA_FOLDER = kwargs.get("MEDIA_FOLDER", "django-import-export")
        # issue 1589 - Ensure that for MediaStorage, we read in binary mode
        kwargs.update({"read_mode": "rb"})
        super().__init__(**kwargs)

    def _configure_storage(self):
        """Use the 'import_export' storage alias when configured, else default."""
        from django.core.files.storage import StorageHandler

        sh = StorageHandler()
        self._storage = (
            sh["import_export"] if "import_export" in sh.backends else sh["default"]
        )

    def save(self, data):
        """Store *data* under a fresh UUID-based name (kept in ``self.name``)."""
        if not self.name:
            self.name = uuid4().hex
        self._storage.save(self.get_full_path(), ContentFile(data))

    def read(self):
        """Return the stored payload, read with the configured read mode."""
        with self._storage.open(self.get_full_path(), mode=self.read_mode) as f:
            return f.read()

    def remove(self):
        """Delete the temp file from the storage backend."""
        self._storage.delete(self.get_full_path())

    def get_full_path(self):
        """Return the backend path, prefixed with MEDIA_FOLDER when set."""
        if self.MEDIA_FOLDER is not None:
            return os.path.join(self.MEDIA_FOLDER, self.name)
        return self.name
|
MediaStorage
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/entrypoint.py
|
{
"start": 264,
"end": 1445
}
|
class ____:
    # NOTE(review): this file is a Pyre taint-analysis fixture; it is
    # analyzed rather than executed, which is presumably why the methods
    # below take no `self` — confirm against the test harness.
    def some_entrypoint_function():
        # Reachable from main() via MyClass().some_entrypoint_function().
        glob.append(1)

    @MyEntrypoint
    def method_entrypoint_with_decorator():
        # Marked reachable via the @MyEntrypoint decorator.
        glob.append(1)


def nested_run():
    # Global write hidden inside a nested function that is called locally.
    def do_the_thing():
        glob.append(1)

    do_the_thing()


def immediate_examples():
    # Direct write to the module global.
    glob.append(1)


def this_one_shouldnt_be_found():
    # Never reached from an entrypoint: the analysis must NOT report it.
    glob.append(1)


def transitive_call_with_globals_passed_in(local_list: List[int]):
    # Mutates whatever list it is handed; tainted only when given `glob`.
    local_list.append(1)


def transitive_call_accessing_globals():
    glob.append(1)


def leak_globals_by_passing_in():
    # Leak: the global list escapes into a mutating callee.
    transitive_call_with_globals_passed_in(glob)


def leak_globals_by_transitive_call():
    # Leak: reached through a call chain rather than directly.
    transitive_call_accessing_globals()


@MyEntrypoint
def function_entrypoint_with_decorator():
    glob.append(1)


def entrypoint_into_lambda():
    # Entrypoint declared on a locally-defined function.
    @MyEntrypoint
    def lambda_entrypoint_with_decorator():
        glob.append(1)

    lambda_entrypoint_with_decorator()


def get_these():
    # Everything called from here should be flagged by the analysis.
    immediate_examples()
    leak_globals_by_passing_in()
    leak_globals_by_transitive_call()
    nested_run()


def main():
    get_these()
    this_one_shouldnt_be_found()
    MyClass().some_entrypoint_function()


if __name__ == "__main__":
    main()
|
MyClass
|
python
|
google__pytype
|
build_scripts/release.py
|
{
"start": 592,
"end": 2587
}
|
class ____(Exception):
    """Error raised for failures during the release process.

    The human-readable description is kept on ``msg`` rather than in the
    exception args (the base class is initialised with no arguments).
    """

    def __init__(self, msg):
        super().__init__()
        self.msg = msg
def parse_args():
    """Parse and return the command line args."""
    allowed_modes = (TEST_MODE, RELEASE_MODE)
    # Build the help text up front; the %-substitution yields the same
    # string the inline form produced.
    help_text = (
        "Specify if the release should be uploaded to pypi or testpyi. Can "
        "take a value of %s or %s" % allowed_modes
    )
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--mode", type=str, default=f"{TEST_MODE}", help=help_text
    )
    args = parser.parse_args()
    # argparse does not restrict the value itself, so validate manually.
    if args.mode not in allowed_modes:
        sys.exit(f"Invalid --mode option. Should be one of {allowed_modes}")
    return args
def verify_no_pytype_installation_exists():
    """Abort unless we are in a virtualenv with no pytype installed."""
    try:
        import pytype as _  # pylint: disable=g-import-not-at-top
        installed = True
    except ImportError:
        # This is what we want - that Pytype does not already exist.
        installed = False
    if installed:
        sys.exit(
            "ERROR: Pytype installation detected; Run this script from inside "
            "a virtualenv without a pytype installation."
        )
def verify_pypirc_exists():
    """Abort unless the user has a ``~/.pypirc`` credentials file."""
    home = os.path.expanduser("~")
    if not os.path.exists(os.path.join(home, ".pypirc")):
        sys.exit("ERROR: '.pypirc' file not found.")
def check_if_version_is_ok():
    """Prompt the user to confirm that the version in __version__.py is OK."""
    sys.path.append(build_utils.PYTYPE_SRC_ROOT)
    # Import the version module from the source tree just added to sys.path.
    version_mod = __import__("pytype.__version__", fromlist=["pytype"])
    version = getattr(version_mod, "__version__")
    answer = input("Making a release with version %s; Continue? " % version)
    if answer not in ("y", "Y", "yes", "YES"):
        sys.exit("Aborting release.")
def upload_package(package_path, test=False):
    """Upload the built distributions under *package_path* with twine.

    Raises ReleaseError when twine exits non-zero.
    """
    twine_cmd = ["twine", "upload"]
    if test:
        # Target the testpypi repository instead of the real index.
        twine_cmd += ["--repository", "testpypi"]
    twine_cmd.append(os.path.join(package_path, "*"))
    print(f"Uploading: {twine_cmd}")
    returncode, stdout = build_utils.run_cmd(twine_cmd)
    if returncode != 0:
        raise ReleaseError(f"Package upload failed:\n{stdout}")
|
ReleaseError
|
python
|
sympy__sympy
|
sympy/codegen/fnodes.py
|
{
"start": 1484,
"end": 2197
}
|
class ____(Token):
    """ Represents a renaming in a use statement in Fortran.

    Examples
    ========

    >>> from sympy.codegen.fnodes import use_rename, use
    >>> from sympy import fcode
    >>> ren = use_rename("thingy", "convolution2d")
    >>> print(fcode(ren, source_format='free'))
    thingy => convolution2d
    >>> full = use('signallib', only=['snr', ren])
    >>> print(fcode(full, source_format='free'))
    use signallib, only: snr, thingy => convolution2d

    """
    # Token fields: `local` is the new (local) name, `original` the name
    # being renamed in the used module.
    __slots__ = _fields = ('local', 'original')
    # Both fields are coerced to codegen String nodes on construction.
    _construct_local = String
    _construct_original = String
def _name(arg):
if hasattr(arg, 'name'):
return arg.name
else:
return String(arg)
|
use_rename
|
python
|
jina-ai__jina
|
jina/orchestrate/flow/base.py
|
{
"start": 2368,
"end": 148029
}
|
class ____(
PostMixin,
ProfileMixin,
HealthCheckMixin,
JAMLCompatible,
BaseOrchestrator,
metaclass=FlowType,
):
"""Flow is how Jina streamlines and distributes Executors."""
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
grpc_channel_options: Optional[dict] = None,
host: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
metrics: Optional[bool] = False,
metrics_exporter_host: Optional[str] = None,
metrics_exporter_port: Optional[int] = None,
port: Optional[int] = None,
prefetch: Optional[int] = 1000,
protocol: Optional[Union[str, List[str]]] = 'GRPC',
proxy: Optional[bool] = False,
reuse_session: Optional[bool] = False,
suppress_root_logging: Optional[bool] = False,
tls: Optional[bool] = False,
traces_exporter_host: Optional[str] = None,
traces_exporter_port: Optional[int] = None,
tracing: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
:param host: The host of the Gateway, which the client should connect to, by default it is 0.0.0.0.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
:param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
:param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
:param port: The port of the Gateway, which the client should connect to.
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (1000 requests is the default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param reuse_session: True if HTTPClient should reuse ClientSession. If true, user will be responsible to close it
:param suppress_root_logging: If set, then no root handlers will be suppressed from logging.
:param tls: If set, connect to gateway using tls encryption
:param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
:param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
:param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compression: Optional[str] = None,
cors: Optional[bool] = False,
deployments_addresses: Optional[str] = '{}',
deployments_metadata: Optional[str] = '{}',
deployments_no_reduce: Optional[str] = '[]',
description: Optional[str] = None,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_graphql_endpoint: Optional[bool] = False,
floating: Optional[bool] = False,
graph_conditions: Optional[str] = '{}',
graph_description: Optional[str] = '{}',
grpc_channel_options: Optional[dict] = None,
grpc_server_options: Optional[dict] = None,
host: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
metrics: Optional[bool] = False,
metrics_exporter_host: Optional[str] = None,
metrics_exporter_port: Optional[int] = None,
monitoring: Optional[bool] = False,
name: Optional[str] = 'gateway',
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
port: Optional[int] = None,
port_monitoring: Optional[int] = None,
prefetch: Optional[int] = 1000,
protocol: Optional[Union[str, List[str]]] = ['GRPC'],
provider: Optional[str] = ['NONE'],
provider_endpoint: Optional[str] = None,
proxy: Optional[bool] = False,
py_modules: Optional[List] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
reload: Optional[bool] = False,
replicas: Optional[int] = 1,
retries: Optional[int] = -1,
runtime_cls: Optional[str] = 'GatewayRuntime',
ssl_certfile: Optional[str] = None,
ssl_keyfile: Optional[str] = None,
stateful: Optional[bool] = False,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
timeout_send: Optional[int] = None,
title: Optional[str] = None,
traces_exporter_host: Optional[str] = None,
traces_exporter_port: Optional[int] = None,
tracing: Optional[bool] = False,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param deployments_addresses: JSON dictionary with the input addresses of each Deployment
:param deployments_metadata: JSON dictionary with the request metadata for each Deployment
:param deployments_no_reduce: list JSON disabling the built-in merging mechanism for each Deployment listed
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_graphql_endpoint: If set, /graphql endpoint is added to HTTP interface.
:param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
:param graph_conditions: Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.
:param graph_description: Routing graph for the gateway
:param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
:param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
:param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
:param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
:param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param no_crud_endpoints: If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bound with those values will receive data requests.
:param no_debug_endpoints: If set, `/status` `/post` endpoints are removed from HTTP interface.
:param port: The port for input data to bind the gateway server to, by default, random ports between range [49152, 65535] will be assigned. The port argument can be either 1 single value in case only 1 protocol is used or multiple values when many protocols are used.
:param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (1000 requests is the default)
:param protocol: Communication protocol of the server exposed by the Gateway. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
:param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
:param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the gateway
Note that the recommended way is to only import a single module - a simple python file, if your
gateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param reload: If set, the Gateway will restart while serving if YAML configuration source is changed.
:param replicas: The number of replicas of the Gateway. This replicas will only be applied when converted into Kubernetes YAML
:param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
:param runtime_cls: The runtime class to run inside the Pod
:param ssl_certfile: the path to the certificate file
:param ssl_keyfile: the path to the key file
:param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
:param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
:param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
:param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
:param uses: The config of the gateway, it could be one of the followings:
* the string literal of an Gateway class name
* a Gateway YAML file (.yml, .yaml, .jaml)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
reload: Optional[bool] = False,
suppress_root_logging: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param reload: If set, auto-reloading on file changes is enabled: the Flow will restart while blocked if YAML configuration source is changed. This also applies apply to underlying Executors, if their source code or YAML configuration has changed.
:param suppress_root_logging: If set, then no root handlers will be suppressed from logging.
:param uses: The YAML path represents a flow. It can be either a local file path or a URL.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
# implementation_stub_inject_start_flow
"""Create a Flow. Flow is how Jina streamlines and scales Executors.
EXAMPLE USAGE
Python API
.. code-block:: python
from jina import Flow
f = Flow().add(uses='jinahub+docker://SimpleIndexer') # create Flow and add Executor
with f:
f.bock() # serve Flow
To and from YAML configuration
.. code-block:: python
from jina import Flow
f = Flow().add(uses='jinahub+docker://SimpleIndexer') # create Flow and add Executor
f.save_config('flow.yml') # save YAML config file
f = Flow.load_config('flow.yml') # load Flow from YAML config
with f:
f.bock() # serve Flow
All arguments received by {class}`~jina.Flow()` API will be propagated to other entities (Gateway, Executor) with the following exceptions:
- `uses` and `uses_with` won't be passed to Gateway
- `port`, `port_monitoring`, `uses` and `uses_with` won't be passed to Executor
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
:param host: The host of the Gateway, which the client should connect to, by default it is 0.0.0.0.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
:param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
:param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
:param port: The port of the Gateway, which the client should connect to.
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (1000 requests is the default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param reuse_session: True if HTTPClient should reuse ClientSession. If true, user will be responsible to close it
:param suppress_root_logging: If set, then no root handlers will be suppressed from logging.
:param tls: If set, connect to gateway using tls encryption
:param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
:param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
:param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
:param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param deployments_addresses: JSON dictionary with the input addresses of each Deployment
:param deployments_metadata: JSON dictionary with the request metadata for each Deployment
:param deployments_no_reduce: list JSON disabling the built-in merging mechanism for each Deployment listed
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_graphql_endpoint: If set, /graphql endpoint is added to HTTP interface.
:param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
:param graph_conditions: Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.
:param graph_description: Routing graph for the gateway
:param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
:param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
:param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
:param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
:param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param no_crud_endpoints: If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bound with those values will receive data requests.
:param no_debug_endpoints: If set, `/status` `/post` endpoints are removed from HTTP interface.
:param port: The port for input data to bind the gateway server to, by default, random ports between range [49152, 65535] will be assigned. The port argument can be either 1 single value in case only 1 protocol is used or multiple values when many protocols are used.
:param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (1000 requests is the default)
:param protocol: Communication protocol of the server exposed by the Gateway. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
:param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
:param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the gateway
Note that the recommended way is to only import a single module - a simple python file, if your
gateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param reload: If set, the Gateway will restart while serving if YAML configuration source is changed.
:param replicas: The number of replicas of the Gateway. This replicas will only be applied when converted into Kubernetes YAML
:param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
:param runtime_cls: The runtime class to run inside the Pod
:param ssl_certfile: the path to the certificate file
:param ssl_keyfile: the path to the key file
:param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
:param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
:param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
:param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
:param uses: The config of the gateway, it could be one of the followings:
* the string literal of an Gateway class name
* a Gateway YAML file (.yml, .yaml, .jaml)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param reload: If set, auto-reloading on file changes is enabled: the Flow will restart while blocked if YAML configuration source is changed. This also applies apply to underlying Executors, if their source code or YAML configuration has changed.
:param suppress_root_logging: If set, then no root handlers will be suppressed from logging.
:param uses: The YAML path represents a flow. It can be either a local file path or a URL.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR102
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# implementation_stub_inject_end_flow
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._deployment_nodes = OrderedDict() # type: Dict[str, Deployment]
self._inspect_deployments = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_deployment = [
GATEWAY_NAME
] #: default first deployment is gateway, will add when build()
self._update_args(args, **kwargs)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
self._client = None
def _update_args(self, args, **kwargs):
from jina.helper import ArgNamespace
from jina.parsers.flow import set_flow_parser
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = list(vars(args).keys())
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
# gateway args inherit from flow args
self._gateway_kwargs = {
k: v
for k, v in self._common_kwargs.items()
if k not in GATEWAY_ARGS_BLACKLIST
}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from jina.orchestrate.flow.asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(
op_flow, deployment_name, endpoint, connect_to_last_deployment=False
) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_deployment and connect_to_last_deployment:
endpoint = [op_flow._last_deployment]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == deployment_name:
raise FlowTopologyError(
'the income/output of a deployment can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Deployment
endpoint = set(op_flow._inspect_deployments.get(ep, ep) for ep in endpoint)
return endpoint
@property
def _last_deployment(self):
"""Last deployment
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_deployment[-1]
@_last_deployment.setter
def _last_deployment(self, name: str):
"""
Set a Deployment as the last Deployment in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Deployment
"""
if name not in self._deployment_nodes:
raise FlowMissingDeploymentError(f'{name} can not be found in this Flow')
if self._last_changed_deployment and name == self._last_deployment:
pass
else:
self._last_changed_deployment.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(
self,
needs: Union[str, Set[str]],
graph_description: Dict[str, List[str]],
deployments_addresses: Dict[str, List[str]],
deployments_metadata: Dict[str, Dict[str, str]],
graph_conditions: Dict[str, Dict],
deployments_no_reduce: List[str],
**kwargs,
):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port=self.port,
deployment_role=DeploymentRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
env=self.env,
log_config=(
kwargs.get('log_config')
if 'log_config' in kwargs
else self.args.log_config
),
)
)
kwargs.update(self._gateway_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
# We need to check later if the port was manually set or randomly
args.default_port = (
kwargs.get('port', None) is None and kwargs.get('port_expose', None) is None
)
if not args.port:
args.port = random_ports(len(args.protocol))
args.noblock_on_start = True
args.graph_description = json.dumps(graph_description)
args.graph_conditions = json.dumps(graph_conditions)
args.deployments_addresses = json.dumps(deployments_addresses)
args.deployments_metadata = json.dumps(deployments_metadata)
args.deployments_no_reduce = json.dumps(deployments_no_reduce)
self._deployment_nodes[GATEWAY_NAME] = Deployment(
args, needs, include_gateway=False, noblock_on_start=True
)
def _get_deployments_metadata(self) -> Dict[str, Dict[str, str]]:
    """Collect the gRPC metadata of every Deployment that defines any.

    :return: a dictionary of deployment name and its metadata
    """
    metadata_by_name = {}
    for deployment_name, deployment in self._deployment_nodes.items():
        if deployment.grpc_metadata:
            metadata_by_name[deployment_name] = deployment.grpc_metadata
    return metadata_by_name
def _get_deployments_addresses(self) -> Dict[str, List[str]]:
    # connection addresses of every Deployment except the gateway, keyed by name
    return {
        name: deployment._get_connection_list_for_flow()
        for name, deployment in self._deployment_nodes.items()
        if name != GATEWAY_NAME
    }
def _get_k8s_deployments_addresses(
    self, k8s_namespace: str
) -> Dict[str, List[str]]:
    """Build the in-cluster service address of every non-gateway Deployment.

    :param k8s_namespace: the Kubernetes namespace the Flow is deployed into
    :return: mapping from deployment name to a one-element address list, or
        ``None`` when the Flow has no non-gateway Deployments
    """
    from jina.orchestrate.deployments.config.helper import to_compatible_name
    from jina.serve.networking import GrpcConnectionPool

    addresses = {}
    for name, deployment in self._deployment_nodes.items():
        if name == GATEWAY_NAME:
            continue

        # external Deployments are addressed by their own host; in-cluster
        # ones get a `<service>.<namespace>.svc` DNS name
        if deployment.external:
            service_host = f'{deployment.host}'
        elif deployment.head_args:
            service_host = (
                f'{to_compatible_name(deployment.head_args.name)}.{k8s_namespace}.svc'
            )
        else:
            service_host = (
                f'{to_compatible_name(deployment.name)}.{k8s_namespace}.svc'
            )

        if deployment.external:
            port = deployment.head_port if deployment.head_port else deployment.port
        else:
            port = GrpcConnectionPool.K8S_PORT

        addresses[name] = [f'{deployment.protocol.lower()}://{service_host}:{port}']

    return addresses or None
def _get_docker_compose_deployments_addresses(self) -> Dict[str, List[str]]:
    # docker-compose addresses of every Deployment except the gateway
    return {
        name: deployment._docker_compose_address
        for name, deployment in self._deployment_nodes.items()
        if name != GATEWAY_NAME
    }
def _get_graph_conditions(self) -> Dict[str, Dict]:
    # collect the input-document condition (`when`) of each Deployment, if set
    return {
        name: deployment.args.when
        for name, deployment in self._deployment_nodes.items()
        if deployment.args.when is not None
    }
def _get_disabled_reduce_deployments(self) -> List[str]:
    # names of Deployments that opted out of the built-in reduce step
    return [
        name
        for name, deployment in self._deployment_nodes.items()
        if deployment.args.no_reduce
    ]
def _get_graph_representation(self) -> Dict[str, List[str]]:
    """Build the adjacency list describing the Flow topology for the gateway.

    The gateway appears twice in the graph: as ``start-gateway`` (the source
    of every request) and as ``end-gateway`` (appended after the last
    non-floating Deployment).

    :return: mapping from node name to the list of nodes it feeds into
    """

    def _add_node(graph, n):
        # in the graph we need to distinguish between start and end gateway, although they are the same deployment
        if n == GATEWAY_NAME:
            n = 'start-gateway'
        if n not in graph:
            graph[n] = []
        return n

    graph_dict = {}
    for node, v in self._deployment_nodes.items():
        node = _add_node(graph_dict, node)
        if node == 'start-gateway':
            continue

        for need in sorted(v.needs):
            need = _add_node(graph_dict, need)
            graph_dict[need].append(node)

    # find all non floating leafs
    last_deployment = self._last_deployment
    # consistency fix: use the shared GATEWAY_NAME constant instead of the
    # hard-coded 'gateway' literal used elsewhere in this method's original form
    if last_deployment != GATEWAY_NAME:
        graph_dict[last_deployment].append('end-gateway')

    return graph_dict
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
    self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
    """
    Add a blocker to the Flow, wait until all pods defined in **needs** completed.

    .. # noqa: DAR401
    :param needs: list of service names to wait
    :param name: the name of this joiner, by default is ``joiner``
    :param args: additional positional arguments forwarded to the add function
    :param kwargs: additional key value arguments forwarded to the add function
    :return: the modified Flow
    """
    # a joiner only makes sense with at least two upstream services
    if len(needs) < 2:
        raise FlowTopologyError(
            'no need to wait for a single service, need len(needs) > 1'
        )
    return self.add(
        *args,
        name=name,
        needs=needs,
        deployment_role=DeploymentRoleType.JOIN,
        **kwargs,
    )
@allowed_levels([FlowBuildLevel.EMPTY])
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
    """
    Collect all floating Deployments so far and add a blocker to the Flow; wait until all handing pods completed.

    :param name: the name of this joiner (default is ``joiner``)
    :param args: additional positional arguments which are forwarded to the add and needs function
    :param kwargs: additional key value arguments which are forwarded to the add and needs function
    :return: the modified Flow
    """
    hanging = _hanging_deployments(self)
    if len(hanging) == 1:
        # a single hanging Deployment needs no joiner, chain it directly
        return self.add(*args, name=name, needs=hanging, **kwargs)
    return self.needs(*args, name=name, needs=hanging, **kwargs)
# overload_inject_start_deployment
@overload
# NOTE: auto-generated typing overload (see the surrounding
# `overload_inject_start_deployment` markers) — keyword-only stub exposing the
# full Executor/Deployment parameter surface for IDEs and type checkers; the
# real logic lives in the non-overload `add` implementation below.
def add(
    self,
    *,
    allow_concurrent: Optional[bool] = False,
    compression: Optional[str] = None,
    connection_list: Optional[str] = None,
    cors: Optional[bool] = False,
    description: Optional[str] = None,
    disable_auto_volume: Optional[bool] = False,
    docker_kwargs: Optional[dict] = None,
    entrypoint: Optional[str] = None,
    env: Optional[dict] = None,
    exit_on_exceptions: Optional[List] = [],
    external: Optional[bool] = False,
    floating: Optional[bool] = False,
    force_update: Optional[bool] = False,
    gpus: Optional[str] = None,
    grpc_channel_options: Optional[dict] = None,
    grpc_metadata: Optional[dict] = None,
    grpc_server_options: Optional[dict] = None,
    host: Optional[List] = ['0.0.0.0'],
    install_requirements: Optional[bool] = False,
    log_config: Optional[str] = None,
    metrics: Optional[bool] = False,
    metrics_exporter_host: Optional[str] = None,
    metrics_exporter_port: Optional[int] = None,
    monitoring: Optional[bool] = False,
    name: Optional[str] = 'executor',
    native: Optional[bool] = False,
    no_reduce: Optional[bool] = False,
    output_array_type: Optional[str] = None,
    polling: Optional[str] = 'ANY',
    port: Optional[int] = None,
    port_monitoring: Optional[int] = None,
    prefer_platform: Optional[str] = None,
    protocol: Optional[Union[str, List[str]]] = ['GRPC'],
    provider: Optional[str] = ['NONE'],
    provider_endpoint: Optional[str] = None,
    py_modules: Optional[List] = None,
    quiet: Optional[bool] = False,
    quiet_error: Optional[bool] = False,
    raft_configuration: Optional[dict] = None,
    reload: Optional[bool] = False,
    replicas: Optional[int] = 1,
    retries: Optional[int] = -1,
    runtime_cls: Optional[str] = 'WorkerRuntime',
    shards: Optional[int] = 1,
    ssl_certfile: Optional[str] = None,
    ssl_keyfile: Optional[str] = None,
    stateful: Optional[bool] = False,
    timeout_ctrl: Optional[int] = 60,
    timeout_ready: Optional[int] = 600000,
    timeout_send: Optional[int] = None,
    title: Optional[str] = None,
    tls: Optional[bool] = False,
    traces_exporter_host: Optional[str] = None,
    traces_exporter_port: Optional[int] = None,
    tracing: Optional[bool] = False,
    uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
    uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
    uses_after_address: Optional[str] = None,
    uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
    uses_before_address: Optional[str] = None,
    uses_dynamic_batching: Optional[dict] = None,
    uses_metas: Optional[dict] = None,
    uses_requests: Optional[dict] = None,
    uses_with: Optional[dict] = None,
    uvicorn_kwargs: Optional[dict] = None,
    volumes: Optional[List] = None,
    when: Optional[dict] = None,
    workspace: Optional[str] = None,
    **kwargs,
) -> Union['Flow', 'AsyncFlow']:
    """Add an Executor to the current Flow object.

    :param allow_concurrent: Allow concurrent requests to be processed by the Executor. This is only recommended if the Executor is thread-safe.
    :param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
    :param connection_list: dictionary JSON with a list of connections to configure
    :param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
    :param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
    :param disable_auto_volume: Do not automatically mount a volume for dockerized Executors.
    :param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
        container.

        More details can be found in the Docker SDK docs:  https://docker-py.readthedocs.io/en/stable/
    :param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
    :param env: The map of environment variables that are available inside runtime
    :param exit_on_exceptions: List of exceptions that will cause the Executor to shut down.
    :param external: The Deployment will be considered an external Deployment that has been started independently from the Flow.This Deployment will not be context managed by the Flow.
    :param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
    :param force_update: If set, always pull the latest Hub Executor bundle even it exists on local
    :param gpus: This argument allows dockerized Jina Executors to discover local gpu devices.

        Note,
        - To access all gpus, use `--gpus all`.
        - To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
        - To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
        - To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
        - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display
    :param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
    :param grpc_metadata: The metadata to be passed to the gRPC request.
    :param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
    :param host: The host of the Gateway, which the client should connect to, by default it is 0.0.0.0. In the case of an external Executor (`--external` or `external=True`) this can be a list of hosts.  Then, every resulting address will be considered as one replica of the Executor.
    :param install_requirements: If set, try to install `requirements.txt` from the local Executor if exists in the Executor folder. If using Hub, install `requirements.txt` in the Hub Executor bundle to local.
    :param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
    :param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
    :param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
    :param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
    :param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
    :param name: The name of this object.

        This will be used in the following places:
        - how you refer to this object in Python/YAML/CLI
        - visualization
        - log message header
        - ...

        When not given, then the default naming strategy will apply.
    :param native: If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.
    :param no_reduce: Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`
    :param output_array_type: The type of array `tensor` and `embedding` will be serialized to.

        Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
        `here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
        Defaults to retaining whatever type is returned by the Executor.
    :param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
        Can be defined for all endpoints of a Deployment or by endpoint.
        Define per Deployment:
        - ANY: only one (whoever is idle) Pod polls the message
        - ALL: all Pods poll the message (like a broadcast)
        Define per Endpoint:
        JSON dict, {endpoint: PollingType}
        {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
    :param port: The port for input data to bind to, default is a random port between [49152, 65535]. In the case of an external Executor (`--external` or `external=True`) this can be a list of ports. Then, every resulting address will be considered as one replica of the Executor.
    :param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
    :param prefer_platform: The preferred target Docker platform. (e.g. "linux/amd64", "linux/arm64")
    :param protocol: Communication protocol of the server exposed by the Executor. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
    :param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
    :param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
    :param py_modules: The customized python modules need to be imported before loading the executor

        Note that the recommended way is to only import a single module - a simple python file, if your
        executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
        which should be structured as a python package. For more details, please see the
        `Executor cookbook <https://jina.ai/serve/concepts/executor/executor-files/>`__
    :param quiet: If set, then no log will be emitted from this object.
    :param quiet_error: If set, then exception stack information will not be added to the log
    :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node.
    :param reload: If set, the Executor will restart while serving if YAML configuration source or Executor modules are changed. If YAML configuration is changed, the whole deployment is reloaded and new processes will be restarted. If only Python modules of the Executor have changed, they will be reloaded to the interpreter without restarting process.
    :param replicas: The number of replicas in the deployment
    :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
    :param runtime_cls: The runtime class to run inside the Pod
    :param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies
    :param ssl_certfile: the path to the certificate file
    :param ssl_keyfile: the path to the key file
    :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
    :param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
    :param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
    :param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
    :param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
    :param tls: If set, connect to deployment using tls encryption
    :param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
    :param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
    :param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
    :param uses: The config of the executor, it could be one of the followings:
        * the string literal of an Executor class name
        * an Executor YAML file (.yml, .yaml, .jaml)
        * a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
        * a docker image (must start with `docker://`)
        * the string literal of a YAML config (must start with `!` or `jtype: `)
        * the string literal of a JSON config

        When use it under Python, one can use the following values additionally:
        - a Python dict that represents the config
        - a text file stream has `.read()` interface
    :param uses_after: The executor attached after the Pods described by --uses, typically used for receiving from all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
    :param uses_after_address: The address of the uses-before runtime
    :param uses_before: The executor attached before the Pods described by --uses, typically before sending to all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
    :param uses_before_address: The address of the uses-before runtime
    :param uses_dynamic_batching: Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
    :param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
    :param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
    :param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
    :param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server

        More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
    :param volumes: The path on the host to be mounted inside the container.

        Note,
        - If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
        - If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
        - All volumes are mounted with read-write mode.
    :param when: The condition that the documents need to fulfill before reaching the Executor.The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`
    :param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
    :return: a (new) Flow object with modification

    .. # noqa: DAR202
    .. # noqa: DAR101
    .. # noqa: DAR003
    """
# overload_inject_end_deployment
@overload
# NOTE: auto-generated typing overload — exposes the Flow-level routing
# parameters (`needs`, `copy_flow`, `deployment_role`); the real logic lives
# in the non-overload `add` implementation below.
def add(
    self,
    *,
    needs: Optional[Union[str, Tuple[str], List[str]]] = None,
    copy_flow: bool = True,
    deployment_role: 'DeploymentRoleType' = DeploymentRoleType.DEPLOYMENT,
    **kwargs,
) -> Union['Flow', 'AsyncFlow']:
    """
    Add a Deployment to the current Flow object and return the new modified Flow object.
    The attribute of the Deployment can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`

    :param needs: the name of the Deployment(s) that this Deployment receives data from.
        One can also use 'gateway' to indicate the connection with the gateway.
    :param deployment_role: the role of the Deployment, used for visualization and route planning
    :param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
    :param kwargs: other keyword-value arguments that the Deployment CLI supports
    :return: a (new) Flow object with modification

    .. # noqa: DAR202
    .. # noqa: DAR101
    .. # noqa: DAR003
    .. # noqa: DAR401
    """
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
    self,
    deployment: Union[str, Deployment] = None,
    **kwargs,
) -> Union['Flow', 'AsyncFlow']:
    # implementation_stub_inject_start_add
    """Add a Deployment to the current Flow object and return the new modified Flow object.
    The attribute of the Deployment can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`

    :param allow_concurrent: Allow concurrent requests to be processed by the Executor. This is only recommended if the Executor is thread-safe.
    :param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
    :param connection_list: dictionary JSON with a list of connections to configure
    :param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
    :param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
    :param disable_auto_volume: Do not automatically mount a volume for dockerized Executors.
    :param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
        container.

        More details can be found in the Docker SDK docs:  https://docker-py.readthedocs.io/en/stable/
    :param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
    :param env: The map of environment variables that are available inside runtime
    :param exit_on_exceptions: List of exceptions that will cause the Executor to shut down.
    :param external: The Deployment will be considered an external Deployment that has been started independently from the Flow.This Deployment will not be context managed by the Flow.
    :param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
    :param force_update: If set, always pull the latest Hub Executor bundle even it exists on local
    :param gpus: This argument allows dockerized Jina Executors to discover local gpu devices.

        Note,
        - To access all gpus, use `--gpus all`.
        - To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
        - To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
        - To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
        - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display
    :param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
    :param grpc_metadata: The metadata to be passed to the gRPC request.
    :param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
    :param host: The host of the Gateway, which the client should connect to, by default it is 0.0.0.0. In the case of an external Executor (`--external` or `external=True`) this can be a list of hosts.  Then, every resulting address will be considered as one replica of the Executor.
    :param install_requirements: If set, try to install `requirements.txt` from the local Executor if exists in the Executor folder. If using Hub, install `requirements.txt` in the Hub Executor bundle to local.
    :param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
    :param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
    :param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
    :param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
    :param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
    :param name: The name of this object.

        This will be used in the following places:
        - how you refer to this object in Python/YAML/CLI
        - visualization
        - log message header
        - ...

        When not given, then the default naming strategy will apply.
    :param native: If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.
    :param no_reduce: Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`
    :param output_array_type: The type of array `tensor` and `embedding` will be serialized to.

        Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
        `here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
        Defaults to retaining whatever type is returned by the Executor.
    :param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
        Can be defined for all endpoints of a Deployment or by endpoint.
        Define per Deployment:
        - ANY: only one (whoever is idle) Pod polls the message
        - ALL: all Pods poll the message (like a broadcast)
        Define per Endpoint:
        JSON dict, {endpoint: PollingType}
        {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
    :param port: The port for input data to bind to, default is a random port between [49152, 65535]. In the case of an external Executor (`--external` or `external=True`) this can be a list of ports. Then, every resulting address will be considered as one replica of the Executor.
    :param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
    :param prefer_platform: The preferred target Docker platform. (e.g. "linux/amd64", "linux/arm64")
    :param protocol: Communication protocol of the server exposed by the Executor. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
    :param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
    :param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
    :param py_modules: The customized python modules need to be imported before loading the executor

        Note that the recommended way is to only import a single module - a simple python file, if your
        executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
        which should be structured as a python package. For more details, please see the
        `Executor cookbook <https://jina.ai/serve/concepts/executor/executor-files/>`__
    :param quiet: If set, then no log will be emitted from this object.
    :param quiet_error: If set, then exception stack information will not be added to the log
    :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node.
    :param reload: If set, the Executor will restart while serving if YAML configuration source or Executor modules are changed. If YAML configuration is changed, the whole deployment is reloaded and new processes will be restarted. If only Python modules of the Executor have changed, they will be reloaded to the interpreter without restarting process.
    :param replicas: The number of replicas in the deployment
    :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
    :param runtime_cls: The runtime class to run inside the Pod
    :param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies
    :param ssl_certfile: the path to the certificate file
    :param ssl_keyfile: the path to the key file
    :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
    :param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
    :param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
    :param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
    :param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
    :param tls: If set, connect to deployment using tls encryption
    :param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
    :param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
    :param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
    :param uses: The config of the executor, it could be one of the followings:
        * the string literal of an Executor class name
        * an Executor YAML file (.yml, .yaml, .jaml)
        * a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
        * a docker image (must start with `docker://`)
        * the string literal of a YAML config (must start with `!` or `jtype: `)
        * the string literal of a JSON config

        When use it under Python, one can use the following values additionally:
        - a Python dict that represents the config
        - a text file stream has `.read()` interface
    :param uses_after: The executor attached after the Pods described by --uses, typically used for receiving from all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
    :param uses_after_address: The address of the uses-before runtime
    :param uses_before: The executor attached before the Pods described by --uses, typically before sending to all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
    :param uses_before_address: The address of the uses-before runtime
    :param uses_dynamic_batching: Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
    :param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
    :param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
    :param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
    :param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server

        More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
    :param volumes: The path on the host to be mounted inside the container.

        Note,
        - If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
        - If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
        - All volumes are mounted with read-write mode.
    :param when: The condition that the documents need to fulfill before reaching the Executor.The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`
    :param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
    :param needs: the name of the Deployment(s) that this Deployment receives data from. One can also use "gateway" to indicate the connection with the gateway.
    :param deployment_role: the role of the Deployment, used for visualization and route planning
    :param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
    :param kwargs: other keyword-value arguments that the Deployment CLI supports
    :return: a (new) Flow object with modification

    .. # noqa: DAR102

    .. # noqa: DAR202
    .. # noqa: DAR101
    .. # noqa: DAR003
    """
    # implementation_stub_inject_end_add
    needs = kwargs.pop('needs', None)
    copy_flow = kwargs.pop('copy_flow', True)
    deployment_role = kwargs.get('deployment_role', DeploymentRoleType.DEPLOYMENT)

    # work on a deep copy by default so the caller's Flow stays untouched
    op_flow = copy.deepcopy(self) if copy_flow else self

    # deployment naming logic
    deployment_name = kwargs.get('name', None)

    if deployment_name in op_flow._deployment_nodes:
        # disambiguate duplicate names by suffixing the current node count
        new_name = f'{deployment_name}{len(op_flow._deployment_nodes)}'
        self.logger.debug(
            f'"{deployment_name}" is used in this Flow already! renamed it to "{new_name}"'
        )
        deployment_name = new_name

    if not deployment_name:
        deployment_name = f'executor{len(op_flow._deployment_nodes)}'

    if not deployment_name.isidentifier():
        # hyphen - can not be used in the name
        raise ValueError(
            f'name: {deployment_name} is invalid, please follow the python variable name conventions'
        )

    # needs logic
    needs = op_flow._parse_endpoints(
        op_flow, deployment_name, needs, connect_to_last_deployment=True
    )

    if deployment is None:
        # set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
        for key, value in op_flow._common_kwargs.items():

            # do not inherit from all the argument from the flow and respect EXECUTOR_ARGS_BLACKLIST
            if key not in kwargs and key not in EXECUTOR_ARGS_BLACKLIST:
                kwargs[key] = value

        # update kwargs of this Deployment
        kwargs.update(
            dict(
                name=deployment_name,
                deployment_role=deployment_role,
                log_config=(
                    kwargs.get('log_config')
                    if 'log_config' in kwargs
                    else self.args.log_config
                ),
            )
        )

        parser = set_deployment_parser()
        if deployment_role == DeploymentRoleType.GATEWAY:
            parser = set_gateway_parser()

        args = ArgNamespace.kwargs2namespace(
            kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
        )

        # deployment workspace if not set then derive from flow workspace
        if args.workspace:
            args.workspace = os.path.abspath(args.workspace)
        else:
            args.workspace = self.workspace

        args.noblock_on_start = True

        if len(needs) > 1 and args.external and args.no_reduce:
            raise ValueError(
                'External Executors with multiple needs have to do auto reduce.'
            )

        deployment = Deployment(
            args, needs, include_gateway=False, noblock_on_start=True
        )
        floating = args.floating
    elif isinstance(deployment, str):
        # `deployment` is a path/identifier of a Deployment YAML config
        deployment = Deployment.load_config(
            deployment, needs=needs, include_gateway=False, noblock_on_start=True
        )
        floating = deployment.args.floating
    else:
        # a ready-made Deployment object was passed in; only wire up its needs
        # NOTE(review): its own `name` may differ from `deployment_name` used
        # as the node key below — confirm this is intended upstream behavior
        deployment.needs = needs
        floating = deployment.args.floating

    op_flow._deployment_nodes[deployment_name] = deployment

    if not floating:
        # non-floating Deployments become the new tail of the chain
        op_flow._last_deployment = deployment_name

    return op_flow
# overload_inject_start_config_gateway
@overload
# NOTE: typing-only stub — the body is just the docstring. The
# `overload_inject_start/end` markers around this def indicate it is injected
# by Jina's CLI-to-API code generator; do not edit the parameter list by hand.
def config_gateway(
    self,
    *,
    compression: Optional[str] = None,
    cors: Optional[bool] = False,
    deployments_addresses: Optional[str] = '{}',
    deployments_metadata: Optional[str] = '{}',
    deployments_no_reduce: Optional[str] = '[]',
    description: Optional[str] = None,
    docker_kwargs: Optional[dict] = None,
    entrypoint: Optional[str] = None,
    env: Optional[dict] = None,
    expose_endpoints: Optional[str] = None,
    expose_graphql_endpoint: Optional[bool] = False,
    floating: Optional[bool] = False,
    graph_conditions: Optional[str] = '{}',
    graph_description: Optional[str] = '{}',
    grpc_channel_options: Optional[dict] = None,
    grpc_server_options: Optional[dict] = None,
    host: Optional[str] = '0.0.0.0',
    log_config: Optional[str] = None,
    metrics: Optional[bool] = False,
    metrics_exporter_host: Optional[str] = None,
    metrics_exporter_port: Optional[int] = None,
    monitoring: Optional[bool] = False,
    name: Optional[str] = 'gateway',
    no_crud_endpoints: Optional[bool] = False,
    no_debug_endpoints: Optional[bool] = False,
    port: Optional[int] = None,
    port_monitoring: Optional[int] = None,
    prefetch: Optional[int] = 1000,
    protocol: Optional[Union[str, List[str]]] = ['GRPC'],
    provider: Optional[str] = ['NONE'],  # NOTE(review): default is a list although the annotation says Optional[str] — generator quirk, confirm upstream
    provider_endpoint: Optional[str] = None,
    proxy: Optional[bool] = False,
    py_modules: Optional[List] = None,
    quiet: Optional[bool] = False,
    quiet_error: Optional[bool] = False,
    reload: Optional[bool] = False,
    replicas: Optional[int] = 1,
    retries: Optional[int] = -1,
    runtime_cls: Optional[str] = 'GatewayRuntime',
    ssl_certfile: Optional[str] = None,
    ssl_keyfile: Optional[str] = None,
    stateful: Optional[bool] = False,
    timeout_ctrl: Optional[int] = 60,
    timeout_ready: Optional[int] = 600000,
    timeout_send: Optional[int] = None,
    title: Optional[str] = None,
    traces_exporter_host: Optional[str] = None,
    traces_exporter_port: Optional[int] = None,
    tracing: Optional[bool] = False,
    uses: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
    uses_with: Optional[dict] = None,
    uvicorn_kwargs: Optional[dict] = None,
    workspace: Optional[str] = None,
    **kwargs,
):
    """Configure the Gateway inside a Flow. The Gateway exposes your Flow logic as a service to the internet according to the protocol and configuration you choose.

    :param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
    :param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
    :param deployments_addresses: JSON dictionary with the input addresses of each Deployment
    :param deployments_metadata: JSON dictionary with the request metadata for each Deployment
    :param deployments_no_reduce: list JSON disabling the built-in merging mechanism for each Deployment listed
    :param description: The description of this HTTP server. It will be used in automatic docs such as Swagger UI.
    :param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
        container.
        More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
    :param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
    :param env: The map of environment variables that are available inside runtime
    :param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
    :param expose_graphql_endpoint: If set, /graphql endpoint is added to HTTP interface.
    :param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
    :param graph_conditions: Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.
    :param graph_description: Routing graph for the gateway
    :param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
    :param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
    :param host: The host address of the runtime, by default it is 0.0.0.0.
    :param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
    :param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
    :param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
    :param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
    :param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
    :param name: The name of this object.

        This will be used in the following places:
        - how you refer to this object in Python/YAML/CLI
        - visualization
        - log message header
        - ...

        When not given, then the default naming strategy will apply.
    :param no_crud_endpoints: If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface.

        Any executor that has `@requests(on=...)` bound with those values will receive data requests.
    :param no_debug_endpoints: If set, `/status` `/post` endpoints are removed from HTTP interface.
    :param port: The port for input data to bind the gateway server to, by default, random ports between range [49152, 65535] will be assigned. The port argument can be either 1 single value in case only 1 protocol is used or multiple values when many protocols are used.
    :param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
    :param prefetch: Number of requests fetched from the client before feeding into the first Executor.

        Used to control the speed of data input into a Flow. 0 disables prefetch (1000 requests is the default)
    :param protocol: Communication protocol of the server exposed by the Gateway. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
    :param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
    :param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
    :param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
    :param py_modules: The customized python modules need to be imported before loading the gateway

        Note that the recommended way is to only import a single module - a simple python file, if your
        gateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
        which should be structured as a python package.
    :param quiet: If set, then no log will be emitted from this object.
    :param quiet_error: If set, then exception stack information will not be added to the log
    :param reload: If set, the Gateway will restart while serving if YAML configuration source is changed.
    :param replicas: The number of replicas of the Gateway. This replicas will only be applied when converted into Kubernetes YAML
    :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
    :param runtime_cls: The runtime class to run inside the Pod
    :param ssl_certfile: the path to the certificate file
    :param ssl_keyfile: the path to the key file
    :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
    :param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
    :param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
    :param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
    :param title: The title of this HTTP server. It will be used in automatic docs such as Swagger UI.
    :param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
    :param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
    :param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
    :param uses: The config of the gateway, it could be one of the followings:
        * the string literal of an Gateway class name
        * a Gateway YAML file (.yml, .yaml, .jaml)
        * a docker image (must start with `docker://`)
        * the string literal of a YAML config (must start with `!` or `jtype: `)
        * the string literal of a JSON config

        When use it under Python, one can use the following values additionally:
        - a Python dict that represents the config
        - a text file stream has `.read()` interface
    :param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
    :param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server

        More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
    :param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.

    .. # noqa: DAR202
    .. # noqa: DAR101
    .. # noqa: DAR003
    """
# overload_inject_end_config_gateway
@allowed_levels([FlowBuildLevel.EMPTY])
def config_gateway(
    self,
    args: Optional['argparse.Namespace'] = None,
    **kwargs,
) -> Union['Flow', 'AsyncFlow']:
    # implementation_stub_inject_start_config_gateway
    """Configure the Gateway inside a Flow. The Gateway exposes your Flow logic as a service to the internet according to the protocol and configuration you choose.

    :param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
    :param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
    :param deployments_addresses: JSON dictionary with the input addresses of each Deployment
    :param deployments_metadata: JSON dictionary with the request metadata for each Deployment
    :param deployments_no_reduce: list JSON disabling the built-in merging mechanism for each Deployment listed
    :param description: The description of this HTTP server. It will be used in automatic docs such as Swagger UI.
    :param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
        container.
        More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
    :param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
    :param env: The map of environment variables that are available inside runtime
    :param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
    :param expose_graphql_endpoint: If set, /graphql endpoint is added to HTTP interface.
    :param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
    :param graph_conditions: Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.
    :param graph_description: Routing graph for the gateway
    :param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
    :param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
    :param host: The host address of the runtime, by default it is 0.0.0.0.
    :param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
    :param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
    :param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
    :param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
    :param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
    :param name: The name of this object.

        This will be used in the following places:
        - how you refer to this object in Python/YAML/CLI
        - visualization
        - log message header
        - ...

        When not given, then the default naming strategy will apply.
    :param no_crud_endpoints: If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface.

        Any executor that has `@requests(on=...)` bound with those values will receive data requests.
    :param no_debug_endpoints: If set, `/status` `/post` endpoints are removed from HTTP interface.
    :param port: The port for input data to bind the gateway server to, by default, random ports between range [49152, 65535] will be assigned. The port argument can be either 1 single value in case only 1 protocol is used or multiple values when many protocols are used.
    :param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
    :param prefetch: Number of requests fetched from the client before feeding into the first Executor.

        Used to control the speed of data input into a Flow. 0 disables prefetch (1000 requests is the default)
    :param protocol: Communication protocol of the server exposed by the Gateway. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
    :param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
    :param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
    :param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
    :param py_modules: The customized python modules need to be imported before loading the gateway

        Note that the recommended way is to only import a single module - a simple python file, if your
        gateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
        which should be structured as a python package.
    :param quiet: If set, then no log will be emitted from this object.
    :param quiet_error: If set, then exception stack information will not be added to the log
    :param reload: If set, the Gateway will restart while serving if YAML configuration source is changed.
    :param replicas: The number of replicas of the Gateway. This replicas will only be applied when converted into Kubernetes YAML
    :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
    :param runtime_cls: The runtime class to run inside the Pod
    :param ssl_certfile: the path to the certificate file
    :param ssl_keyfile: the path to the key file
    :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
    :param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
    :param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
    :param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
    :param title: The title of this HTTP server. It will be used in automatic docs such as Swagger UI.
    :param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
    :param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
    :param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
    :param uses: The config of the gateway, it could be one of the followings:
        * the string literal of an Gateway class name
        * a Gateway YAML file (.yml, .yaml, .jaml)
        * a docker image (must start with `docker://`)
        * the string literal of a YAML config (must start with `!` or `jtype: `)
        * the string literal of a JSON config

        When use it under Python, one can use the following values additionally:
        - a Python dict that represents the config
        - a text file stream has `.read()` interface
    :param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
    :param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server

        More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
    :param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
    :return: the new Flow object

    .. # noqa: DAR102
    .. # noqa: DAR202
    .. # noqa: DAR101
    .. # noqa: DAR003
    """
    # implementation_stub_inject_end_config_gateway
    # NOTE(review): the `args` parameter is not referenced anywhere in this
    # body — only **kwargs are stored. Confirm whether `args` is consumed by
    # a decorator/codegen path or is dead.

    # `copy_flow` controls whether we mutate in place or on a deep copy
    copy_flow = kwargs.pop('copy_flow', True)
    op_flow = copy.deepcopy(self) if copy_flow else self

    # override gateway args inherited from Flow API; these are merged into
    # the gateway at build time (see `self.client` / `_add_gateway` usage)
    for key, value in kwargs.items():
        op_flow._gateway_kwargs[key] = value

    return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
    """Attach an inspection to the last changed Deployment in the Flow.

    Two Deployments are appended internally: the inspect Deployment itself
    (hanging off the last Deployment) and an auxiliary pass-through, so the
    inspection stays side-effect free for the original topology. Both can be
    stripped later via ``Flow(inspect=FlowInspectType.REMOVE)``.

    .. seealso::
        :meth:`gather_inspect`

    :param name: name of the inspect Deployment
    :param args: positional args forwarded to :meth:`add`
    :param kwargs: keyword args forwarded to :meth:`add`
    :return: the new instance of the Flow
    """
    # remember the Deployment being inspected before the topology changes
    anchor = self._last_deployment

    op_flow = self.add(
        name=name,
        needs=anchor,
        deployment_role=DeploymentRoleType.INSPECT,
        *args,
        **kwargs,
    )

    # the auxiliary pass-through must not inherit a custom executor
    kwargs.pop('uses', None)
    op_flow = op_flow.add(
        name=f'_aux_{name}',
        needs=anchor,
        deployment_role=DeploymentRoleType.INSPECT_AUX_PASS,
        *args,
        **kwargs,
    )

    # any future connection to the anchor is rerouted through the aux Deployment
    op_flow._inspect_deployments[anchor] = op_flow._last_deployment
    return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
    self,
    name: str = 'gather_inspect',
    include_last_deployment: bool = True,
    *args,
    **kwargs,
) -> 'Flow':
    """Join the outputs of all inspect Deployments into a single Deployment.

    When the Flow contains no inspect Deployment, the Flow itself is returned
    unchanged.

    .. note::
        Unless ``--no-inspect`` is given, this is invoked automatically
        before :meth:`build`, so manual calls are rarely needed.

    .. seealso::
        :meth:`inspect`

    :param name: the name of the gather Deployment
    :param include_last_deployment: whether to also wire in the last modified Deployment
    :param args: positional args forwarded to :meth:`add`
    :param kwargs: keyword args forwarded to :meth:`add`
    :return: the modified Flow or the copy of it
    """
    inspect_names = [
        deployment_name
        for deployment_name, node in self._deployment_nodes.items()
        if node.role == DeploymentRoleType.INSPECT
    ]
    if not inspect_names:
        # no inspect node is in the graph, return the current graph untouched
        return self

    if include_last_deployment:
        inspect_names.append(self._last_deployment)
    return self.add(
        name=name,
        needs=inspect_names,
        deployment_role=DeploymentRoleType.JOIN_INSPECT,
        *args,
        **kwargs,
    )
def _get_gateway_target(self, prefix):
    """Return a ``(name, target)`` pair addressing the gateway's head.

    :param prefix: string prepended to the gateway name in the returned label
    :return: tuple of the prefixed gateway name and a dict with its head
        host/port and an ``expected_parts`` of 0
    """
    gateway = self._deployment_nodes[GATEWAY_NAME]
    target = {
        'host': gateway.head_host,
        'port': gateway.head_port,
        'expected_parts': 0,
    }
    return f'{prefix}-{GATEWAY_NAME}', target
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False, **kwargs) -> 'Flow':
    """
    Build the current Flow and make it ready to use

    .. note::
        No need to manually call it since 0.0.8. When using Flow with the
        context manager, or using :meth:`start`, :meth:`build` will be invoked.

    :param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
    :param kwargs: kwargs for backward compatibility
    :return: the current Flow (by default)

    .. note::
        ``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.

        .. highlight:: python
        .. code-block:: python

            f = Flow()
            with f:
                f.index()

            with f.build(copy_flow=True) as fl:
                fl.search()

    .. # noqa: DAR401
    """
    # gRPC needs explicit fork support when the start method is 'fork'
    if multiprocessing.get_start_method().lower() == 'fork':
        os.environ['GRPC_ENABLE_FORK_SUPPORT'] = '1'

    op_flow = copy.deepcopy(self) if copy_flow else self

    # COLLECT mode: join all inspect Deployments before finalizing the graph
    if op_flow.args.inspect == FlowInspectType.COLLECT:
        op_flow.gather_inspect(copy_flow=False)

    # ensure a gateway exists; it is wired to the last Deployment and given
    # the serialized graph/addresses/conditions of the whole Flow
    if GATEWAY_NAME not in op_flow._deployment_nodes:
        op_flow._add_gateway(
            needs={op_flow._last_deployment},
            graph_description=op_flow._get_graph_representation(),
            deployments_addresses=op_flow._get_deployments_addresses(),
            deployments_metadata=op_flow._get_deployments_metadata(),
            graph_conditions=op_flow._get_graph_conditions(),
            deployments_no_reduce=op_flow._get_disabled_reduce_deployments(),
            uses=op_flow.gateway_args.uses,
        )

    removed_deployments = []

    # if set no_inspect then all inspect related nodes are removed
    if op_flow.args.inspect == FlowInspectType.REMOVE:
        filtered_deployment_nodes = OrderedDict()
        for k, v in op_flow._deployment_nodes.items():
            if not v.role.is_inspect:
                filtered_deployment_nodes[k] = v
            else:
                removed_deployments.append(v.name)
        op_flow._deployment_nodes = filtered_deployment_nodes
        # invert the inspect mapping so edges can be rewired back to the
        # original (non-inspect) Deployments below
        reverse_inspect_map = {
            v: k for k, v in op_flow._inspect_deployments.items()
        }
        # drop history entries that point at Deployments we just removed
        while (
            len(op_flow._last_changed_deployment) > 0
            and len(removed_deployments) > 0
            and op_flow._last_deployment in removed_deployments
        ):
            op_flow._last_changed_deployment.pop()

    for end, deployment in op_flow._deployment_nodes.items():
        # if an endpoint is being inspected, then replace it with inspected Deployment
        # but not those inspect related node
        if op_flow.args.inspect.is_keep:
            deployment.needs = set(
                (
                    ep
                    if deployment.role.is_inspect
                    else op_flow._inspect_deployments.get(ep, ep)
                )
                for ep in deployment.needs
            )
        else:
            deployment.needs = set(
                reverse_inspect_map.get(ep, ep) for ep in deployment.needs
            )

    # warn about Deployments nothing downstream consumes from
    hanging_deployments = _hanging_deployments(op_flow)
    if hanging_deployments:
        op_flow.logger.warning(
            f'{hanging_deployments} are "floating" in this flow with no deployment receiving from them, '
            f'you may want to double check if it is intentional or some mistake'
        )

    op_flow._build_level = FlowBuildLevel.GRAPH

    if len(removed_deployments) > 0:
        # very dirty: the gateway's serialized view of the graph is stale
        # after removals, so regenerate and push it into the pod args
        op_flow._deployment_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
            op_flow._get_graph_representation()
        )
        op_flow._deployment_nodes[GATEWAY_NAME].args.deployments_addresses = (
            json.dumps(op_flow._get_deployments_addresses())
        )
        op_flow._deployment_nodes[GATEWAY_NAME].update_pod_args()

    return op_flow
def __call__(self, *args, **kwargs):
    """Make the Flow callable; calling it is an alias for :meth:`build`.

    :param args: positional args forwarded to :meth:`build`
    :param kwargs: keyword args forwarded to :meth:`build`
    :return: the built Flow
    """
    built_flow = self.build(*args, **kwargs)
    return built_flow
def __exit__(self, exc_type, exc_val, exc_tb):
    """Tear the Flow down when leaving the ``with`` block.

    Closes the client, restores the environment, resets the build level to
    EMPTY and emits a 'stop' telemetry event.

    :param exc_type: exception type raised inside the block, if any
    :param exc_val: exception instance, if any
    :param exc_tb: traceback, if any
    """
    super().__exit__(exc_type, exc_val, exc_tb)

    # close the client's instrumentation and drop it so a later access
    # re-creates a fresh one
    if self._client:
        self._client.teardown_instrumentation()
        self._client = None

    # unset all envs to avoid any side-effect
    if self.args.env:
        for k in self.args.env.keys():
            os.environ.pop(k, None)

    # do not know why but removing these 2 lines make 2 tests fail
    if GATEWAY_NAME in self._deployment_nodes:
        self._deployment_nodes.pop(GATEWAY_NAME)
    self._build_level = FlowBuildLevel.EMPTY
    self._stop_time = time.time()
    send_telemetry_event(
        event='stop',
        obj_cls_name=self.__class__.__name__,
        entity_id=self._entity_id,
        duration=self._stop_time - self._start_time,
        exc_type=str(exc_type),
    )
    self.logger.debug('flow is closed!')
    self.logger.close()
@allowed_levels([FlowBuildLevel.EMPTY, FlowBuildLevel.GRAPH])
def start(self):
    """Start to run all Deployments in this Flow.

    Remember to close the Flow with :meth:`close`.

    Note that this method has a timeout of ``timeout_ready`` set in CLI,
    which is inherited all the way from :class:`jina.orchestrate.pods.Pod`

    .. # noqa: DAR401

    :return: this instance
    """
    self._start_time = time.time()
    # lazily build the graph when start() is called on an unbuilt Flow
    if self._build_level.value < FlowBuildLevel.GRAPH.value:
        self.build(copy_flow=False)

    port_gateway = self._deployment_nodes[GATEWAY_NAME].args.port
    host_gateway = self._deployment_nodes[GATEWAY_NAME].args.host
    if not (
        is_port_free(host_gateway, port_gateway)
    ):  # we check if the port is not used at parsing time as well for robustness
        raise PortAlreadyUsed(f'port:{port_gateway}')

    # set env only before the Deployment get started
    if self.args.env:
        for k, v in self.args.env.items():
            os.environ[k] = str(v)

    # enter each non-external Deployment's context so close() tears them down
    for depl_name, deployment in self:
        if not deployment.external:
            self.enter_context(deployment)

    # block until every Deployment reports ready (or startup fails)
    self._wait_until_all_ready()

    self._build_level = FlowBuildLevel.RUNNING

    send_telemetry_event(
        event='start',
        obj_cls_name=self.__class__.__name__,
        entity_id=self._entity_id,
    )

    return self
def _wait_until_all_ready(self):
    """Block until every non-external Deployment in the Flow is ready.

    Tracks per-deployment status in a dict guarded by a lock
    ('pending' -> 'done', or the repr of the raising exception), shows a
    Rich progress spinner while waiting (skipped under GITHUB_WORKFLOW),
    and waits either on an asyncio event loop or on plain threads when a
    loop is already running. On any failure the Flow is closed and
    :class:`RuntimeFailToStart` is raised.

    Fixes applied in review:
    - the async failure path now records the error under ``results_lock``
      (it previously wrote to ``results`` unlocked, unlike ``_wait_ready``)
    - the two bare ``except:`` clauses probing the asyncio loop now catch
      ``RuntimeError`` only, which is what ``asyncio.get_event_loop`` /
      ``run_until_complete`` raise in those situations — a bare except
      would also swallow ``KeyboardInterrupt``.

    .. # noqa: DAR401
    """
    import warnings

    with warnings.catch_warnings():
        # deployment name -> 'pending' | 'done' | repr(exception)
        results = {}
        results_lock = threading.Lock()

        async def _async_wait_ready(_deployment_name, _deployment):
            # readiness waiter used when we can drive an asyncio loop
            try:
                if not _deployment.external:
                    with results_lock:
                        results[_deployment_name] = 'pending'
                    await _deployment.async_wait_start_success()
                    with results_lock:
                        results[_deployment_name] = 'done'
            except Exception as ex:
                # record the failure under the lock, mirroring _wait_ready
                with results_lock:
                    results[_deployment_name] = repr(ex)
                raise ex

        def _wait_ready(_deployment_name, _deployment):
            # blocking readiness waiter used when a loop is already running
            try:
                if not _deployment.external:
                    with results_lock:
                        results[_deployment_name] = 'pending'
                    _deployment.wait_start_success()
                    with results_lock:
                        results[_deployment_name] = 'done'
            except Exception as ex:
                with results_lock:
                    results[_deployment_name] = repr(ex)

        def _polling_status(num_tasks_to_wait):
            # spinner thread: polls `results` until everything is done or
            # one deployment failed
            progress = Progress(
                SpinnerColumn(),
                TextColumn(
                    'Waiting [b]{task.fields[pending_str]}[/]...', justify='right'
                ),
                BarColumn(),
                MofNCompleteColumn(),
                TimeElapsedColumn(),
                transient=True,
            )

            with progress:
                task = progress.add_task(
                    'wait', total=num_tasks_to_wait, pending_str='', start=False
                )
                with results_lock:
                    progress.update(task, total=len(results))
                progress.start_task(task)
                while True:
                    num_done = 0
                    pendings = []
                    one_failing = False
                    with results_lock:
                        for _k, _v in results.items():
                            sys.stdout.flush()
                            if _v == 'pending':
                                pendings.append(_k)
                            elif _v == 'done':
                                num_done += 1
                            else:
                                # anything else is a stored exception repr
                                one_failing = True
                                if 'JINA_EARLY_STOP' in os.environ:
                                    self.logger.error(
                                        f'Flow is aborted due to {_k} {_v}.'
                                    )
                                    os._exit(1)
                                else:
                                    break
                    if one_failing:
                        break
                    pending_str = ' '.join(pendings)
                    progress.update(
                        task, completed=num_done, pending_str=pending_str
                    )
                    if not pendings:
                        break
                    time.sleep(0.1)

        wait_for_ready_coros = []
        for k, v in self:
            wait_for_ready_coros.append(_async_wait_ready(k, v))

        async def _async_wait_all():
            wrapped_tasks = [
                asyncio.create_task(coro) for coro in wait_for_ready_coros
            ]
            done, pending = await asyncio.wait(
                wrapped_tasks, return_when=asyncio.FIRST_EXCEPTION
            )
            try:
                for task in done:
                    try:
                        task.result()  # This raises an exception if the task had an exception
                    except Exception as e:
                        self.logger.error(f"An exception occurred: {str(e)}")
            finally:
                # a failed deployment must not leave the others waiting
                for task in pending:
                    task.cancel()

        if 'GITHUB_WORKFLOW' not in os.environ:
            # kick off spinner thread
            polling_status_thread = threading.Thread(
                target=_polling_status,
                args=(len(wait_for_ready_coros),),
                daemon=True,
            )
            polling_status_thread.start()

        # kick off all deployments wait-ready tasks; make sure the current
        # thread owns an event loop first
        try:
            _ = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        async def _f():
            pass

        # probe whether the loop is already running (e.g. inside Jupyter);
        # run_until_complete raises RuntimeError in that case
        running_in_event_loop = False
        try:
            asyncio.get_event_loop().run_until_complete(_f())
        except RuntimeError:
            running_in_event_loop = True

        wait_ready_threads = []
        if not running_in_event_loop:
            asyncio.get_event_loop().run_until_complete(_async_wait_all())
        else:
            # can't nest run_until_complete in a running loop: fall back to
            # one blocking waiter thread per deployment
            for k, v in self:
                wait_ready_threads.append(
                    threading.Thread(target=_wait_ready, args=(k, v), daemon=True)
                )
            for t in wait_ready_threads:
                t.start()

        if 'GITHUB_WORKFLOW' not in os.environ:
            polling_status_thread.join()
        for t in wait_ready_threads:
            t.join()

        error_deployments = [k for k, v in results.items() if v != 'done']
        if error_deployments:
            self.logger.error(
                f'Flow is aborted due to {error_deployments} can not be started.'
            )
            self.close()
            raise RuntimeFailToStart

        from rich.rule import Rule

        all_panels = []
        self._get_summary_table(all_panels)

        print(
            Rule(':tada: Flow is ready to serve!'), *all_panels
        )  # can't use logger here see : https://github.com/Textualize/rich/discussions/2024
        self.logger.debug(
            f'{self.num_deployments} Deployments (i.e. {self.num_pods} Pods) are running in this Flow'
        )
@property
def num_deployments(self) -> int:
    """Total count of Deployments registered in this Flow.

    .. # noqa: DAR201"""
    return len(self._deployment_nodes)
@property
def num_pods(self) -> int:
    """Total count of Pods (shards across all Deployments) in this Flow.

    .. # noqa: DAR201"""
    total = 0
    for node in self._deployment_nodes.values():
        total += node.num_pods
    return total
def __eq__(self, other: 'Flow') -> bool:
    """
    Compare the topology of this Flow with another Flow.

    Two Flows are considered equal when, once built, they share the same
    set of Deployment nodes (and hence the same edges). Unbuilt Flows are
    deep-copied and built first so neither operand is mutated.

    :param other: the second Flow object
    :return: result of equality check
    """

    def _as_built(flow: 'Flow') -> 'Flow':
        # build on a deep copy so comparison never mutates the operand
        if flow._build_level.value < FlowBuildLevel.GRAPH.value:
            return copy.deepcopy(flow).build()
        return flow

    return _as_built(self)._deployment_nodes == _as_built(other)._deployment_nodes
@property
def client(self) -> 'BaseClient':
    """A lazily-constructed :class:`BaseClient` bound to this Flow's gateway.

    The client is created on first access from the Flow's host/port/protocol
    and log config, with any ``_gateway_kwargs`` overriding those defaults,
    then cached for subsequent accesses.

    .. # noqa: DAR201"""
    if not self._client:
        client_args = {
            'host': self.host,
            'port': self.port,
            'protocol': self.protocol,
            'log_config': self.args.log_config,
        }
        # gateway-level overrides win over the Flow defaults
        client_args.update(self._gateway_kwargs)
        self._client = Client(**client_args)
    return self._client
@property
def _mermaid_str(self):
    """Render the Flow topology as a Mermaid flowchart string.

    Emits an init block with the Jina theme, one subgraph per Deployment
    (delegated to each Deployment's own ``_mermaid_str``), one edge per
    ``needs`` relation, and classDef styling for the special roles.

    .. # noqa: DAR201"""
    mermaid_graph = [
        # theme header; newlines are stripped so it is a single mermaid line
        '''
        %%{init:{
        "theme": "base",
        "themeVariables": {
        "primaryColor": "#fff",
        "primaryBorderColor": "#fff",
        "mainBkg": "#32C8CD",
        "clusterBkg": "#EEEDE78C",
        "secondaryBorderColor": "none",
        "tertiaryBorderColor": "none",
        "lineColor": "#a6d8da"
        }
        }}%%'''.replace(
            '\n', ''
        ),
        'flowchart LR;',
    ]

    deployment_nodes = []  # NOTE(review): collected but never read below — confirm if dead

    # plot subgraphs
    for node, v in self._deployment_nodes.items():
        deployment_nodes.append(v.name)
        deployment_mermaid = v._mermaid_str
        mermaid_graph.extend(deployment_mermaid)

    # one edge per `needs` relation; gateway gets distinct start/end node ids
    for node, v in self._deployment_nodes.items():
        for need in sorted(v.needs):
            need_print = need
            if need == 'gateway':
                need_print = 'gatewaystart[gateway]'
            node_print = node
            if node == 'gateway':
                node_print = 'gatewayend[gateway]'

            _s_role = self._deployment_nodes[need].role
            _e_role = self._deployment_nodes[node].role

            if self._deployment_nodes[need].external:
                _s_role = 'EXTERNAL'

            if self._deployment_nodes[node].external:
                _e_role = 'EXTERNAL'

            # dotted edge when either endpoint is an inspect Deployment
            line_st = '-->'

            if (
                _s_role == DeploymentRoleType.INSPECT
                or _e_role == DeploymentRoleType.INSPECT
            ):
                line_st = '-.->'
            mermaid_graph.append(
                f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
            )

    # role-based styling
    mermaid_graph.append(
        f'classDef {str(DeploymentRoleType.INSPECT)} stroke:#F29C9F'
    )
    mermaid_graph.append(
        f'classDef {str(DeploymentRoleType.JOIN_INSPECT)} stroke:#F29C9F'
    )
    mermaid_graph.append(
        f'classDef {str(DeploymentRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
    )
    mermaid_graph.append(
        f'classDef {str(DeploymentRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
    )
    mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')

    mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
    return '\n'.join(mermaid_graph)
def plot(
    self,
    output: Optional[str] = None,
    vertical_layout: bool = False,
    inline_display: bool = False,
    build: bool = True,
    copy_flow: bool = True,
) -> 'Flow':
    """
    Visualize the Flow up to the current point
    If a file name is provided it will create a jpg image with that name,
    otherwise it will display the URL for mermaid.
    If called within IPython notebook, it will be rendered inline,
    otherwise an image will be created.

    Example,

    .. highlight:: python
    .. code-block:: python

        flow = Flow().add(name='deployment_a').plot('flow.svg')

    :param output: a filename specifying the name of the image to be created,
                the suffix svg/jpg determines the file type of the output image
    :param vertical_layout: top-down or left-right layout
    :param inline_display: show image directly inside the Jupyter Notebook
    :param build: build the Flow first before plotting, gateway connection can be better showed
    :param copy_flow: when set to true, then always copy the current Flow and
            do the modification on top of it then return, otherwise, do in-line modification
    :return: the Flow
    """

    # deepcopy causes the below error while reusing a Flow in Jupyter
    # 'Pickling an AuthenticationString object is disallowed for security reasons'
    # no need to deep copy if the Graph is built because no change will be made to the Flow
    op_flow = (
        copy.deepcopy(self)
        if (copy_flow and self._build_level.value == FlowBuildLevel.EMPTY)
        else self
    )

    if build and op_flow._build_level.value == FlowBuildLevel.EMPTY:
        op_flow.build(copy_flow=False)

    mermaid_str = op_flow._mermaid_str
    if vertical_layout:
        mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')

    image_type = 'svg'
    if output and not output.endswith('svg'):
        image_type = 'img'

    url = op_flow._mermaid_to_url(mermaid_str, image_type)
    showed = False
    if inline_display:
        try:
            from IPython.display import Image, display

            display(Image(url=url))
            showed = True
        except Exception:
            # best-effort inline rendering: fall back to the URL/file path
            # below instead of surfacing notebook/display errors to the user.
            # (was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit)
            pass

    if output:
        download_mermaid_url(url, output)
    elif not showed:
        print(f'[link={url}]Click here to see the visualization in browser[/]')
    return self
def _ipython_display_(self):
    """Render this Flow inline when it is evaluated in an IPython notebook."""
    needs_build = self._build_level != FlowBuildLevel.GRAPH
    self.plot(inline_display=True, build=needs_build)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
    """
    Render the current Flow as a URL pointing to an SVG/image on mermaid.ink.
    Requires an internet connection to resolve.

    :param mermaid_str: the mermaid representation
    :param img_type: image type (svg/jpg)
    :return: the url points to a SVG
    """
    payload = base64.b64encode(mermaid_str.encode('utf-8')).decode('utf-8')
    return f'https://mermaid.ink/{img_type}/{payload}'
@property
def port(self) -> Union[List[int], Optional[int]]:
    """The exposed port(s) of the gateway.

    .. # noqa: DAR201
    """
    if GATEWAY_NAME in self._deployment_nodes:
        value = self._deployment_nodes[GATEWAY_NAME].first_pod_args.port
    else:
        value = self._gateway_kwargs.get('port', None) or self._gateway_kwargs.get(
            'ports', None
        )
    # a single-element list is unwrapped for convenience
    if isinstance(value, list) and len(value) == 1:
        return value[0]
    return value
@port.setter
def port(self, value: Union[int, List[int]]):
    """Set the new exposed port of the Flow (affects Gateway and Client)

    :param value: the new port to expose
    """
    # normalize to a list of ports
    if isinstance(value, int):
        self._gateway_kwargs['port'] = [value]
    elif isinstance(value, list):
        self._gateway_kwargs['port'] = value

    # Flow is build to graph already: propagate to the gateway args
    if self._build_level >= FlowBuildLevel.GRAPH:
        self[GATEWAY_NAME].args.port = self._gateway_kwargs['port']

    # Flow is running already, then close the existing gateway and
    # restart it so it binds to the new port
    if self._build_level >= FlowBuildLevel.RUNNING:
        self[GATEWAY_NAME].close()
        self.enter_context(self[GATEWAY_NAME])
        self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
    """The local address of the gateway.

    .. # noqa: DAR201
    """
    gateway = self._deployment_nodes.get(GATEWAY_NAME)
    if gateway is not None:
        return gateway.host
    return self._gateway_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
    """Set the new host of the Flow (affects Gateway and Client)

    :param value: the new port to expose
    """
    self._gateway_kwargs['host'] = value

    # Flow is build to graph already: propagate to the gateway args
    if self._build_level >= FlowBuildLevel.GRAPH:
        self[GATEWAY_NAME].args.host = self._gateway_kwargs['host']

    # Flow is running already, then close the existing gateway and
    # restart it so it binds to the new host
    if self._build_level >= FlowBuildLevel.RUNNING:
        self[GATEWAY_NAME].close()
        self.enter_context(self[GATEWAY_NAME])
        self[GATEWAY_NAME].wait_start_success()
@property
def monitoring(self) -> bool:
    """Whether monitoring is enabled on the gateway.

    .. # noqa: DAR201
    """
    gateway = self._deployment_nodes.get(GATEWAY_NAME)
    # no gateway yet -> monitoring cannot be on
    return gateway.args.monitoring if gateway is not None else False
@property
def port_monitoring(self) -> Optional[int]:
    """The monitoring port of the gateway, if any.

    .. # noqa: DAR201
    """
    gateway = self._deployment_nodes.get(GATEWAY_NAME)
    if gateway is None:
        return self._gateway_kwargs.get('port_monitoring', None)
    return gateway.args.port_monitoring
def __iter__(self):
    # iterating a Flow yields (name, Deployment) pairs
    return iter(self._deployment_nodes.items())
def _get_summary_table(self, all_panels: List[Panel]):
    """Append rich Panels summarizing this Flow's endpoints and extensions.

    :param all_panels: list that the generated panels are appended to
    :return: the same list, with endpoint / HTTP / Prometheus panels appended
    """
    address_table = self._init_table()

    # normalize protocol(s) and port(s) into parallel lists
    if not isinstance(self.protocol, list):
        _protocols = [str(self.protocol)]
    else:
        _protocols = [str(_p) for _p in self.protocol]

    if not isinstance(self.port, list):
        _ports = [self.port]
    else:
        _ports = [str(_p) for _p in self.port]

    swagger_ui_link = None
    redoc_link = None
    graphql_ui_link = None
    for _port, _protocol in zip(_ports, _protocols):
        if self.gateway_args.ssl_certfile and self.gateway_args.ssl_keyfile:
            # TLS configured -> advertise the secure variant (e.g. HTTPS)
            _protocol = f'{_protocol}S'
            address_table.add_row(
                ':chains:', 'Protocol', f':closed_lock_with_key: {_protocol}'
            )

        else:
            address_table.add_row(':chains:', 'Protocol', _protocol)

        _protocol = _protocol.lower()
        address_table.add_row(
            ':house:',
            'Local',
            f'[link={_protocol}://{self.host}:{_port}]{self.host}:{_port}[/]',
        )
        address_table.add_row(
            ':lock:',
            'Private',
            f'[link={_protocol}://{self.address_private}:{_port}]{self.address_private}:{_port}[/]',
        )

        if self.address_public:
            address_table.add_row(
                ':earth_africa:',
                'Public',
                f'[link={_protocol}://{self.address_public}:{_port}]{self.address_public}:{_port}[/]',
            )

        # remember doc links for the HTTP panel below
        if _protocol.lower() == ProtocolType.HTTP.to_string().lower():
            swagger_ui_link = f'[link={_protocol}://{self.host}:{_port}/docs]{self.host}:{_port}/docs'
            redoc_link = f'[link={_protocol}://{self.host}:{_port}/redoc]{self.host}:{_port}/redoc'
            graphql_ui_link = f'[link={_protocol}://{self.host}:{_port}/graphql]{self.host}:{_port}/graphql'

    all_panels.append(
        Panel(
            address_table,
            title=':link: [b]Endpoint[/]',
            expand=False,
        )
    )

    if ProtocolType.HTTP.to_string().lower() in [p.lower() for p in _protocols]:
        http_ext_table = self._init_table()

        _protocol = ProtocolType.HTTP.to_string()  # NOTE(review): unused local

        http_ext_table.add_row(
            ':speech_balloon:',
            'Swagger UI',
            swagger_ui_link,
        )
        http_ext_table.add_row(':books:', 'Redoc', redoc_link)
        if self.gateway_args.expose_graphql_endpoint:
            http_ext_table.add_row(':strawberry:', 'GraphQL UI', graphql_ui_link)

        all_panels.append(
            Panel(
                http_ext_table,
                title=':gem: [b]HTTP extension[/]',
                expand=False,
            )
        )

    if self.monitoring:
        monitor_ext_table = self._init_table()

        for name, deployment in self:
            if deployment.args.monitoring:
                for replica in deployment.pod_args['pods'][0]:
                    _address = [
                        f'[link=http://localhost:{replica.port_monitoring}]Local[/]',
                        f'[link=http://{self.address_private}:{replica.port_monitoring}]Private[/]',
                    ]

                    if self.address_public:
                        _address.append(
                            f'[link=http://{self.address_public}:{deployment.args.port_monitoring}]Public[/]'
                        )

                    # show the replica name only when there are several replicas
                    _name = (
                        name
                        if len(deployment.pod_args['pods'][0]) == 1
                        else replica.name
                    )

                    monitor_ext_table.add_row(
                        ':flashlight:',  # upstream issue: they dont have :torch: emoji, so we use :flashlight:
                        # to represent observability of Prometheus (even they have :torch: it will be a war
                        # between AI community and Cloud-native community fighting on this emoji)
                        _name,
                        f'...[b]:{replica.port_monitoring}[/]',
                    )

        all_panels.append(
            Panel(
                monitor_ext_table,
                title=':gem: [b]Prometheus extension[/]',
                expand=False,
            )
        )

    return all_panels
@allowed_levels([FlowBuildLevel.RUNNING])
def block(
    self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
    """Block the Flow until `stop_event` is set or user hits KeyboardInterrupt

    :param stop_event: a threading event or a multiprocessing event that onces set will resume the control Flow
        to main thread.
    """

    def _reload_flow(changed_file):
        # tear down the whole Flow and rebuild it from the changed YAML,
        # transplanting the new state into this instance
        self.logger.info(
            f'change in Flow YAML {changed_file} observed, reloading Flow'
        )
        self.__exit__(None, None, None)
        new_flow = Flow.load_config(changed_file)
        self.__dict__ = new_flow.__dict__
        self.__enter__()

    def _reload_deployment(deployment, changed_file):
        # restart a single Deployment in place, preserving its args/needs
        self.logger.info(
            f'change in Executor configuration YAML {changed_file} observed, reloading Executor deployment'
        )
        deployment.__exit__(None, None, None)
        old_args, old_needs = deployment.args, deployment.needs
        new_deployment = Deployment(old_args, old_needs, include_gateway=False)
        deployment.__dict__ = new_deployment.__dict__
        deployment.__enter__()

    try:
        # hot-reload is active when either the Flow itself or any
        # Deployment was created with `reload=True`
        watch_changes = self.args.reload or any(
            [
                deployment.args.reload
                for deployment in list(self._deployment_nodes.values())
            ]
        )
        watch_files_from_deployments = {}
        for name, deployment in self._deployment_nodes.items():
            if deployment.args.reload:
                if deployment._is_executor_from_yaml:
                    # map Executor YAML path -> deployment name for dispatch
                    watch_files_from_deployments[deployment.args.uses] = name
        watch_files_list = list(watch_files_from_deployments.keys())

        # also watch the Flow's own YAML config, if it was loaded from one
        config_loaded = getattr(self, '_config_loaded', '')
        if config_loaded.endswith('yml') or config_loaded.endswith('yaml'):
            watch_files_list.append(config_loaded)

        if watch_changes and len(watch_files_list) > 0:
            with ImportExtensions(
                required=True,
                logger=self.logger,
                help_text='''reload requires watchfiles dependency to be installed. You can do `pip install 
                watchfiles''',
            ):
                from watchfiles import watch

            new_stop_event = stop_event or threading.Event()
            if len(watch_files_list) > 0:
                # blocks here until stop_event is set; each change batch is
                # dispatched to a Flow-level or Deployment-level reload
                for changes in watch(*watch_files_list, stop_event=new_stop_event):
                    for _, changed_file in changes:
                        if changed_file not in watch_files_from_deployments:
                            # maybe changed_file is the absolute path of one in watch_files_from_deployments
                            is_absolute_path = False
                            for (
                                file,
                                deployment_name,
                            ) in watch_files_from_deployments.items():
                                if changed_file.endswith(file):
                                    is_absolute_path = True
                                    _reload_deployment(
                                        self._deployment_nodes[deployment_name],
                                        changed_file,
                                    )
                                    break
                            if not is_absolute_path:
                                _reload_flow(changed_file)
                        else:
                            _reload_deployment(
                                self._deployment_nodes[
                                    watch_files_from_deployments[changed_file]
                                ],
                                changed_file,
                            )
        else:
            # no hot-reload: simply wait for the (possibly auto-created) event
            wait_event = stop_event
            if not wait_event:
                self._stop_event = threading.Event()
                wait_event = self._stop_event
            if not __windows__:
                wait_event.wait()
            else:
                # Event.wait() can block signal delivery on Windows; poll instead
                while True:
                    if wait_event.is_set():
                        break
                    time.sleep(0.5)
    except KeyboardInterrupt:
        pass
@property
def protocol(self) -> Union[ProtocolType, List[ProtocolType]]:
    """Return the protocol of this Flow

    :return: the protocol of this Flow, if only 1 protocol is supported otherwise returns the list of protocols
    """
    raw = (
        self._gateway_kwargs.get('protocol', None)
        or self._gateway_kwargs.get('protocols', None)
        or [ProtocolType.GRPC]
    )
    if not isinstance(raw, list):
        raw = [raw]
    protocols = ProtocolType.from_string_list(raw)
    # unwrap the single-protocol case for convenience
    return protocols[0] if len(protocols) == 1 else protocols
@protocol.setter
def protocol(
    self,
    value: Union[str, ProtocolType, List[str], List[ProtocolType]],
):
    """Set the protocol of this Flow, can only be set before the Flow has been started

    :param value: the protocol to set
    """

    # Flow is running already, protocol cant be changed anymore
    if self._build_level >= FlowBuildLevel.RUNNING:
        raise RuntimeError('Protocol can not be changed after the Flow has started')

    # normalize into a list of ProtocolType
    if isinstance(value, str):
        normalized = [ProtocolType.from_string(value)]
    elif isinstance(value, ProtocolType):
        normalized = [value]
    elif isinstance(value, list):
        normalized = ProtocolType.from_string_list(value)
    else:
        raise TypeError(
            f'{value} must be either `str` or `ProtocolType` or list of protocols'
        )
    self._gateway_kwargs['protocol'] = normalized

    # Flow is build to graph already: propagate to the gateway args
    if self._build_level >= FlowBuildLevel.GRAPH:
        self[GATEWAY_NAME].args.protocol = self._gateway_kwargs['protocol']
def __getitem__(self, item):
    # string -> lookup by deployment name; int -> positional lookup
    if isinstance(item, str):
        return self._deployment_nodes[item]
    if isinstance(item, int):
        return list(self._deployment_nodes.values())[item]
    raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
    """Absolute workspace path of the Flow, or ``None`` when unset.

    .. # noqa: DAR201"""
    ws = self.args.workspace
    return os.path.abspath(ws) if ws is not None else None
@workspace.setter
def workspace(self, value: str):
    """set workspace dir for flow & all deployments

    :param value: workspace to be set
    """
    self.args.workspace = value
    for _, deployment in self:
        deployment.args.workspace = value
        deployment.update_pod_args()
@property
def workspace_id(self) -> Dict[str, str]:
    """Map each Deployment name to its ``workspace_id`` (when it has one).

    .. # noqa: DAR201"""
    result = {}
    for name, deployment in self:
        if hasattr(deployment.args, 'workspace_id'):
            result[name] = deployment.args.workspace_id
    return result
@workspace_id.setter
def workspace_id(self, value: str):
    """Set all Deployments' ``workspace_id`` to ``value``

    :param value: a hexadecimal UUID string
    """
    uuid.UUID(value)  # validate: raises ValueError when not a proper UUID
    for k, p in self:
        if hasattr(p.args, 'workspace_id'):
            p.args.workspace_id = value
            # propagate into the per-pod / per-shard argparse namespaces too;
            # a deployment exposes either `pod_args` or `shards_args`
            args = getattr(p, 'pod_args', getattr(p, 'shards_args', None))
            if args is None:
                raise ValueError(
                    f'could not find "pod_args" or "shards_args" on {p}'
                )
            values = None
            if isinstance(args, dict):
                values = args.values()
            elif isinstance(args, list):
                values = args
            for v in values:
                if v and isinstance(v, argparse.Namespace):
                    v.workspace_id = value
                if v and isinstance(v, List):
                    # nested list of namespaces (e.g. replicas per shard)
                    for i in v:
                        i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
    """All environment variables configured for the Flow.

    :return: envs as dict
    """
    return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
    """set env vars for flow & all deployments.
    This can be used by jinad to set envs for Flow and all child objects

    :param value: value to be set
    """
    self.args.env = value
    for _, deployment in self:
        deployment.args.env = value
        deployment.update_pod_args()
# typing-only overload: simple form with just an optional HTTP path
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
    """Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.

    After expose, you can send data request directly to `http://hostname:port/endpoint`.

    :param exec_endpoint: the endpoint string, by convention starts with `/`
    :param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
    """
    ...
# typing-only overload: full FastAPI-style route customization (keyword-only)
@overload
def expose_endpoint(
    self,
    exec_endpoint: str,
    *,
    path: Optional[str] = None,
    status_code: int = 200,
    tags: Optional[List[str]] = None,
    summary: Optional[str] = None,
    description: Optional[str] = None,
    response_description: str = 'Successful Response',
    deprecated: Optional[bool] = None,
    methods: Optional[List[str]] = None,
    operation_id: Optional[str] = None,
    response_model_by_alias: bool = True,
    response_model_exclude_unset: bool = False,
    response_model_exclude_defaults: bool = False,
    response_model_exclude_none: bool = False,
    include_in_schema: bool = True,
    name: Optional[str] = None,
):
    """Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.

    After expose, you can send data request directly to `http://hostname:port/endpoint`.

    Use this method to specify your HTTP endpoint with richer semantic and schema.

    :param exec_endpoint: the endpoint string, by convention starts with `/`

    # noqa: DAR101
    """
    ...
@allowed_levels([FlowBuildLevel.EMPTY])
def expose_endpoint(self, exec_endpoint: str, **kwargs):
    """Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.

    After expose, you can send data request directly to `http://hostname:port/endpoint`.

    :param exec_endpoint: the endpoint string, by convention starts with `/`

    # noqa: DAR101
    # noqa: DAR102
    """
    # record the mapping; presumably consumed when the HTTP gateway builds
    # its routes — verify against the gateway implementation
    self._endpoints_mapping[exec_endpoint] = kwargs
def to_kubernetes_yaml(
    self,
    output_base_path: str,
    k8s_namespace: Optional[str] = None,
    include_gateway: bool = True,
):
    """
    Converts the Flow into a set of yaml deployments to deploy in Kubernetes.

    If you don't want to rebuild image on Jina Hub,
    you can set `JINA_HUB_NO_IMAGE_REBUILD` environment variable.

    :param output_base_path: The base path where to dump all the yaml files
    :param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
    :param include_gateway: Defines if the gateway deployment should be included, defaults to True
    """
    # the deployment graph must exist before it can be serialized
    if self._build_level.value < FlowBuildLevel.GRAPH.value:
        self.build(copy_flow=False)
    k8s_namespace = k8s_namespace or self.args.name or 'default'

    # one sub-directory of yaml files per deployment
    for node, v in self._deployment_nodes.items():
        if node == 'gateway' and not include_gateway:
            continue
        deployment_base = os.path.join(output_base_path, node)
        v._to_kubernetes_yaml(
            deployment_base,
            k8s_namespace=k8s_namespace,
            k8s_deployments_addresses=self._get_k8s_deployments_addresses(
                k8s_namespace
            ),
        )
    self.logger.info(
        f'K8s yaml files have been created under [b]{output_base_path}[/]. You can use it by running [b]kubectl apply -R -f {output_base_path}[/]'
    )

# short alias kept for backward compatibility
to_k8s_yaml = to_kubernetes_yaml
def to_docker_compose_yaml(
    self,
    output_path: Optional[str] = None,
    network_name: Optional[str] = None,
    include_gateway: bool = True,
):
    """
    Converts the Flow into a yaml file to run with `docker-compose up`

    :param output_path: The output path for the yaml file
    :param network_name: The name of the network that will be used by the deployment name
    :param include_gateway: Defines if the gateway deployment should be included, defaults to True
    """
    import yaml

    # the deployment graph must exist before it can be serialized
    if self._build_level.value < FlowBuildLevel.GRAPH.value:
        self.build(copy_flow=False)

    # Remember whether the caller passed an explicit path *before* applying
    # the default. The original code checked `output_path is None` only after
    # defaulting, so the plain `docker compose up` suggestion was dead code.
    explicit_output = output_path is not None
    output_path = output_path or 'docker-compose.yml'
    network_name = network_name or 'jina-network'

    docker_compose_dict = {
        'version': '3.3',
        'networks': {network_name: {'driver': 'bridge'}},
    }

    services = {}
    for node, v in self._deployment_nodes.items():
        # external deployments are managed outside this compose file
        if v.external or (node == 'gateway' and not include_gateway):
            continue
        service_configs = v._to_docker_compose_config(
            deployments_addresses=self._get_docker_compose_deployments_addresses(),
        )
        for service_name, service in service_configs:
            service['networks'] = [network_name]
            services[service_name] = service

    docker_compose_dict['services'] = services
    with open(output_path, 'w+', encoding='utf-8') as fp:
        yaml.dump(docker_compose_dict, fp, sort_keys=False)

    # `docker compose up` picks up ./docker-compose.yml by itself; suggest
    # `-f` only when the caller chose a custom location
    command = (
        f'docker compose -f {output_path} up'
        if explicit_output
        else 'docker compose up'
    )

    self.logger.info(
        f'Docker compose file has been created under [b]{output_path}[/b]. You can use it by running [b]{command}[/b]'
    )
@property
def gateway_args(self) -> argparse.Namespace:
    """Get Gateway settings.

    # noqa: DAR201
    """
    # materialize the stored gateway kwargs into an argparse Namespace,
    # filling unset fields with the canonical gateway parser defaults
    return ArgNamespace.kwargs2namespace(self._gateway_kwargs, set_gateway_parser())
def _update_network_interface(self, **kwargs):
    """Update the network interface of this Flow (affects Gateway & Client)

    :param kwargs: new network settings
    """
    self._gateway_kwargs.update(kwargs)
    # reset client so it is lazily rebuilt with the new settings
    self._client = None
def __getattribute__(self, item):
    # Intercept attribute access only to warn about a common misuse:
    # calling the classmethod-like `load_config` on an *instance*, which
    # would replace the instance's initial parameters.
    obj = super().__getattribute__(item)

    if (
        item == 'load_config' and inspect.ismethod(obj) and obj.__self__ is Flow
    ):  # check if obj load config call from an instance and not the Class
        warnings.warn(
            "Calling `load_config` from a Flow instance will override all of the instance's initial parameters. We recommend to use `Flow.load_config(...)` instead"
        )

    return obj
|
Flow
|
python
|
weaviate__weaviate-python-client
|
weaviate/users/users.py
|
{
"start": 343,
"end": 440
}
|
class ____:
user_id: str
role_names: List[str]
user_type: UserTypes
@dataclass
|
UserBase
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterpolar/selected/_marker.py
|
{
"start": 233,
"end": 3609
}
|
# Plotly hierarchy node for `scatterpolar.selected.marker`; follows the
# standard generated-property pattern (presumably auto-generated — verify
# before hand-editing).
class ____(_BaseTraceHierarchyType):
    _parent_path_str = "scatterpolar.selected"
    _path_str = "scatterpolar.selected.marker"
    _valid_props = {"color", "opacity", "size"}

    @property
    def color(self):
        """
        Sets the marker color of selected points.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def opacity(self):
        """
        Sets the marker opacity of selected points.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    @property
    def size(self):
        """
        Sets the marker size of selected points.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the marker color of selected points.
        opacity
            Sets the marker opacity of selected points.
        size
            Sets the marker size of selected points.
        """

    def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatterpolar.selected.Marker`
        color
            Sets the marker color of selected points.
        opacity
            Sets the marker opacity of selected points.
        size
            Sets the marker size of selected points.

        Returns
        -------
        Marker
        """
        super().__init__("marker")
        # internal construction path: parent supplied, skip normal init
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # normalize `arg` into a plain dict copy
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.selected.Marker`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # explicit keyword args override values from `arg`
        self._set_property("color", arg, color)
        self._set_property("opacity", arg, opacity)
        self._set_property("size", arg, size)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
|
Marker
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constrainedTypeVar1.py
|
{
"start": 366,
"end": 997
}
|
# Type-checker conformance sample: the "# This should generate an error"
# markers are intentional expectations — do not "fix" the flagged lines.
class ____(Generic[U]):
    def generic_func1(self, a: U, b: str = "", **kwargs: U) -> U:
        return a


a1 = ClassA[str]()

r1 = a1.generic_func1("hi")
reveal_type(r1, expected_text="str")

r2 = a1.generic_func1("hi", test="hi")
reveal_type(r2, expected_text="str")

# This should generate an error.
r3 = a1.generic_func1("hi", test=3)
reveal_type(r3, expected_text="str")

# This should generate an error.
r4 = a1.generic_func1("hi", 3)
reveal_type(r4, expected_text="str")

a2: ClassA[int]

# This should generate an error.
a3: ClassA[Never]

ClassAAlias = ClassA[U]

# This should generate an error.
a4: ClassAAlias[Never]
ClassA
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximize-sum-of-at-most-k-distinct-elements.py
|
{
"start": 329,
"end": 801
}
|
class ____(object):
    def maxKDistinct(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: List[int]

        Return the k largest *distinct* values of nums in descending order
        (fewer than k when nums has fewer distinct values).
        """
        # heapq.nlargest over the deduplicated values returns them already
        # sorted in descending order, in O(n log k) — same complexity as the
        # original hand-rolled bounded min-heap, but in one stdlib call.
        return heapq.nlargest(k, set(nums))
|
Solution2
|
python
|
huggingface__transformers
|
tests/models/internvl/test_modeling_internvl.py
|
{
"start": 25862,
"end": 45440
}
|
class ____(unittest.TestCase):
def setUp(self):
self.small_model_checkpoint = "OpenGVLab/InternVL2_5-2B-MPO-hf"
self.medium_model_checkpoint = "OpenGVLab/InternVL2_5-8B-MPO-hf"
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_llama_small_model_integration_generate(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = (
"<|im_start|>user\n<IMG_CONTEXT>\nPlease describe the image explicitly.<|im_end|>\n<|im_start|>assistant\n"
)
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
generate_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "The image shows two cats sleeping on a pink couch. They are lying side by side, with their"
self.assertEqual(decoded_output, expected_output)
def test_llama_small_model_integration_forward(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = (
"<|im_start|>user\n<IMG_CONTEXT>\nPlease describe the image explicitly.<|im_end|>\n<|im_start|>assistant\n"
)
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
# Forward
with torch.inference_mode():
output = model(**inputs)
actual_logits = output.logits[0, -1, :5].cpu()
expected_logits_all = Expectations(
{
("xpu", 3): [-9.8828, -0.4954, 1.4561, -10.3438, -10.3438],
("cuda", 7): [-9.8750, -0.4861, 1.4648, -10.3359, -10.3359],
("cuda", 8): [-9.8906, -0.4995, 1.4473, -10.3359, -10.3438],
("rocm", (9, 4)): [ -9.8828, -0.5005, 1.4697, -10.3438, -10.3438],
("rocm", (9, 5)): [ -9.8906, -0.4976, 1.4502, -10.3359, -10.3438],
}
) # fmt: skip
expected_logits = torch.tensor(expected_logits_all.get_expectation(), dtype=torch.float16)
# The original implementation and the transformers implementation do not match exactly, hence the higher tolerance.
# The difference is likely due to the different implementations of the attention mechanism (different order of operations)
# between the transformers Llama model and the original InternLM model.
# The difference has almost no effect on the output tokens, but it does affect the logits a lot more.
self.assertTrue(
torch.allclose(actual_logits, expected_logits, atol=1e-3),
f"Actual logits: {actual_logits}"
f"\nExpected logits: {expected_logits}"
f"\nDifference: {torch.abs(actual_logits - expected_logits)}",
)
def test_llama_small_model_integration_generate_text_only(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
prompt = "<|im_start|>user\nWrite a haiku<|im_end|>\n<|im_start|>assistant\n"
inputs = processor(text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
generate_ids = model.generate(**inputs, max_new_tokens=200, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_outputs = Expectations(
{
("xpu", 3): "Autumn leaves fall,\nNature's breath, a season's sigh,\nSilent woods awake.",
("cuda", 7): "Autumn leaves fall,\nNature's breath, a gentle sigh,\nSilent whispers.",
("cuda", 8): "Autumn leaves fall,\nNature's breath, a silent sigh,\nWinter's chill approaches.",
}
)
expected_output = expected_outputs.get_expectation()
self.assertEqual(decoded_output, expected_output)
def test_llama_small_model_integration_generate_chat_template(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
{"type": "text", "text": "Please describe the image explicitly."},
],
}
]
inputs = processor.apply_chat_template(
messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(torch_device, dtype=torch.float16)
with torch.no_grad():
generate_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "The image shows two cats sleeping on a pink couch. They are lying side by side, with their"
self.assertEqual(decoded_output, expected_output)
def test_llama_small_model_integration_batched_generate(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
# Prepare inputs
prompt = [
"<|im_start|>user\n<IMG_CONTEXT>\nWrite a haiku for this image<|im_end|>\n<|im_start|>assistant\n",
"<|im_start|>user\n<IMG_CONTEXT>\nDescribe this image<|im_end|>\n<|im_start|>assistant\n",
]
image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw)
image2 = Image.open(
requests.get(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
stream=True,
).raw
)
inputs = processor(text=prompt, images=[[image1], [image2]], padding=True, return_tensors="pt").to(
torch_device, dtype=torch.float16
)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
# Check first output
decoded_output = processor.decode(output[0], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.",
("cuda", 7): 'user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.',
("cuda", 8): 'user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(output[1], skip_special_tokens=True)
expected_output = "user\n\nDescribe this image\nassistant\nThe image shows a street scene with a traditional Chinese gate in the background, adorned with red and gold colors and Chinese characters"
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
def test_llama_small_model_integration_batched_generate_multi_image(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
# Prepare inputs
prompt = [
"<|im_start|>user\n<IMG_CONTEXT>\nWrite a haiku for this image<|im_end|>\n<|im_start|>assistant\n",
"<|im_start|>user\n<IMG_CONTEXT><IMG_CONTEXT>\nWhat are the difference between these two images?<|im_end|>\n<|im_start|>assistant\n",
]
image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw)
image2 = Image.open(
BytesIO(
requests.get(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
).content
)
)
image3 = Image.open(
BytesIO(
requests.get(
"https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
).content
)
)
inputs = processor(text=prompt, images=[[image1], [image2, image3]], padding=True, return_tensors="pt").to(
torch_device, dtype=torch.float16
)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
# Check first output
decoded_output = processor.decode(output[0], skip_special_tokens=True)
# Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
expected_output = "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors."
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(output[1], skip_special_tokens=True)
expected_output = "user\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. After closely examining the images again, I can see that there are several differences"
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
@require_av
@require_bitsandbytes
def test_llama_medium_model_integration_video(self):
processor = AutoProcessor.from_pretrained(self.medium_model_checkpoint)
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = InternVLForConditionalGeneration.from_pretrained(
self.medium_model_checkpoint, quantization_config=quantization_config
)
# Prepare inputs
messages = [
{
"role": "user",
"content": [
{
"type": "video",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4",
},
{"type": "text", "text": "What type of shot is the man performing?"},
],
}
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
num_frames=8,
).to(torch_device, dtype=torch.float16)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
expected_output = "The man is performing a forehand shot."
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
@require_av
def test_llama_small_model_integration_interleaved_images_videos(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, dtype=torch.float16, device_map=torch_device
)
messages = [
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
{
"type": "image",
"url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
},
{"type": "text", "text": "What are the difference between these two images?"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "video",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4",
},
{"type": "text", "text": "What type of shot is the man performing?"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://llava-vl.github.io/static/images/view.jpg",
},
{"type": "text", "text": "Write a haiku for this image"},
],
}
],
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
num_frames=8,
).to(torch_device, dtype=torch.float16)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
decoded_output = processor.decode(output[0], skip_special_tokens=True)
# Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
expected_outputs = Expectations(
{
("xpu", 3): "user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. Upon closer inspection, the differences between the two images are:\n\n1. **",
("cuda", 7): 'user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. Upon closer inspection, the differences between the two images are:\n\n1. **',
("cuda", 8): 'user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. After re-examining the images, I can see that there are no',
("rocm", (9, 4)): 'user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. Upon closer inspection, the differences between the two images are:\n\n1. **',
("rocm", (9, 5)): 'user\n\n\nWhat are the difference between these two images?\nassistant\nI apologize for the confusion in my previous response. After re-examining the images, I can see that there are no',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(output[1], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot. This is a common stroke in tennis where the player swings the racket across their",
("cuda", 7): 'user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot. This is a common stroke in tennis where the player swings the racket across their',
("cuda", 8): 'user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nThe man is performing a forehand shot. This is a common stroke in tennis where the player swings the racket across their',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check third output
decoded_output = processor.decode(output[2], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.",
("cuda", 7): 'user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.',
("cuda", 8): 'user\n\nWrite a haiku for this image\nassistant\nMajestic snow-capped peaks,\nWooden dock stretches to the sea,\nSilent water mirrors.',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
|
InternVLLlamaIntegrationTest
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pydocstyle/D.py
|
{
"start": 11994,
"end": 14791
}
|
class ____: # noqa: D203,D213
"""A Blah.
Parameters
----------
x : int
"""
def __init__(self, x):
pass
expect(os.path.normcase(__file__ if __file__[-1] != 'c' else __file__[:-1]),
'D100: Missing docstring in public module')
@expect('D201: No blank lines allowed before function docstring (found 1)')
@expect('D213: Multi-line docstring summary should start at the second line')
def multiline_leading_space():
"""Leading space.
More content.
"""
@expect('D202: No blank lines allowed after function docstring (found 1)')
@expect('D213: Multi-line docstring summary should start at the second line')
def multiline_trailing_space():
"""Leading space.
More content.
"""
pass
@expect('D201: No blank lines allowed before function docstring (found 1)')
@expect('D202: No blank lines allowed after function docstring (found 1)')
@expect('D213: Multi-line docstring summary should start at the second line')
def multiline_trailing_and_leading_space():
"""Trailing and leading space.
More content.
"""
pass
@expect('D210: No whitespaces allowed surrounding docstring text')
@expect("D400: First line should end with a period (not '\"')")
@expect("D415: First line should end with a period, question mark, "
"or exclamation point (not '\"')")
def endswith_quote():
"""Whitespace at the end, but also a quote" """
@expect('D209: Multi-line docstring closing quotes should be on a separate '
'line')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdfljdjgf24():
"""Summary.
Description. """
@expect('D200: One-line docstring should fit on one line with quotes '
'(found 3)')
@expect('D212: Multi-line docstring summary should start at the first line')
def one_liner():
"""
Wrong."""
@expect('D200: One-line docstring should fit on one line with quotes '
'(found 3)')
@expect('D212: Multi-line docstring summary should start at the first line')
def one_liner():
r"""Wrong.
"""
@expect('D200: One-line docstring should fit on one line with quotes '
'(found 3)')
@expect('D212: Multi-line docstring summary should start at the first line')
def one_liner():
"""Wrong."
"""
@expect('D200: One-line docstring should fit on one line with quotes '
'(found 3)')
@expect('D212: Multi-line docstring summary should start at the first line')
def one_liner():
"""
"Wrong."""
@expect('D404: First word of the docstring should not be "This"')
def starts_with_this():
"""This is a docstring."""
@expect('D404: First word of the docstring should not be "This"')
def starts_with_space_then_this():
""" This is a docstring that starts with a space.""" # noqa: D210
|
Blah
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/scriptrunner_utils/script_requests.py
|
{
"start": 5876,
"end": 12776
}
|
class ____:
"""An interface for communicating with a ScriptRunner. Thread-safe.
AppSession makes requests of a ScriptRunner through this class, and
ScriptRunner handles those requests.
"""
def __init__(self) -> None:
self._lock = threading.Lock()
self._state = ScriptRequestType.CONTINUE
self._rerun_data = RerunData()
def request_stop(self) -> None:
"""Request that the ScriptRunner stop running. A stopped ScriptRunner
can't be used anymore. STOP requests succeed unconditionally.
"""
with self._lock:
self._state = ScriptRequestType.STOP
def request_rerun(self, new_data: RerunData) -> bool:
"""Request that the ScriptRunner rerun its script.
If the ScriptRunner has been stopped, this request can't be honored:
return False.
Otherwise, record the request and return True. The ScriptRunner will
handle the rerun request as soon as it reaches an interrupt point.
"""
with self._lock:
if self._state == ScriptRequestType.STOP:
# We can't rerun after being stopped.
return False
if self._state == ScriptRequestType.CONTINUE:
# The script is currently running, and we haven't received a request to
# rerun it as of yet. We can handle a rerun request unconditionally so
# just change self._state and set self._rerun_data.
self._state = ScriptRequestType.RERUN
# Convert from a single fragment_id into fragment_id_queue.
if new_data.fragment_id:
new_data = replace(
new_data,
fragment_id=None,
fragment_id_queue=[new_data.fragment_id],
)
self._rerun_data = new_data
return True
if self._state == ScriptRequestType.RERUN:
# We already have an existing Rerun request, so we can coalesce the new
# rerun request into the existing one.
coalesced_states = _coalesce_widget_states(
self._rerun_data.widget_states, new_data.widget_states
)
if new_data.fragment_id:
# This RERUN request corresponds to a new fragment run. We append
# the new fragment ID to the end of the current fragment_id_queue if
# it isn't already contained in it.
fragment_id_queue = [*self._rerun_data.fragment_id_queue]
if new_data.fragment_id not in fragment_id_queue:
fragment_id_queue.append(new_data.fragment_id)
elif new_data.fragment_id_queue:
# new_data contains a new fragment_id_queue, so we just use it.
fragment_id_queue = new_data.fragment_id_queue
else:
# Otherwise, this is a request to rerun the full script, so we want
# to clear out any fragments we have queued to run since they'll all
# be run with the full script anyway.
fragment_id_queue = []
self._rerun_data = RerunData(
query_string=new_data.query_string,
widget_states=coalesced_states,
page_script_hash=new_data.page_script_hash,
page_name=new_data.page_name,
fragment_id_queue=fragment_id_queue,
cached_message_hashes=new_data.cached_message_hashes,
is_fragment_scoped_rerun=new_data.is_fragment_scoped_rerun,
is_auto_rerun=new_data.is_auto_rerun,
context_info=new_data.context_info,
)
return True
# We'll never get here
raise RuntimeError(f"Unrecognized ScriptRunnerState: {self._state}")
def on_scriptrunner_yield(self) -> ScriptRequest | None:
"""Called by the ScriptRunner when it's at a yield point.
If we have no request or a RERUN request corresponding to one or more fragments
(that is not a fragment-scoped rerun), return None.
If we have a (full script or fragment-scoped) RERUN request, return the request
and set our internal state to CONTINUE.
If we have a STOP request, return the request and remain stopped.
"""
if self._state == ScriptRequestType.CONTINUE or (
self._state == ScriptRequestType.RERUN
and _fragment_run_should_not_preempt_script(
self._rerun_data.fragment_id_queue,
self._rerun_data.is_fragment_scoped_rerun,
)
):
# We avoid taking the lock in the common cases described above. If a STOP or
# preempting RERUN request is received after we've taken this code path, it
# will be handled at the next `on_scriptrunner_yield`, or when
# `on_scriptrunner_ready` is called.
return None
with self._lock:
if self._state == ScriptRequestType.RERUN:
# We already made this check in the fast-path above but need to do so
# again in case our state changed while we were waiting on the lock.
if _fragment_run_should_not_preempt_script(
self._rerun_data.fragment_id_queue,
self._rerun_data.is_fragment_scoped_rerun,
):
return None
self._state = ScriptRequestType.CONTINUE
return ScriptRequest(ScriptRequestType.RERUN, self._rerun_data)
if self._state != ScriptRequestType.STOP:
raise RuntimeError(
f"Unrecognized ScriptRunnerState: {self._state}. This should never happen."
)
return ScriptRequest(ScriptRequestType.STOP)
def on_scriptrunner_ready(self) -> ScriptRequest:
"""Called by the ScriptRunner when it's about to run its script for
the first time, and also after its script has successfully completed.
If we have a RERUN request, return the request and set
our internal state to CONTINUE.
If we have a STOP request or no request, set our internal state
to STOP.
"""
with self._lock:
if self._state == ScriptRequestType.RERUN:
self._state = ScriptRequestType.CONTINUE
return ScriptRequest(ScriptRequestType.RERUN, self._rerun_data)
# If we don't have a rerun request, unconditionally change our
# state to STOP.
self._state = ScriptRequestType.STOP
return ScriptRequest(ScriptRequestType.STOP)
|
ScriptRequests
|
python
|
Farama-Foundation__Gymnasium
|
tests/vector/testing_utils.py
|
{
"start": 3103,
"end": 4698
}
|
class ____(gym.Env):
"""An environment with custom spaces for observation and action spaces."""
def __init__(self):
"""Initialise the environment."""
super().__init__()
self.observation_space = CustomSpace()
self.action_space = CustomSpace()
def reset(self, *, seed: int | None = None, options: dict | None = None):
"""Resets the environment."""
super().reset(seed=seed)
return "reset", {}
def step(self, action):
"""Steps through the environment."""
observation = f"step({action:s})"
reward, terminated, truncated = 0.0, False, False
return observation, reward, terminated, truncated, {}
def make_env(env_name, seed, **kwargs):
"""Creates an environment."""
def _make():
env = gym.make(env_name, disable_env_checker=True, **kwargs)
env.action_space.seed(seed)
env.reset(seed=seed)
return env
return _make
def make_slow_env(slow_reset, seed):
"""Creates an environment with slow reset."""
def _make():
env = SlowEnv(slow_reset=slow_reset)
env.reset(seed=seed)
return env
return _make
def make_custom_space_env(seed):
"""Creates a custom space environment."""
def _make():
env = CustomSpaceEnv()
env.reset(seed=seed)
return env
return _make
def assert_rng_equal(rng_1: np.random.Generator, rng_2: np.random.Generator):
"""Tests whether two random number generators are equal."""
assert rng_1.bit_generator.state == rng_2.bit_generator.state
|
CustomSpaceEnv
|
python
|
great-expectations__great_expectations
|
docs/sphinx_api_docs_source/public_api_report.py
|
{
"start": 3886,
"end": 8049
}
|
class ____:
"""Parse examples from docs to find classes, methods and functions used."""
def __init__(self, file_contents: Set[FileContents]) -> None:
self.file_contents = file_contents
def get_names_from_usage_in_docs_examples(self) -> Set[str]:
"""Get names in docs examples of classes, methods and functions used.
Usages are retrieved from imports and function / method calls.
Returns:
Names of classes, methods and functions as a set of strings.
"""
all_usages = set()
for file_contents in self.file_contents:
file_usages = self._get_names_of_all_usages_in_file(
file_contents=file_contents
)
all_usages |= file_usages
return all_usages
def _get_names_of_all_usages_in_file(self, file_contents: FileContents) -> Set[str]:
"""Retrieve the names of all class, method + functions used in file_contents."""
tree = ast.parse(file_contents.contents)
function_calls = self._get_all_function_calls(tree=tree)
function_names = self._get_non_private_function_names(calls=function_calls)
logger.debug(f"function_names: {function_names}")
gx_imports = self._list_all_gx_imports(tree=tree)
import_names = self._get_non_private_gx_import_names(imports=gx_imports)
logger.debug(f"import_names: {import_names}")
pattern = re.compile(r"class_name: (\w+)")
matches = re.finditer(pattern, file_contents.contents)
yaml_names = {m.group(1) for m in matches}
return function_names | import_names | yaml_names
def _list_all_gx_imports(
self, tree: ast.AST
) -> List[Union[ast.Import, ast.ImportFrom]]:
"""Get all the GX related imports in an ast tree."""
imports: List[Union[ast.Import, ast.ImportFrom]] = []
for node in ast.walk(tree):
node_is_imported_from_gx = isinstance(
node, ast.ImportFrom
) and node.module.startswith( # type: ignore[union-attr]
"great_expectations"
)
node_is_gx_import = isinstance(node, ast.Import) and any(
n.name.startswith("great_expectations") for n in node.names
)
if node_is_imported_from_gx:
cast("ast.ImportFrom", node)
imports.append(node) # type: ignore[arg-type]
elif node_is_gx_import:
cast("ast.Import", node)
imports.append(node) # type: ignore[arg-type]
return imports
def _get_non_private_gx_import_names(
self, imports: List[Union[ast.Import, ast.ImportFrom]]
) -> Set[str]:
"""From ast trees, get names of all non private GX related imports."""
names = []
for import_ in imports:
if not isinstance(import_, (ast.Import, ast.ImportFrom)):
raise TypeError( # noqa: TRY003
f"`imports` should only contain ast.Import, ast.ImportFrom types, you provided {type(import_)}"
)
# Generally there is only 1 alias,
# but we add all names if there are multiple aliases to be safe.
names.extend([n.name for n in import_.names if not n.name.startswith("_")])
return set(names)
def _get_all_function_calls(self, tree: ast.AST) -> List[ast.Call]:
"""Get all the function calls from an ast tree."""
calls = []
for node in ast.walk(tree):
if isinstance(node, ast.Call):
calls.append(node)
return calls
def _get_non_private_function_names(self, calls: List[ast.Call]) -> Set[str]:
"""Get function names that are not private from ast.Call objects."""
names = []
for call in calls:
name = None
if isinstance(call.func, ast.Attribute):
name = call.func.attr
elif isinstance(call.func, ast.Name):
name = call.func.id
if name and not name.startswith("_"):
names.append(name)
return set(names)
|
DocsExampleParser
|
python
|
PrefectHQ__prefect
|
src/prefect/settings/models/cloud.py
|
{
"start": 719,
"end": 2171
}
|
class ____(PrefectBaseSettings):
"""
Settings for interacting with Prefect Cloud
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(("cloud",))
api_url: str = Field(
default="https://api.prefect.cloud/api",
description="API URL for Prefect Cloud. Used for authentication with Prefect Cloud.",
)
enable_orchestration_telemetry: bool = Field(
default=True,
description="Whether or not to enable orchestration telemetry.",
)
max_log_size: int = Field(
default=25_000,
description="Maximum size in characters for a single log when sending logs to Prefect Cloud.",
)
ui_url: Optional[str] = Field(
default=None,
description="The URL of the Prefect Cloud UI. If not set, the client will attempt to infer it.",
)
@model_validator(mode="after")
def post_hoc_settings(self) -> Self:
"""refactor on resolution of https://github.com/pydantic/pydantic/issues/9789
we should not be modifying __pydantic_fields_set__ directly, but until we can
define dependencies between defaults in a first-class way, we need clean up
post-hoc default assignments to keep set/unset fields correct after instantiation.
"""
if self.ui_url is None:
self.ui_url = default_cloud_ui_url(self)
self.__pydantic_fields_set__.remove("ui_url")
return self
|
CloudSettings
|
python
|
doocs__leetcode
|
solution/1800-1899/1814.Count Nice Pairs in an Array/Solution.py
|
{
"start": 0,
"end": 349
}
|
class ____:
def countNicePairs(self, nums: List[int]) -> int:
def rev(x):
y = 0
while x:
y = y * 10 + x % 10
x //= 10
return y
cnt = Counter(x - rev(x) for x in nums)
mod = 10**9 + 7
return sum(v * (v - 1) // 2 for v in cnt.values()) % mod
|
Solution
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/request.py
|
{
"start": 213,
"end": 6691
}
|
class ____(object):
"""
Convenience mixin for classes who implement a :meth:`urlopen` method, such
as :class:`urllib3.HTTPConnectionPool` and
:class:`urllib3.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(
self,
method,
url,
body=None,
headers=None,
encode_multipart=True,
multipart_boundary=None,
**kw
): # Abstract
raise NotImplementedError(
"Classes extending RequestMethods must implement "
"their own ``urlopen`` method."
)
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
urlopen_kw["request_url"] = url
if method in self._encode_url_methods:
return self.request_encode_url(
method, url, fields=fields, headers=headers, **urlopen_kw
)
else:
return self.request_encode_body(
method, url, fields=fields, headers=headers, **urlopen_kw
)
def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if headers is None:
headers = self.headers
extra_kw = {"headers": headers}
extra_kw.update(urlopen_kw)
if fields:
url += "?" + urlencode(fields)
return self.urlopen(method, url, **extra_kw)
def request_encode_body(
self,
method,
url,
fields=None,
headers=None,
encode_multipart=True,
multipart_boundary=None,
**urlopen_kw
):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:func:`urllib3.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:func:`urllib.parse.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {"headers": {}}
if fields:
if "body" in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one."
)
if encode_multipart:
body, content_type = encode_multipart_formdata(
fields, boundary=multipart_boundary
)
else:
body, content_type = (
urlencode(fields),
"application/x-www-form-urlencoded",
)
extra_kw["body"] = body
extra_kw["headers"] = {"Content-Type": content_type}
extra_kw["headers"].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw)
if not six.PY2:
class RequestModule(sys.modules[__name__].__class__):
def __call__(self, *args, **kwargs):
"""
If user tries to call this module directly urllib3 v2.x style raise an error to the user
suggesting they may need urllib3 v2
"""
raise TypeError(
"'module' object is not callable\n"
"urllib3.request() method is not supported in this release, "
"upgrade to urllib3 v2 to use it\n"
"see https://urllib3.readthedocs.io/en/stable/v2-migration-guide.html"
)
sys.modules[__name__].__class__ = RequestModule
|
RequestMethods
|
python
|
readthedocs__readthedocs.org
|
readthedocs/core/db.py
|
{
"start": 158,
"end": 1011
}
|
class ____:
"""
Router to map Django applications to a specific database.
:py:attr:`apps_to_db` is used to map an application to a database,
if an application isn't listed here, it will use the ``default`` database.
"""
def __init__(self):
self.apps_to_db = defaultdict(lambda: "default")
self.apps_to_db.update({"telemetry": "telemetry"})
def db_for_read(self, model, **hints):
return self.apps_to_db[model._meta.app_label]
def db_for_write(self, model, **hints):
return self.apps_to_db[model._meta.app_label]
def allow_relation(self, obj1, obj2, **hints):
return self.apps_to_db[obj1._meta.app_label] == self.apps_to_db[obj2._meta.app_label]
def allow_migrate(self, db, app_label, model_name=None, **hints):
return self.apps_to_db[app_label] == db
|
MapAppsRouter
|
python
|
django-haystack__django-haystack
|
haystack/views.py
|
{
"start": 4355,
"end": 6902
}
|
class ____(SearchView):
def __init__(self, *args, **kwargs):
# Needed to switch out the default form class.
if kwargs.get("form_class") is None:
kwargs["form_class"] = FacetedSearchForm
super().__init__(*args, **kwargs)
def build_form(self, form_kwargs=None):
if form_kwargs is None:
form_kwargs = {}
# This way the form can always receive a list containing zero or more
# facet expressions:
form_kwargs["selected_facets"] = self.request.GET.getlist("selected_facets")
return super().build_form(form_kwargs)
def extra_context(self):
extra = super().extra_context()
extra["request"] = self.request
extra["facets"] = self.results.facet_counts()
return extra
def basic_search(
request,
template="search/search.html",
load_all=True,
form_class=ModelSearchForm,
searchqueryset=None,
extra_context=None,
results_per_page=None,
):
"""
A more traditional view that also demonstrate an alternative
way to use Haystack.
Useful as an example of for basing heavily custom views off of.
Also has the benefit of thread-safety, which the ``SearchView`` class may
not be.
Template:: ``search/search.html``
Context::
* form
An instance of the ``form_class``. (default: ``ModelSearchForm``)
* page
The current page of search results.
* paginator
A paginator instance for the results.
* query
The query received by the form.
"""
query = ""
results = EmptySearchQuerySet()
if request.GET.get("q"):
form = form_class(request.GET, searchqueryset=searchqueryset, load_all=load_all)
if form.is_valid():
query = form.cleaned_data["q"]
results = form.search()
else:
form = form_class(searchqueryset=searchqueryset, load_all=load_all)
paginator = Paginator(results, results_per_page or RESULTS_PER_PAGE)
try:
page = paginator.page(int(request.GET.get("page", 1)))
except InvalidPage:
raise Http404("No such page of results!")
context = {
"form": form,
"page": page,
"paginator": paginator,
"query": query,
"suggestion": None,
}
if results.query.backend.include_spelling:
context["suggestion"] = form.get_suggestion()
if extra_context:
context.update(extra_context)
return render(request, template, context)
|
FacetedSearchView
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_quantization.py
|
{
"start": 74314,
"end": 74833
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
# TODO: self.fc should be self.conv
|
ConvReluConvModel
|
python
|
ray-project__ray
|
python/ray/llm/_internal/common/utils/cloud_utils.py
|
{
"start": 1008,
"end": 1093
}
|
class ____(BaseModelExtended):
bucket_uri: str
destination_path: str
|
ExtraFiles
|
python
|
tiangolo__fastapi
|
docs_src/response_model/tutorial001_py39.py
|
{
"start": 109,
"end": 556
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: list[str] = []
@app.post("/items/", response_model=Item)
async def create_item(item: Item) -> Any:
return item
@app.get("/items/", response_model=list[Item])
async def read_items() -> Any:
return [
{"name": "Portal Gun", "price": 42.0},
{"name": "Plumbus", "price": 32.0},
]
|
Item
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance6.py
|
{
"start": 1731,
"end": 1980
}
|
class ____(ParentD[int]): ...
def func7(a: ParentD[_T1]) -> _T1 | None:
if isinstance(a, ChildD1):
reveal_type(a, expected_text="ChildD1[_T1@func7]")
elif isinstance(a, ChildD2):
reveal_type(a, expected_text="ChildD2")
|
ChildD2
|
python
|
great-expectations__great_expectations
|
tests/datasource/fluent/test_sql_datasources.py
|
{
"start": 3459,
"end": 14115
}
|
class ____:
def test_kwargs_passed_to_create_engine(
self,
create_engine_spy: mock.MagicMock, # noqa: TID251 # FIXME CoP
monkeypatch: pytest.MonkeyPatch,
ephemeral_context_with_defaults: EphemeralDataContext,
ds_kwargs: dict,
filter_gx_datasource_warnings: None,
):
monkeypatch.setenv("MY_CONN_STR", "sqlite:///")
context = ephemeral_context_with_defaults
ds = context.data_sources.add_or_update_sql(name="my_datasource", **ds_kwargs)
print(ds)
ds.test_connection()
create_engine_spy.assert_called_once_with(
"sqlite:///",
**{
**ds.dict(include={"kwargs"}, exclude_unset=False)["kwargs"],
**ds_kwargs.get("kwargs", {}),
},
)
def test_ds_config_passed_to_gx_sqlalchemy_execution_engine(
self,
gx_sqlalchemy_execution_engine_spy: mock.MagicMock, # noqa: TID251 # FIXME CoP
monkeypatch: pytest.MonkeyPatch,
ephemeral_context_with_defaults: EphemeralDataContext,
ds_kwargs: dict,
filter_gx_datasource_warnings: None,
):
monkeypatch.setenv("MY_CONN_STR", "sqlite:///")
context = ephemeral_context_with_defaults
ds = context.data_sources.add_or_update_sql(name="my_datasource", **ds_kwargs)
print(ds)
gx_execution_engine: SqlAlchemyExecutionEngine = ds.get_execution_engine()
print(f"{gx_execution_engine=}")
expected_args: dict[str, Any] = {
# kwargs that we expect are passed to SqlAlchemyExecutionEngine
# including datasource field default values
**ds.dict(
exclude_unset=False,
exclude={"kwargs", *ds_kwargs.keys(), *ds._get_exec_engine_excludes()},
),
**{k: v for k, v in ds_kwargs.items() if k not in ["kwargs"]},
**ds_kwargs.get("kwargs", {}),
# config substitution should have been performed
**ds.dict(include={"connection_string"}, config_provider=ds._config_provider),
}
assert "create_temp_table" in expected_args
print(f"\nExpected SqlAlchemyExecutionEngine arguments:\n{pf(expected_args)}")
gx_sqlalchemy_execution_engine_spy.assert_called_once_with(**expected_args)
@pytest.mark.unit
def test_table_quoted_name_type_does_not_exist(
mocker,
):
"""
DBMS entity names (table, column, etc.) must adhere to correct case insensitivity standards. All upper case is
standard for Oracle, DB2, and Snowflake, while all lowercase is standard for SQLAlchemy; hence, proper conversion to
quoted names must occur. This test ensures that mechanism for detection of non-existent table_nam" works correctly.
""" # noqa: E501 # FIXME CoP
table_names_in_dbms_schema: list[str] = [
"table_name_0",
"table_name_1",
"table_name_2",
"table_name_3",
]
with mock.patch(
"great_expectations.datasource.fluent.sql_datasource.TableAsset.datasource",
new_callable=mock.PropertyMock,
return_value=SQLDatasource(
name="my_snowflake_datasource",
connection_string="snowflake://<user_login_name>:<password>@<account_identifier>/<database_name>/<schema_name>?warehouse=<warehouse_name>&role=<role_name>",
),
):
table_asset = TableAsset(
name="my_table_asset",
table_name="nonexistent_table_name",
schema_name="my_schema",
)
assert table_asset.table_name not in table_names_in_dbms_schema
@pytest.mark.unit
def test_table_quoted_name_type_all_upper_case_normalizion_is_noop():
"""
DBMS entity names (table, column, etc.) must adhere to correct case insensitivity standards. All upper case is
standard for Oracle, DB2, and Snowflake, while all lowercase is standard for SQLAlchemy; hence, proper conversion to
quoted names must occur. This test ensures that all upper case entity usage does not undergo any conversion.
""" # noqa: E501 # FIXME CoP
table_names_in_dbms_schema: list[str] = [
"ACTORS",
"ARTISTS",
"ATHLETES",
"BUSINESS_PEOPLE",
"HEALTHCARE_WORKERS",
"ENGINEERS",
"LAWYERS",
"MUSICIANS",
"SCIENTISTS",
"LITERARY_PROFESSIONALS",
]
asset_name: str
table_name: str
with mock.patch(
"great_expectations.datasource.fluent.sql_datasource.TableAsset.datasource",
new_callable=mock.PropertyMock,
return_value=SQLDatasource(
name="my_snowflake_datasource",
connection_string="snowflake://<user_login_name>:<password>@<account_identifier>/<database_name>/<schema_name>?warehouse=<warehouse_name>&role=<role_name>",
),
):
for table_name in table_names_in_dbms_schema:
asset_name = f"{table_name}_asset"
table_asset = TableAsset(
name=asset_name,
table_name=table_name,
schema_name="my_schema",
)
assert str(table_asset.table_name) == table_name
assert str(table_asset.table_name.casefold()) != table_name
assert isinstance(table_asset.table_name, sqlalchemy.quoted_name)
assert table_asset.table_name in table_names_in_dbms_schema
@pytest.mark.unit
def test_table_quoted_name_type_all_lower_case_normalizion_full():
"""
DBMS entity names (table, column, etc.) must adhere to correct case insensitivity standards. All upper case is
standard for Oracle, DB2, and Snowflake, while all lowercase is standard for SQLAlchemy; hence, proper conversion to
quoted names must occur. This test ensures that all lower case entity usage undergo conversion to quoted literals.
""" # noqa: E501 # FIXME CoP
table_names_in_dbms_schema: list[str] = [
"actors",
"artists",
"athletes",
"business_people",
"healthcare_workers",
"engineers",
"lawyers",
"musicians",
"scientists",
"literary_professionals",
]
name: str
quoted_table_names: list[sqlalchemy.quoted_name] = [
sqlalchemy.quoted_name(value="actors", quote=True),
sqlalchemy.quoted_name(value="artists", quote=True),
sqlalchemy.quoted_name(value="athletes", quote=True),
sqlalchemy.quoted_name(value="business_people", quote=True),
sqlalchemy.quoted_name(value="healthcare_workers", quote=True),
sqlalchemy.quoted_name(value="engineers", quote=True),
sqlalchemy.quoted_name(value="lawyers", quote=True),
sqlalchemy.quoted_name(value="musicians", quote=True),
sqlalchemy.quoted_name(value="scientists", quote=True),
sqlalchemy.quoted_name(value="literary_professionals", quote=True),
]
asset_name: str
table_name: str
with mock.patch(
"great_expectations.datasource.fluent.sql_datasource.TableAsset.datasource",
new_callable=mock.PropertyMock,
return_value=SQLDatasource(
name="my_snowflake_datasource",
connection_string="snowflake://<user_login_name>:<password>@<account_identifier>/<database_name>/<schema_name>?warehouse=<warehouse_name>&role=<role_name>",
),
):
for table_name in table_names_in_dbms_schema:
asset_name = f"{table_name}_asset"
table_asset = TableAsset(
name=asset_name,
table_name=table_name,
schema_name="my_schema",
)
assert str(table_asset.table_name) == table_name
assert str(table_asset.table_name.casefold()) == table_name
assert isinstance(table_asset.table_name, sqlalchemy.quoted_name)
assert table_asset.table_name in table_names_in_dbms_schema
assert table_asset.table_name in quoted_table_names
@pytest.mark.big
@pytest.mark.parametrize(
["connection_string", "suggested_datasource_class"],
[
("gregshift://", None),
("sqlite:///", "SqliteDatasource"),
("snowflake+pyodbc://", "SnowflakeDatasource"),
("postgresql+psycopg2://bob:secret@localhost:5432/my_db", "PostgresDatasource"),
("${MY_PG_CONN_STR}", "PostgresDatasource"),
("databricks://", "DatabricksSQLDatasource"),
],
)
def test_specific_datasource_warnings(
create_engine_fake: None,
ephemeral_context_with_defaults: EphemeralDataContext,
monkeypatch: pytest.MonkeyPatch,
connection_string: str,
suggested_datasource_class: str | None,
):
"""
This test ensures that a warning is raised when a specific datasource class is suggested.
"""
context = ephemeral_context_with_defaults
monkeypatch.setenv("MY_PG_CONN_STR", "postgresql://bob:secret@localhost:5432/bobs_db")
if suggested_datasource_class:
with pytest.warns(GxDatasourceWarning, match=suggested_datasource_class):
context.data_sources.add_sql(name="my_datasource", connection_string=connection_string)
else:
with warnings.catch_warnings():
context.data_sources.add_sql(
name="my_datasource", connection_string=connection_string
).test_connection()
@pytest.mark.unit
@pytest.mark.parametrize(
["input_", "expected_output", "quote_characters"],
[
("my_schema", "my_schema", DEFAULT_INITIAL_QUOTE_CHARACTERS),
("MY_SCHEMA", "my_schema", DEFAULT_INITIAL_QUOTE_CHARACTERS),
("My_Schema", "my_schema", DEFAULT_INITIAL_QUOTE_CHARACTERS),
('"my_schema"', '"my_schema"', DEFAULT_INITIAL_QUOTE_CHARACTERS),
('"MY_SCHEMA"', '"MY_SCHEMA"', DEFAULT_INITIAL_QUOTE_CHARACTERS),
('"My_Schema"', '"My_Schema"', DEFAULT_INITIAL_QUOTE_CHARACTERS),
("'my_schema'", "'my_schema'", DEFAULT_INITIAL_QUOTE_CHARACTERS),
("'MY_SCHEMA'", "'MY_SCHEMA'", DEFAULT_INITIAL_QUOTE_CHARACTERS),
("'My_Schema'", "'My_Schema'", DEFAULT_INITIAL_QUOTE_CHARACTERS),
(None, None, DEFAULT_INITIAL_QUOTE_CHARACTERS),
("", "", DEFAULT_INITIAL_QUOTE_CHARACTERS),
("`My_Schema`", "`My_Schema`", DEFAULT_INITIAL_QUOTE_CHARACTERS),
("'My_Schema'", "'my_schema'", ("`")),
("[My_Schema]", "[My_Schema]", DEFAULT_INITIAL_QUOTE_CHARACTERS),
],
ids=lambda x: str(x),
)
def test_to_lower_if_not_quoted(
input_: str | None, expected_output: str | None, quote_characters: tuple[str, ...]
):
assert to_lower_if_not_quoted(input_, quote_characters=quote_characters) == expected_output
@pytest.mark.unit
|
TestConfigPasstrough
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/combinatory_ports.py
|
{
"start": 671,
"end": 807
}
|
class ____(Base):
def __init__(self, base: Base) -> None:
self.a: Base = base
def method(self):
self.a.method()
|
A
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/many_to_many/tutorial002.py
|
{
"start": 616,
"end": 3203
}
|
class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
teams: List[Team] = Relationship(back_populates="heroes", link_model=HeroTeamLink)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond",
secret_name="Dive Wilson",
teams=[team_z_force, team_preventers],
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
teams=[team_preventers],
)
hero_spider_boy = Hero(
name="Spider-Boy", secret_name="Pedro Parqueador", teams=[team_preventers]
)
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Deadpond:", hero_deadpond)
print("Deadpond teams:", hero_deadpond.teams)
print("Rusty-Man:", hero_rusty_man)
print("Rusty-Man Teams:", hero_rusty_man.teams)
print("Spider-Boy:", hero_spider_boy)
print("Spider-Boy Teams:", hero_spider_boy.teams)
def update_heroes():
with Session(engine) as session:
hero_spider_boy = session.exec(
select(Hero).where(Hero.name == "Spider-Boy")
).one()
team_z_force = session.exec(select(Team).where(Team.name == "Z-Force")).one()
team_z_force.heroes.append(hero_spider_boy)
session.add(team_z_force)
session.commit()
print("Updated Spider-Boy's Teams:", hero_spider_boy.teams)
print("Z-Force heroes:", team_z_force.heroes)
hero_spider_boy.teams.remove(team_z_force)
session.add(team_z_force)
session.commit()
print("Reverted Z-Force's heroes:", team_z_force.heroes)
print("Reverted Spider-Boy's teams:", hero_spider_boy.teams)
def main():
create_db_and_tables()
create_heroes()
update_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
networkx__networkx
|
networkx/algorithms/assortativity/tests/test_mixing.py
|
{
"start": 144,
"end": 1134
}
|
class ____(BaseTestDegreeMixing):
def test_degree_mixing_dict_undirected(self):
d = nx.degree_mixing_dict(self.P4)
d_result = {1: {2: 2}, 2: {1: 2, 2: 2}}
assert d == d_result
def test_degree_mixing_dict_undirected_normalized(self):
d = nx.degree_mixing_dict(self.P4, normalized=True)
d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}}
assert d == d_result
def test_degree_mixing_dict_directed(self):
d = nx.degree_mixing_dict(self.D)
d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}}
assert d == d_result
def test_degree_mixing_dict_multigraph(self):
d = nx.degree_mixing_dict(self.M)
d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}}
assert d == d_result
def test_degree_mixing_dict_weighted(self):
d = nx.degree_mixing_dict(self.W, weight="weight")
d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}}
assert d == d_result
|
TestDegreeMixingDict
|
python
|
pennersr__django-allauth
|
allauth/headless/tokens/strategies/sessions.py
|
{
"start": 239,
"end": 817
}
|
class ____(AbstractTokenStrategy):
def create_session_token(self, request: HttpRequest) -> str:
if not request.session.session_key:
request.session.save()
key = request.session.session_key
# We did save
assert isinstance(key, str) # nosec
return key
def lookup_session(self, session_token: str) -> typing.Optional[SessionBase]:
session_key = session_token
if sessionkit.session_store().exists(session_key):
return sessionkit.session_store(session_key)
return None
|
SessionTokenStrategy
|
python
|
Pylons__pyramid
|
src/pyramid/httpexceptions.py
|
{
"start": 36083,
"end": 36544
}
|
class ____(HTTPServerError):
"""
subclass of :class:`~HTTPServerError`
This indicates that the server is currently unable to handle the
request due to a temporary overloading or maintenance of the server.
code: 503, title: Service Unavailable
"""
code = 503
title = 'Service Unavailable'
explanation = (
'The server is currently unavailable. '
'Please try again at a later time.'
)
|
HTTPServiceUnavailable
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-zendesk-talk/components.py
|
{
"start": 1313,
"end": 2041
}
|
class ____(DeclarativeAuthenticator):
config: Mapping[str, Any]
legacy_basic_auth: BasicHttpAuthenticator
basic_auth: BasicHttpAuthenticator
oauth: BearerAuthenticator
def __new__(cls, legacy_basic_auth, basic_auth, oauth, config, *args, **kwargs):
credentials = config.get("credentials", {})
if config.get("access_token", {}) and config.get("email", {}):
return legacy_basic_auth
elif credentials["auth_type"] == "api_token":
return basic_auth
elif credentials["auth_type"] == "oauth2.0":
return oauth
else:
raise Exception(f"Missing valid authenticator for auth_type: {credentials['auth_type']}")
|
ZendeskTalkAuthenticator
|
python
|
miyuchina__mistletoe
|
test/test_latex_renderer.py
|
{
"start": 5154,
"end": 5454
}
|
class ____(TestCase):
def test_html_entity(self):
self.assertIn('hello \\& goodbye', markdown('hello & goodbye', LaTeXRenderer))
def test_html_entity_in_link_target(self):
self.assertIn('\\href{foo}{hello}', markdown('[hello](foo)', LaTeXRenderer))
|
TestHtmlEntity
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_daytime.py
|
{
"start": 2247,
"end": 5362
}
|
class ____(ColumnMapExpectation):
"""Expect the provided timestamp is daytime at the given GPS coordinate (latitude, longitude)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_daytime": [
"2022-04-01 12:00:00",
"2022-04-02 14:31:14",
"2022-04-03 09:01:12",
"2021-12-01 13:45:32",
"2021-11-02 12:01:01",
],
"some_other": [
"2022-04-01 12:00:00",
"2022-04-02 14:31:14",
"2022-04-03 09:01:12",
"2021-12-01 13:45:32",
"2021-11-02 21:01:01",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "all_daytime",
"lat": "47.458593",
"lon": "19.030024",
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "some_other",
"lat": "47.458593",
"lon": "19.030024",
"mostly": 0.9,
},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.daytime"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"lat",
"lon",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["ephem"],
}
success_keys = (
"lat",
"lon",
"mostly",
)
if __name__ == "__main__":
ExpectColumnValuesToBeDaytime().print_diagnostic_checklist()
|
ExpectColumnValuesToBeDaytime
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_glacier.py
|
{
"start": 1428,
"end": 2291
}
|
class ____:
op_class: type[AwsBaseOperator]
default_op_kwargs: dict[str, Any]
def test_base_aws_op_attributes(self):
op = self.op_class(**self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = self.op_class(
**self.default_op_kwargs,
aws_conn_id="aws-test-custom-conn",
region_name="eu-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == "aws-test-custom-conn"
assert op.hook._region_name == "eu-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
|
BaseGlacierOperatorsTests
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
|
{
"start": 9880,
"end": 9987
}
|
class ____(IncrementalShopifyGraphQlBulkStream):
bulk_query: InventoryItem = InventoryItem
|
InventoryItems
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_probe.py
|
{
"start": 383,
"end": 13723
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_exec': 'V1ExecAction',
'failure_threshold': 'int',
'grpc': 'V1GRPCAction',
'http_get': 'V1HTTPGetAction',
'initial_delay_seconds': 'int',
'period_seconds': 'int',
'success_threshold': 'int',
'tcp_socket': 'V1TCPSocketAction',
'termination_grace_period_seconds': 'int',
'timeout_seconds': 'int'
}
attribute_map = {
'_exec': 'exec',
'failure_threshold': 'failureThreshold',
'grpc': 'grpc',
'http_get': 'httpGet',
'initial_delay_seconds': 'initialDelaySeconds',
'period_seconds': 'periodSeconds',
'success_threshold': 'successThreshold',
'tcp_socket': 'tcpSocket',
'termination_grace_period_seconds': 'terminationGracePeriodSeconds',
'timeout_seconds': 'timeoutSeconds'
}
def __init__(self, _exec=None, failure_threshold=None, grpc=None, http_get=None, initial_delay_seconds=None, period_seconds=None, success_threshold=None, tcp_socket=None, termination_grace_period_seconds=None, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
"""V1Probe - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.__exec = None
self._failure_threshold = None
self._grpc = None
self._http_get = None
self._initial_delay_seconds = None
self._period_seconds = None
self._success_threshold = None
self._tcp_socket = None
self._termination_grace_period_seconds = None
self._timeout_seconds = None
self.discriminator = None
if _exec is not None:
self._exec = _exec
if failure_threshold is not None:
self.failure_threshold = failure_threshold
if grpc is not None:
self.grpc = grpc
if http_get is not None:
self.http_get = http_get
if initial_delay_seconds is not None:
self.initial_delay_seconds = initial_delay_seconds
if period_seconds is not None:
self.period_seconds = period_seconds
if success_threshold is not None:
self.success_threshold = success_threshold
if tcp_socket is not None:
self.tcp_socket = tcp_socket
if termination_grace_period_seconds is not None:
self.termination_grace_period_seconds = termination_grace_period_seconds
if timeout_seconds is not None:
self.timeout_seconds = timeout_seconds
@property
def _exec(self):
"""Gets the _exec of this V1Probe. # noqa: E501
:return: The _exec of this V1Probe. # noqa: E501
:rtype: V1ExecAction
"""
return self.__exec
@_exec.setter
def _exec(self, _exec):
"""Sets the _exec of this V1Probe.
:param _exec: The _exec of this V1Probe. # noqa: E501
:type: V1ExecAction
"""
self.__exec = _exec
@property
def failure_threshold(self):
"""Gets the failure_threshold of this V1Probe. # noqa: E501
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. # noqa: E501
:return: The failure_threshold of this V1Probe. # noqa: E501
:rtype: int
"""
return self._failure_threshold
@failure_threshold.setter
def failure_threshold(self, failure_threshold):
"""Sets the failure_threshold of this V1Probe.
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. # noqa: E501
:param failure_threshold: The failure_threshold of this V1Probe. # noqa: E501
:type: int
"""
self._failure_threshold = failure_threshold
@property
def grpc(self):
"""Gets the grpc of this V1Probe. # noqa: E501
:return: The grpc of this V1Probe. # noqa: E501
:rtype: V1GRPCAction
"""
return self._grpc
@grpc.setter
def grpc(self, grpc):
"""Sets the grpc of this V1Probe.
:param grpc: The grpc of this V1Probe. # noqa: E501
:type: V1GRPCAction
"""
self._grpc = grpc
@property
def http_get(self):
"""Gets the http_get of this V1Probe. # noqa: E501
:return: The http_get of this V1Probe. # noqa: E501
:rtype: V1HTTPGetAction
"""
return self._http_get
@http_get.setter
def http_get(self, http_get):
"""Sets the http_get of this V1Probe.
:param http_get: The http_get of this V1Probe. # noqa: E501
:type: V1HTTPGetAction
"""
self._http_get = http_get
@property
def initial_delay_seconds(self):
"""Gets the initial_delay_seconds of this V1Probe. # noqa: E501
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
:return: The initial_delay_seconds of this V1Probe. # noqa: E501
:rtype: int
"""
return self._initial_delay_seconds
@initial_delay_seconds.setter
def initial_delay_seconds(self, initial_delay_seconds):
"""Sets the initial_delay_seconds of this V1Probe.
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
:param initial_delay_seconds: The initial_delay_seconds of this V1Probe. # noqa: E501
:type: int
"""
self._initial_delay_seconds = initial_delay_seconds
@property
def period_seconds(self):
"""Gets the period_seconds of this V1Probe. # noqa: E501
How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. # noqa: E501
:return: The period_seconds of this V1Probe. # noqa: E501
:rtype: int
"""
return self._period_seconds
@period_seconds.setter
def period_seconds(self, period_seconds):
"""Sets the period_seconds of this V1Probe.
How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. # noqa: E501
:param period_seconds: The period_seconds of this V1Probe. # noqa: E501
:type: int
"""
self._period_seconds = period_seconds
@property
def success_threshold(self):
"""Gets the success_threshold of this V1Probe. # noqa: E501
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. # noqa: E501
:return: The success_threshold of this V1Probe. # noqa: E501
:rtype: int
"""
return self._success_threshold
@success_threshold.setter
def success_threshold(self, success_threshold):
"""Sets the success_threshold of this V1Probe.
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. # noqa: E501
:param success_threshold: The success_threshold of this V1Probe. # noqa: E501
:type: int
"""
self._success_threshold = success_threshold
@property
def tcp_socket(self):
"""Gets the tcp_socket of this V1Probe. # noqa: E501
:return: The tcp_socket of this V1Probe. # noqa: E501
:rtype: V1TCPSocketAction
"""
return self._tcp_socket
@tcp_socket.setter
def tcp_socket(self, tcp_socket):
"""Sets the tcp_socket of this V1Probe.
:param tcp_socket: The tcp_socket of this V1Probe. # noqa: E501
:type: V1TCPSocketAction
"""
self._tcp_socket = tcp_socket
@property
def termination_grace_period_seconds(self):
"""Gets the termination_grace_period_seconds of this V1Probe. # noqa: E501
Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. # noqa: E501
:return: The termination_grace_period_seconds of this V1Probe. # noqa: E501
:rtype: int
"""
return self._termination_grace_period_seconds
@termination_grace_period_seconds.setter
def termination_grace_period_seconds(self, termination_grace_period_seconds):
"""Sets the termination_grace_period_seconds of this V1Probe.
Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. # noqa: E501
:param termination_grace_period_seconds: The termination_grace_period_seconds of this V1Probe. # noqa: E501
:type: int
"""
self._termination_grace_period_seconds = termination_grace_period_seconds
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this V1Probe. # noqa: E501
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
:return: The timeout_seconds of this V1Probe. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this V1Probe.
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1Probe. # noqa: E501
:type: int
"""
self._timeout_seconds = timeout_seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Probe):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Probe):
return True
return self.to_dict() != other.to_dict()
|
V1Probe
|
python
|
requests__requests-oauthlib
|
tests/test_compliance_fixes.py
|
{
"start": 3447,
"end": 4583
}
|
class ____(TestCase):
def setUp(self):
mocker = requests_mock.Mocker()
mocker.post(
"https://login.mailchimp.com/oauth2/token",
json={"access_token": "mailchimp", "expires_in": 0, "scope": None},
)
mocker.start()
self.addCleanup(mocker.stop)
mailchimp = OAuth2Session("someclientid", redirect_uri="https://i.b")
self.session = mailchimp_compliance_fix(mailchimp)
def test_fetch_access_token(self):
token = self.session.fetch_token(
"https://login.mailchimp.com/oauth2/token",
client_secret="someclientsecret",
authorization_response="https://i.b/?code=hello",
)
# Times should be close
approx_expires_at = round(time.time()) + 3600
actual_expires_at = token.pop("expires_at")
self.assertAlmostEqual(actual_expires_at, approx_expires_at, places=2)
# Other token values exact
self.assertEqual(token, {"access_token": "mailchimp", "expires_in": 3600})
# And no scope at all
self.assertNotIn("scope", token)
|
MailChimpComplianceFixTest
|
python
|
keras-team__keras
|
keras/src/optimizers/adamw.py
|
{
"start": 171,
"end": 3785
}
|
class ____(adam.Adam):
"""Optimizer that implements the AdamW algorithm.
AdamW optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments with an added
method to decay weights per the techniques discussed in the paper,
'Decoupled Weight Decay Regularization' by
[Loshchilov, Hutter et al., 2019](https://arxiv.org/abs/1711.05101).
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the underlying Adam method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates.
Defaults to `0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just
before Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to 1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond".
Defaults to `False`.
{{base_optimizer_keyword_args}}
References:
- [Loshchilov et al., 2019](https://arxiv.org/abs/1711.05101)
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) for `adam`
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
"""
def __init__(
self,
learning_rate=0.001,
weight_decay=0.004,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamw",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if self.weight_decay is None:
raise ValueError(
"Argument `weight_decay` must be a float. Received: "
"weight_decay=None"
)
AdamW.__doc__ = AdamW.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
AdamW
|
python
|
pydata__xarray
|
asv_bench/benchmarks/repr.py
|
{
"start": 62,
"end": 663
}
|
class ____:
def setup(self):
a = np.arange(0, 100)
data_vars = dict()
for i in a:
data_vars[f"long_variable_name_{i}"] = xr.DataArray(
name=f"long_variable_name_{i}",
data=np.arange(0, 20),
dims=[f"long_coord_name_{i}_x"],
coords={f"long_coord_name_{i}_x": np.arange(0, 20) * 2},
)
self.ds = xr.Dataset(data_vars)
self.ds.attrs = {f"attr_{k}": 2 for k in a}
def time_repr(self):
repr(self.ds)
def time_repr_html(self):
self.ds._repr_html_()
|
Repr
|
python
|
django__django
|
tests/fixtures/models.py
|
{
"start": 1035,
"end": 1377
}
|
class ____(models.Model):
name = models.CharField(max_length=100)
featured = models.ForeignKey(
Article, models.CASCADE, related_name="fixtures_featured_set"
)
articles = models.ManyToManyField(
Article, blank=True, related_name="fixtures_articles_set"
)
def __str__(self):
return self.name
|
Blog
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_operator.py
|
{
"start": 29271,
"end": 29448
}
|
class ____(OperatorPickleTestCase, __TestCase):
module = py_operator
module2 = py_operator
@unittest.skipUnless(c_operator, 'requires _operator')
|
PyPyOperatorPickleTestCase
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/commands/hash.py
|
{
"start": 434,
"end": 1763
}
|
class ____(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
"""
usage = "%prog [options] <file> ..."
ignore_require_venv = True
def add_options(self) -> None:
self.cmd_opts.add_option(
"-a",
"--algorithm",
dest="algorithm",
choices=STRONG_HASHES,
action="store",
default=FAVORITE_HASH,
help="The hash algorithm to use: one of {}".format(
", ".join(STRONG_HASHES)
),
)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: List[str]) -> int:
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
write_output(
"%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm)
)
return SUCCESS
def _hash_of_file(path: str, algorithm: str) -> str:
"""Return the hash digest of a file."""
with open(path, "rb") as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
|
HashCommand
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/input_types.py
|
{
"start": 3177,
"end": 3328
}
|
class ____(GQLInput):
artifact_collection_name: str = Field(alias="artifactCollectionName")
alias: str = Field(max_length=128)
|
ArtifactAliasInput
|
python
|
walkccc__LeetCode
|
solutions/3548. Equal Sum Grid Partition II/3548.py
|
{
"start": 1,
"end": 689
}
|
class ____:
def canPartitionGrid(self, grid: list[list[int]]) -> bool:
summ = sum(map(sum, grid))
def canPartition(grid: list[list[int]]) -> bool:
topSum = 0
seen = set()
for i, row in enumerate(grid):
topSum += sum(row)
botSum = summ - topSum
seen |= set(row)
if topSum - botSum in (0, grid[0][0], grid[0][-1], row[0]):
return True
if len(grid[0]) > 1 and i > 0 and topSum - botSum in seen:
return True
return False
return (canPartition(grid) or
canPartition(grid[::-1]) or
canPartition(list(zip(*grid))[::-1]) or
canPartition(list(zip(*grid))))
|
Solution
|
python
|
apache__airflow
|
dev/breeze/tests/test_publish_docs_to_s3.py
|
{
"start": 974,
"end": 10930
}
|
class ____:
def setup_method(self):
self.publish_docs_to_s3 = S3DocsPublish(
source_dir_path="source_dir_path",
destination_location="destination_location",
exclude_docs="exclude_docs",
dry_run=False,
overwrite=False,
parallelism=1,
)
@patch("os.listdir")
def test_get_all_docs(self, mock_listdir):
mock_listdir.return_value = ["apache-airflow-providers-amazon"]
assert self.publish_docs_to_s3.get_all_docs == ["apache-airflow-providers-amazon"]
def test_get_all_docs_exception(self):
with patch("os.listdir", side_effect=FileNotFoundError):
with pytest.raises(SystemExit):
self.publish_docs_to_s3.get_all_docs()
def test_get_all_excluded_docs(self):
self.publish_docs_to_s3.exclude_docs = "amazon google apache-airflow"
assert self.publish_docs_to_s3.get_all_excluded_docs == ["amazon", "google", "apache-airflow"]
@patch("os.listdir")
def test_get_all_eligible_docs(self, mock_listdir):
mock_listdir.return_value = [
"apache-airflow-providers-amazon",
"apache-airflow-providers-google",
"apache-airflow",
"docker-stack",
"apache-airflow-providers-apache-kafka",
"apache-airflow-providers-apache-cassandra",
"helm-chart",
"apache-airflow-ctl",
]
self.publish_docs_to_s3.exclude_docs = "amazon docker-stack apache.kafka"
assert sorted(self.publish_docs_to_s3.get_all_eligible_docs) == sorted(
[
"apache-airflow-providers-google",
"apache-airflow",
"apache-airflow-providers-apache-cassandra",
"helm-chart",
"apache-airflow-ctl",
]
)
@patch("os.listdir")
def test_get_all_eligible_docs_should_raise_when_empty(self, mock_listdir):
mock_listdir.return_value = [
"apache-airflow-providers-amazon",
"apache-airflow",
"apache-airflow-providers-apache-kafka",
]
self.publish_docs_to_s3.exclude_docs = "amazon apache-airflow apache.kafka"
with pytest.raises(SystemExit):
self.publish_docs_to_s3.get_all_eligible_docs
@pytest.mark.parametrize(
("all_eligible_docs", "doc_exists", "overwrite", "expected_source_dest_mapping"),
[
(
["apache-airflow-providers-amazon", "apache-airflow-providers-google", "apache-airflow"],
False,
False,
[
(
"/tmp/docs-archive/apache-airflow-providers-amazon/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-amazon/1.0.0/",
),
(
"/tmp/docs-archive/apache-airflow-providers-amazon/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-amazon/stable/",
),
(
"/tmp/docs-archive/apache-airflow-providers-google/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-google/1.0.0/",
),
(
"/tmp/docs-archive/apache-airflow-providers-google/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-google/stable/",
),
("/tmp/docs-archive/apache-airflow/1.0.0/", "s3://dummy-docs/docs/apache-airflow/1.0.0/"),
(
"/tmp/docs-archive/apache-airflow/1.0.0/",
"s3://dummy-docs/docs/apache-airflow/stable/",
),
],
),
(
["apache-airflow-providers-amazon", "apache-airflow-providers-google", "apache-airflow"],
True,
False,
[],
),
(
["apache-airflow-providers-amazon", "apache-airflow-providers-google", "apache-airflow"],
True,
True,
[
(
"/tmp/docs-archive/apache-airflow-providers-amazon/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-amazon/1.0.0/",
),
(
"/tmp/docs-archive/apache-airflow-providers-amazon/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-amazon/stable/",
),
(
"/tmp/docs-archive/apache-airflow-providers-google/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-google/1.0.0/",
),
(
"/tmp/docs-archive/apache-airflow-providers-google/1.0.0/",
"s3://dummy-docs/docs/apache-airflow-providers-google/stable/",
),
("/tmp/docs-archive/apache-airflow/1.0.0/", "s3://dummy-docs/docs/apache-airflow/1.0.0/"),
(
"/tmp/docs-archive/apache-airflow/1.0.0/",
"s3://dummy-docs/docs/apache-airflow/stable/",
),
],
),
(
[],
True,
False,
[],
),
],
ids=[
"no_doc_version_exists_in_destination",
"doc_version_exists_in_destination",
"overwrite_existing_doc",
"no_docs_to_publish",
],
)
@patch.object(S3DocsPublish, "run_publish")
@patch("builtins.open", new_callable=mock_open, read_data="1.0.0")
@patch.object(S3DocsPublish, "get_all_eligible_docs", new_callable=PropertyMock)
@patch("os.path.exists")
@patch.object(S3DocsPublish, "doc_exists")
def test_publish_stable_version_docs(
self,
mock_doc_exists,
mock_path_exists,
mock_get_all_eligible_docs,
mock_open,
mock_run_publish,
all_eligible_docs,
doc_exists,
overwrite,
expected_source_dest_mapping,
):
mock_path_exists.return_value = True
mock_doc_exists.return_value = doc_exists
mock_get_all_eligible_docs.return_value = all_eligible_docs
self.publish_docs_to_s3.overwrite = overwrite
self.publish_docs_to_s3.source_dir_path = "/tmp/docs-archive"
self.publish_docs_to_s3.destination_location = "s3://dummy-docs/docs"
mock_run_publish.return_value = MagicMock()
self.publish_docs_to_s3.publish_stable_version_docs()
assert self.publish_docs_to_s3.source_dest_mapping == expected_source_dest_mapping
@pytest.mark.parametrize(
("all_eligible_docs", "doc_exists", "overwrite", "expected_source_dest_mapping"),
[
(
["apache-airflow-providers-amazon", "apache-airflow-providers-google", "apache-airflow"],
False,
False,
[
(
"/tmp/docs-archive/apache-airflow-providers-amazon/",
"s3://dummy-docs/docs/apache-airflow-providers-amazon/",
),
(
"/tmp/docs-archive/apache-airflow-providers-google/",
"s3://dummy-docs/docs/apache-airflow-providers-google/",
),
("/tmp/docs-archive/apache-airflow/", "s3://dummy-docs/docs/apache-airflow/"),
],
),
(
["apache-airflow-providers-amazon", "apache-airflow-providers-google", "apache-airflow"],
True,
False,
[],
),
(
["apache-airflow-providers-amazon", "apache-airflow-providers-google", "apache-airflow"],
True,
True,
[
(
"/tmp/docs-archive/apache-airflow-providers-amazon/",
"s3://dummy-docs/docs/apache-airflow-providers-amazon/",
),
(
"/tmp/docs-archive/apache-airflow-providers-google/",
"s3://dummy-docs/docs/apache-airflow-providers-google/",
),
("/tmp/docs-archive/apache-airflow/", "s3://dummy-docs/docs/apache-airflow/"),
],
),
(
[],
True,
False,
[],
),
],
ids=[
"no_doc_version_exists_in_destination",
"doc_version_exists_in_destination",
"overwrite_existing_doc",
"no_docs_to_publish",
],
)
@patch.object(S3DocsPublish, "run_publish")
@patch.object(S3DocsPublish, "get_all_eligible_docs", new_callable=PropertyMock)
@patch.object(S3DocsPublish, "doc_exists")
def test_publish_all_docs(
self,
mock_doc_exists,
mock_get_all_eligible_docs,
mock_run_publish,
all_eligible_docs,
doc_exists,
overwrite,
expected_source_dest_mapping,
):
mock_doc_exists.return_value = doc_exists
mock_get_all_eligible_docs.return_value = all_eligible_docs
self.publish_docs_to_s3.overwrite = overwrite
self.publish_docs_to_s3.source_dir_path = "/tmp/docs-archive"
self.publish_docs_to_s3.destination_location = "s3://dummy-docs/docs"
mock_run_publish.return_value = MagicMock()
self.publish_docs_to_s3.publish_all_docs()
assert self.publish_docs_to_s3.source_dest_mapping == expected_source_dest_mapping
|
TestPublishDocsToS3
|
python
|
walkccc__LeetCode
|
solutions/3. Longest Substring Without Repeating Characters/3-2.py
|
{
"start": 0,
"end": 435
}
|
class ____:
def lengthOfLongestSubstring(self, s: str) -> int:
ans = 0
# The substring s[j + 1..i] has no repeating characters.
j = -1
# lastSeen[c] := the index of the last time c appeared
lastSeen = {}
for i, c in enumerate(s):
# Update j to lastSeen[c], so the window must start from j + 1.
j = max(j, lastSeen.get(c, -1))
ans = max(ans, i - j)
lastSeen[c] = i
return ans
|
Solution
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/xing/provider.py
|
{
"start": 564,
"end": 1059
}
|
class ____(OAuthProvider):
id = "xing"
name = "Xing"
account_class = XingAccount
oauth_adapter_class = XingOAuthAdapter
def extract_uid(self, data):
return data["id"]
def extract_common_fields(self, data):
return dict(
email=data.get("active_email"),
username=data.get("page_name"),
first_name=data.get("first_name"),
last_name=data.get("last_name"),
)
provider_classes = [XingProvider]
|
XingProvider
|
python
|
explosion__spaCy
|
spacy/pipeline/_edit_tree_internals/schemas.py
|
{
"start": 946,
"end": 1669
}
|
class ____(BaseModel):
__root__: Union[MatchNodeSchema, SubstNodeSchema]
def validate_edit_tree(obj: Dict[str, Any]) -> List[str]:
"""Validate edit tree.
obj (Dict[str, Any]): JSON-serializable data to validate.
RETURNS (List[str]): A list of error messages, if available.
"""
try:
EditTreeSchema.parse_obj(obj)
return []
except ValidationError as e:
errors = e.errors()
data = defaultdict(list)
for error in errors:
err_loc = " -> ".join([str(p) for p in error.get("loc", [])])
data[err_loc].append(error.get("msg"))
return [f"[{loc}] {', '.join(msg)}" for loc, msg in data.items()] # type: ignore[arg-type]
|
EditTreeSchema
|
python
|
django__django
|
tests/schema/models.py
|
{
"start": 4303,
"end": 4544
}
|
class ____(models.Model):
title = models.CharField(max_length=255)
slug2 = models.SlugField(unique=True)
class Meta:
apps = new_apps
db_table = "schema_tag"
# Based on tests/reserved_names/models.py
|
TagUniqueRename
|
python
|
tiangolo__fastapi
|
tests/test_security_api_key_header.py
|
{
"start": 217,
"end": 1944
}
|
class ____(BaseModel):
username: str
def get_current_user(oauth_header: str = Security(api_key)):
user = User(username=oauth_header)
return user
@app.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
return current_user
client = TestClient(app)
def test_security_api_key():
response = client.get("/users/me", headers={"key": "secret"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "secret"}
def test_security_api_key_no_key():
response = client.get("/users/me")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "APIKey"
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"APIKeyHeader": []}],
}
}
},
"components": {
"securitySchemes": {
"APIKeyHeader": {"type": "apiKey", "name": "key", "in": "header"}
}
},
}
|
User
|
python
|
apache__airflow
|
providers/openlineage/src/airflow/providers/openlineage/utils/utils.py
|
{
"start": 27712,
"end": 29744
}
|
class ____(InfoJsonEncodable):
"""Defines encoding DagRun object to JSON."""
includes = [
"conf",
"dag_id",
"data_interval_start",
"data_interval_end",
"external_trigger", # Removed in Airflow 3, use run_type instead
"execution_date", # Airflow 2
"logical_date", # Airflow 3
"run_after", # Airflow 3
"run_id",
"run_type",
"start_date",
"end_date",
]
casts = {
"duration": lambda dagrun: DagRunInfo.duration(dagrun),
"dag_bundle_name": lambda dagrun: DagRunInfo.dag_version_info(dagrun, "bundle_name"),
"dag_bundle_version": lambda dagrun: DagRunInfo.dag_version_info(dagrun, "bundle_version"),
"dag_version_id": lambda dagrun: DagRunInfo.dag_version_info(dagrun, "version_id"),
"dag_version_number": lambda dagrun: DagRunInfo.dag_version_info(dagrun, "version_number"),
}
@classmethod
def duration(cls, dagrun: DagRun) -> float | None:
if not getattr(dagrun, "end_date", None) or not isinstance(dagrun.end_date, datetime.datetime):
return None
if not getattr(dagrun, "start_date", None) or not isinstance(dagrun.start_date, datetime.datetime):
return None
return (dagrun.end_date - dagrun.start_date).total_seconds()
@classmethod
def dag_version_info(cls, dagrun: DagRun, key: str) -> str | int | None:
# AF2 DagRun and AF3 DagRun SDK model (on worker) do not have this information
if not getattr(dagrun, "dag_versions", []):
return None
current_version = dagrun.dag_versions[-1]
if key == "bundle_name":
return current_version.bundle_name
if key == "bundle_version":
return current_version.bundle_version
if key == "version_id":
return str(current_version.id)
if key == "version_number":
return current_version.version_number
raise ValueError(f"Unsupported key: {key}`")
|
DagRunInfo
|
python
|
numba__numba
|
numba/tests/test_cli.py
|
{
"start": 3934,
"end": 8361
}
|
class ____(TestCase):
def setUp(self):
# Mock the entire class, to report valid things,
# then override bits of it locally to check failures etc.
self._patches = []
mock_init = lambda self: None
self._patches.append(mock.patch.object(_GDBTestWrapper, '__init__',
mock_init))
bpath = 'numba.misc.numba_gdbinfo._GDBTestWrapper.gdb_binary'
self._patches.append(mock.patch(bpath, 'PATH_TO_GDB'))
def _patch(fnstr, func):
self._patches.append(mock.patch.object(_GDBTestWrapper, fnstr,
func))
def mock_check_launch(self):
return CompletedProcess('COMMAND STRING', 0)
_patch('check_launch', mock_check_launch)
# NOTE: The Python and NumPy versions are set to something unsupported!
def mock_check_python(self):
return CompletedProcess('COMMAND STRING', 0,
stdout='(3, 2)',
stderr='')
_patch('check_python', mock_check_python)
def mock_check_numpy(self):
return CompletedProcess('COMMAND STRING', 0, stdout='True',
stderr='')
_patch('check_numpy', mock_check_numpy)
def mock_check_numpy_version(self):
return CompletedProcess('COMMAND STRING', 0, stdout='1.15',
stderr='')
_patch('check_numpy_version', mock_check_numpy_version)
# start the patching
for p in self._patches:
p.start()
def tearDown(self):
# stop the patching
for p in self._patches:
p.stop()
def test_valid(self):
collected = collect_gdbinfo()
self.assertEqual(collected.binary_loc, 'PATH_TO_GDB')
extp = os.path.exists(os.path.abspath(collected.extension_loc))
self.assertTrue(extp)
self.assertEqual(collected.py_ver, '3.2')
self.assertEqual(collected.np_ver, '1.15')
self.assertIn('Full', collected.supported)
def test_invalid_binary(self):
def mock_fn(self):
return CompletedProcess('INVALID_BINARY', 1)
with mock.patch.object(_GDBTestWrapper, 'check_launch', mock_fn):
info = collect_gdbinfo()
self.assertIn("Testing gdb binary failed.", info.binary_loc)
self.assertIn("gdb at 'PATH_TO_GDB' does not appear to work",
info.binary_loc)
def test_no_python(self):
def mock_fn(self):
return CompletedProcess('NO PYTHON', 1)
with mock.patch.object(_GDBTestWrapper, 'check_python', mock_fn):
collected = collect_gdbinfo()
self.assertEqual(collected.py_ver, 'No Python support')
self.assertEqual(collected.supported, 'None')
def test_unparsable_python_version(self):
def mock_fn(self):
return CompletedProcess('NO PYTHON', 0, stdout='(NOT A VERSION)')
with mock.patch.object(_GDBTestWrapper, 'check_python', mock_fn):
collected = collect_gdbinfo()
self.assertEqual(collected.py_ver, 'No Python support')
def test_no_numpy(self):
def mock_fn(self):
return CompletedProcess('NO NUMPY', 1)
with mock.patch.object(_GDBTestWrapper, 'check_numpy', mock_fn):
collected = collect_gdbinfo()
self.assertEqual(collected.np_ver, 'No NumPy support')
self.assertEqual(collected.py_ver, '3.2')
self.assertIn('Partial', collected.supported)
def test_no_numpy_version(self):
def mock_fn(self):
return CompletedProcess('NO NUMPY VERSION', 1)
with mock.patch.object(_GDBTestWrapper, 'check_numpy_version', mock_fn):
collected = collect_gdbinfo()
self.assertEqual(collected.np_ver, 'Unknown')
def test_traceback_in_numpy_version(self):
def mock_fn(self):
return CompletedProcess('NO NUMPY VERSION', 0,
stdout='(NOT A VERSION)',
stderr='Traceback')
with mock.patch.object(_GDBTestWrapper, 'check_numpy_version', mock_fn):
collected = collect_gdbinfo()
self.assertEqual(collected.np_ver, 'Unknown')
@linux_only
|
TestGDBCLIInfo
|
python
|
sympy__sympy
|
sympy/diffgeom/diffgeom.py
|
{
"start": 44628,
"end": 46866
}
|
class ____(Expr):
"""Lie derivative with respect to a vector field.
Explanation
===========
The transport operator that defines the Lie derivative is the pushforward of
the field to be derived along the integral curve of the field with respect
to which one derives.
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> from sympy.diffgeom import (LieDerivative, TensorProduct)
>>> fx, fy = R2_r.base_scalars()
>>> e_x, e_y = R2_r.base_vectors()
>>> e_rho, e_theta = R2_p.base_vectors()
>>> dx, dy = R2_r.base_oneforms()
>>> LieDerivative(e_x, fy)
0
>>> LieDerivative(e_x, fx)
1
>>> LieDerivative(e_x, e_x)
0
The Lie derivative of a tensor field by another tensor field is equal to
their commutator:
>>> LieDerivative(e_x, e_rho)
Commutator(e_x, e_rho)
>>> LieDerivative(e_x + e_y, fx)
1
>>> tp = TensorProduct(dx, dy)
>>> LieDerivative(e_x, tp)
LieDerivative(e_x, TensorProduct(dx, dy))
>>> LieDerivative(e_x, tp)
LieDerivative(e_x, TensorProduct(dx, dy))
"""
def __new__(cls, v_field, expr):
expr_form_ord = covariant_order(expr)
if contravariant_order(v_field) != 1 or covariant_order(v_field):
raise ValueError('Lie derivatives are defined only with respect to'
' vector fields. The supplied argument was not a '
'vector field.')
if expr_form_ord > 0:
obj = super().__new__(cls, v_field, expr)
# deprecated assignments
obj._v_field = v_field
obj._expr = expr
return obj
if expr.atoms(BaseVectorField):
return Commutator(v_field, expr)
else:
return v_field.rcall(expr)
@property
def v_field(self):
return self.args[0]
@property
def expr(self):
return self.args[1]
def __call__(self, *args):
v = self.v_field
expr = self.expr
lead_term = v(expr(*args))
rest = Add(*[Mul(*args[:i] + (Commutator(v, args[i]),) + args[i + 1:])
for i in range(len(args))])
return lead_term - rest
|
LieDerivative
|
python
|
dask__distributed
|
distributed/client.py
|
{
"start": 21214,
"end": 23736
}
|
class ____(Exception):
"""Raised when an action with a closed client can't be performed"""
def _handle_print(event):
_, msg = event
if not isinstance(msg, dict):
# someone must have manually logged a print event with a hand-crafted
# payload, rather than by calling worker.print(). In that case simply
# print the payload and hope it works.
print(msg)
return
args = msg.get("args")
if not isinstance(args, tuple):
# worker.print() will always send us a tuple of args, even if it's an
# empty tuple.
raise TypeError(
f"_handle_print: client received non-tuple print args: {args!r}"
)
file = msg.get("file")
if file == 1:
file = sys.stdout
elif file == 2:
file = sys.stderr
elif file is not None:
raise TypeError(
f"_handle_print: client received unsupported file kwarg: {file!r}"
)
print(
*args, sep=msg.get("sep"), end=msg.get("end"), file=file, flush=msg.get("flush")
)
def _handle_warn(event):
_, msg = event
if not isinstance(msg, dict):
# someone must have manually logged a warn event with a hand-crafted
# payload, rather than by calling worker.warn(). In that case simply
# warn the payload and hope it works.
warnings.warn(msg)
else:
if "message" not in msg:
# TypeError makes sense here because it's analogous to calling a
# function without a required positional argument
raise TypeError(
"_handle_warn: client received a warn event missing the required "
'"message" argument.'
)
if "category" in msg:
category = pickle.loads(msg["category"])
else:
category = None
warnings.warn(
pickle.loads(msg["message"]),
category=category,
)
def _maybe_call_security_loader(address):
security_loader_term = dask.config.get("distributed.client.security-loader")
if security_loader_term:
try:
security_loader = import_term(security_loader_term)
except Exception as exc:
raise ImportError(
f"Failed to import `{security_loader_term}` configured at "
f"`distributed.client.security-loader` - is this module "
f"installed?"
) from exc
return security_loader({"address": address})
return None
|
ClosedClientError
|
python
|
allegroai__clearml
|
examples/frameworks/ignite/cifar_ignite.py
|
{
"start": 3215,
"end": 7140
}
|
class ____(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3)
self.conv2 = nn.Conv2d(6, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 6 * 6, 120)
self.fc2 = nn.Linear(120, 84)
self.dorpout = nn.Dropout(p=params.get('dropout', 0.25))
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 6 * 6)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(self.dorpout(x))
return x
# Training
def run(epochs, lr, momentum, log_interval):
device = "cuda" if torch.cuda.is_available() else "cpu"
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(net, optimizer, criterion, device=device)
trainer.logger = setup_logger("trainer")
val_metrics = {"accuracy": Accuracy(),"loss": Loss(criterion), "recall": Recall()}
evaluator = create_supervised_evaluator(net, metrics=val_metrics, device=device)
evaluator.logger = setup_logger("evaluator")
# Attach handler to plot trainer's loss every 100 iterations
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=params.get('loss_report')),
tag="training",
output_transform=lambda loss: {"loss": loss},
)
# Attach handler to dump evaluator's metrics every epoch completed
for tag, evaluator in [("training", trainer), ("validation", evaluator)]:
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names="all",
global_step_transform=global_step_from_engine(trainer),
)
# Attach function to build debug images and report every epoch end
tb_logger.attach(
evaluator,
log_handler=predictions_gt_images_handler,
event_name=Events.EPOCH_COMPLETED(once=1),
);
desc = "ITERATION - loss: {:.2f}"
pbar = tqdm(initial=0, leave=False, total=len(trainloader), desc=desc.format(0))
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
pbar.desc = desc.format(engine.state.output)
pbar.update(log_interval)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
pbar.refresh()
evaluator.run(trainloader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["loss"]
tqdm.write(
"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
engine.state.epoch, avg_accuracy, avg_nll
)
)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(testloader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["loss"]
tqdm.write(
"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
engine.state.epoch, avg_accuracy, avg_nll
)
)
pbar.n = pbar.last_print_n = 0
@trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
def log_time():
tqdm.write(
"{} took {} seconds".format(trainer.last_event_name.name, trainer.state.times[trainer.last_event_name.name])
)
trainer.run(trainloader, max_epochs=epochs)
pbar.close()
PATH = './cifar_net.pth'
torch.save(net.state_dict(), PATH)
print('Finished Training')
print('Task ID number is: {}'.format(task.id))
run(params.get('number_of_epochs'), params.get('base_lr'), params.get('momentum'), 10)
|
Net
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/projects.py
|
{
"start": 95760,
"end": 97971
}
|
class ____(Response):
"""
Response of projects.get_task_tags endpoint.
:param tags: The list of unique tag values
:type tags: Sequence[str]
:param system_tags: The list of unique system tag values. Returned only if
'include_system' is set to 'true' in the request
:type system_tags: Sequence[str]
"""
_service = "projects"
_action = "get_task_tags"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"system_tags": {
"description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of unique tag values",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetTaskTagsResponse, self).__init__(**kwargs)
self.tags = tags
self.system_tags = system_tags
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
|
GetTaskTagsResponse
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/errors.py
|
{
"start": 1957,
"end": 2059
}
|
class ____(HypothesisException):
"""An internal error raised by choice_from_index."""
|
ChoiceTooLarge
|
python
|
openai__openai-python
|
src/openai/types/responses/response.py
|
{
"start": 1171,
"end": 1552
}
|
class ____(BaseModel):
reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
"""The reason why the response is incomplete."""
ToolChoice: TypeAlias = Union[
ToolChoiceOptions,
ToolChoiceAllowed,
ToolChoiceTypes,
ToolChoiceFunction,
ToolChoiceMcp,
ToolChoiceCustom,
ToolChoiceApplyPatch,
ToolChoiceShell,
]
|
IncompleteDetails
|
python
|
getsentry__sentry
|
tests/sentry/ratelimits/utils/test_enforce_rate_limit.py
|
{
"start": 826,
"end": 1169
}
|
class ____(RateLimitTestEndpoint):
enforce_rate_limit = False
urlpatterns = [
re_path(r"^/enforced$", RateLimitEnforcedEndpoint.as_view(), name="enforced-endpoint"),
re_path(r"^/unenforced$", RateLimitUnenforcedEndpoint.as_view(), name="unenforced-endpoint"),
]
@override_settings(ROOT_URLCONF=__name__)
|
RateLimitUnenforcedEndpoint
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 189717,
"end": 190821
}
|
class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateProjectV2Field"""
__schema__ = github_schema
__field_names__ = ("project_id", "data_type", "name", "single_select_options", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project to create the field in."""
data_type = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2CustomFieldType), graphql_name="dataType")
"""The data type of the field."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the field."""
single_select_options = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null("ProjectV2SingleSelectFieldOptionInput")), graphql_name="singleSelectOptions"
)
"""Options for a single select field. At least one value is required
if data_type is SINGLE_SELECT
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
CreateProjectV2FieldInput
|
python
|
conda__conda
|
conda/auxlib/entity.py
|
{
"start": 21097,
"end": 21151
}
|
class ____(ListField):
_type = list
|
MutableListField
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/batch/base.py
|
{
"start": 32923,
"end": 56099
}
|
class ____:
def __init__(
self,
connection: ConnectionSync,
consistency_level: Optional[ConsistencyLevel],
results: _BatchDataWrapper,
batch_mode: _BatchMode,
executor: ThreadPoolExecutor,
vectorizer_batching: bool,
objects: Optional[ObjectsBatchRequest[batch_pb2.BatchObject]] = None,
references: Optional[ReferencesBatchRequest] = None,
) -> None:
self.__batch_objects = objects or ObjectsBatchRequest[batch_pb2.BatchObject]()
self.__batch_references = references or ReferencesBatchRequest[batch_pb2.BatchReference]()
self.__connection = connection
self.__consistency_level: ConsistencyLevel = consistency_level or ConsistencyLevel.QUORUM
self.__batch_size = 100
self.__batch_grpc = _BatchGRPC(
connection._weaviate_version, self.__consistency_level, connection._grpc_max_msg_size
)
# lookup table for objects that are currently being processed - is used to not send references from objects that have not been added yet
self.__uuid_lookup: Set[str] = set()
# we do not want that users can access the results directly as they are not thread-safe
self.__results_for_wrapper_backup = results
self.__results_for_wrapper = _BatchDataWrapper()
self.__objs_count = 0
self.__refs_count = 0
self.__uuid_lookup_lock = threading.Lock()
self.__results_lock = threading.Lock()
self.__bg_thread_exception: Optional[Exception] = None
self.__is_shutting_down = threading.Event()
self.__is_shutdown = threading.Event()
self.__objs_cache_lock = threading.Lock()
self.__refs_cache_lock = threading.Lock()
self.__objs_cache: dict[str, BatchObject] = {}
self.__refs_cache: dict[str, BatchReference] = {}
# maxsize=1 so that __batch_send does not run faster than generator for __batch_recv
# thereby using too much buffer in case of server-side shutdown
self.__reqs: Queue[Optional[batch_pb2.BatchStreamRequest]] = Queue(maxsize=1)
self.__stop = False
self.__batch_mode = batch_mode
self.__total = 0
@property
def number_errors(self) -> int:
"""Return the number of errors in the batch."""
return len(self.__results_for_wrapper.failed_objects) + len(
self.__results_for_wrapper.failed_references
)
def __all_threads_alive(self) -> bool:
return self.__bg_threads is not None and all(
thread.is_alive() for thread in self.__bg_threads
)
def __any_threads_alive(self) -> bool:
return self.__bg_threads is not None and any(
thread.is_alive() for thread in self.__bg_threads
)
def _start(self) -> None:
assert isinstance(self.__batch_mode, _ServerSideBatching), (
"Only server-side batching is supported in this mode"
)
self.__bg_threads = [
self.__start_bg_threads() for _ in range(self.__batch_mode.concurrency)
]
logger.warning(
f"Provisioned {len(self.__bg_threads)} stream(s) to the server for batch processing"
)
now = time.time()
while not self.__all_threads_alive():
# wait for the stream to be started by __batch_stream
time.sleep(0.01)
if time.time() - now > 10:
raise WeaviateBatchValidationError(
"Batch stream was not started within 10 seconds. Please check your connection."
)
def _shutdown(self) -> None:
# Shutdown the current batch and wait for all requests to be finished
self.flush()
self.__stop = True
# we are done, wait for bg threads to finish
# self.__batch_stream will set the shutdown event when it receives
# the stop message from the server
while self.__any_threads_alive():
time.sleep(1)
logger.warning("Send & receive threads finished.")
# copy the results to the public results
self.__results_for_wrapper_backup.results = self.__results_for_wrapper.results
self.__results_for_wrapper_backup.failed_objects = self.__results_for_wrapper.failed_objects
self.__results_for_wrapper_backup.failed_references = (
self.__results_for_wrapper.failed_references
)
self.__results_for_wrapper_backup.imported_shards = (
self.__results_for_wrapper.imported_shards
)
def __batch_send(self) -> None:
refresh_time: float = 0.01
while (
self.__shut_background_thread_down is not None
and not self.__shut_background_thread_down.is_set()
):
if len(self.__batch_objects) + len(self.__batch_references) > 0:
self._batch_send = True
start = time.time()
while (len_o := len(self.__batch_objects)) + (
len_r := len(self.__batch_references)
) < self.__batch_size:
# wait for more objects to be added up to the batch size
time.sleep(0.01)
if (
self.__shut_background_thread_down is not None
and self.__shut_background_thread_down.is_set()
):
logger.warning("Threads were shutdown, exiting batch send loop")
# shutdown was requested, exit early
self.__reqs.put(None)
return
if time.time() - start >= 1 and (
len_o == len(self.__batch_objects) or len_r == len(self.__batch_references)
):
# no new objects were added in the last second, exit the loop
break
objs = self.__batch_objects.pop_items(self.__batch_size)
refs = self.__batch_references.pop_items(
self.__batch_size - len(objs),
uuid_lookup=self.__uuid_lookup,
)
with self.__uuid_lookup_lock:
self.__uuid_lookup.difference_update(obj.uuid for obj in objs)
for req in self.__generate_stream_requests(objs, refs):
logged = False
while self.__is_shutting_down.is_set() or self.__is_shutdown.is_set():
# if we were shutdown by the node we were connected to, we need to wait for the stream to be restarted
# so that the connection is refreshed to a new node where the objects can be accepted
# otherwise, we wait until the stream has been started by __batch_stream to send the first batch
if not logged:
logger.warning("Waiting for stream to be re-established...")
logged = True
# put sentinel into our queue to signal the end of the current stream
self.__reqs.put(None)
time.sleep(1)
if logged:
logger.warning("Stream re-established, resuming sending batches")
self.__reqs.put(req)
elif self.__stop:
# we are done, send the sentinel into our queue to be consumed by the batch sender
self.__reqs.put(None) # signal the end of the stream
logger.warning("Batching finished, sent stop signal to batch stream")
return
time.sleep(refresh_time)
def __generate_stream_requests(
self,
objs: List[batch_pb2.BatchObject],
refs: List[batch_pb2.BatchReference],
) -> Generator[batch_pb2.BatchStreamRequest, None, None]:
per_object_overhead = 4 # extra overhead bytes per object in the request
def request_maker():
return batch_pb2.BatchStreamRequest()
request = request_maker()
total_size = request.ByteSize()
for obj in objs:
obj_size = obj.ByteSize() + per_object_overhead
if total_size + obj_size >= self.__batch_grpc.grpc_max_msg_size:
yield request
request = request_maker()
total_size = request.ByteSize()
request.data.objects.values.append(obj)
total_size += obj_size
for ref in refs:
ref_size = ref.ByteSize() + per_object_overhead
if total_size + ref_size >= self.__batch_grpc.grpc_max_msg_size:
yield request
request = request_maker()
total_size = request.ByteSize()
request.data.references.values.append(ref)
total_size += ref_size
if len(request.data.objects.values) > 0 or len(request.data.references.values) > 0:
yield request
def __generate_stream_requests_for_grpc(
self,
) -> Generator[batch_pb2.BatchStreamRequest, None, None]:
yield batch_pb2.BatchStreamRequest(
start=batch_pb2.BatchStreamRequest.Start(
consistency_level=self.__batch_grpc._consistency_level,
),
)
while (
self.__shut_background_thread_down is not None
and not self.__shut_background_thread_down.is_set()
):
req = self.__reqs.get()
if req is not None:
self.__total += len(req.data.objects.values) + len(req.data.references.values)
yield req
continue
if self.__stop and not (
self.__is_shutting_down.is_set() or self.__is_shutdown.is_set()
):
logger.warning("Batching finished, closing the client-side of the stream")
yield batch_pb2.BatchStreamRequest(stop=batch_pb2.BatchStreamRequest.Stop())
return
if self.__is_shutting_down.is_set():
logger.warning("Server shutting down, closing the client-side of the stream")
return
logger.warning("Received sentinel, but not stopping, continuing...")
def __batch_recv(self) -> None:
for message in self.__batch_grpc.stream(
connection=self.__connection,
requests=self.__generate_stream_requests_for_grpc(),
):
if message.HasField("started"):
logger.warning("Batch stream started successfully")
for threads in self.__bg_threads:
threads.start_send()
if message.HasField("backoff"):
if (
message.backoff.batch_size != self.__batch_size
and not self.__is_shutting_down.is_set()
and not self.__is_shutdown.is_set()
and not self.__stop
):
self.__batch_size = message.backoff.batch_size
logger.warning(
f"Updated batch size to {self.__batch_size} as per server request"
)
if message.HasField("results"):
result_objs = BatchObjectReturn()
result_refs = BatchReferenceReturn()
failed_objs: List[ErrorObject] = []
failed_refs: List[ErrorReference] = []
for error in message.results.errors:
if error.HasField("uuid"):
try:
cached = self.__objs_cache.pop(error.uuid)
except KeyError:
continue
err = ErrorObject(
message=error.error,
object_=cached,
)
result_objs += BatchObjectReturn(
_all_responses=[err],
errors={cached.index: err},
)
failed_objs.append(err)
logger.warning(
{
"error": error.error,
"object": error.uuid,
"action": "use {client,collection}.batch.failed_objects to access this error",
}
)
if error.HasField("beacon"):
try:
cached = self.__refs_cache.pop(error.beacon)
except KeyError:
continue
err = ErrorReference(
message=error.error,
reference=error.beacon, # pyright: ignore
)
failed_refs.append(err)
result_refs += BatchReferenceReturn(
errors={cached.index: err},
)
logger.warning(
{
"error": error.error,
"reference": error.beacon,
"action": "use {client,collection}.batch.failed_references to access this error",
}
)
for success in message.results.successes:
if success.HasField("uuid"):
try:
cached = self.__objs_cache.pop(success.uuid)
except KeyError:
continue
uuid = uuid_package.UUID(success.uuid)
result_objs += BatchObjectReturn(
_all_responses=[uuid],
uuids={cached.index: uuid},
)
if success.HasField("beacon"):
try:
self.__refs_cache.pop(success.beacon, None)
except KeyError:
continue
with self.__results_lock:
self.__results_for_wrapper.results.objs += result_objs
self.__results_for_wrapper.results.refs += result_refs
self.__results_for_wrapper.failed_objects.extend(failed_objs)
self.__results_for_wrapper.failed_references.extend(failed_refs)
elif message.HasField("shutting_down"):
logger.warning(
"Received shutting down message from server, pausing sending until stream is re-established"
)
self.__is_shutting_down.set()
elif message.HasField("shutdown"):
logger.warning("Received shutdown finished message from server")
self.__is_shutdown.set()
self.__is_shutting_down.clear()
self.__reconnect()
# restart the stream if we were shutdown by the node we were connected to ensuring that the index is
# propagated properly from it to the new one
if self.__is_shutdown.is_set():
logger.warning("Restarting batch recv after shutdown...")
self.__is_shutdown.clear()
return self.__batch_recv()
else:
logger.warning("Server closed the stream from its side, shutting down batch")
return
def __reconnect(self, retry: int = 0) -> None:
if self.__consistency_level == ConsistencyLevel.ALL:
# check that all nodes are available before reconnecting
cluster = _ClusterBatch(self.__connection)
while len(nodes := cluster.get_nodes_status()) != 3 or any(
node["status"] != "HEALTHY" for node in nodes
):
logger.warning(
"Waiting for all nodes to be HEALTHY before reconnecting to batch stream due to CL=ALL..."
)
time.sleep(5)
try:
logger.warning(f"Trying to reconnect after shutdown... {retry + 1}/{5}")
self.__connection.close("sync")
self.__connection.connect(force=True)
logger.warning("Reconnected successfully")
except (WeaviateStartUpError, WeaviateGRPCUnavailableError) as e:
if retry < 5:
time.sleep(2**retry)
self.__reconnect(retry + 1)
else:
logger.error("Failed to reconnect after 5 attempts")
self.__bg_thread_exception = e
def __start_bg_threads(self) -> _BgThreads:
"""Create a background thread that periodically checks how congested the batch queue is."""
self.__shut_background_thread_down = threading.Event()
def batch_send_wrapper() -> None:
try:
self.__batch_send()
logger.warning("exited batch send thread")
except Exception as e:
logger.error(e)
self.__bg_thread_exception = e
def batch_recv_wrapper() -> None:
socket_hung_up = False
try:
self.__batch_recv()
logger.warning("exited batch receive thread")
except Exception as e:
if isinstance(e, WeaviateBatchStreamError) and (
"Socket closed" in e.message or "context canceled" in e.message
):
socket_hung_up = True
else:
logger.error(e)
logger.error(type(e))
self.__bg_thread_exception = e
if socket_hung_up:
# this happens during ungraceful shutdown of the coordinator
# lets restart the stream and add the cached objects again
logger.warning("Stream closed unexpectedly, restarting...")
self.__reconnect()
# server sets this whenever it restarts, gracefully or unexpectedly, so need to clear it now
self.__is_shutting_down.clear()
with self.__objs_cache_lock:
logger.warning(
f"Re-adding {len(self.__objs_cache)} cached objects to the batch"
)
self.__batch_objects.prepend(
[
self.__batch_grpc.grpc_object(o._to_internal())
for o in self.__objs_cache.values()
]
)
with self.__refs_cache_lock:
self.__batch_references.prepend(
[
self.__batch_grpc.grpc_reference(o._to_internal())
for o in self.__refs_cache.values()
]
)
# start a new stream with a newly reconnected channel
return batch_recv_wrapper()
threads = _BgThreads(
send=threading.Thread(
target=batch_send_wrapper,
daemon=True,
name="BgBatchSend",
),
recv=threading.Thread(
target=batch_recv_wrapper,
daemon=True,
name="BgBatchRecv",
),
)
threads.start_recv()
return threads
def flush(self) -> None:
"""Flush the batch queue and wait for all requests to be finished."""
# bg thread is sending objs+refs automatically, so simply wait for everything to be done
while len(self.__batch_objects) > 0 or len(self.__batch_references) > 0:
time.sleep(0.01)
self.__check_bg_threads_alive()
def _add_object(
self,
collection: str,
properties: Optional[WeaviateProperties] = None,
references: Optional[ReferenceInputs] = None,
uuid: Optional[UUID] = None,
vector: Optional[VECTORS] = None,
tenant: Optional[str] = None,
) -> UUID:
self.__check_bg_threads_alive()
try:
batch_object = BatchObject(
collection=collection,
properties=properties,
references=references,
uuid=uuid,
vector=vector,
tenant=tenant,
index=self.__objs_count,
)
self.__results_for_wrapper.imported_shards.add(
Shard(collection=collection, tenant=tenant)
)
except ValidationError as e:
raise WeaviateBatchValidationError(repr(e))
uuid = str(batch_object.uuid)
with self.__uuid_lookup_lock:
self.__uuid_lookup.add(uuid)
self.__batch_objects.add(self.__batch_grpc.grpc_object(batch_object._to_internal()))
with self.__objs_cache_lock:
self.__objs_cache[uuid] = batch_object
self.__objs_count += 1
# block if queue gets too long or weaviate is overloaded - reading files is faster them sending them so we do
# not need a long queue
while len(self.__batch_objects) >= self.__batch_size * 2:
self.__check_bg_threads_alive()
time.sleep(0.01)
assert batch_object.uuid is not None
return batch_object.uuid
def _add_reference(
self,
from_object_uuid: UUID,
from_object_collection: str,
from_property_name: str,
to: ReferenceInput,
tenant: Optional[str] = None,
) -> None:
self.__check_bg_threads_alive()
if isinstance(to, ReferenceToMulti):
to_strs: Union[List[str], List[UUID]] = to.uuids_str
elif isinstance(to, str) or isinstance(to, uuid_package.UUID):
to_strs = [to]
else:
to_strs = list(to)
for uid in to_strs:
try:
batch_reference = BatchReference(
from_object_collection=from_object_collection,
from_object_uuid=from_object_uuid,
from_property_name=from_property_name,
to_object_collection=(
to.target_collection if isinstance(to, ReferenceToMulti) else None
),
to_object_uuid=uid,
tenant=tenant,
index=self.__refs_count,
)
except ValidationError as e:
raise WeaviateBatchValidationError(repr(e))
self.__batch_references.add(
self.__batch_grpc.grpc_reference(batch_reference._to_internal())
)
with self.__refs_cache_lock:
self.__refs_cache[batch_reference._to_beacon()] = batch_reference
self.__refs_count += 1
def __check_bg_threads_alive(self) -> None:
if self.__any_threads_alive():
return
raise self.__bg_thread_exception or Exception("Batch thread died unexpectedly")
|
_BatchBaseNew
|
python
|
plotly__plotly.py
|
tests/test_optional/test_tools/test_figure_factory.py
|
{
"start": 29438,
"end": 44984
}
|
class ____(TestCaseNoTemplate, NumpyTestUtilsMixin):
def test_unequal_z_text_size(self):
# check: PlotlyError if z and text are not the same dimensions
kwargs = {"z": [[1, 2], [1, 2]], "annotation_text": [[1, 2, 3], [1]]}
self.assertRaises(PlotlyError, ff.create_annotated_heatmap, **kwargs)
kwargs = {"z": [[1], [1]], "annotation_text": [[1], [1], [1]]}
self.assertRaises(PlotlyError, ff.create_annotated_heatmap, **kwargs)
def test_incorrect_x_size(self):
# check: PlotlyError if x is the wrong size
kwargs = {"z": [[1, 2], [1, 2]], "x": ["A"]}
self.assertRaises(PlotlyError, ff.create_annotated_heatmap, **kwargs)
def test_incorrect_y_size(self):
# check: PlotlyError if y is the wrong size
kwargs = {"z": [[1, 2], [1, 2]], "y": [1, 2, 3]}
self.assertRaises(PlotlyError, ff.create_annotated_heatmap, **kwargs)
def test_simple_annotated_heatmap(self):
# we should be able to create a heatmap with annotated values with a
# logical text color
z = [[1, 0, 0.5], [0.25, 0.75, 0.45]]
a_heat = ff.create_annotated_heatmap(z)
expected_a_heat = {
"data": [
{
"colorscale": [
[0.0, "#0d0887"],
[0.1111111111111111, "#46039f"],
[0.2222222222222222, "#7201a8"],
[0.3333333333333333, "#9c179e"],
[0.4444444444444444, "#bd3786"],
[0.5555555555555556, "#d8576b"],
[0.6666666666666666, "#ed7953"],
[0.7777777777777778, "#fb9f3a"],
[0.8888888888888888, "#fdca26"],
[1.0, "#f0f921"],
],
"showscale": False,
"reversescale": False,
"type": "heatmap",
"z": [[1, 0, 0.5], [0.25, 0.75, 0.45]],
}
],
"layout": {
"annotations": [
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "1",
"x": 0,
"xref": "x",
"y": 0,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0",
"x": 1,
"xref": "x",
"y": 0,
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "0.5",
"x": 2,
"xref": "x",
"y": 0,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.25",
"x": 0,
"xref": "x",
"y": 1,
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "0.75",
"x": 1,
"xref": "x",
"y": 1,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.45",
"x": 2,
"xref": "x",
"y": 1,
"yref": "y",
},
],
"xaxis": {
"gridcolor": "rgb(0, 0, 0)",
"showticklabels": False,
"side": "top",
"ticks": "",
},
"yaxis": {"showticklabels": False, "ticks": "", "ticksuffix": " "},
},
}
self.assert_fig_equal(a_heat["data"][0], expected_a_heat["data"][0])
self.assert_fig_equal(a_heat["layout"], expected_a_heat["layout"])
def test_annotated_heatmap_kwargs(self):
# we should be able to create an annotated heatmap with x and y axes
# labels, a defined colorscale, and supplied text.
z = [[1, 0], [0.25, 0.75], [0.45, 0.5]]
text = [["first", "second"], ["third", "fourth"], ["fifth", "sixth"]]
a = ff.create_annotated_heatmap(
z,
x=["A", "B"],
y=["One", "Two", "Three"],
annotation_text=text,
colorscale=[[0, "rgb(255,255,255)"], [1, "#e6005a"]],
)
expected_a = {
"data": [
{
"colorscale": [[0, "rgb(255,255,255)"], [1, "#e6005a"]],
"showscale": False,
"reversescale": False,
"type": "heatmap",
"x": ["A", "B"],
"y": ["One", "Two", "Three"],
"z": [[1, 0], [0.25, 0.75], [0.45, 0.5]],
}
],
"layout": {
"annotations": [
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "first",
"x": "A",
"xref": "x",
"y": "One",
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "second",
"x": "B",
"xref": "x",
"y": "One",
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "third",
"x": "A",
"xref": "x",
"y": "Two",
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "fourth",
"x": "B",
"xref": "x",
"y": "Two",
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "fifth",
"x": "A",
"xref": "x",
"y": "Three",
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "sixth",
"x": "B",
"xref": "x",
"y": "Three",
"yref": "y",
},
],
"xaxis": {
"dtick": 1,
"gridcolor": "rgb(0, 0, 0)",
"side": "top",
"ticks": "",
},
"yaxis": {"dtick": 1, "ticks": "", "ticksuffix": " "},
},
}
self.assert_fig_equal(a["data"][0], expected_a["data"][0])
self.assert_fig_equal(a["layout"], expected_a["layout"])
def test_annotated_heatmap_reversescale(self):
# we should be able to create an annotated heatmap with x and y axes
# labels, a defined colorscale, and supplied text.
z = [[1, 0], [0.25, 0.75], [0.45, 0.5]]
text = [["first", "second"], ["third", "fourth"], ["fifth", "sixth"]]
a = ff.create_annotated_heatmap(
z,
x=["A", "B"],
y=["One", "Two", "Three"],
annotation_text=text,
reversescale=True,
colorscale=[[0, "rgb(255,255,255)"], [1, "#e6005a"]],
)
expected_a = {
"data": [
{
"colorscale": [[0, "rgb(255,255,255)"], [1, "#e6005a"]],
"showscale": False,
"reversescale": True,
"type": "heatmap",
"x": ["A", "B"],
"y": ["One", "Two", "Three"],
"z": [[1, 0], [0.25, 0.75], [0.45, 0.5]],
}
],
"layout": {
"annotations": [
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "first",
"x": "A",
"xref": "x",
"y": "One",
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "second",
"x": "B",
"xref": "x",
"y": "One",
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "third",
"x": "A",
"xref": "x",
"y": "Two",
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "fourth",
"x": "B",
"xref": "x",
"y": "Two",
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "fifth",
"x": "A",
"xref": "x",
"y": "Three",
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "sixth",
"x": "B",
"xref": "x",
"y": "Three",
"yref": "y",
},
],
"xaxis": {
"dtick": 1,
"gridcolor": "rgb(0, 0, 0)",
"side": "top",
"ticks": "",
},
"yaxis": {"dtick": 1, "ticks": "", "ticksuffix": " "},
},
}
self.assert_fig_equal(a["data"][0], expected_a["data"][0])
self.assert_fig_equal(a["layout"], expected_a["layout"])
def test_bug_1300(self):
# https://github.com/plotly/plotly.py/issues/1300
sub_z = [[0.1, 0.0, 0.0], [0.0, 1.0, 0.1]]
# sub_z = sub_z.tolist()
# Standard scale direction
fig = ff.create_annotated_heatmap(
sub_z, colorscale="Greens", showscale=True, reversescale=True
)
expected = graph_objs.Figure(
{
"data": [
{
"colorscale": [
[0.0, "rgb(247,252,245)"],
[0.125, "rgb(229,245,224)"],
[0.25, "rgb(199,233,192)"],
[0.375, "rgb(161,217,155)"],
[0.5, "rgb(116,196,118)"],
[0.625, "rgb(65,171,93)"],
[0.75, "rgb(35,139,69)"],
[0.875, "rgb(0,109,44)"],
[1.0, "rgb(0,68,27)"],
],
"reversescale": True,
"showscale": True,
"type": "heatmap",
"z": [[0.1, 0.0, 0.0], [0.0, 1.0, 0.1]],
}
],
"layout": {
"annotations": [
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.1",
"x": 0,
"xref": "x",
"y": 0,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.0",
"x": 1,
"xref": "x",
"y": 0,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.0",
"x": 2,
"xref": "x",
"y": 0,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.0",
"x": 0,
"xref": "x",
"y": 1,
"yref": "y",
},
{
"font": {"color": "#000000"},
"showarrow": False,
"text": "1.0",
"x": 1,
"xref": "x",
"y": 1,
"yref": "y",
},
{
"font": {"color": "#FFFFFF"},
"showarrow": False,
"text": "0.1",
"x": 2,
"xref": "x",
"y": 1,
"yref": "y",
},
],
"xaxis": {
"gridcolor": "rgb(0, 0, 0)",
"showticklabels": False,
"side": "top",
"ticks": "",
},
"yaxis": {"showticklabels": False, "ticks": "", "ticksuffix": " "},
},
}
)
# Remove uids
for trace in fig.data:
trace.update(uid=None)
for trace in expected.data:
trace.update(uid=None)
# Perform comparison
self.assert_fig_equal(fig, expected)
|
TestAnnotatedHeatmap
|
python
|
doocs__leetcode
|
solution/1500-1599/1507.Reformat Date/Solution.py
|
{
"start": 0,
"end": 282
}
|
class ____:
def reformatDate(self, date: str) -> str:
s = date.split()
s.reverse()
months = " JanFebMarAprMayJunJulAugSepOctNovDec"
s[1] = str(months.index(s[1]) // 3 + 1).zfill(2)
s[2] = s[2][:-2].zfill(2)
return "-".join(s)
|
Solution
|
python
|
apache__airflow
|
airflow-core/tests/unit/serialization/test_serde.py
|
{
"start": 5376,
"end": 5444
}
|
class ____:
__version__: ClassVar[int] = 2
x: int
@dataclass
|
W
|
python
|
geekcomputers__Python
|
insta_monitering/insta_datafetcher.py
|
{
"start": 427,
"end": 2759
}
|
class ____(object):
def __init__(self):
filename = os.getcwd() + "/" + "ipList.txt"
with open(filename, "r") as f:
ipdata = f.read()
self._IP = random.choice(ipdata.split(","))
def __call__(self, function_to_call_for_appling_proxy):
SOCKS5_PROXY_HOST = self._IP
# default_socket = socket.socket
socks.set_default_proxy(
socks.SOCKS5,
SOCKS5_PROXY_HOST,
config.SOCKS5_PROXY_PORT,
True,
config.auth,
config.passcode,
)
socket.socket = socks.socksocket
def wrapper_function(url):
# this is used for applyting socks5 proxy over the request
return function_to_call_for_appling_proxy(url)
socks.set_default_proxy()
return wrapper_function
async def dataprocess(htmldata):
bs4obj = bs4.BeautifulSoup(htmldata, "html.parser")
scriptsdata = bs4obj.findAll("script", {"type": "text/javascript"})
datatext = ""
for i in scriptsdata:
datatext = i.text
if "window._sharedData =" in datatext:
break
datajson = re.findall("{(.*)}", datatext)
datajson = "{" + datajson[0] + "}"
datadict = ujson.loads(datajson)
maindict = {}
datadict = datadict["entry_data"]["PostPage"][0]["graphql"]["shortcode_media"]
tofind = ["owner", "location"]
for i in tofind:
try:
maindict[i] = datadict[i]
except Exception as e:
print(e)
pass
return maindict
async def datapullpost(future, url):
while True:
@PorxyApplyingDecorator()
async def request_pull(url):
data = None
print(url)
urllib3.disable_warnings()
user_agent = {"User-agent": "Mozilla/17.0"}
try:
data = requests.get(
url=url, headers=user_agent, timeout=10, verify=False
).text
except Exception as e:
print(e)
data = None
finally:
return data
data = await request_pull(url)
if data != None:
break
data = await dataprocess(htmldata=data)
# here processing of data has to occur
future.set_result(data)
|
PorxyApplyingDecorator
|
python
|
scipy__scipy
|
scipy/stats/tests/test_morestats.py
|
{
"start": 93533,
"end": 100055
}
|
class ____:
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_basic(self, dtype, xp):
dt = getattr(xp, dtype)
x = stats.norm.rvs(size=10000, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, xp.asarray(x, dtype=dt))
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
xp_assert_close(llf, xp.asarray(llf_expected, dtype=dt))
@skip_xp_backends(np_only=True,
reason='array-likes only accepted for NumPy backend.')
def test_array_like(self, xp):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
xp_assert_close(llf, llf2, rtol=1e-12)
def test_2d_input(self, xp):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
xp_assert_close(xp.asarray([llf, llf]), xp.asarray(llf2), rtol=1e-12)
def test_empty(self, xp):
message = "One or more sample arguments is too small..."
with eager_warns(SmallSampleWarning, match=message, xp=xp):
assert xp.isnan(xp.asarray(stats.boxcox_llf(1, xp.asarray([]))))
def test_gh_6873(self, xp):
# Regression test for gh-6873.
# This example was taken from gh-7534, a duplicate of gh-6873.
data = xp.asarray([198.0, 233.0, 233.0, 392.0])
llf = stats.boxcox_llf(-8, data)
# The expected value was computed with mpmath.
xp_assert_close(llf, xp.asarray(-17.93934208579061))
def test_instability_gh20021(self, xp):
data = xp.asarray([2003, 1950, 1997, 2000, 2009], dtype=xp.float64)
llf = stats.boxcox_llf(1e-8, data)
# The expected value was computed with mpsci, set mpmath.mp.dps=100
# expect float64 output for integer input
xp_assert_close(llf, xp.asarray(-15.32401272869016598, dtype=xp.float64),
rtol=5e-7) # bumped tolerance from 1e-7 for Accelerate
def test_axis(self, xp):
data = xp.asarray([[100, 200], [300, 400]])
llf_axis_0 = stats.boxcox_llf(1, data, axis=0)
llf_0 = xp.stack([
stats.boxcox_llf(1, data[:, 0]),
stats.boxcox_llf(1, data[:, 1]),
])
xp_assert_close(llf_axis_0, llf_0)
llf_axis_1 = stats.boxcox_llf(1, data, axis=1)
llf_1 = xp.stack([
stats.boxcox_llf(1, data[0, :]),
stats.boxcox_llf(1, data[1, :]),
])
xp_assert_close(llf_axis_1, llf_1)
# This is the data from GitHub user Qukaiyi, given as an example
# of a data set that caused boxcox to fail.
_boxcox_data = [
15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
1891609
]
|
TestBoxcox_llf
|
python
|
jina-ai__jina
|
jina/clients/mixin.py
|
{
"start": 6217,
"end": 6886
}
|
class ____:
"""The Profile Mixin for Client and Flow to expose `profile` API"""
def profiling(self, show_table: bool = True) -> Dict[str, float]:
"""Profiling a single query's roundtrip including network and computation latency. Results is summarized in a Dict.
:param show_table: whether to show the table or not.
:return: the latency report in a dict.
"""
from docarray import Document
st = time.perf_counter()
r = self.client.post(on='/', inputs=Document(), return_responses=True)
ed = time.perf_counter()
return _render_response_table(r[0], st, ed, show_table=show_table)
|
ProfileMixin
|
python
|
django__django
|
tests/queries/tests.py
|
{
"start": 178537,
"end": 178889
}
|
class ____(TestCase):
def test_filter_rejects_invalid_arguments(self):
school = School.objects.create()
msg = "The following kwargs are invalid: '_connector', '_negated'"
with self.assertRaisesMessage(TypeError, msg):
School.objects.filter(pk=school.pk, _negated=True, _connector="evil")
|
TestInvalidFilterArguments
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/resources.py
|
{
"start": 701,
"end": 851
}
|
class ____(graphene.Enum):
VALUE = "VALUE"
ENV_VAR = "ENV_VAR"
class Meta:
name = "ConfiguredValueType"
|
GrapheneConfiguredValueType
|
python
|
apache__airflow
|
providers/openlineage/tests/unit/openlineage/dags/test_openlineage_execution.py
|
{
"start": 1089,
"end": 2783
}
|
class ____(BaseOperator):
def __init__(self, *, stall_amount=0, fail=False, **kwargs) -> None:
super().__init__(**kwargs)
self.stall_amount = stall_amount
self.fail = fail
def execute(self, context):
self.log.error("STALL AMOUNT %s", self.stall_amount)
time.sleep(1)
if self.fail:
raise Exception("Failed")
def get_openlineage_facets_on_start(self):
return OperatorLineage(inputs=[Dataset(namespace="test", name="on-start")])
def get_openlineage_facets_on_complete(self, task_instance):
self.log.error("STALL AMOUNT %s", self.stall_amount)
time.sleep(self.stall_amount)
return OperatorLineage(inputs=[Dataset(namespace="test", name="on-complete")])
def get_openlineage_facets_on_failure(self, task_instance):
self.log.error("STALL AMOUNT %s", self.stall_amount)
time.sleep(self.stall_amount)
return OperatorLineage(inputs=[Dataset(namespace="test", name="on-failure")])
with DAG(
dag_id="test_openlineage_execution",
default_args={"owner": "airflow", "retries": 3, "start_date": datetime.datetime(2022, 1, 1)},
schedule="0 0 * * *",
dagrun_timeout=datetime.timedelta(minutes=60),
):
no_stall = OpenLineageExecutionOperator(task_id="execute_no_stall")
short_stall = OpenLineageExecutionOperator(task_id="execute_short_stall", stall_amount=5)
mid_stall = OpenLineageExecutionOperator(task_id="execute_mid_stall", stall_amount=15)
long_stall = OpenLineageExecutionOperator(task_id="execute_long_stall", stall_amount=30)
fail = OpenLineageExecutionOperator(task_id="execute_fail", fail=True)
|
OpenLineageExecutionOperator
|
python
|
getsentry__sentry
|
tests/sentry/rules/conditions/test_existing_high_priority_issue.py
|
{
"start": 312,
"end": 1467
}
|
class ____(RuleTestCase):
rule_cls = ExistingHighPriorityIssueCondition
def setUp(self) -> None:
self.rule = Rule(environment_id=1, project=self.project, label="label")
def test_applies_correctly(self) -> None:
rule = self.get_rule(rule=self.rule)
# This will never pass for non-new or non-escalating issuesalways pass
self.event.group.update(priority=PriorityLevel.HIGH)
self.assertPasses(rule, is_new=False, has_reappeared=True, has_escalated=True)
self.assertPasses(rule, is_new=False, has_reappeared=False, has_escalated=True)
# This will never pass
self.assertDoesNotPass(rule, is_new=True, has_reappeared=False, has_escalated=False)
self.assertDoesNotPass(rule, is_new=True, has_reappeared=True, has_escalated=False)
self.event.group.update(priority=PriorityLevel.LOW)
self.assertDoesNotPass(rule, is_new=False, has_reappeared=True, has_escalated=False)
self.event.group.update(priority=PriorityLevel.MEDIUM)
self.assertDoesNotPass(rule, is_new=False, has_reappeared=True, has_escalated=True)
|
ExistingHighPriorityIssueConditionTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeAlias4.py
|
{
"start": 216,
"end": 1553
}
|
class ____:
pass
not_a_type = "ClassA"
def requires_string(a: str):
pass
requires_string(not_a_type)
# This should generate an error because type2 should
# not be interpreted as a string.
requires_string(type2)
# This should generate an error because the symbol
# is later declared as a TypeAlias.
my_type3 = int
# This should generate an error because it is obscured
# by another type alias declaration.
my_type3: "TA" = Union[int, str]
# This should generate an error because the symbol
# was previously declared as a TypeAlias.
my_type3: TA = int
# This should generate an error because the expression
# on the RHS evaluates to an object, not a class.
my_type4: TA = 3
# This should generate an error because the expression
# on the RHS evaluates to an object, not a class.
my_type5: TA = True
# This should generate an error because the expression
# on the RHS evaluates to an object, not a class.
my_type7: TA = list()
# Verify that variables with declarations (other than explicit TypeAlias)
# are not treated as a type alias.
SimpleAlias = int
ExplicitAlias: TA = int
SimpleNonAlias: Type[int] = int
if sys.version_info > (3, 9):
reveal_type(SimpleAlias, expected_text="type[int]")
reveal_type(ExplicitAlias, expected_text="type[int]")
reveal_type(SimpleNonAlias, expected_text="type[int]")
|
ClassA
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py
|
{
"start": 78400,
"end": 88742
}
|
class ____(ShopifyBulkQuery):
"""
{
products(query: "updated_at:>='2020-01-20T00:00:00+00:00' AND updated_at:<'2024-04-25T00:00:00+00:00'", sortKey:UPDATED_AT) {
edges {
node {
__typename
id
publishedAt
createdAt
status
vendor
updatedAt
bodyHtml
productType
tags
options {
__typename
id
values
position
}
handle
images {
edges {
node {
__typename
id
}
}
}
templateSuffix
title
variants {
edges {
node {
__typename
id
}
}
}
description
descriptionHtml
isGiftCard
legacyResourceId
media_count: mediaCount {
media_count: count
}
onlineStorePreviewUrl
onlineStoreUrl
totalInventory
tracksInventory
total_variants: variantsCount {
total_variants: count
}
hasOnlyDefaultVariant
hasOutOfStockVariants
requiresSellingPlan
priceRangeV2 {
maxVariantPrice {
amount
currencyCode
}
minVariantPrice {
amount
currencyCode
}
}
featuredImage {
altText
height
id
url
width
}
seo {
description
title
}
featuredMedia {
alt
id
mediaContentType
status
preview {
image {
id
altText
}
status
}
mediaErrors {
code
details
message
}
mediaWarnings {
code
message
}
}
feedback {
details {
app {
id
}
link {
url
}
messages {
field
message
}
}
summary
}
}
}
}
}
"""
query_name = "products"
sort_key = "UPDATED_AT"
# images property fields
images_fields: List[Field] = [Field(name="edges", fields=[Field(name="node", fields=["__typename", "id"])])]
# variants property fields, we re-use the same field names as for the `images` property
variants_fields: List[Field] = images_fields
amount_fields: List[Field] = [
"amount",
Field(name="currencyCode", alias="currency_code"),
]
price_range_v2_fields: List[Field] = [
Field(name="maxVariantPrice", alias="max_variant_price", fields=amount_fields),
Field(name="minVariantPrice", alias="min_variant_price", fields=amount_fields),
]
featured_image_fields: List[Field] = [
"height",
"id",
"url",
"width",
Field(name="altText", alias="alt_text"),
]
featured_media_fields: List[Field] = [
"alt",
"id",
"status",
Field(name="mediaContentType", alias="media_content_type"),
Field(name="preview", fields=["status", Field(name="image", fields=["id", Field(name="altText", alias="alt_text")])]),
Field(name="mediaErrors", alias="media_errors", fields=["code", "details", "message"]),
Field(name="mediaWarnings", alias="media_warnings", fields=["code", "message"]),
]
feedback_details_fields: List[Field] = [
Field(name="app", fields=["id"]),
Field(name="link", fields=["url"]),
Field(name="messages", fields=["field", "message"]),
]
feedback_fields: List[Field] = [
"summary",
Field(name="details", fields=feedback_details_fields),
]
# main query
query_nodes: List[Field] = [
"__typename",
"id",
"publishedAt",
"createdAt",
"status",
"vendor",
"updatedAt",
"bodyHtml",
"productType",
"tags",
"handle",
"templateSuffix",
"title",
"description",
"descriptionHtml",
"isGiftCard",
"legacyResourceId",
"onlineStorePreviewUrl",
"onlineStoreUrl",
"totalInventory",
"tracksInventory",
"hasOnlyDefaultVariant",
"hasOutOfStockVariants",
"requiresSellingPlan",
Field(name="priceRangeV2", fields=price_range_v2_fields),
Field(name="featuredImage", fields=featured_image_fields),
Field(name="seo", fields=["description", "title"]),
Field(name="featuredMedia", fields=featured_media_fields),
Field(name="feedback", fields=feedback_fields),
Field(name="variantsCount", alias="total_variants", fields=[Field(name="count", alias="total_variants")]),
Field(name="mediaCount", alias="media_count", fields=[Field(name="count", alias="media_count")]),
Field(name="options", fields=["id", "name", "values", "position"]),
Field(name="images", fields=images_fields),
Field(name="variants", fields=variants_fields),
]
record_composition = {
"new_record": "Product",
# each product could have `Image` and `ProductVariant` associated with the product
"record_components": ["Image", "ProductVariant"],
}
def _process_component(self, entity: List[dict]) -> List[dict]:
for item in entity:
# remove the `__parentId` from the object
if BULK_PARENT_KEY in item:
item.pop(BULK_PARENT_KEY)
# resolve the id from string
item["id"] = self.tools.resolve_str_id(item.get("id"))
return entity
def _process_options(self, options: List[dict], product_id: Optional[int] = None) -> List[dict]:
for option in options:
# add product_id to each option
option["product_id"] = product_id if product_id else None
return options
def _unnest_tags(self, record: MutableMapping[str, Any]) -> Optional[str]:
# we keep supporting 1 tag only, as it was for the REST stream,
# to avoid breaking change.
tags = record.get("tags", [])
return ", ".join(tags) if tags else None
def _process_price_range_v2(self, price_range_v2: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
max_variant_price = price_range_v2.get("max_variant_price", {})
min_variant_price = price_range_v2.get("min_variant_price", {})
# cast the `amount` for each of the min/max object
if max_variant_price:
price_range_v2["max_variant_price"]["amount"] = float(max_variant_price.get("amount"))
if min_variant_price:
price_range_v2["min_variant_price"]["amount"] = float(min_variant_price.get("amount"))
return price_range_v2
def record_process_components(self, record: MutableMapping[str, Any]) -> Iterable[MutableMapping[str, Any]]:
"""
Defines how to process collected components.
"""
# get the joined record components collected for the record
record_components = record.get("record_components", {})
# process record components
if record_components:
record["images"] = self._process_component(record_components.get("Image", []))
record["variants"] = self._process_component(record_components.get("ProductVariant", []))
record["options"] = self._process_component(record.get("options", []))
# add the product_id to the `options`
product_id = record.get("id")
record["options"] = self._process_options(record.get("options", []), product_id)
record.pop("record_components")
# unnest the `tags` (the list of 1)
record["tags"] = self._unnest_tags(record)
# unnest `total_variants`
record["total_variants"] = record.get("total_variants", {}).get("total_variants")
# unnest `media_count`
record["media_count"] = record.get("media_count", {}).get("media_count")
# convert dates from ISO-8601 to RFC-3339
record["published_at"] = self.tools.from_iso8601_to_rfc3339(record, "publishedAt")
record["updatedAt"] = self.tools.from_iso8601_to_rfc3339(record, "updatedAt")
record["createdAt"] = self.tools.from_iso8601_to_rfc3339(record, "createdAt")
# process `price_range_v2`
price_range_v2 = record.get("priceRangeV2", {})
if price_range_v2:
record["priceRangeV2"] = self._process_price_range_v2(price_range_v2)
yield record
|
Product
|
python
|
django__django
|
tests/admin_widgets/models.py
|
{
"start": 5630,
"end": 5796
}
|
class ____(models.Model):
user = models.ForeignKey("auth.User", models.CASCADE, to_field="username")
def __str__(self):
return self.user.username
|
Profile
|
python
|
numpy__numpy
|
numpy/distutils/fujitsuccompiler.py
|
{
"start": 51,
"end": 834
}
|
class ____(UnixCCompiler):
"""
Fujitsu compiler.
"""
compiler_type = 'fujitsu'
cc_exe = 'fcc'
cxx_exe = 'FCC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
cc_compiler = self.cc_exe
cxx_compiler = self.cxx_exe
self.set_executables(
compiler=cc_compiler +
' -O3 -Nclang -fPIC',
compiler_so=cc_compiler +
' -O3 -Nclang -fPIC',
compiler_cxx=cxx_compiler +
' -O3 -Nclang -fPIC',
linker_exe=cc_compiler +
' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared',
linker_so=cc_compiler +
' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared'
)
|
FujitsuCCompiler
|
python
|
kamyu104__LeetCode-Solutions
|
Python/match-substring-after-replacement.py
|
{
"start": 113,
"end": 953
}
|
class ____(object):
def matchReplacement(self, s, sub, mappings):
"""
:type s: str
:type sub: str
:type mappings: List[List[str]]
:rtype: bool
"""
def transform(x):
return ord(x)-ord('0') if x.isdigit() else ord(x)-ord('a')+10 if x.islower() else ord(x)-ord('A')+36
def check(i):
return all(sub[j] == s[i+j] or lookup[sub[j]][s[i+j]] for j in xrange(len(sub)))
lookup = [[0]*62 for _ in xrange(62)]
for a, b in mappings:
lookup[transform(a)][transform(b)] = 1
s = map(transform, s)
sub = map(transform, sub)
return any(check(i) for i in xrange(len(s)-len(sub)+1))
# Time: O(n * k), n = len(s), k = len(sub)
# Space: O(m), m = len(mappings)
import collections
# brute force
|
Solution
|
python
|
python-markdown__markdown
|
markdown/util.py
|
{
"start": 6590,
"end": 7073
}
|
class ____:
""" The base class for all processors.
Attributes:
Processor.md: The `Markdown` instance passed in an initialization.
Arguments:
md: The `Markdown` instance this processor is a part of.
"""
def __init__(self, md: Markdown | None = None):
self.md = md
if TYPE_CHECKING: # pragma: no cover
class TagData(TypedDict):
tag: str
attrs: dict[str, str]
left_index: int
right_index: int
|
Processor
|
python
|
sympy__sympy
|
sympy/solvers/ode/single.py
|
{
"start": 1755,
"end": 7133
}
|
class ____:
"""Represents an ordinary differential equation (ODE)
This class is used internally in the by dsolve and related
functions/classes so that properties of an ODE can be computed
efficiently.
Examples
========
This class is used internally by dsolve. To instantiate an instance
directly first define an ODE problem:
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> eq = f(x).diff(x, 2)
Now you can create a SingleODEProblem instance and query its properties:
>>> from sympy.solvers.ode.single import SingleODEProblem
>>> problem = SingleODEProblem(f(x).diff(x), f(x), x)
>>> problem.eq
Derivative(f(x), x)
>>> problem.func
f(x)
>>> problem.sym
x
"""
# Instance attributes:
eq: Expr
func: AppliedUndef
sym: Symbol
_order: int
_eq_expanded: Expr
_eq_preprocessed: Expr
_eq_high_order_free = None
def __init__(self, eq: Expr, func: AppliedUndef, sym: Symbol, prep: bool = True, **kwargs):
self.eq = eq
self.func = func
self.sym = sym
self.prep = prep
self.params = kwargs
@cached_property
def order(self) -> int:
return ode_order(self.eq, self.func)
@cached_property
def eq_preprocessed(self) -> Expr:
return self._get_eq_preprocessed()
@cached_property
def eq_high_order_free(self) -> Expr:
a = Wild('a', exclude=[self.func])
c1 = Wild('c1', exclude=[self.sym])
# Precondition to try remove f(x) from highest order derivative
reduced_eq = None
if self.eq.is_Add:
deriv_coef = self.eq.coeff(self.func.diff(self.sym, self.order))
if deriv_coef not in (1, 0):
r = deriv_coef.match(a*self.func**c1)
if r and r[c1]:
den = self.func**r[c1]
reduced_eq = Add(*[arg/den for arg in self.eq.args])
if reduced_eq is None:
reduced_eq = expand(self.eq)
return reduced_eq
@cached_property
def eq_expanded(self) -> Expr:
return expand(self.eq_preprocessed)
def _get_eq_preprocessed(self) -> Expr:
if self.prep:
process_eq, process_func = _preprocess(self.eq, self.func)
if process_func != self.func:
raise ValueError
else:
process_eq = self.eq
return process_eq
def get_numbered_constants(self, num=1, start=1, prefix='C') -> list[Symbol]:
"""
Returns a list of constants that do not occur
in eq already.
"""
ncs = self.iter_numbered_constants(start, prefix)
Cs = [next(ncs) for i in range(num)]
return Cs
def iter_numbered_constants(self, start=1, prefix='C') -> Iterator[Symbol]:
"""
Returns an iterator of constants that do not occur
in eq already.
"""
atom_set = self.eq.free_symbols
func_set = self.eq.atoms(Function)
if func_set:
atom_set |= {Symbol(str(f.func)) for f in func_set}
return numbered_symbols(start=start, prefix=prefix, exclude=atom_set)
@cached_property
def is_autonomous(self):
u = Dummy('u')
x = self.sym
syms = self.eq.subs(self.func, u).free_symbols
return x not in syms
def get_linear_coefficients(self, eq, func, order):
r"""
Matches a differential equation to the linear form:
.. math:: a_n(x) y^{(n)} + \cdots + a_1(x)y' + a_0(x) y + B(x) = 0
Returns a dict of order:coeff terms, where order is the order of the
derivative on each term, and coeff is the coefficient of that derivative.
The key ``-1`` holds the function `B(x)`. Returns ``None`` if the ODE is
not linear. This function assumes that ``func`` has already been checked
to be good.
Examples
========
>>> from sympy import Function, cos, sin
>>> from sympy.abc import x
>>> from sympy.solvers.ode.single import SingleODEProblem
>>> f = Function('f')
>>> eq = f(x).diff(x, 3) + 2*f(x).diff(x) + \
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) - \
... sin(x)
>>> obj = SingleODEProblem(eq, f(x), x)
>>> obj.get_linear_coefficients(eq, f(x), 3)
{-1: x - sin(x), 0: -1, 1: cos(x) + 2, 2: x, 3: 1}
>>> eq = f(x).diff(x, 3) + 2*f(x).diff(x) + \
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) - \
... sin(f(x))
>>> obj = SingleODEProblem(eq, f(x), x)
>>> obj.get_linear_coefficients(eq, f(x), 3) == None
True
"""
f = func.func
x = func.args[0]
symset = {Derivative(f(x), x, i) for i in range(order+1)}
try:
rhs, lhs_terms = _lin_eq2dict(eq, symset)
except PolyNonlinearError:
return None
if rhs.has(func) or any(c.has(func) for c in lhs_terms.values()):
return None
terms = {i: lhs_terms.get(f(x).diff(x, i), S.Zero) for i in range(order+1)}
terms[-1] = rhs
return terms
# TODO: Add methods that can be used by many ODE solvers:
# order
# is_linear()
# get_linear_coefficients()
# eq_prepared (the ODE in prepared form)
|
SingleODEProblem
|
python
|
django__django
|
django/db/models/fields/__init__.py
|
{
"start": 48710,
"end": 51352
}
|
class ____:
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_mutually_exclusive_options(),
*self._check_fix_default_value(),
]
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [
self.auto_now_add,
self.auto_now,
self.has_default(),
]
enabled_options = [
option not in (None, False) for option in mutually_exclusive_options
].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id="fields.E160",
)
]
else:
return []
def _check_fix_default_value(self):
return []
# Concrete subclasses use this in their implementations of
# _check_fix_default_value().
def _check_if_value_fixed(self, value, now=None):
"""
Check if the given value appears to have been provided as a "fixed"
time value, and include a warning in the returned list if it does. The
value argument must be a date object or aware/naive datetime object. If
now is provided, it must be a naive datetime object.
"""
if now is None:
now = _get_naive_now()
offset = datetime.timedelta(seconds=10)
lower = now - offset
upper = now + offset
if isinstance(value, datetime.datetime):
value = _to_naive(value)
else:
assert isinstance(value, datetime.date)
lower = lower.date()
upper = upper.date()
if lower <= value <= upper:
return [
checks.Warning(
"Fixed default value provided.",
hint=(
"It seems you set a fixed date / time / datetime "
"value as default for this field. This may not be "
"what you want. If you want to have the current date "
"as default, use `django.utils.timezone.now`"
),
obj=self,
id="fields.W161",
)
]
return []
|
DateTimeCheckMixin
|
python
|
PrefectHQ__prefect
|
tests/server/models/test_block_schemas.py
|
{
"start": 500,
"end": 11579
}
|
class ____:
async def test_create_block_schema(self, session, block_type_x):
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={
"title": "x",
"type": "object",
"properties": {
"access_key_id": {"title": "Access Key Id", "type": "string"},
"secret_access_key": {
"title": "Secret Access Key",
"type": "string",
},
"session_token": {"title": "Session Token", "type": "string"},
},
"block_type_slug": "x",
"block_schema_references": {},
},
block_type_id=block_type_x.id,
capabilities=["this block can test"],
),
)
assert block_schema.fields == {
"title": "x",
"type": "object",
"properties": {
"access_key_id": {"title": "Access Key Id", "type": "string"},
"secret_access_key": {"title": "Secret Access Key", "type": "string"},
"session_token": {"title": "Session Token", "type": "string"},
},
"block_type_slug": "x",
"block_schema_references": {},
}
assert (
block_schema.checksum
== "sha256:4448d5cf2ddb989f7fde8b2c36ec89527ca30e0e8ef041eed8bd15c11fe6cfee"
)
assert block_schema.block_type_id == block_type_x.id
db_block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema.id
)
assert db_block_schema.checksum == block_schema.checksum
assert db_block_schema.fields == block_schema.fields
assert db_block_schema.block_type_id == block_schema.block_type_id
assert db_block_schema.capabilities == ["this block can test"]
async def test_create_nested_block_schema(self, session, block_type_x):
class Y(Block):
a: str
b: str
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y: Y
z: str
await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session, block_schema=X._to_block_schema()
)
nested_block_schema = await read_block_schema_by_checksum(
session=session,
checksum=Y._calculate_schema_checksum(),
)
assert nested_block_schema is not None
assert nested_block_schema.fields == {
"block_schema_references": {},
"block_type_slug": "y",
"properties": {
"a": {"title": "A", "type": "string"},
"b": {"title": "B", "type": "string"},
},
"required": ["a", "b"],
"title": "Y",
"type": "object",
"secret_fields": [],
}
assert nested_block_schema.fields == Y.model_json_schema()
async def test_create_multiply_nested_block_schema(self, session, block_type_x):
class A(Block):
d: str
e: str
class Z(Block):
a: A
c: int
class Y(Block):
b: str
c: int
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y: Y
z: Z
await models.block_types.create_block_type(
session=session, block_type=A._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Z._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session, block_schema=X._to_block_schema()
)
block_schemas = await models.block_schemas.read_block_schemas(session=session)
assert len(block_schemas) == 4
nested_block_schema = await read_block_schema_by_checksum(
session=session,
checksum=A._calculate_schema_checksum(),
)
assert nested_block_schema is not None
assert nested_block_schema.fields == {
"block_schema_references": {},
"block_type_slug": "a",
"properties": {
"d": {"title": "D", "type": "string"},
"e": {"title": "E", "type": "string"},
},
"required": ["d", "e"],
"title": "A",
"type": "object",
"secret_fields": [],
}
assert nested_block_schema.fields == A.model_json_schema()
async def test_create_nested_block_schema_with_multiply_used_blocks(self, session):
warnings.filterwarnings("ignore", category=UserWarning)
class A(Block):
d: str
e: str
class Z(Block):
a: A
c: int
class Y(Block):
a: A
b: str
c: int
class X(Block):
y: Y
z: Z
await models.block_types.create_block_type(
session=session, block_type=A._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Z._to_block_type()
)
await models.block_types.create_block_type(
session=session, block_type=Y._to_block_type()
)
block_type_x = await models.block_types.create_block_type(
session=session, block_type=X._to_block_type()
)
await models.block_schemas.create_block_schema(
session=session,
block_schema=X._to_block_schema(block_type_id=block_type_x.id),
)
block_schemas = await models.block_schemas.read_block_schemas(session=session)
assert len(block_schemas) == 4
nested_block_schema_a = await read_block_schema_by_checksum(
session=session,
checksum=A._calculate_schema_checksum(),
)
assert nested_block_schema_a is not None
assert nested_block_schema_a.fields == {
"block_schema_references": {},
"block_type_slug": "a",
"properties": {
"d": {"title": "D", "type": "string"},
"e": {"title": "E", "type": "string"},
},
"required": ["d", "e"],
"title": "A",
"type": "object",
"secret_fields": [],
}
assert nested_block_schema_a.fields == A.model_json_schema()
nested_block_schema_z = (
await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=Z._calculate_schema_checksum()
)
)
assert nested_block_schema_z is not None
assert nested_block_schema_z.fields == Z.model_json_schema()
assert (
Z.model_json_schema()["block_schema_references"]["a"][
"block_schema_checksum"
]
== A._calculate_schema_checksum()
)
nested_block_schema_y = (
await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=Y._calculate_schema_checksum()
)
)
assert nested_block_schema_y is not None
assert nested_block_schema_y.fields == Y.model_json_schema()
assert (
Y.model_json_schema()["block_schema_references"]["a"][
"block_schema_checksum"
]
== A._calculate_schema_checksum()
)
async def test_create_block_schema_with_union(
self, session, block_type_x, block_type_y, block_type_z
):
class Z(Block):
_block_type_id = block_type_z.id
_block_type_name = block_type_z.name
b: str
class Y(Block):
_block_type_id = block_type_y.id
_block_type_name = block_type_y.name
a: str
class X(Block):
_block_type_id = block_type_x.id
_block_type_name = block_type_x.name
y_or_z: Union[Y, Z]
block_schema = await models.block_schemas.create_block_schema(
session=session,
block_schema=X._to_block_schema(),
)
assert block_schema.checksum == X._calculate_schema_checksum()
assert block_schema.fields == X.model_json_schema()
async def test_create_block_schema_is_idempotent(self, session, block_type_x):
first_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
# Should not raise
second_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
# Should not raise
third_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={},
block_type_id=block_type_x.id,
),
)
assert (
first_create_response.id
== second_create_response.id
== third_create_response.id
)
async def test_create_block_schema_is_idempotent_for_nested_blocks(self, session):
class Child(Block):
age: int
class Parent(Block):
child: Child
parent_block_type = await models.block_types.create_block_type(
session=session, block_type=Parent._to_block_type()
)
child_block_type = await models.block_types.create_block_type(
session=session, block_type=Child._to_block_type()
)
parent_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=Parent._to_block_schema(block_type_id=parent_block_type.id),
)
# Should not raise
child_create_response = await models.block_schemas.create_block_schema(
session=session,
block_schema=Child._to_block_schema(block_type_id=child_block_type.id),
)
assert (
parent_create_response.fields["block_schema_references"]["child"][
"block_schema_checksum"
]
== child_create_response.checksum
)
|
TestCreateBlockSchema
|
python
|
getsentry__sentry
|
tests/sentry/hybridcloud/models/test_outbox.py
|
{
"start": 1840,
"end": 5177
}
|
class ____(TestCase):
region = Region("eu", 1, "http://eu.testserver", RegionCategory.MULTI_TENANT)
region_config = (region,)
def test_skip_shards(self) -> None:
with self.options({"hybrid_cloud.authentication.disabled_user_shards": [100]}):
assert ControlOutbox(
shard_scope=OutboxScope.USER_SCOPE, shard_identifier=100
).should_skip_shard()
assert not ControlOutbox(
shard_scope=OutboxScope.USER_SCOPE, shard_identifier=101
).should_skip_shard()
assert not ControlOutbox(
shard_scope=OutboxScope.USER_SCOPE, shard_identifier=100
).should_skip_shard()
def test_control_sharding_keys(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
org = Factories.create_organization()
user1 = Factories.create_user()
user2 = Factories.create_user()
with assume_test_silo_mode(SiloMode.REGION):
expected_region_name = get_local_region().name
om = OrganizationMember.objects.create(
organization_id=org.id,
user_id=user1.id,
role=org.default_role,
)
om.outbox_for_update().drain_shard()
om = OrganizationMember.objects.create(
organization_id=org.id,
user_id=user2.id,
role=org.default_role,
)
om.outbox_for_update().drain_shard()
with outbox_context(flush=False):
for inst in User.outboxes_for_user_update(user1.id):
inst.save()
for inst in User.outboxes_for_user_update(user2.id):
inst.save()
shards = {
(row["shard_scope"], row["shard_identifier"], row["region_name"])
for row in ControlOutbox.find_scheduled_shards()
}
assert shards == {
(OutboxScope.USER_SCOPE.value, user1.id, expected_region_name),
(OutboxScope.USER_SCOPE.value, user2.id, expected_region_name),
}
def test_prepare_next_from_shard_no_conflict_with_processing(self) -> None:
with outbox_runner():
org = Factories.create_organization()
user1 = Factories.create_user()
Factories.create_member(organization_id=org.id, user_id=user1.id)
with outbox_context(flush=False):
outbox = user1.outboxes_for_update()[0]
outbox.save()
with outbox.process_shard(None) as next_shard_row:
assert next_shard_row is not None
def test_with_other_connection() -> None:
try:
assert (
ControlOutbox.prepare_next_from_shard(
{
k: getattr(next_shard_row, k)
for k in ControlOutbox.sharding_columns
}
)
is None
)
finally:
for c in connections.all():
c.close()
t = threading.Thread(target=test_with_other_connection)
t.start()
t.join()
|
ControlOutboxTest
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/types.py
|
{
"start": 5605,
"end": 6939
}
|
class ____(_FloatType, sqltypes.DOUBLE[Union[decimal.Decimal, float]]):
"""MySQL DOUBLE type."""
__visit_name__ = "DOUBLE"
def __init__(
self,
precision: Optional[int] = None,
scale: Optional[int] = None,
asdecimal: bool = True,
**kw: Any,
):
"""Construct a DOUBLE.
.. note::
The :class:`.DOUBLE` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not effect the values
returned by the underlying database API, which continue to be
numeric.
"""
super().__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
|
DOUBLE
|
python
|
networkx__networkx
|
networkx/algorithms/isomorphism/tests/test_vf2pp.py
|
{
"start": 4439,
"end": 21961
}
|
class ____:
def test_custom_graph1_same_labels(self):
G1 = nx.Graph()
mapped = {1: "A", 2: "B", 3: "C", 4: "D", 5: "Z", 6: "E"}
edges1 = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 6), (3, 4), (5, 1), (5, 2)]
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Add edge making G1 symmetrical
G1.add_edge(3, 7)
G1.nodes[7]["label"] = "blue"
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Make G2 isomorphic to G1
G2.add_edges_from([(mapped[3], "X"), (mapped[6], mapped[5])])
G1.add_edge(4, 7)
G2.nodes["X"]["label"] = "blue"
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Re-structure maintaining isomorphism
G1.remove_edges_from([(1, 4), (1, 3)])
G2.remove_edges_from([(mapped[1], mapped[5]), (mapped[1], mapped[2])])
assert vf2pp_isomorphism(G1, G2, node_label="label")
def test_custom_graph1_different_labels(self):
G1 = nx.Graph()
mapped = {1: "A", 2: "B", 3: "C", 4: "D", 5: "Z", 6: "E"}
edges1 = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 6), (3, 4), (5, 1), (5, 2)]
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip([mapped[n] for n in G1], it.cycle(labels_many))),
"label",
)
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
def test_custom_graph2_same_labels(self):
G1 = nx.Graph()
mapped = {1: "A", 2: "C", 3: "D", 4: "E", 5: "G", 7: "B", 6: "F"}
edges1 = [(1, 2), (1, 5), (5, 6), (2, 3), (2, 4), (3, 4), (4, 5), (2, 7)]
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Obtain two isomorphic subgraphs from the graph
G2.remove_edge(mapped[1], mapped[2])
G2.add_edge(mapped[1], mapped[4])
H1 = nx.Graph(G1.subgraph([2, 3, 4, 7]))
H2 = nx.Graph(G2.subgraph([mapped[1], mapped[4], mapped[5], mapped[6]]))
assert vf2pp_isomorphism(H1, H2, node_label="label")
# Add edges maintaining isomorphism
H1.add_edges_from([(3, 7), (4, 7)])
H2.add_edges_from([(mapped[1], mapped[6]), (mapped[4], mapped[6])])
assert vf2pp_isomorphism(H1, H2, node_label="label")
def test_custom_graph2_different_labels(self):
G1 = nx.Graph()
mapped = {1: "A", 2: "C", 3: "D", 4: "E", 5: "G", 7: "B", 6: "F"}
edges1 = [(1, 2), (1, 5), (5, 6), (2, 3), (2, 4), (3, 4), (4, 5), (2, 7)]
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip([mapped[n] for n in G1], it.cycle(labels_many))),
"label",
)
# Adding new nodes
G1.add_node(0)
G2.add_node("Z")
G1.nodes[0]["label"] = G1.nodes[1]["label"]
G2.nodes["Z"]["label"] = G1.nodes[1]["label"]
mapped.update({0: "Z"})
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
# Change the color of one of the nodes
G2.nodes["Z"]["label"] = G1.nodes[2]["label"]
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Add an extra edge
G1.nodes[0]["label"] = "blue"
G2.nodes["Z"]["label"] = "blue"
G1.add_edge(0, 1)
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Add extra edge to both
G2.add_edge("Z", "A")
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
def test_custom_graph3_same_labels(self):
G1 = nx.Graph()
mapped = {1: 9, 2: 8, 3: 7, 4: 6, 5: 3, 8: 5, 9: 4, 7: 1, 6: 2}
edges1 = [
(1, 2),
(1, 3),
(2, 3),
(3, 4),
(4, 5),
(4, 7),
(4, 9),
(5, 8),
(8, 9),
(5, 6),
(6, 7),
(5, 2),
]
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Connect nodes maintaining symmetry
G1.add_edges_from([(6, 9), (7, 8)])
G2.add_edges_from([(mapped[6], mapped[8]), (mapped[7], mapped[9])])
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Make isomorphic
G1.add_edges_from([(6, 8), (7, 9)])
G2.add_edges_from([(mapped[6], mapped[9]), (mapped[7], mapped[8])])
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Connect more nodes
G1.add_edges_from([(2, 7), (3, 6)])
G2.add_edges_from([(mapped[2], mapped[7]), (mapped[3], mapped[6])])
G1.add_node(10)
G2.add_node("Z")
G1.nodes[10]["label"] = "blue"
G2.nodes["Z"]["label"] = "blue"
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Connect the newly added node, to opposite sides of the graph
G1.add_edges_from([(10, 1), (10, 5), (10, 8)])
G2.add_edges_from([("Z", mapped[1]), ("Z", mapped[4]), ("Z", mapped[9])])
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Get two subgraphs that are not isomorphic but are easy to make
H1 = nx.Graph(G1.subgraph([2, 3, 4, 5, 6, 7, 10]))
H2 = nx.Graph(
G2.subgraph(
[mapped[4], mapped[5], mapped[6], mapped[7], mapped[8], mapped[9], "Z"]
)
)
assert vf2pp_isomorphism(H1, H2, node_label="label") is None
# Restructure both to make them isomorphic
H1.add_edges_from([(10, 2), (10, 6), (3, 6), (2, 7), (2, 6), (3, 7)])
H2.add_edges_from(
[("Z", mapped[7]), (mapped[6], mapped[9]), (mapped[7], mapped[8])]
)
assert vf2pp_isomorphism(H1, H2, node_label="label")
# Add edges with opposite direction in each Graph
H1.add_edge(3, 5)
H2.add_edge(mapped[5], mapped[7])
assert vf2pp_isomorphism(H1, H2, node_label="label") is None
def test_custom_graph3_different_labels(self):
G1 = nx.Graph()
mapped = {1: 9, 2: 8, 3: 7, 4: 6, 5: 3, 8: 5, 9: 4, 7: 1, 6: 2}
edges1 = [
(1, 2),
(1, 3),
(2, 3),
(3, 4),
(4, 5),
(4, 7),
(4, 9),
(5, 8),
(8, 9),
(5, 6),
(6, 7),
(5, 2),
]
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip([mapped[n] for n in G1], it.cycle(labels_many))),
"label",
)
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
# Add extra edge to G1
G1.add_edge(1, 7)
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Compensate in G2
G2.add_edge(9, 1)
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
# Add extra node
G1.add_node("A")
G2.add_node("K")
G1.nodes["A"]["label"] = "green"
G2.nodes["K"]["label"] = "green"
mapped.update({"A": "K"})
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
# Connect A to one side of G1 and K to the opposite
G1.add_edge("A", 6)
G2.add_edge("K", 5)
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Make the graphs symmetrical
G1.add_edge(1, 5)
G1.add_edge(2, 9)
G2.add_edge(9, 3)
G2.add_edge(8, 4)
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Assign same colors so the two opposite sides are identical
for node in G1.nodes():
color = "red"
G1.nodes[node]["label"] = color
G2.nodes[mapped[node]]["label"] = color
assert vf2pp_isomorphism(G1, G2, node_label="label")
def test_custom_graph4_different_labels(self):
G1 = nx.Graph()
edges1 = [
(1, 2),
(2, 3),
(3, 8),
(3, 4),
(4, 5),
(4, 6),
(3, 6),
(8, 7),
(8, 9),
(5, 9),
(10, 11),
(11, 12),
(12, 13),
(11, 13),
]
mapped = {
1: "n",
2: "m",
3: "l",
4: "j",
5: "k",
6: "i",
7: "g",
8: "h",
9: "f",
10: "b",
11: "a",
12: "d",
13: "e",
}
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip([mapped[n] for n in G1], it.cycle(labels_many))),
"label",
)
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
def test_custom_graph4_same_labels(self):
G1 = nx.Graph()
edges1 = [
(1, 2),
(2, 3),
(3, 8),
(3, 4),
(4, 5),
(4, 6),
(3, 6),
(8, 7),
(8, 9),
(5, 9),
(10, 11),
(11, 12),
(12, 13),
(11, 13),
]
mapped = {
1: "n",
2: "m",
3: "l",
4: "j",
5: "k",
6: "i",
7: "g",
8: "h",
9: "f",
10: "b",
11: "a",
12: "d",
13: "e",
}
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Add nodes of different label
G1.add_node(0)
G2.add_node("z")
G1.nodes[0]["label"] = "green"
G2.nodes["z"]["label"] = "blue"
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Make the labels identical
G2.nodes["z"]["label"] = "green"
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Change the structure of the graphs, keeping them isomorphic
G1.add_edge(2, 5)
G2.remove_edge("i", "l")
G2.add_edge("g", "l")
G2.add_edge("m", "f")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Change the structure of the disconnected sub-graph, keeping it isomorphic
G1.remove_node(13)
G2.remove_node("d")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Connect the newly added node to the disconnected graph, which now is just a path of size 3
G1.add_edge(0, 10)
G2.add_edge("e", "z")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Connect the two disconnected sub-graphs, forming a single graph
G1.add_edge(11, 3)
G1.add_edge(0, 8)
G2.add_edge("a", "l")
G2.add_edge("z", "j")
assert vf2pp_isomorphism(G1, G2, node_label="label")
def test_custom_graph5_same_labels(self):
G1 = nx.Graph()
edges1 = [
(1, 5),
(1, 2),
(1, 4),
(2, 3),
(2, 6),
(3, 4),
(3, 7),
(4, 8),
(5, 8),
(5, 6),
(6, 7),
(7, 8),
]
mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"}
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label")
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Add different edges in each graph, maintaining symmetry
G1.add_edges_from([(3, 6), (2, 7), (2, 5), (1, 3), (4, 7), (6, 8)])
G2.add_edges_from(
[
(mapped[6], mapped[3]),
(mapped[2], mapped[7]),
(mapped[1], mapped[6]),
(mapped[5], mapped[7]),
(mapped[3], mapped[8]),
(mapped[2], mapped[4]),
]
)
assert vf2pp_isomorphism(G1, G2, node_label="label")
# Obtain two different but isomorphic subgraphs from G1 and G2
H1 = nx.Graph(G1.subgraph([1, 5, 8, 6, 7, 3]))
H2 = nx.Graph(
G2.subgraph(
[mapped[1], mapped[4], mapped[8], mapped[7], mapped[3], mapped[5]]
)
)
assert vf2pp_isomorphism(H1, H2, node_label="label")
# Delete corresponding node from the two graphs
H1.remove_node(8)
H2.remove_node(mapped[7])
assert vf2pp_isomorphism(H1, H2, node_label="label")
# Re-orient, maintaining isomorphism
H1.add_edge(1, 6)
H1.remove_edge(3, 6)
assert vf2pp_isomorphism(H1, H2, node_label="label")
def test_custom_graph5_different_labels(self):
G1 = nx.Graph()
edges1 = [
(1, 5),
(1, 2),
(1, 4),
(2, 3),
(2, 6),
(3, 4),
(3, 7),
(4, 8),
(5, 8),
(5, 6),
(6, 7),
(7, 8),
]
mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"}
G1.add_edges_from(edges1)
G2 = nx.relabel_nodes(G1, mapped)
colors = ["red", "blue", "grey", "none", "brown", "solarized", "yellow", "pink"]
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip([mapped[n] for n in G1], it.cycle(labels_many))),
"label",
)
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
# Assign different colors to matching nodes
c = 0
for node in G1.nodes():
color1 = colors[c]
color2 = colors[(c + 3) % len(colors)]
G1.nodes[node]["label"] = color1
G2.nodes[mapped[node]]["label"] = color2
c += 1
assert vf2pp_isomorphism(G1, G2, node_label="label") is None
# Get symmetrical sub-graphs of G1,G2 and compare them
H1 = G1.subgraph([1, 5])
H2 = G2.subgraph(["i", "c"])
c = 0
for node1, node2 in zip(H1.nodes(), H2.nodes()):
H1.nodes[node1]["label"] = "red"
H2.nodes[node2]["label"] = "red"
c += 1
assert vf2pp_isomorphism(H1, H2, node_label="label")
def test_disconnected_graph_all_same_labels(self):
G1 = nx.Graph()
G1.add_nodes_from(list(range(10)))
mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0}
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label")
nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label")
assert vf2pp_isomorphism(G1, G2, node_label="label")
def test_disconnected_graph_all_different_labels(self):
G1 = nx.Graph()
G1.add_nodes_from(list(range(10)))
mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0}
G2 = nx.relabel_nodes(G1, mapped)
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label")
nx.set_node_attributes(
G2,
dict(zip([mapped[n] for n in G1], it.cycle(labels_many))),
"label",
)
assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped
def test_disconnected_graph_some_same_labels(self):
G1 = nx.Graph()
G1.add_nodes_from(list(range(10)))
mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0}
G2 = nx.relabel_nodes(G1, mapped)
colors = [
"white",
"white",
"white",
"purple",
"purple",
"red",
"red",
"pink",
"pink",
"pink",
]
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(colors))), "label")
nx.set_node_attributes(
G2, dict(zip([mapped[n] for n in G1], it.cycle(colors))), "label"
)
assert vf2pp_isomorphism(G1, G2, node_label="label")
|
TestGraphISOVF2pp
|
python
|
spack__spack
|
lib/spack/spack/container/writers.py
|
{
"start": 11910,
"end": 12689
}
|
class ____(PathContext):
"""Context used to instantiate a Singularity definition file"""
#: Name of the template used for Singularity definition files
template_name = "container/singularity.def"
@property
def singularity_config(self):
return self.container_config.get("singularity", {})
@tengine.context_property
def runscript(self):
return self.singularity_config.get("runscript", "")
@tengine.context_property
def startscript(self):
return self.singularity_config.get("startscript", "")
@tengine.context_property
def test(self):
return self.singularity_config.get("test", "")
@tengine.context_property
def help(self):
return self.singularity_config.get("help", "")
|
SingularityContext
|
python
|
falconry__falcon
|
tests/test_middleware.py
|
{
"start": 28657,
"end": 29573
}
|
class ____(TestMiddleware):
@pytest.mark.parametrize('independent_middleware', [True, False])
def test_can_access_resource_params(self, asgi, util, independent_middleware):
"""Test that params can be accessed from within process_resource"""
global context
class Resource:
def on_get(self, req, resp, **params):
resp.text = json.dumps(params)
app = util.create_app(
asgi,
middleware=AccessParamsMiddleware(),
independent_middleware=independent_middleware,
)
app.add_route('/path/{id}', Resource())
client = testing.TestClient(app)
response = client.simulate_request(path='/path/22')
assert 'params' in context
assert context['params']
assert context['params']['id'] == '22'
assert response.json == {'added': True, 'id': '22'}
|
TestResourceMiddleware
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tracers/base.py
|
{
"start": 847,
"end": 14311
}
|
class ____(_TracerCore, BaseCallbackHandler, ABC):
"""Base interface for tracers."""
@abstractmethod
def _persist_run(self, run: Run) -> None:
"""Persist a run."""
def _start_trace(self, run: Run) -> None:
"""Start a trace for a run."""
super()._start_trace(run)
self._on_run_create(run)
def _end_trace(self, run: Run) -> None:
"""End a trace for a run."""
if not run.parent_run_id:
self._persist_run(run)
self.run_map.pop(str(run.id))
self._on_run_update(run)
def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Start a trace for an LLM run.
Args:
serialized: The serialized model.
messages: The messages to start the chat with.
run_id: The run ID.
tags: The tags for the run.
parent_run_id: The parent run ID.
metadata: The metadata for the run.
name: The name of the run.
**kwargs: Additional arguments.
Returns:
The run.
"""
chat_model_run = self._create_chat_model_run(
serialized=serialized,
messages=messages,
run_id=run_id,
parent_run_id=parent_run_id,
tags=tags,
metadata=metadata,
name=name,
**kwargs,
)
self._start_trace(chat_model_run)
self._on_chat_model_start(chat_model_run)
return chat_model_run
def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Start a trace for an LLM run.
Args:
serialized: The serialized model.
prompts: The prompts to start the LLM with.
run_id: The run ID.
tags: The tags for the run.
parent_run_id: The parent run ID.
metadata: The metadata for the run.
name: The name of the run.
**kwargs: Additional arguments.
Returns:
The run.
"""
llm_run = self._create_llm_run(
serialized=serialized,
prompts=prompts,
run_id=run_id,
parent_run_id=parent_run_id,
tags=tags,
metadata=metadata,
name=name,
**kwargs,
)
self._start_trace(llm_run)
self._on_llm_start(llm_run)
return llm_run
@override
def on_llm_new_token(
self,
token: str,
*,
chunk: GenerationChunk | ChatGenerationChunk | None = None,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Run:
"""Run on new LLM token. Only available when streaming is enabled.
Args:
token: The token.
chunk: The chunk.
run_id: The run ID.
parent_run_id: The parent run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
# "chat_model" is only used for the experimental new streaming_events format.
# This change should not affect any existing tracers.
llm_run = self._llm_run_with_token_event(
token=token,
run_id=run_id,
chunk=chunk,
parent_run_id=parent_run_id,
)
self._on_llm_new_token(llm_run, token, chunk)
return llm_run
@override
def on_retry(
self,
retry_state: RetryCallState,
*,
run_id: UUID,
**kwargs: Any,
) -> Run:
"""Run on retry.
Args:
retry_state: The retry state.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
return self._llm_run_with_retry_event(
retry_state=retry_state,
run_id=run_id,
)
@override
def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> Run:
"""End a trace for an LLM run.
Args:
response: The response.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
# "chat_model" is only used for the experimental new streaming_events format.
# This change should not affect any existing tracers.
llm_run = self._complete_llm_run(
response=response,
run_id=run_id,
)
self._end_trace(llm_run)
self._on_llm_end(llm_run)
return llm_run
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
**kwargs: Any,
) -> Run:
"""Handle an error for an LLM run.
Args:
error: The error.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
# "chat_model" is only used for the experimental new streaming_events format.
# This change should not affect any existing tracers.
llm_run = self._errored_llm_run(
error=error, run_id=run_id, response=kwargs.pop("response", None)
)
self._end_trace(llm_run)
self._on_llm_error(llm_run)
return llm_run
@override
def on_chain_start(
self,
serialized: dict[str, Any],
inputs: dict[str, Any],
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
run_type: str | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Start a trace for a chain run.
Args:
serialized: The serialized chain.
inputs: The inputs for the chain.
run_id: The run ID.
tags: The tags for the run.
parent_run_id: The parent run ID.
metadata: The metadata for the run.
run_type: The type of the run.
name: The name of the run.
**kwargs: Additional arguments.
Returns:
The run.
"""
chain_run = self._create_chain_run(
serialized=serialized,
inputs=inputs,
run_id=run_id,
tags=tags,
parent_run_id=parent_run_id,
metadata=metadata,
run_type=run_type,
name=name,
**kwargs,
)
self._start_trace(chain_run)
self._on_chain_start(chain_run)
return chain_run
@override
def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
inputs: dict[str, Any] | None = None,
**kwargs: Any,
) -> Run:
"""End a trace for a chain run.
Args:
outputs: The outputs for the chain.
run_id: The run ID.
inputs: The inputs for the chain.
**kwargs: Additional arguments.
Returns:
The run.
"""
chain_run = self._complete_chain_run(
outputs=outputs,
run_id=run_id,
inputs=inputs,
)
self._end_trace(chain_run)
self._on_chain_end(chain_run)
return chain_run
@override
def on_chain_error(
self,
error: BaseException,
*,
inputs: dict[str, Any] | None = None,
run_id: UUID,
**kwargs: Any,
) -> Run:
"""Handle an error for a chain run.
Args:
error: The error.
inputs: The inputs for the chain.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
chain_run = self._errored_chain_run(
error=error,
run_id=run_id,
inputs=inputs,
)
self._end_trace(chain_run)
self._on_chain_error(chain_run)
return chain_run
def on_tool_start(
self,
serialized: dict[str, Any],
input_str: str,
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
inputs: dict[str, Any] | None = None,
**kwargs: Any,
) -> Run:
"""Start a trace for a tool run.
Args:
serialized: The serialized tool.
input_str: The input string.
run_id: The run ID.
tags: The tags for the run.
parent_run_id: The parent run ID.
metadata: The metadata for the run.
name: The name of the run.
inputs: The inputs for the tool.
**kwargs: Additional arguments.
Returns:
The run.
"""
tool_run = self._create_tool_run(
serialized=serialized,
input_str=input_str,
run_id=run_id,
tags=tags,
parent_run_id=parent_run_id,
metadata=metadata,
name=name,
inputs=inputs,
**kwargs,
)
self._start_trace(tool_run)
self._on_tool_start(tool_run)
return tool_run
@override
def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> Run:
"""End a trace for a tool run.
Args:
output: The output for the tool.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
tool_run = self._complete_tool_run(
output=output,
run_id=run_id,
)
self._end_trace(tool_run)
self._on_tool_end(tool_run)
return tool_run
@override
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
**kwargs: Any,
) -> Run:
"""Handle an error for a tool run.
Args:
error: The error.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
tool_run = self._errored_tool_run(
error=error,
run_id=run_id,
)
self._end_trace(tool_run)
self._on_tool_error(tool_run)
return tool_run
def on_retriever_start(
self,
serialized: dict[str, Any],
query: str,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> Run:
"""Run when the Retriever starts running.
Args:
serialized: The serialized retriever.
query: The query.
run_id: The run ID.
parent_run_id: The parent run ID.
tags: The tags for the run.
metadata: The metadata for the run.
name: The name of the run.
**kwargs: Additional arguments.
Returns:
The run.
"""
retrieval_run = self._create_retrieval_run(
serialized=serialized,
query=query,
run_id=run_id,
parent_run_id=parent_run_id,
tags=tags,
metadata=metadata,
name=name,
**kwargs,
)
self._start_trace(retrieval_run)
self._on_retriever_start(retrieval_run)
return retrieval_run
@override
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
**kwargs: Any,
) -> Run:
"""Run when Retriever errors.
Args:
error: The error.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
retrieval_run = self._errored_retrieval_run(
error=error,
run_id=run_id,
)
self._end_trace(retrieval_run)
self._on_retriever_error(retrieval_run)
return retrieval_run
@override
def on_retriever_end(
self, documents: Sequence[Document], *, run_id: UUID, **kwargs: Any
) -> Run:
"""Run when the Retriever ends running.
Args:
documents: The documents.
run_id: The run ID.
**kwargs: Additional arguments.
Returns:
The run.
"""
retrieval_run = self._complete_retrieval_run(
documents=documents,
run_id=run_id,
)
self._end_trace(retrieval_run)
self._on_retriever_end(retrieval_run)
return retrieval_run
def __deepcopy__(self, memo: dict) -> BaseTracer:
"""Return self."""
return self
def __copy__(self) -> BaseTracer:
"""Return self."""
return self
|
BaseTracer
|
python
|
viewflow__viewflow
|
viewflow/forms/renderers.py
|
{
"start": 2542,
"end": 2614
}
|
class ____(WidgetRenderer):
tag = "vf-field-checkbox"
|
CheckboxRenderer
|
python
|
h5py__h5py
|
h5py/tests/test_group.py
|
{
"start": 7105,
"end": 8186
}
|
class ____(BaseGroup):
"""
Feature: Objects can be unlinked via "del" operator
"""
def test_delete(self):
""" Object deletion via "del" """
name = make_name()
self.f.create_group(name)
self.assertIn(name, self.f)
del self.f[name]
self.assertNotIn(name, self.f)
def test_nonexisting(self):
""" Deleting non-existent object raises KeyError """
with self.assertRaises(KeyError):
del self.f['foo']
def test_readonly_delete_exception(self):
""" Deleting object in readonly file raises KeyError """
# Note: it is impossible to restore the old behavior (ValueError)
# without breaking the above test (non-existing objects)
fname = self.mktemp()
hfile = File(fname, 'w')
try:
hfile.create_group('foo')
finally:
hfile.close()
hfile = File(fname, 'r')
try:
with self.assertRaises(KeyError):
del hfile['foo']
finally:
hfile.close()
|
TestDelete
|
python
|
scipy__scipy
|
scipy/signal/tests/test_peak_finding.py
|
{
"start": 2611,
"end": 4989
}
|
class ____:
def test_empty(self):
"""Test with empty signal."""
x = np.array([], dtype=np.float64)
for array in _local_maxima_1d(x):
xp_assert_equal(array, np.array([]), check_dtype=False)
assert array.base is None
def test_linear(self):
"""Test with linear signal."""
x = np.linspace(0, 100)
for array in _local_maxima_1d(x):
xp_assert_equal(array, np.array([], dtype=np.intp))
assert array.base is None
def test_simple(self):
"""Test with simple signal."""
x = np.linspace(-10, 10, 50)
x[2::3] += 1
expected = np.arange(2, 50, 3, dtype=np.intp)
for array in _local_maxima_1d(x):
# For plateaus of size 1, the edges are identical with the
# midpoints
xp_assert_equal(array, expected, check_dtype=False)
assert array.base is None
def test_flat_maxima(self):
"""Test if flat maxima are detected correctly."""
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
-5, -5, -5, -5, -5, -10])
midpoints, left_edges, right_edges = _local_maxima_1d(x)
xp_assert_equal(midpoints, np.array([2, 4, 8, 12, 18]), check_dtype=False)
xp_assert_equal(left_edges, np.array([2, 4, 7, 11, 16]), check_dtype=False)
xp_assert_equal(right_edges, np.array([2, 5, 9, 14, 20]), check_dtype=False)
@pytest.mark.parametrize('x', [
np.array([1., 0, 2]),
np.array([3., 3, 0, 4, 4]),
np.array([5., 5, 5, 0, 6, 6, 6]),
])
def test_signal_edges(self, x):
"""Test if behavior on signal edges is correct."""
for array in _local_maxima_1d(x):
xp_assert_equal(array, np.array([], dtype=np.intp))
assert array.base is None
def test_exceptions(self):
"""Test input validation and raised exceptions."""
with raises(ValueError, match="wrong number of dimensions"):
_local_maxima_1d(np.ones((1, 1)))
with raises(ValueError, match="expected 'const float64_t'"):
_local_maxima_1d(np.ones(1, dtype=int))
with raises(TypeError, match="list"):
_local_maxima_1d([1., 2.])
with raises(TypeError, match="'x' must not be None"):
_local_maxima_1d(None)
|
TestLocalMaxima1d
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/Graphs/graphsearch/a-star-search/python/util/node.py
|
{
"start": 0,
"end": 940
}
|
class ____:
def __init__(self, id, heuristic, f):
self.id=id
self.heuristic=heuristic
self.totalCost=0
self.f=f
self.parent=None
def setParent(self,parent):
self.parent=parent
def __lt__(self, other):
return self.f < other.f
def __repr__(self):
#return "node:[id={}, heuristic={}, totalCost={}, cost={}, parent={}]".format(self.id,self.heuristic,self.totalCost,self.cost,self.parent)
if(self.parent != None):
return "node:[id={}, parentId={}, f={}, heuristic={}]".format(self.id,self.parent.id,self.f, self.heuristic)
else:
return "node:[id={}], f={}, heuristic={}]".format(self.id,self.f, self.heuristic)
def copy(self):
"""
Sometimes we need a new copy of a node if you find it again as a neighbor of another node.
:return:
"""
return node(self.id,self.heuristic,self.f)
|
node
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.