""" parquet compat """
from __future__ import annotations
import io
import os
from typing import Any
from warnings import catch_warnings
from pandas._typing import (
FilePath,
ReadBuffer,
StorageOptions,
WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas import (
DataFrame,
MultiIndex,
get_option,
)
from pandas.core.shared_docs import _shared_docs
from pandas.util.version import Version
from pandas.io.common import (
IOHandles,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
def get_engine(engine: str) -> BaseImpl:
"""return our implementation"""
if engine == "auto":
engine = get_option("io.parquet.engine")
if engine == "auto":
# try engines in this order
engine_classes = [PyArrowImpl, FastParquetImpl]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"A suitable version of "
"pyarrow or fastparquet is required for parquet "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
if engine == "pyarrow":
return PyArrowImpl()
elif engine == "fastparquet":
return FastParquetImpl()
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
def _get_path_or_handle(
path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
fs: Any,
storage_options: StorageOptions = None,
mode: str = "rb",
is_dir: bool = False,
) -> tuple[
FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any
]:
"""File handling for PyArrow."""
path_or_handle = stringify_path(path)
if is_fsspec_url(path_or_handle) and fs is None:
fsspec = import_optional_dependency("fsspec")
fs, path_or_handle = fsspec.core.url_to_fs(
path_or_handle, **(storage_options or {})
)
elif storage_options and (not is_url(path_or_handle) or mode != "rb"):
# can't write to a remote url
# without making use of fsspec at the moment
raise ValueError("storage_options passed with buffer, or non-supported URL")
handles = None
if (
not fs
and not is_dir
and isinstance(path_or_handle, str)
and not os.path.isdir(path_or_handle)
):
# use get_handle only when we are very certain that it is not a directory
# fsspec resources can also point to directories
# this branch is used for example when reading from non-fsspec URLs
handles = get_handle(
path_or_handle, mode, is_text=False, storage_options=storage_options
)
fs = None
path_or_handle = handles.handle
return path_or_handle, handles, fs
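# Minimal usage sketch (illustrative; "data.parquet" is a hypothetical local
# file): what the PyArrow engine below gets back from the helper above, and
# the handles it must close when it is done.
#
#   path_or_handle, handles, fs = _get_path_or_handle("data.parquet", None, mode="rb")
#   # ... hand path_or_handle / fs to pyarrow, then:
#   if handles is not None:
#       handles.close()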
class BaseImpl:
@staticmethod
def validate_dataframe(df: DataFrame):
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names for all index levels (strings only)
if isinstance(df.columns, MultiIndex):
if not all(
x.inferred_type in {"string", "empty"} for x in df.columns.levels
):
raise ValueError(
"""
parquet must have string column names for all values in
each level of the MultiIndex
"""
)
else:
if df.columns.inferred_type not in {"string", "empty"}:
raise ValueError("parquet must have string column names")
# index level names must be strings
valid_names = all(
isinstance(name, str) for name in df.index.names if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def write(self, df: DataFrame, path, compression, **kwargs):
raise AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
raise AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
def __init__(self):
import_optional_dependency(
"pyarrow", extra="pyarrow is required for parquet support."
)
import pyarrow.parquet
# import utils to register the pyarrow extension types
import pandas.core.arrays._arrow_utils # noqa:F401
self.api = pyarrow
def write(
self,
df: DataFrame,
path: FilePath | WriteBuffer[bytes],
compression: str | None = "snappy",
index: bool | None = None,
storage_options: StorageOptions = None,
partition_cols: list[str] | None = None,
**kwargs,
):
self.validate_dataframe(df)
from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)}
if index is not None:
from_pandas_kwargs["preserve_index"] = index
table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
path_or_handle, handles, kwargs["filesystem"] = _get_path_or_handle(
path,
kwargs.pop("filesystem", None),
storage_options=storage_options,
mode="wb",
is_dir=partition_cols is not None,
)
try:
if partition_cols is not None:
# writes to multiple files under the given path
self.api.parquet.write_to_dataset(
table,
path_or_handle,
compression=compression,
partition_cols=partition_cols,
**kwargs,
)
else:
# write to single output file
self.api.parquet.write_table(
table, path_or_handle, compression=compression, **kwargs
)
finally:
if handles is not None:
handles.close()
def read(
self,
path,
columns=None,
use_nullable_dtypes=False,
storage_options: StorageOptions = None,
**kwargs,
):
kwargs["use_pandas_metadata"] = True
to_pandas_kwargs = {}
if use_nullable_dtypes:
import pandas as pd
mapping = {
self.api.int8(): pd.Int8Dtype(),
self.api.int16(): pd.Int16Dtype(),
self.api.int32(): pd.Int32Dtype(),
self.api.int64(): pd.Int64Dtype(),
self.api.uint8(): pd.UInt8Dtype(),
self.api.uint16(): pd.UInt16Dtype(),
self.api.uint32(): pd.UInt32Dtype(),
self.api.uint64(): pd.UInt64Dtype(),
self.api.bool_(): pd.BooleanDtype(),
self.api.string(): pd.StringDtype(),
}
to_pandas_kwargs["types_mapper"] = mapping.get
manager = get_option("mode.data_manager")
if manager == "array":
to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment]
path_or_handle, handles, kwargs["filesystem"] = _get_path_or_handle(
path,
kwargs.pop("filesystem", None),
storage_options=storage_options,
mode="rb",
)
try:
result = self.api.parquet.read_table(
path_or_handle, columns=columns, **kwargs
).to_pandas(**to_pandas_kwargs)
if manager == "array":
result = result._as_manager("array", copy=False)
return result
finally:
if handles is not None:
handles.close()
class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
fastparquet = import_optional_dependency(
"fastparquet", extra="fastparquet is required for parquet support."
)
self.api = fastparquet
def write(
self,
df: DataFrame,
path,
compression="snappy",
index=None,
partition_cols=None,
storage_options: StorageOptions = None,
**kwargs,
):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
if "partition_on" in kwargs and partition_cols is not None:
raise ValueError(
"Cannot use both partition_on and "
"partition_cols. Use partition_cols for partitioning data"
)
elif "partition_on" in kwargs:
partition_cols = kwargs.pop("partition_on")
if partition_cols is not None:
kwargs["file_scheme"] = "hive"
# cannot use get_handle as write() does not accept file buffers
path = stringify_path(path)
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
# if filesystem is provided by fsspec, file must be opened in 'wb' mode.
kwargs["open_with"] = lambda path, _: fsspec.open(
path, "wb", **(storage_options or {})
).open()
elif storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
with catch_warnings(record=True):
self.api.write(
path,
df,
compression=compression,
write_index=index,
partition_on=partition_cols,
**kwargs,
)
def read(
self, path, columns=None, storage_options: StorageOptions = None, **kwargs
):
parquet_kwargs: dict[str, Any] = {}
use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False)
if Version(self.api.__version__) >= Version("0.7.1"):
# We are disabling nullable dtypes for fastparquet pending discussion
parquet_kwargs["pandas_nulls"] = False
if use_nullable_dtypes:
raise ValueError(
"The 'use_nullable_dtypes' argument is not supported for the "
"fastparquet engine"
)
path = stringify_path(path)
handles = None
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
if Version(self.api.__version__) > Version("0.6.1"):
parquet_kwargs["fs"] = fsspec.open(
path, "rb", **(storage_options or {})
).fs
else:
parquet_kwargs["open_with"] = lambda path, _: fsspec.open(
path, "rb", **(storage_options or {})
).open()
elif isinstance(path, str) and not os.path.isdir(path):
# use get_handle only when we are very certain that it is not a directory
# fsspec resources can also point to directories
# this branch is used for example when reading from non-fsspec URLs
handles = get_handle(
path, "rb", is_text=False, storage_options=storage_options
)
path = handles.handle
parquet_file = self.api.ParquetFile(path, **parquet_kwargs)
result = parquet_file.to_pandas(columns=columns, **kwargs)
if handles is not None:
handles.close()
return result
@doc(storage_options=_shared_docs["storage_options"])
def to_parquet(
df: DataFrame,
path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
storage_options: StorageOptions = None,
partition_cols: list[str] | None = None,
**kwargs,
) -> bytes | None:
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. If None, the result is
returned as bytes. If a string, it will be used as Root Directory path
when writing a partitioned dataset. The engine fastparquet does not
accept file-like objects.
.. versionchanged:: 1.2.0
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},
default 'snappy'. Name of the compression to use. Use ``None``
for no compression. The supported compression methods actually
depend on which engine is used. For 'pyarrow', 'snappy', 'gzip',
'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet',
only 'gzip' and 'snappy' are supported.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output. If
``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
partition_cols : str or list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
{storage_options}
.. versionadded:: 1.2.0
kwargs
Additional keyword arguments passed to the engine
Returns
-------
bytes if no path argument is provided else None
"""
if isinstance(partition_cols, str):
partition_cols = [partition_cols]
impl = get_engine(engine)
path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path
impl.write(
df,
path_or_buf,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
if path is None:
assert isinstance(path_or_buf, io.BytesIO)
return path_or_buf.getvalue()
else:
return None
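# Minimal usage sketch (illustrative, not part of the module; the DataFrame
# and file name are made up): writing with the helper defined above.
#
#   import pandas as pd
#   df = pd.DataFrame({"a": [1, 2, 3]})
#   to_parquet(df, "example.parquet", engine="pyarrow", compression="snappy")
#   raw = to_parquet(df, path=None)   # with no path, the parquet bytes are returned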
@doc(storage_options=_shared_docs["storage_options"])
def read_parquet(
path,
engine: str = "auto",
columns=None,
storage_options: StorageOptions = None,
use_nullable_dtypes: bool = False,
**kwargs,
):
"""
Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gs, and file. For file URLs, a host is expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
columns : list, default=None
If not None, only these columns will be read from the file.
{storage_options}
.. versionadded:: 1.3.0
use_nullable_dtypes : bool, default False
If True, use dtypes that use ``pd.NA`` as missing value indicator
for the resulting DataFrame. (only applicable for the ``pyarrow``
engine)
As new dtypes are added that support ``pd.NA`` in the future, the
output with this option will change to use those dtypes.
Note: this is an experimental option, and behaviour (e.g. additional
support dtypes) may change without notice.
.. versionadded:: 1.2.0
**kwargs
Any additional kwargs are passed to the engine.
Returns
-------
DataFrame
"""
impl = get_engine(engine)
return impl.read(
path,
columns=columns,
storage_options=storage_options,
use_nullable_dtypes=use_nullable_dtypes,
**kwargs,
)
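# Minimal usage sketch (illustrative): reading back the file written in the
# example above. `use_nullable_dtypes` only takes effect with the pyarrow
# engine, as the docstring notes.
#
#   df = read_parquet("example.parquet", columns=["a"])
#   df = read_parquet("example.parquet", engine="pyarrow", use_nullable_dtypes=True)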
from builtins import zip
from builtins import range
from builtins import object
import re
import csv
import unicodecsv
from bs4 import BeautifulSoup
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
class LoadResults(object):
"""Entry point for data loading.
Determines appropriate loader for file and triggers load process.
"""
def run(self, mapping):
election_id = mapping['election']
if '2002' in election_id:
loader = MDLoader2002()
elif '2000' in election_id and 'primary' in election_id:
loader = MDLoader2000Primary()
elif '2008' in election_id and 'special' in election_id:
loader = MDLoader2008Special()
else:
loader = MDLoader()
loader.run(mapping)
class CountyOCDMixin(object):
"""
Loader mixin that adds convenience method for generating county-level
OCD IDs
"""
def _get_county_ocd_id(self, jurisdiction):
"""
Build an OCD ID for a county-level jurisdiction when the mapping
reflects the state OCD ID.
"""
# Baltimore City is treated like a county in the results, but we
# should use the city's OCD ID
if jurisdiction == "Baltimore City":
ocd_id = "{}/place:baltimore".format(self.mapping['ocd_id'])
else:
ocd_id = "{}/county:{}".format(self.mapping['ocd_id'],
ocd_type_id(jurisdiction))
return ocd_id
class MDBaseLoader(CountyOCDMixin, BaseLoader):
datasource = Datasource()
target_offices = set([
'President - Vice Pres',
'President and Vice President of the United States',
'U.S. Senator',
'U.S. Congress',
'Representative in Congress',
'Governor / Lt. Governor',
'Comptroller',
'Attorney General',
'State Senator',
'House of Delegates',
])
district_offices = set([
'U.S. Congress',
'Representative in Congress',
'State Senator',
"House of Delegates",
])
def _skip_row(self, row):
"""
Should this row be skipped?
This should be implemented in subclasses.
"""
return False
class MDLoader(MDBaseLoader):
"""
Parse Maryland election results for the 2000 general election and
all elections after 2002.
"""
def load(self):
with self._file_handle as csvfile:
results = []
reader = unicodecsv.DictReader(csvfile)
for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
elif 'state_legislative' in self.source:
results.extend(self._prep_state_leg_results(row))
elif 'precinct' in self.source:
results.append(self._prep_precinct_result(row))
else:
results.append(self._prep_county_result(row))
RawResult.objects.insert(results)
def _skip_row(self, row):
        if row['Office Name'] is None:
return True
return row['Office Name'].strip() not in self.target_offices
def _build_contest_kwargs(self, row, primary_type):
kwargs = {
'office': row['Office Name'].strip(),
'district': row['Office District'].strip(),
}
# Add party if it's a primary
#TODO: QUESTION: Should semi-closed also have party?
if primary_type == 'closed':
kwargs['primary_party'] = row['Party'].strip()
return kwargs
def _build_candidate_kwargs(self, row):
try:
full_name = row['Candidate Name'].strip()
except KeyError:
# 2000 results use "Candidate" for the column name
full_name = row['Candidate'].strip()
slug = slugify(full_name, substitute='-')
kwargs = {
'full_name': full_name,
#TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
'name_slug': slug,
}
return kwargs
def _base_kwargs(self, row):
"Build base set of kwargs for RawResult"
# TODO: Can this just be called once?
kwargs = self._build_common_election_kwargs()
contest_kwargs = self._build_contest_kwargs(row, kwargs['primary_type'])
candidate_kwargs = self._build_candidate_kwargs(row)
kwargs.update(contest_kwargs)
kwargs.update(candidate_kwargs)
return kwargs
def _get_state_ocd_id(self):
"""
Get the state portion of the mapping's OCD ID
        This is necessary because the mappings for some files have OCD IDs
like 'ocd-division/country:us/state:md/sldl:all'. We need to extract
the state portion, 'ocd-division/country:us/state:md' to build OCD
IDs for lower jurisdictions.
"""
bits = []
state_bit = "state:"+ self.state
for bit in self.mapping['ocd_id'].split('/'):
bits.append(bit)
if bit == state_bit:
break
return '/'.join(bits)
def _prep_state_leg_results(self, row):
kwargs = self._base_kwargs(row)
kwargs.update({
'reporting_level': 'state_legislative',
'winner': row['Winner'].strip(),
'write_in': self._writein(row),
'party': row['Party'].strip(),
})
try:
kwargs['write_in'] = row['Write-In?'].strip() # at the contest-level
except KeyError as e:
pass
results = []
for field, val in list(row.items()):
clean_field = field.strip()
# Legislative fields prefixed with LEGS
if not clean_field.startswith('LEGS'):
continue
kwargs.update({
'jurisdiction': clean_field,
# Remove the "LEGS " from the ocd_id. This is a somewhat
# transformy action, but do it here in order to make the OCD IDs
# as usable as possible when we bake out raw results
'ocd_id': "{}/sldl:{}".format(self._get_state_ocd_id(),
ocd_type_id(clean_field.replace("LEGS ", ""))),
'votes': self._votes(val),
})
results.append(RawResult(**kwargs))
return results
def _prep_county_result(self, row):
kwargs = self._base_kwargs(row)
vote_brkdown_fields = [
('election_day', 'Election Night Votes'),
('absentee', 'Absentees Votes'),
('provisional', 'Provisional Votes'),
('second_absentee', '2nd Absentees Votes'),
]
vote_breakdowns = {}
for field, key in vote_brkdown_fields:
try:
vote_breakdowns[field] = self._votes(row[key].strip())
except KeyError:
pass
kwargs.update({
'reporting_level': 'county',
'jurisdiction': self.mapping['name'],
'ocd_id': self.mapping['ocd_id'],
'party': row['Party'].strip(),
'votes': self._votes(row['Total Votes']),
'vote_breakdowns': vote_breakdowns,
})
if (kwargs['office'] not in self.district_offices
and kwargs['district'] != ''):
kwargs['reporting_level'] = 'congressional_district_by_county'
kwargs['reporting_district'] = kwargs['district']
del kwargs['district']
return RawResult(**kwargs)
def _prep_precinct_result(self, row):
kwargs = self._base_kwargs(row)
precinct = "%s-%s" % (row['Election District'], row['Election Precinct'].strip())
ocd_id = "{}/precinct:{}".format(self.mapping['ocd_id'],
ocd_type_id(precinct))
kwargs.update({
'reporting_level': 'precinct',
'jurisdiction': precinct,
'parent_jurisdiction': self.mapping['name'],
'ocd_id': ocd_id,
'party': row['Party'].strip(),
'votes': self._votes(row['Election Night Votes']),
'votes_type': 'election_day',
'winner': row['Winner'],
'write_in': self._writein(row),
})
return RawResult(**kwargs)
def _votes(self, val):
"""
Returns cleaned version of votes or 0 if it's a non-numeric value.
"""
if val.strip() == '':
return 0
try:
return int(float(val))
except ValueError:
            # Couldn't convert the value from a string
return 0
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except KeyError:
write_in = None
return write_in
class MDLoader2002(MDBaseLoader):
"""
Loads Maryland results for 2002.
Format:
Maryland results for 2002 are in a delimited text file where the delimiter
is '|'.
Fields:
0: Office
1: Office District - '-' is used to denote null values
2: County
3: Last Name - "zz998" is used for write-in candidates
4: Middle Name - "\\N" is used to denote null values
5: First Name - "Other Write-Ins" is used for write-in candidates
6: Party
7: Winner - Value is 0 or 1
8: UNKNOWN - Values are "(Vote for One)", "(Vote for No More Than Three)", etc.
9: Votes
10: UNKNOWN - Values are "\\N" for every row
Sample row:
House of Delegates |32 |Anne Arundel County |Burton |W. |Robert |Republican | 0|(Vote for No More Than Three) | 1494|\\N
Notes:
In the general election file, there are rows for judges and for
"Statewide Ballot Questions". The columns in these rows are shifted over,
but we can ignore these rows since we're not interested in these offices.
"""
def load(self):
headers = [
'office',
'district',
'jurisdiction',
'family_name',
'additional_name',
'given_name',
'party',
'winner',
'vote_type',
'votes',
'fill2'
]
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'county'
# Store result instances for bulk loading
results = []
with self._file_handle as csvfile:
reader = unicodecsv.DictReader(csvfile, fieldnames=headers, delimiter='|')
for row in reader:
if self._skip_row(row):
continue
rr_kwargs = self._common_kwargs.copy()
if rr_kwargs['primary_type'] == 'closed':
rr_kwargs['primary_party'] = row['party'].strip()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
jurisdiction = row['jurisdiction'].strip()
rr_kwargs.update({
'party': row['party'].strip(),
'jurisdiction': jurisdiction,
'ocd_id': self._get_county_ocd_id(jurisdiction),
'office': row['office'].strip(),
'district': row['district'].strip(),
'votes': int(row['votes'].strip()),
})
results.append(RawResult(**rr_kwargs))
RawResult.objects.insert(results)
def _skip_row(self, row):
return row['office'].strip() not in self.target_offices
def _build_contest_kwargs(self, row):
return {
'office': row['office'].strip(),
'district': row['district'].strip(),
}
def _build_candidate_kwargs(self, row):
return {
'family_name': row['family_name'].strip(),
'given_name': row['given_name'].strip(),
'additional_name': row['additional_name'].strip(),
}
class MDLoader2000Primary(MDBaseLoader):
office_choices = [
"President and Vice President of the United States",
"U.S. Senator",
"Representative in Congress",
"Judge of the Circuit Court",
"Female Delegates and Alternate to the Democratic National Convention",
"Female Delegates to the Democratic National Convention",
"Male Delegates to the Democratic National Convention",
"Male Delegates and Alternate to the Democratic National Convention",
"Delegates to the Republican National Convention",
]
def load(self):
candidates = {}
results = []
last_office = None
last_party = None
last_district = None
common_kwargs = self._build_common_election_kwargs()
with self._file_handle as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not len(row):
continue # Skip blank lines
# determine if this is a row with an office
office, party, district = self._parse_header(row)
if office:
# It's a header row
if office in self.target_offices:
# It's an office we care about. Save the office and
# party for the next row
last_office = office
last_party = party
last_district = district
else:
last_office = None
last_party = None
last_district = None
elif last_office and row[0] == '':
# Candidate name row
candidates, winner_name = self._parse_candidates(row)
elif last_office: # has to be a county result
new_results = self._parse_results(row, last_office,
last_party, last_district,
candidates, winner_name, common_kwargs)
results.extend(new_results)
RawResult.objects.insert(results)
def _parse_header(self, row):
"""
Returns a tuple of office and party and congressional district
if the row is a header.
Returns (None, None, None) for a non-header row.
Note that the district doesn't represent the district of the office
"""
office = self._parse_office(row)
if office:
party = self._parse_party(row)
district = self._parse_district(row)
else:
party = None
district = None
return office, party, district
def _parse_office(self, row):
for o in self.office_choices:
if o in row[0]:
return o
return None
def _parse_party(self, row):
if 'Democratic' in row[0]:
return 'Democratic'
elif 'Republican' in row[0]:
return 'Republican'
else:
return None
def _parse_district(self, row):
if 'District' not in row[0]:
return None
return re.search(r'(\d+)', row[0]).groups(0)[0]
def _parse_candidates(self, row):
        candidates = []
        winner = None
for col in row:
if col != '':
full_name = col.strip()
if 'Winner' in full_name:
# Trim winner from candidate name
full_name, remainder = full_name.split(' Winner')
winner = full_name
candidates.append(full_name)
return candidates, winner
    # TODO: QUESTION: How to handle "Uncommitted to any ..." values
def _parse_results(self, row, office, party, district, candidates,
winner_name, common_kwargs):
results = []
cols = [x.strip() for x in row if x != '']
county = cols[0].strip()
cand_results = list(zip(candidates, cols[1:]))
for cand, votes in cand_results:
result_kwargs = common_kwargs.copy()
result_kwargs.update({
'jurisdiction': county,
'ocd_id': self._get_county_ocd_id(county),
'office': office,
'party': party,
'full_name': cand,
'votes': int(votes),
})
if result_kwargs['primary_type'] == 'closed':
result_kwargs['primary_party'] = party
if office == "Representative in Congress":
# In the case of U.S. representatives, the district represents
# the office district. In all other cases, it just
# represents the level of result aggregation.
result_kwargs['district'] = district
if cand == winner_name:
result_kwargs['winner'] = 'Winner'
# Try to figure out if this is a case where results are
# provided by congressional district split by county and
# record this.
result_kwargs['reporting_level'] = self._get_reporting_level(district)
if result_kwargs['reporting_level'] == 'congressional_district_by_county':
result_kwargs['reporting_district'] = district
results.append(RawResult(**result_kwargs))
return results
def _get_reporting_level(self, district):
"""
Returns the reporting level based on the value of the results' district.
This deals with the way in which results for 2000 primaries are
returned broken down by both congressional district, split by county.
"""
if district:
return "congressional_district_by_county"
else:
return "county"
class MDLoader2008Special(CountyOCDMixin, BaseLoader):
"""
Loader for the Maryland 2008 4th Congressional District Special election results
"""
datasource = Datasource()
def load(self):
table = self._get_html_table()
rows = self._parse_html_table(table)
winner_name = self._parse_winner_name(table)
candidate_attrs = self._parse_candidates_and_parties(rows[0],
winner_name)
results = self._parse_results(rows[1:3], candidate_attrs)
RawResult.objects.insert(results)
def _get_html_table(self):
soup = BeautifulSoup(self._file_handle, 'html.parser')
return soup.find(text=re.compile("Donna Edwards")).parent.parent.parent
def _parse_html_table(self, table):
rows = []
for tr in table.find_all('tr'):
rows.append(self._parse_html_table_row(tr))
return rows
def _parse_html_table_row(self, tr):
row = []
cells = tr.find_all('th') + tr.find_all('td')
for cell in cells:
row.append(cell.text.strip())
return row
def _parse_winner_name(self, table):
cell = table.select('th > img')[0].parent
return self._parse_name(cell.text.strip())
def _parse_candidates_and_parties(self, row, winner_name):
candidate_attrs = []
for cell in row[1:]:
# Skip the first cell. It's a header, "County"
attrs = {
'full_name': self._parse_name(cell),
'party': self._parse_party(cell),
'write_in': self._parse_write_in(cell),
}
if attrs['full_name'] == winner_name:
attrs['contest_winner'] = True
candidate_attrs.append(attrs)
return candidate_attrs
def _parse_name(self, s):
if s == "Other Write-Ins":
return s
# We know that all the candidate names are just first and last names
bits = re.split(r'\s', s)
return ' '.join(bits[:2])
def _parse_party(self, s):
if s == "Other Write-Ins":
return None
bits = re.split(r'\s', s)
return bits[2]
def _parse_write_in(self, s):
if s == "Other Write-Ins":
return s
elif "Write-In" in s:
return "Write-In"
else:
return ""
def _parse_results(self, rows, candidate_attrs):
# These raw result attributes will be the same for every result.
common_kwargs = self._build_common_election_kwargs()
common_kwargs.update({
'office': "Representative in Congress",
'district': '4',
'reporting_level': "county",
})
results = []
for row in rows:
county = row[0]
for i in range(1, len(row)):
kwargs = common_kwargs.copy()
kwargs.update(candidate_attrs[i-1])
kwargs['jurisdiction'] = county
kwargs['ocd_id'] = self._get_county_ocd_id(county)
kwargs['votes'] = self._parse_votes(row[i])
results.append(RawResult(**kwargs))
return results
def _parse_votes(self, s):
return int(s.split(' ')[0].replace(',', ''))
""" If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000.
"""
def mul_sum(a: int = 3, b: int = 5) -> int:
    max_num = 1000
    all_nums = [x for x in range(1, max_num) if (x % a == 0) or (x % b == 0)]
    return sum(all_nums)
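# Cross-check (assumption: the same limit of 1000 used above): the same sum via
# the inclusion-exclusion closed form, without enumerating every number.
def sum_divisible_by(k: int, below: int = 1000) -> int:
    n = (below - 1) // k          # how many positive multiples of k lie below `below`
    return k * n * (n + 1) // 2   # k * (1 + 2 + ... + n)
# sum_divisible_by(3) + sum_divisible_by(5) - sum_divisible_by(15) == mul_sum()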
if __name__ == "__main__":
result = mul_sum()
    print(result)
from ..adapter import CustomSocialAccountAdapter
def test_authentication_error_logs(mocker):
mocker.patch(
"allauth.socialaccount.adapter.DefaultSocialAccountAdapter.authentication_error"
) # noqa
error = mocker.patch("{{cookiecutter.project_slug}}.multisalesforce.adapter.logger.error")
adapter = CustomSocialAccountAdapter()
adapter.authentication_error()
assert error.called
from typing import Optional

from pydantic import BaseSettings
class Settings(BaseSettings):
APP_ENDPOINT: str = 'localhost:8080'
    CONFIG_PATH: Optional[str] = None
DATACENTER_ID: int = 0
WORKER_ID: int = 0
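# Minimal usage sketch (illustrative; the values are made up): with pydantic's
# BaseSettings, the fields above are read from environment variables of the
# same name and fall back to the defaults declared here.
#
#   settings = Settings()                 # reads APP_ENDPOINT, WORKER_ID, ... from the env
#   settings = Settings(DATACENTER_ID=2)  # explicit keyword overrides also work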
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'offline.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
import os
from PIL import Image
# Check that we are in the expected working directory
rep_cour=os.getcwd()
if rep_cour!=r"C:\Documents and Settings\Administrateur\Bureau\ISN/trait_img":
    os.chdir(r"C:\Documents and Settings\Administrateur\Bureau\ISN/trait_img")
print(os.getcwd())
print("Tout est en ordre!")
# Image parameters + display
nom_image=("img_base.pgm")
img_in=Image.open(nom_image)
print("Nom de l'image :",nom_image)
print("Format de l'image :",img_in.format)
print("Taille de l'image :",img_in.size)
print("Mode de l'image :",img_in.mode)
#img_in.show()
# Create a black-and-white (thresholded) copy
taille=img_in.size
col=taille[0]
lgn=taille[1]
img_out=Image.new(img_in.mode,img_in.size)
y=0
x=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
if p<175:
img_out.putpixel((x,y),p)
else:
img_out.putpixel((x,y),255)
y=y+1
p=0
y=0
x=x+1
nom_copie_image=("img_copie.pgm")
img_out.save(nom_copie_image)
img_in_1=Image.open(nom_copie_image)
#img_in_1.show()
# Create a negative copy
img_out=Image.new(img_in.mode,img_in.size)
y=0
x=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
p=255-p
img_out.putpixel((x,y),p)
y=y+1
p=0
y=0
x=x+1
nom_copie_image=("img_copie_negatif.pgm")
img_out.save(nom_copie_image)
img_in_2=Image.open(nom_copie_image)
#img_in_2.show()
# Create a reduced (half-size) copy
img_out=Image.new(img_in.mode,(int(col/2)+1,int(lgn/2)+1))
y=0
x=0
y1=0
x1=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
img_out.putpixel((x1,y1),p)
y=y+2
y1=y1+1
p=0
y1=0
y=0
x1=x1+1
x=x+2
nom_copie_image=("img_copie_reduc.pgm")
img_out.save(nom_copie_image)
img_in_3=Image.open(nom_copie_image)
#img_in_3.show()
# Create a "photomaton" copy (the image tiled into four quadrants)
img_out=Image.new(img_in.mode,img_in.size)
y=0
x=0
y1=0
x1=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
img_out.putpixel((x1+int(col/2),y1),p)
img_out.putpixel((x1,y1+int(lgn/2)),p)
img_out.putpixel((x1,y1),p)
img_out.putpixel((x1+int(col/2),y1+int(lgn/2)),p)
y=y+2
y1=y1+1
p=0
y1=0
y=0
x1=x1+1
x=x+2
nom_copie_image=("img_copie_photomaton.pgm")
img_out.save(nom_copie_image)
img_in_4=Image.open(nom_copie_image)
#img_in_4.show()
# Create an edge-detection copy
img_out=Image.new(img_in.mode,img_in.size)
y=1
x=1
while (x<col-1):
while(y<lgn-1):
        b=img_in.getpixel((x+1,y))
        c=img_in.getpixel((x,y+1))
        d=img_in.getpixel((x-1,y))
        e=img_in.getpixel((x,y-1))
t=((b-d)**2+(c-e)**2)**0.5
if t>25:
p=255
else:
p=0
        img_out.putpixel((x,y),p)
y=y+1
p=0
    y=1  # restart at 1: the loop reads (x, y-1), so y must stay >= 1
x=x+1
nom_copie_image=("img_copie_effetbord.pgm")
img_out.save(nom_copie_image)
img_in_5=Image.open(nom_copie_image)
img_in_5.show()
# -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
                        'Pieter Swart (swart@lanl.gov)',
                        'Dan Schult (dschult@colgate.edu)',
                        'Joel Miller (joel.c.miller.research@gmail.com)',
                        'Ben Edwards',
                        'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
]
def is_graphical(sequence, method='eg'):
"""Returns True if sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if the sequence is a valid degree sequence and False if not.
Examples
--------
>>> G = nx.path_graph(4)
>>> sequence = (d for n, d in G.degree())
>>> nx.is_graphical(sequence)
True
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
def _basic_graphical_tests(deg_sequence):
# Sort and perform some simple tests on the sequence
if not nx.utils.is_list_of_ints(deg_sequence):
raise nx.NetworkXUnfeasible
p = len(deg_sequence)
num_degs = [0] * p
dmax, dmin, dsum, n = 0, p, 0, 0
for d in deg_sequence:
# Reject if degree is negative or larger than the sequence length
if d < 0 or d >= p:
raise nx.NetworkXUnfeasible
# Process only the non-zero integers
elif d > 0:
dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1
num_degs[d] += 1
# Reject sequence if it has odd sum or is oversaturated
if dsum % 2 or dsum > n * (n - 1):
raise nx.NetworkXUnfeasible
return dmax, dmin, dsum, n, num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem.
Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
modstubs = [0] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax] - 1, n - 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k] - 1, n - 1
if k > 1:
modstubs[mslen] = k - 1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub] + 1, n + 1
return True
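# Illustrative check (not part of the module): two tiny sequences run through
# the Havel-Hakimi validator above.
#
#   >>> is_valid_degree_sequence_havel_hakimi([2, 2, 2])     # a triangle
#   True
#   >>> is_valid_degree_sequence_havel_hakimi([3, 3, 1, 1])  # no simple graph realizes this
#   False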
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation is done using the Erdős-Gallai theorem [EG1960]_.
Parameters
----------
deg_sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
This implementation uses an equivalent form of the Erdős-Gallai criterion.
Worst-case run time is $O(n)$ where $n$ is the length of the sequence.
Specifically, a sequence d is graphical if and only if the
sum of the sequence is even and for all strong indices k in the sequence,
.. math::
\sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k)
= k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
A strong index k is any index where d_k >= k and the value n_j is the
number of occurrences of j in d. The maximal strong index is called the
Durfee index.
This particular rearrangement comes from the proof of Theorem 3 in [2]_.
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [2]_.
References
----------
.. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
Discrete Mathematics, 265, pp. 417-420 (2003).
.. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[EG1960]_, [choudum1986]_
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
# Perform the EG checks using the reformulation of Zverovich and Zverovich
k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
for dk in range(dmax, dmin - 1, -1):
if dk < k + 1: # Check if already past Durfee index
return True
if num_degs[dk] > 0:
run_size = num_degs[dk] # Process a run of identical-valued degrees
if dk < k + run_size: # Check if end of run is past Durfee index
run_size = dk - k # Adjust back to Durfee index
sum_deg += run_size * dk
for v in range(run_size):
sum_nj += num_degs[k + v]
sum_jnj += (k + v) * num_degs[k + v]
k += run_size
if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj:
return False
return True
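# Illustrative check (not part of the module): the analogous sanity checks for
# the Erdős-Gallai validator above.
#
#   >>> is_valid_degree_sequence_erdos_gallai([3, 3, 2, 2, 1, 1])
#   True
#   >>> is_valid_degree_sequence_erdos_gallai([3, 3, 3, 1])
#   False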
def is_multigraphical(sequence):
"""Returns True if some multigraph can realize the sequence.
Parameters
----------
    sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is a multigraphic degree sequence and False if not.
Notes
-----
The worst-case run time is $O(n)$ where $n$ is the length of the sequence.
References
----------
.. [1] S. L. Hakimi. "On the realizability of a set of integers as
degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
(1962).
"""
deg_sequence = list(sequence)
if not nx.utils.is_list_of_ints(deg_sequence):
return False
dsum, dmax = 0, 0
for d in deg_sequence:
if d < 0:
return False
dsum, dmax = dsum + d, max(dmax, d)
if dsum % 2 or dsum < 2 * dmax:
return False
return True
def is_pseudographical(sequence):
"""Returns True if some pseudograph can realize the sequence.
Every nonnegative integer sequence with an even sum is pseudographical
(see [1]_).
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
Returns
-------
valid : bool
True if the sequence is a pseudographic degree sequence and False if not.
Notes
-----
The worst-case run time is $O(n)$ where n is the length of the sequence.
References
----------
.. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
pp. 778-782 (1976).
"""
s = list(sequence)
if not nx.utils.is_list_of_ints(s):
return False
return sum(s) % 2 == 0 and min(s) >= 0
def is_digraphical(in_sequence, out_sequence):
r"""Returns True if some directed graph can realize the in- and out-degree
sequences.
Parameters
----------
in_sequence : list or iterable container
A sequence of integer node in-degrees
out_sequence : list or iterable container
A sequence of integer node out-degrees
Returns
-------
valid : bool
True if in and out-sequences are digraphic False if not.
Notes
-----
This algorithm is from Kleitman and Wang [1]_.
The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the
sum and length of the sequences respectively.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = list(in_sequence)
out_deg_sequence = list(out_sequence)
if not nx.utils.is_list_of_ints(in_deg_sequence):
return False
if not nx.utils.is_list_of_ints(out_deg_sequence):
return False
# Process the sequences and form two heaps to store degree pairs with
# either zero or non-zero out degrees
sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
maxin = 0
if maxn == 0:
return True
stubheap, zeroheap = [], []
for n in range(maxn):
in_deg, out_deg = 0, 0
if n < nout:
out_deg = out_deg_sequence[n]
if n < nin:
in_deg = in_deg_sequence[n]
if in_deg < 0 or out_deg < 0:
return False
sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1 * out_deg, -1 * in_deg))
elif out_deg > 0:
zeroheap.append(-1 * out_deg)
if sumin != sumout:
return False
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0, 0)] * (maxin + 1)
# Successively reduce degree sequence by removing the maximum out degree
while stubheap:
# Take the first value in the sequence with non-zero in degree
(freeout, freein) = heapq.heappop(stubheap)
freein *= -1
if freein > len(stubheap) + len(zeroheap):
return False
# Attach out stubs to the nodes with the most in stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
stubout = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin) = heapq.heappop(stubheap)
if stubout == 0:
return False
# Check if target is now totally connected
if stubout + 1 < 0 or stubin < 0:
modstubs[mslen] = (stubout + 1, stubin)
mslen += 1
# Add back the nodes to the heap that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, stub[0])
if freeout < 0:
heapq.heappush(zeroheap, freeout)
return True
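# Illustrative check (not part of the module): the digraph with edges
# 0->1, 0->2 and 1->0 has in-degrees [1, 1, 1] and out-degrees [2, 1, 0].
#
#   >>> is_digraphical([1, 1, 1], [2, 1, 0])
#   True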
import logging
import operator
import time
import traceback
from pathlib import Path
from typing import List, Type, Set, Tuple, Optional
from PyQt5.QtCore import QEvent, Qt, pyqtSignal
from PyQt5.QtGui import QIcon, QWindowStateChangeEvent, QCursor
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QHeaderView, QToolBar, \
QLabel, QPlainTextEdit, QProgressBar, QPushButton, QComboBox, QApplication, QListView, QSizePolicy, \
QMenu, QHBoxLayout
from bauh.api import user
from bauh.api.abstract.cache import MemoryCache
from bauh.api.abstract.context import ApplicationContext
from bauh.api.abstract.controller import SoftwareManager, SoftwareAction
from bauh.api.abstract.model import SoftwarePackage
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.api.paths import LOGS_DIR
from bauh.commons.html import bold
from bauh.context import set_theme
from bauh.stylesheet import read_all_themes_metadata, ThemeMetadata
from bauh.view.core.config import CoreConfigManager
from bauh.view.core.tray_client import notify_tray
from bauh.view.qt import dialog, commons, qt_utils
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.apps_table import PackagesTable, UpgradeToggleButton
from bauh.view.qt.commons import sum_updates_displayed
from bauh.view.qt.components import new_spacer, IconButton, QtComponentsManager, to_widget, QSearchBar, \
QCustomMenuAction, QCustomToolbar
from bauh.view.qt.dialog import ConfirmationDialog
from bauh.view.qt.history import HistoryDialog
from bauh.view.qt.info import InfoDialog
from bauh.view.qt.root import RootDialog
from bauh.view.qt.screenshots import ScreenshotsDialog
from bauh.view.qt.settings import SettingsWindow
from bauh.view.qt.thread import UpgradeSelected, RefreshApps, UninstallPackage, DowngradePackage, ShowPackageInfo, \
ShowPackageHistory, SearchPackages, InstallPackage, AnimateProgress, NotifyPackagesReady, FindSuggestions, \
ListWarnings, \
AsyncAction, LaunchPackage, ApplyFilters, CustomSoftwareAction, ShowScreenshots, CustomAction, \
NotifyInstalledLoaded, \
IgnorePackageUpdates, SaveTheme, StartAsyncAction
from bauh.view.qt.view_model import PackageView, PackageViewStatus
from bauh.view.util import util, resource
from bauh.view.util.translation import I18n
DARK_ORANGE = '#FF4500'
# action ids
ACTION_APPLY_FILTERS = 1
ACTION_SEARCH = 2
ACTION_INSTALL = 3
ACTION_UNINSTALL = 4
ACTION_INFO = 5
ACTION_HISTORY = 6
ACTION_DOWNGRADE = 7
ACTION_UPGRADE = 8
ACTION_LAUNCH = 9
ACTION_CUSTOM_ACTION = 10
ACTION_SCREENSHOTS = 11
ACTION_IGNORE_UPDATES = 12
# components ids
SEARCH_BAR = 1
BT_INSTALLED = 2
BT_REFRESH = 3
BT_SUGGESTIONS = 4
BT_UPGRADE = 5
CHECK_UPDATES = 6
CHECK_APPS = 7
COMBO_TYPES = 8
COMBO_CATEGORIES = 9
INP_NAME = 10
CHECK_DETAILS = 11
BT_SETTINGS = 12
BT_CUSTOM_ACTIONS = 13
BT_ABOUT = 14
BT_THEMES = 15
# component groups ids
GROUP_FILTERS = 1
GROUP_VIEW_INSTALLED = 2
GROUP_VIEW_SEARCH = 3
GROUP_UPPER_BAR = 4
GROUP_LOWER_BTS = 5
class ManageWindow(QWidget):
signal_user_res = pyqtSignal(bool)
signal_root_password = pyqtSignal(bool, str)
signal_table_update = pyqtSignal()
signal_stop_notifying = pyqtSignal()
def __init__(self, i18n: I18n, icon_cache: MemoryCache, manager: SoftwareManager, screen_size, config: dict,
context: ApplicationContext, http_client: HttpClient, logger: logging.Logger, icon: QIcon):
super(ManageWindow, self).__init__()
self.setObjectName('manage_window')
self.comp_manager = QtComponentsManager()
self.i18n = i18n
self.logger = logger
self.manager = manager
self.working = False # restrict the number of threaded actions
        self.installed_loaded = False # whether the installed packages were already loaded (the UI may be configured not to load them on startup)
        self.pkgs = [] # packages currently loaded in the table
self.pkgs_available = [] # all packages loaded in memory
self.pkgs_installed = [] # cached installed packages
self.display_limit = config['ui']['table']['max_displayed']
self.icon_cache = icon_cache
self.screen_size = screen_size
self.config = config
self.context = context
self.http_client = http_client
self.icon_app = icon
self.setWindowIcon(self.icon_app)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.toolbar_status = QToolBar()
self.toolbar_status.setObjectName('toolbar_status')
self.toolbar_status.addWidget(new_spacer())
self.label_status = QLabel()
self.label_status.setObjectName('label_status')
self.label_status.setText('')
self.toolbar_status.addWidget(self.label_status)
self.search_bar = QSearchBar(search_callback=self.search)
self.search_bar.set_placeholder(i18n['window_manage.search_bar.placeholder'] + "...")
self.search_bar.set_tooltip(i18n['window_manage.search_bar.tooltip'])
self.search_bar.set_button_tooltip(i18n['window_manage.search_bar.button_tooltip'])
self.comp_manager.register_component(SEARCH_BAR, self.search_bar, self.toolbar_status.addWidget(self.search_bar))
self.toolbar_status.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_status)
self.toolbar_filters = QWidget()
self.toolbar_filters.setObjectName('table_filters')
self.toolbar_filters.setLayout(QHBoxLayout())
self.toolbar_filters.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_filters.setContentsMargins(0, 0, 0, 0)
self.check_updates = QCheckBox()
self.check_updates.setObjectName('check_updates')
self.check_updates.setCursor(QCursor(Qt.PointingHandCursor))
self.check_updates.setText(self.i18n['updates'].capitalize())
self.check_updates.stateChanged.connect(self._handle_updates_filter)
self.check_updates.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_updates)
self.comp_manager.register_component(CHECK_UPDATES, self.check_updates)
self.check_apps = QCheckBox()
self.check_apps.setObjectName('check_apps')
self.check_apps.setCursor(QCursor(Qt.PointingHandCursor))
self.check_apps.setText(self.i18n['manage_window.checkbox.only_apps'])
self.check_apps.setChecked(True)
self.check_apps.stateChanged.connect(self._handle_filter_only_apps)
self.check_apps.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_apps)
self.comp_manager.register_component(CHECK_APPS, self.check_apps)
self.any_type_filter = 'any'
self.cache_type_filter_icons = {}
self.combo_filter_type = QComboBox()
self.combo_filter_type.setObjectName('combo_types')
self.combo_filter_type.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setView(QListView())
self.combo_filter_type.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_filter_type.setEditable(True)
self.combo_filter_type.lineEdit().setReadOnly(True)
self.combo_filter_type.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_filter_type.activated.connect(self._handle_type_filter)
self.combo_filter_type.addItem('--- {} ---'.format(self.i18n['type'].capitalize()), self.any_type_filter)
self.combo_filter_type.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.combo_filter_type)
self.comp_manager.register_component(COMBO_TYPES, self.combo_filter_type)
self.any_category_filter = 'any'
self.combo_categories = QComboBox()
self.combo_categories.setObjectName('combo_categories')
self.combo_categories.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_categories.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setEditable(True)
self.combo_categories.lineEdit().setReadOnly(True)
self.combo_categories.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_categories.activated.connect(self._handle_category_filter)
self.combo_categories.sizePolicy().setRetainSizeWhenHidden(True)
self.combo_categories.addItem('--- {} ---'.format(self.i18n['category'].capitalize()), self.any_category_filter)
self.toolbar_filters.layout().addWidget(self.combo_categories)
self.comp_manager.register_component(COMBO_CATEGORIES, self.combo_categories)
self.input_name = QSearchBar(search_callback=self.begin_apply_filters)
self.input_name.palette().swap(self.combo_categories.palette())
self.input_name.setObjectName('name_filter')
self.input_name.set_placeholder(self.i18n['manage_window.name_filter.placeholder'] + '...')
self.input_name.set_tooltip(self.i18n['manage_window.name_filter.tooltip'])
self.input_name.set_button_tooltip(self.i18n['manage_window.name_filter.button_tooltip'])
self.input_name.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.input_name)
self.comp_manager.register_component(INP_NAME, self.input_name)
self.toolbar_filters.layout().addWidget(new_spacer())
toolbar_bts = []
bt_inst = QPushButton()
bt_inst.setObjectName('bt_installed')
bt_inst.setProperty('root', 'true')
bt_inst.setCursor(QCursor(Qt.PointingHandCursor))
bt_inst.setToolTip(self.i18n['manage_window.bt.installed.tooltip'])
bt_inst.setText(self.i18n['manage_window.bt.installed.text'].capitalize())
bt_inst.clicked.connect(self._begin_loading_installed)
bt_inst.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_inst)
self.toolbar_filters.layout().addWidget(bt_inst)
self.comp_manager.register_component(BT_INSTALLED, bt_inst)
bt_ref = QPushButton()
bt_ref.setObjectName('bt_refresh')
bt_ref.setProperty('root', 'true')
bt_ref.setCursor(QCursor(Qt.PointingHandCursor))
bt_ref.setToolTip(i18n['manage_window.bt.refresh.tooltip'])
bt_ref.setText(self.i18n['manage_window.bt.refresh.text'])
bt_ref.clicked.connect(self.begin_refresh_packages)
bt_ref.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_ref)
self.toolbar_filters.layout().addWidget(bt_ref)
self.comp_manager.register_component(BT_REFRESH, bt_ref)
self.bt_upgrade = QPushButton()
self.bt_upgrade.setProperty('root', 'true')
self.bt_upgrade.setObjectName('bt_upgrade')
self.bt_upgrade.setCursor(QCursor(Qt.PointingHandCursor))
self.bt_upgrade.setToolTip(i18n['manage_window.bt.upgrade.tooltip'])
self.bt_upgrade.setText(i18n['manage_window.bt.upgrade.text'])
self.bt_upgrade.clicked.connect(self.upgrade_selected)
self.bt_upgrade.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(self.bt_upgrade)
self.toolbar_filters.layout().addWidget(self.bt_upgrade)
self.comp_manager.register_component(BT_UPGRADE, self.bt_upgrade)
# setting all buttons to the same size:
bt_biggest_size = 0
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_width > bt_biggest_size:
bt_biggest_size = bt_width
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_biggest_size > bt_width:
bt.setFixedWidth(bt_biggest_size)
self.layout.addWidget(self.toolbar_filters)
self.table_container = QWidget()
self.table_container.setObjectName('table_container')
self.table_container.setContentsMargins(0, 0, 0, 0)
self.table_container.setLayout(QVBoxLayout())
self.table_container.layout().setContentsMargins(0, 0, 0, 0)
self.table_apps = PackagesTable(self, self.icon_cache, download_icons=bool(self.config['download']['icons']))
self.table_apps.change_headers_policy()
self.table_container.layout().addWidget(self.table_apps)
self.layout.addWidget(self.table_container)
self.toolbar_console = QWidget()
self.toolbar_console.setObjectName('console_toolbar')
self.toolbar_console.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_console.setLayout(QHBoxLayout())
self.toolbar_console.setContentsMargins(0, 0, 0, 0)
self.check_details = QCheckBox()
self.check_details.setObjectName('check_details')
self.check_details.setCursor(QCursor(Qt.PointingHandCursor))
self.check_details.setText(self.i18n['manage_window.checkbox.show_details'])
self.check_details.stateChanged.connect(self._handle_console)
self.toolbar_console.layout().addWidget(self.check_details)
self.comp_manager.register_component(CHECK_DETAILS, self.check_details)
self.toolbar_console.layout().addWidget(new_spacer())
self.label_displayed = QLabel()
self.label_displayed.setObjectName('apps_displayed')
self.label_displayed.setCursor(QCursor(Qt.WhatsThisCursor))
self.label_displayed.setToolTip(self.i18n['manage_window.label.apps_displayed.tip'])
self.toolbar_console.layout().addWidget(self.label_displayed)
self.label_displayed.hide()
self.layout.addWidget(self.toolbar_console)
self.textarea_details = QPlainTextEdit(self)
self.textarea_details.setObjectName('textarea_details')
self.textarea_details.setProperty('console', 'true')
self.textarea_details.resize(self.table_apps.size())
self.layout.addWidget(self.textarea_details)
self.textarea_details.setVisible(False)
self.textarea_details.setReadOnly(True)
self.toolbar_substatus = QToolBar()
self.toolbar_substatus.setObjectName('toolbar_substatus')
self.toolbar_substatus.addWidget(new_spacer())
self.label_substatus = QLabel()
self.label_substatus.setObjectName('label_substatus')
self.label_substatus.setCursor(QCursor(Qt.WaitCursor))
self.toolbar_substatus.addWidget(self.label_substatus)
self.toolbar_substatus.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_substatus)
self._change_label_substatus('')
self.thread_update = self._bind_async_action(UpgradeSelected(self.manager, context.internet_checker, self.i18n), finished_call=self._finish_upgrade_selected)
self.thread_refresh = self._bind_async_action(RefreshApps(self.manager), finished_call=self._finish_refresh_packages, only_finished=True)
self.thread_uninstall = self._bind_async_action(UninstallPackage(self.manager, self.icon_cache, self.i18n), finished_call=self._finish_uninstall)
self.thread_show_info = self._bind_async_action(ShowPackageInfo(self.manager), finished_call=self._finish_show_info)
self.thread_show_history = self._bind_async_action(ShowPackageHistory(self.manager, self.i18n), finished_call=self._finish_show_history)
self.thread_search = self._bind_async_action(SearchPackages(self.manager), finished_call=self._finish_search, only_finished=True)
self.thread_downgrade = self._bind_async_action(DowngradePackage(self.manager, self.i18n), finished_call=self._finish_downgrade)
self.thread_suggestions = self._bind_async_action(FindSuggestions(man=self.manager), finished_call=self._finish_load_suggestions, only_finished=True)
self.thread_launch = self._bind_async_action(LaunchPackage(self.manager), finished_call=self._finish_launch_package, only_finished=False)
self.thread_custom_action = self._bind_async_action(CustomAction(manager=self.manager, i18n=self.i18n), finished_call=self._finish_execute_custom_action)
self.thread_screenshots = self._bind_async_action(ShowScreenshots(self.manager), finished_call=self._finish_show_screenshots)
self.thread_apply_filters = ApplyFilters()
self.thread_apply_filters.signal_finished.connect(self._finish_apply_filters)
self.thread_apply_filters.signal_table.connect(self._update_table_and_upgrades)
self.signal_table_update.connect(self.thread_apply_filters.stop_waiting)
self.thread_install = InstallPackage(manager=self.manager, icon_cache=self.icon_cache, i18n=self.i18n)
self._bind_async_action(self.thread_install, finished_call=self._finish_install)
self.thread_animate_progress = AnimateProgress()
self.thread_animate_progress.signal_change.connect(self._update_progress)
self.thread_notify_pkgs_ready = NotifyPackagesReady()
self.thread_notify_pkgs_ready.signal_changed.connect(self._update_package_data)
self.thread_notify_pkgs_ready.signal_finished.connect(self._update_state_when_pkgs_ready)
self.signal_stop_notifying.connect(self.thread_notify_pkgs_ready.stop_working)
self.thread_ignore_updates = IgnorePackageUpdates(manager=self.manager)
self._bind_async_action(self.thread_ignore_updates, finished_call=self.finish_ignore_updates)
self.thread_reload = StartAsyncAction(delay_in_milis=5)
self.thread_reload.signal_start.connect(self._reload)
self.container_bottom = QWidget()
self.container_bottom.setObjectName('container_bottom')
self.container_bottom.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.container_bottom.setLayout(QHBoxLayout())
self.container_bottom.layout().setContentsMargins(0, 0, 0, 0)
self.container_bottom.layout().addWidget(new_spacer())
if config['suggestions']['enabled']:
bt_sugs = IconButton(action=lambda: self._begin_load_suggestions(filter_installed=True),
i18n=i18n,
tooltip=self.i18n['manage_window.bt.suggestions.tooltip'])
bt_sugs.setObjectName('suggestions')
self.container_bottom.layout().addWidget(bt_sugs)
self.comp_manager.register_component(BT_SUGGESTIONS, bt_sugs)
bt_themes = IconButton(self.show_themes,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_themes.tip'])
bt_themes.setObjectName('themes')
self.container_bottom.layout().addWidget(bt_themes)
self.comp_manager.register_component(BT_THEMES, bt_themes)
self.custom_actions = [a for a in manager.gen_custom_actions()]
bt_custom_actions = IconButton(action=self.show_custom_actions,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_custom_actions.tip'])
bt_custom_actions.setObjectName('custom_actions')
bt_custom_actions.setVisible(bool(self.custom_actions))
self.container_bottom.layout().addWidget(bt_custom_actions)
self.comp_manager.register_component(BT_CUSTOM_ACTIONS, bt_custom_actions)
bt_settings = IconButton(action=self.show_settings,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_settings.tooltip'])
bt_settings.setObjectName('settings')
self.container_bottom.layout().addWidget(bt_settings)
self.comp_manager.register_component(BT_SETTINGS, bt_settings)
bt_about = IconButton(action=self._show_about,
i18n=self.i18n,
tooltip=self.i18n['manage_window.settings.about'])
bt_about.setObjectName('about')
self.container_bottom.layout().addWidget(bt_about)
self.comp_manager.register_component(BT_ABOUT, bt_about)
self.layout.addWidget(self.container_bottom)
self.container_progress = QCustomToolbar(spacing=0, policy_height=QSizePolicy.Fixed)
self.container_progress.setObjectName('container_progress')
self.container_progress.add_space()
self.progress_bar = QProgressBar()
self.progress_bar.setObjectName('progress_manage')
self.progress_bar.setCursor(QCursor(Qt.WaitCursor))
self.progress_bar.setTextVisible(False)
self.container_progress.add_widget(self.progress_bar)
self.container_progress.add_space()
self.layout.addWidget(self.container_progress)
qt_utils.centralize(self)
self.filter_only_apps = True
self.type_filter = self.any_type_filter
self.category_filter = self.any_category_filter
self.filter_updates = False
self._maximized = False
self.progress_controll_enabled = True
self.recent_uninstall = False
self.types_changed = False
self.dialog_about = None
self.load_suggestions = bool(config['suggestions']['enabled'])
self.suggestions_requested = False
self.first_refresh = True
self.thread_warnings = ListWarnings(man=manager, i18n=i18n)
self.thread_warnings.signal_warnings.connect(self._show_warnings)
self.settings_window = None
self.search_performed = False
self.thread_save_theme = SaveTheme(theme_key='')
self.thread_load_installed = NotifyInstalledLoaded()
self.thread_load_installed.signal_loaded.connect(self._finish_loading_installed)
self.setMinimumHeight(int(screen_size.height() * 0.5))
self.setMinimumWidth(int(screen_size.width() * 0.6))
self._register_groups()
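    # Component groups bundle related widget ids so they can be shown/hidden or saved/restored
    # together through the QtComponentsManager (e.g. all filters, all lower buttons).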
def _register_groups(self):
filters = (CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME)
self.comp_manager.register_group(GROUP_FILTERS, False, *filters)
self.comp_manager.register_group(GROUP_VIEW_SEARCH, False,
COMBO_CATEGORIES, COMBO_TYPES, INP_NAME, # filters
BT_INSTALLED, BT_SUGGESTIONS) # buttons
self.comp_manager.register_group(GROUP_VIEW_INSTALLED, False,
BT_REFRESH, BT_UPGRADE, # buttons
*filters)
self.comp_manager.register_group(GROUP_UPPER_BAR, False,
CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME,
BT_INSTALLED, BT_SUGGESTIONS, BT_REFRESH, BT_UPGRADE)
self.comp_manager.register_group(GROUP_LOWER_BTS, False, BT_SUGGESTIONS, BT_THEMES, BT_CUSTOM_ACTIONS, BT_SETTINGS, BT_ABOUT)
def update_custom_actions(self):
self.custom_actions = [a for a in self.manager.gen_custom_actions()]
def _update_process_progress(self, val: int):
if self.progress_controll_enabled:
self.thread_animate_progress.set_progress(val)
def _change_status(self, status: str = None):
if status:
self.label_status.setText(status + '...')
self.label_status.setCursor(QCursor(Qt.WaitCursor))
else:
self.label_status.setText('')
self.label_status.unsetCursor()
def _set_table_enabled(self, enabled: bool):
self.table_apps.setEnabled(enabled)
if enabled:
self.table_container.unsetCursor()
else:
self.table_container.setCursor(QCursor(Qt.WaitCursor))
def begin_apply_filters(self):
self.stop_notifying_package_states()
self._begin_action(action_label=self.i18n['manage_window.status.filtering'],
action_id=ACTION_APPLY_FILTERS)
self.comp_manager.disable_visible_from_groups(GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self.comp_manager.set_component_read_only(INP_NAME, True)
self.thread_apply_filters.filters = self._gen_filters()
self.thread_apply_filters.pkgs = self.pkgs_available
self.thread_apply_filters.start()
self.setFocus(Qt.NoFocusReason)
def _finish_apply_filters(self):
self._finish_action(ACTION_APPLY_FILTERS)
self.update_bt_upgrade()
def stop_notifying_package_states(self):
if self.thread_notify_pkgs_ready.isRunning():
self.signal_stop_notifying.emit()
self.thread_notify_pkgs_ready.wait(1000)
def _update_table_and_upgrades(self, pkgs_info: dict):
self._update_table(pkgs_info=pkgs_info, signal=True)
if self.pkgs:
self._update_state_when_pkgs_ready()
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.start()
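    # Wires an AsyncAction's signals (confirmation, output, status, substatus, progress, root password)
    # to the window's callbacks and returns the same action, so worker threads can drive the UI.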
def _bind_async_action(self, action: AsyncAction, finished_call, only_finished: bool = False) -> AsyncAction:
action.signal_finished.connect(finished_call)
if not only_finished:
action.signal_confirmation.connect(self._ask_confirmation)
action.signal_output.connect(self._update_action_output)
action.signal_message.connect(self._show_message)
action.signal_status.connect(self._change_label_status)
action.signal_substatus.connect(self._change_label_substatus)
action.signal_progress.connect(self._update_process_progress)
action.signal_progress_control.connect(self.set_progress_controll)
action.signal_root_password.connect(self._pause_and_ask_root_password)
self.signal_user_res.connect(action.confirm)
self.signal_root_password.connect(action.set_root_password)
return action
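    # Modal prompts pause the progress animation, show the dialog and then emit the user's answer
    # back to the waiting worker thread through signal_user_res / signal_root_password.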
def _ask_confirmation(self, msg: dict):
self.thread_animate_progress.pause()
extra_widgets = [to_widget(comp=c, i18n=self.i18n) for c in msg['components']] if msg.get('components') else None
diag = ConfirmationDialog(title=msg['title'],
body=msg['body'],
i18n=self.i18n,
widgets=extra_widgets,
confirmation_label=msg['confirmation_label'],
deny_label=msg['deny_label'],
deny_button=msg['deny_button'],
window_cancel=msg['window_cancel'],
confirmation_button=msg.get('confirmation_button', True))
diag.ask()
res = diag.confirmed
self.thread_animate_progress.animate()
self.signal_user_res.emit(res)
def _pause_and_ask_root_password(self):
self.thread_animate_progress.pause()
valid, password = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
self.thread_animate_progress.animate()
self.signal_root_password.emit(valid, password)
def _show_message(self, msg: dict):
self.thread_animate_progress.pause()
dialog.show_message(title=msg['title'], body=msg['body'], type_=msg['type'])
self.thread_animate_progress.animate()
def _show_warnings(self, warnings: List[str]):
if warnings:
dialog.show_message(title=self.i18n['warning'].capitalize(), body='<p>{}</p>'.format('<br/><br/>'.join(warnings)), type_=MessageType.WARNING)
def show(self):
super(ManageWindow, self).show()
if not self.thread_warnings.isFinished():
self.thread_warnings.start()
qt_utils.centralize(self)
def verify_warnings(self):
self.thread_warnings.start()
def _begin_loading_installed(self):
if self.installed_loaded:
self.search_bar.clear()
self.input_name.set_text('')
self._begin_action(self.i18n['manage_window.status.installed'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_load_installed.start()
else:
self.load_suggestions = False
self.begin_refresh_packages()
def _finish_loading_installed(self):
self._finish_action()
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
self.update_pkgs(new_pkgs=None, as_installed=True)
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._set_lower_buttons_visible(True)
self._reorganize()
def _update_bts_installed_and_suggestions(self):
available_types = len(self.manager.get_managed_types())
self.comp_manager.set_component_visible(BT_INSTALLED, available_types > 0 and any([self.suggestions_requested, self.search_performed]))
self.comp_manager.set_component_visible(BT_SUGGESTIONS, available_types > 0)
def _hide_filters_no_packages(self):
if not self.pkgs:
self.comp_manager.set_group_visible(GROUP_FILTERS, False)
def _show_about(self):
if self.dialog_about is None:
self.dialog_about = AboutDialog(self.config)
self.dialog_about.show()
def _handle_updates_filter(self, status: int):
self.filter_updates = status == 2
self.begin_apply_filters()
def _handle_filter_only_apps(self, status: int):
self.filter_only_apps = status == 2
self.begin_apply_filters()
def _handle_type_filter(self, idx: int):
self.type_filter = self.combo_filter_type.itemData(idx)
self.combo_filter_type.adjustSize()
self.begin_apply_filters()
def _handle_category_filter(self, idx: int):
self.category_filter = self.combo_categories.itemData(idx)
self.begin_apply_filters()
def _update_state_when_pkgs_ready(self):
if self.progress_bar.isVisible():
return
self._reload_categories()
self._reorganize()
def _update_package_data(self, idx: int):
if self.table_apps.isEnabled():
pkg = self.pkgs[idx]
pkg.status = PackageViewStatus.READY
self.table_apps.update_package(pkg)
def _reload_categories(self):
categories = set()
for p in self.pkgs_available:
if p.model.categories:
for c in p.model.categories:
if c:
cat = c.strip().lower()
if cat:
categories.add(cat)
if categories:
self._update_categories(categories, keep_selected=True)
def changeEvent(self, e: QEvent):
if isinstance(e, QWindowStateChangeEvent):
self._maximized = self.isMaximized()
self.table_apps.change_headers_policy(maximized=self._maximized)
def _handle_console(self, checked: bool):
if checked:
self.textarea_details.show()
else:
self.textarea_details.hide()
def _handle_console_option(self, enable: bool):
if enable:
self.textarea_details.clear()
self.comp_manager.set_component_visible(CHECK_DETAILS, enable)
self.check_details.setChecked(False)
self.textarea_details.hide()
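    # Convention used below: each begin_* method hides/disables the relevant components, shows a
    # status label and starts a worker thread; the matching _finish_* callback restores the saved
    # UI state once the thread emits its result.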
def begin_refresh_packages(self, pkg_types: Optional[Set[Type[SoftwarePackage]]] = None):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.refreshing'])
self.comp_manager.set_components_visible(False)
self._handle_console_option(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_refresh.pkg_types = pkg_types
self.thread_refresh.start()
def _finish_refresh_packages(self, res: dict, as_installed: bool = True):
self._finish_action()
self._set_lower_buttons_visible(True)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
if self.search_performed or self.suggestions_requested:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
else:
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
if self.update_pkgs(res['installed'], as_installed=as_installed, types=res['types']):
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._reorganize()
self.load_suggestions = False
self.types_changed = False
def load_without_packages(self):
self.load_suggestions = False
self._handle_console_option(False)
self._finish_refresh_packages({'installed': None, 'types': None}, as_installed=False)
def _begin_load_suggestions(self, filter_installed: bool):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.suggestions'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = True
self.thread_suggestions.filter_installed = filter_installed
self.thread_suggestions.start()
def _finish_load_suggestions(self, res: dict):
self._finish_search(res)
def begin_uninstall(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.UNINSTALL, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.uninstalling'], pkg.model.name),
action_id=ACTION_UNINSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_uninstall.pkg = pkg
self.thread_uninstall.root_pwd = pwd
self.thread_uninstall.start()
def _finish_uninstall(self, res: dict):
self._finish_action(action_id=ACTION_UNINSTALL)
if res['success']:
src_pkg = res['pkg']
if self._can_notify_user():
util.notify_user('{} ({}) {}'.format(src_pkg.model.name, src_pkg.model.get_type(), self.i18n['uninstalled']))
if res['removed']:
for list_idx, pkg_list in enumerate((self.pkgs_available, self.pkgs, self.pkgs_installed)):
if pkg_list:
removed_idxs = []
for pkgv_idx, pkgv in enumerate(pkg_list):
if len(removed_idxs) == len(res['removed']):
break
for model in res['removed']:
if pkgv.model == model:
if list_idx == 0: # updates the model
pkgv.update_model(model)
if not self.search_performed or list_idx == 2: # always from the installed packages
removed_idxs.append(pkgv_idx)
if self.search_performed and list_idx == 1: # only for displayed
self.table_apps.update_package(pkgv, change_update_col=True)
                                    break # the model was found, so stop the loop
if removed_idxs:
# updating the list
removed_idxs.sort()
for decrement, pkg_idx in enumerate(removed_idxs):
del pkg_list[pkg_idx - decrement]
                            if list_idx == 1: # updates the rows if the current list represents the displayed packages
for decrement, idx in enumerate(removed_idxs):
self.table_apps.removeRow(idx - decrement)
self._update_table_indexes()
self.update_bt_upgrade()
self.update_custom_actions()
self._show_console_checkbox_if_output()
notify_tray()
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.uninstall.failed']))
def _update_table_indexes(self):
if self.pkgs:
for new_idx, pkgv in enumerate(self.pkgs): # updating the package indexes
pkgv.table_index = new_idx
def begin_launch_package(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.running_app'].format(pkg.model.name),
action_id=ACTION_LAUNCH)
self.comp_manager.disable_visible()
self.thread_launch.pkg = pkg
self.thread_launch.start()
def _finish_launch_package(self, success: bool):
self._finish_action(action_id=ACTION_LAUNCH)
def _can_notify_user(self):
return bool(self.config['system']['notifications']) and (self.isHidden() or self.isMinimized())
def _change_label_status(self, status: str):
self.label_status.setText(status)
def _change_label_substatus(self, substatus: str):
self.label_substatus.setText('<p>{}</p>'.format(substatus))
if not substatus:
self.toolbar_substatus.hide()
elif not self.toolbar_substatus.isVisible() and self.progress_bar.isVisible():
self.toolbar_substatus.show()
def _reorganize(self):
if not self._maximized:
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
def _update_table(self, pkgs_info: dict, signal: bool = False):
self.pkgs = pkgs_info['pkgs_displayed']
if pkgs_info['not_installed'] == 0:
update_check = sum_updates_displayed(pkgs_info) > 0
else:
update_check = False
self.table_apps.update_packages(self.pkgs, update_check_enabled=update_check)
if not self._maximized:
self.label_displayed.show()
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
if len(self.pkgs) == 0 and len(self.pkgs_available) == 0:
self.label_displayed.setText('')
else:
self.label_displayed.setText('{} / {}'.format(len(self.pkgs), len(self.pkgs_available)))
else:
self.label_displayed.hide()
if signal:
self.signal_table_update.emit()
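    # The upgrade button is only displayed when neither a search nor the suggestions view is active
    # and at least one displayed package is checked for update and not set to ignore updates.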
def update_bt_upgrade(self, pkgs_info: dict = None):
show_bt_upgrade = False
if not any([self.suggestions_requested, self.search_performed]) and (not pkgs_info or pkgs_info['not_installed'] == 0):
for pkg in (pkgs_info['pkgs_displayed'] if pkgs_info else self.pkgs):
if not pkg.model.is_update_ignored() and pkg.update_checked:
show_bt_upgrade = True
break
self.comp_manager.set_component_visible(BT_UPGRADE, show_bt_upgrade)
if show_bt_upgrade:
self._reorganize()
def change_update_state(self, pkgs_info: dict, trigger_filters: bool = True, keep_selected: bool = False):
self.update_bt_upgrade(pkgs_info)
if pkgs_info['updates'] > 0:
if pkgs_info['not_installed'] == 0:
if not self.comp_manager.is_visible(CHECK_UPDATES):
self.comp_manager.set_component_visible(CHECK_UPDATES, True)
if not self.filter_updates and not keep_selected:
self._change_checkbox(self.check_updates, True, 'filter_updates', trigger_filters)
if pkgs_info['napp_updates'] > 0 and self.filter_only_apps and not keep_selected:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger_filters)
else:
if not keep_selected:
self._change_checkbox(self.check_updates, False, 'filter_updates', trigger_filters)
self.comp_manager.set_component_visible(CHECK_UPDATES, False)
def _change_checkbox(self, checkbox: QCheckBox, checked: bool, attr: str = None, trigger: bool = True):
if not trigger:
checkbox.blockSignals(True)
checkbox.setChecked(checked)
if not trigger:
setattr(self, attr, checked)
checkbox.blockSignals(False)
def _gen_filters(self, ignore_updates: bool = False) -> dict:
return {
'only_apps': False if self.search_performed else self.filter_only_apps,
'type': self.type_filter,
'category': self.category_filter,
'updates': False if ignore_updates else self.filter_updates,
'name': self.input_name.text().lower() if self.input_name.text() else None,
'display_limit': None if self.filter_updates else self.display_limit
}
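    # Rebuilds the in-memory package views, applies the current filters and refreshes the table.
    # Returns False only when it falls back to loading suggestions because no applications were found.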
def update_pkgs(self, new_pkgs: Optional[List[SoftwarePackage]], as_installed: bool, types: Optional[Set[type]] = None, ignore_updates: bool = False, keep_filters: bool = False) -> bool:
self.input_name.set_text('')
pkgs_info = commons.new_pkgs_info()
filters = self._gen_filters(ignore_updates=ignore_updates)
if new_pkgs is not None:
old_installed = None
if as_installed:
old_installed = self.pkgs_installed
self.pkgs_installed = []
for pkg in new_pkgs:
app_model = PackageView(model=pkg, i18n=self.i18n)
commons.update_info(app_model, pkgs_info)
commons.apply_filters(app_model, filters, pkgs_info)
if old_installed and types:
for pkgv in old_installed:
if pkgv.model.__class__ not in types:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
else: # use installed
for pkgv in self.pkgs_installed:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
if pkgs_info['apps_count'] == 0:
if self.load_suggestions or self.types_changed:
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self._begin_load_suggestions(filter_installed=False)
self.load_suggestions = False
return False
else:
if not keep_filters:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger=False)
self.check_apps.setCheckable(False)
else:
if not keep_filters:
self.check_apps.setCheckable(True)
self._change_checkbox(self.check_apps, True, 'filter_only_apps', trigger=False)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_categories(pkgs_info['categories'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_type_filters(pkgs_info['available_types'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._apply_filters(pkgs_info, ignore_updates=ignore_updates)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self.pkgs_available = pkgs_info['pkgs']
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self.pkgs = pkgs_info['pkgs_displayed']
self._update_table(pkgs_info=pkgs_info)
if new_pkgs:
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.start()
self._resize(accept_lower_width=bool(self.pkgs_installed))
if self.first_refresh:
qt_utils.centralize(self)
self.first_refresh = False
if not self.installed_loaded and as_installed:
self.installed_loaded = True
return True
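    # Re-applies the current filters over all loaded packages to rebuild the displayed list.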
def _apply_filters(self, pkgs_info: dict, ignore_updates: bool):
pkgs_info['pkgs_displayed'] = []
filters = self._gen_filters(ignore_updates=ignore_updates)
for pkgv in pkgs_info['pkgs']:
commons.apply_filters(pkgv, filters, pkgs_info)
def _clean_combo_types(self):
if self.combo_filter_type.count() > 1:
for _ in range(self.combo_filter_type.count() - 1):
self.combo_filter_type.removeItem(1)
def _update_type_filters(self, available_types: dict = None, keep_selected: bool = False):
if available_types is None:
self.comp_manager.set_component_visible(COMBO_TYPES, self.combo_filter_type.count() > 2)
else:
keeping_selected = keep_selected and available_types and self.type_filter in available_types
if not keeping_selected:
self.type_filter = self.any_type_filter
if not available_types:
self._clean_combo_types()
if available_types:
self._clean_combo_types()
sel_type = -1
for idx, item in enumerate(available_types.items()):
app_type, icon_path, label = item[0], item[1]['icon'], item[1]['label']
icon = self.cache_type_filter_icons.get(app_type)
if not icon:
icon = QIcon(icon_path)
self.cache_type_filter_icons[app_type] = icon
self.combo_filter_type.addItem(icon, label, app_type)
if keeping_selected and app_type == self.type_filter:
sel_type = idx + 1
self.combo_filter_type.blockSignals(True)
self.combo_filter_type.setCurrentIndex(sel_type if sel_type > -1 else 0)
self.combo_filter_type.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_TYPES, len(available_types) > 1)
else:
self.comp_manager.set_component_visible(COMBO_TYPES, False)
def _update_categories(self, categories: Set[str] = None, keep_selected: bool = False):
if categories is None:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, self.combo_categories.count() > 1)
else:
keeping_selected = keep_selected and categories and self.category_filter in categories
if not keeping_selected:
self.category_filter = self.any_category_filter
if categories:
if self.combo_categories.count() > 1:
for _ in range(self.combo_categories.count() - 1):
self.combo_categories.removeItem(1)
selected_cat = -1
cat_list = list(categories)
cat_list.sort()
for idx, c in enumerate(cat_list):
self.__add_category(c)
if keeping_selected and c == self.category_filter:
selected_cat = idx + 1
self.combo_categories.blockSignals(True)
self.combo_categories.setCurrentIndex(selected_cat if selected_cat > -1 else 0)
self.combo_categories.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_CATEGORIES, True)
else:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, False)
def __add_category(self, category: str):
i18n_cat = self.i18n.get('category.{}'.format(category), self.i18n.get(category, category))
self.combo_categories.addItem(i18n_cat.capitalize(), category)
def _get_current_categories(self) -> Set[str]:
if self.combo_categories.count() > 1:
return {self.combo_categories.itemData(idx) for idx in range(self.combo_categories.count()) if idx > 0}
def _resize(self, accept_lower_width: bool = True):
table_width = self.table_apps.get_width()
toolbar_width = self.toolbar_filters.sizeHint().width()
topbar_width = self.toolbar_status.sizeHint().width()
new_width = max(table_width, toolbar_width, topbar_width)
        new_width *= 1.05 # extra room for the table's upgrade buttons (not the toolbar buttons)
if (self.pkgs and accept_lower_width) or new_width > self.width():
self.resize(int(new_width), self.height())
def set_progress_controll(self, enabled: bool):
self.progress_controll_enabled = enabled
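    # Asks for confirmation and then hands the currently displayed packages to the UpgradeSelected thread.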
def upgrade_selected(self):
body = QWidget()
body.setLayout(QHBoxLayout())
body.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
body.layout().addWidget(QLabel(self.i18n['manage_window.upgrade_all.popup.body']))
body.layout().addWidget(UpgradeToggleButton(pkg=None, root=self, i18n=self.i18n, clickable=False))
if ConfirmationDialog(title=self.i18n['manage_window.upgrade_all.popup.title'],
i18n=self.i18n, body=None,
widgets=[body]).ask():
self._begin_action(action_label=self.i18n['manage_window.status.upgrading'],
action_id=ACTION_UPGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_update.pkgs = self.pkgs
self.thread_update.start()
def _finish_upgrade_selected(self, res: dict):
self._finish_action()
if res.get('id'):
output = self.textarea_details.toPlainText()
if output:
try:
Path(UpgradeSelected.UPGRADE_LOGS_DIR).mkdir(parents=True, exist_ok=True)
logs_path = '{}/{}.log'.format(UpgradeSelected.UPGRADE_LOGS_DIR, res['id'])
with open(logs_path, 'w+') as f:
f.write(output)
self.textarea_details.appendPlainText('\n*Upgrade summary generated at: {}'.format(UpgradeSelected.SUMMARY_FILE.format(res['id'])))
self.textarea_details.appendPlainText('*Upgrade logs generated at: {}'.format(logs_path))
                except Exception:
traceback.print_exc()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_UPGRADE)
self.begin_refresh_packages(pkg_types=res['types'])
self._show_console_checkbox_if_output()
if self._can_notify_user():
util.notify_user('{} {}'.format(res['updated'], self.i18n['notification.update_selected.success']))
notify_tray()
else:
self.comp_manager.restore_state(ACTION_UPGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.update_selected.failed'])
self.update_custom_actions()
def _show_console_errors(self):
if self.textarea_details.toPlainText():
self.check_details.setChecked(True)
else:
self._handle_console_option(False)
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def _update_action_output(self, output: str):
self.textarea_details.appendPlainText(output)
def _begin_action(self, action_label: str, action_id: int = None):
self.thread_animate_progress.stop = False
self.thread_animate_progress.start()
self.progress_bar.setVisible(True)
if action_id is not None:
self.comp_manager.save_states(action_id, only_visible=True)
self._set_table_enabled(False)
self.comp_manager.set_component_visible(SEARCH_BAR, False)
self._change_status(action_label)
def _set_lower_buttons_visible(self, visible: bool):
self.comp_manager.set_group_visible(GROUP_LOWER_BTS, visible)
if visible:
self.comp_manager.set_component_visible(BT_CUSTOM_ACTIONS, bool(self.custom_actions))
def _finish_action(self, action_id: int = None):
self.thread_animate_progress.stop = True
self.thread_animate_progress.wait(msecs=1000)
self.progress_bar.setVisible(False)
self.progress_bar.setValue(0)
self.progress_bar.setTextVisible(False)
if action_id is not None:
self.comp_manager.restore_state(action_id)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
self._change_status()
self._change_label_substatus('')
self._set_table_enabled(True)
self.progress_controll_enabled = True
def begin_downgrade(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.DOWNGRADE, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.downgrading'], pkg.model.name),
action_id=ACTION_DOWNGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_downgrade.pkg = pkg
self.thread_downgrade.root_pwd = pwd
self.thread_downgrade.start()
def _finish_downgrade(self, res: dict):
self._finish_action()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_DOWNGRADE)
if self._can_notify_user():
util.notify_user('{} {}'.format(res['app'], self.i18n['downgraded']))
self.begin_refresh_packages(pkg_types={res['app'].model.__class__} if len(self.pkgs) > 1 else None)
self._show_console_checkbox_if_output()
self.update_custom_actions()
notify_tray()
else:
self.comp_manager.restore_state(ACTION_DOWNGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.downgrade.failed'])
def begin_show_info(self, pkg: dict):
self._begin_action(self.i18n['manage_window.status.info'], action_id=ACTION_INFO)
self.comp_manager.disable_visible()
self.thread_show_info.pkg = pkg
self.thread_show_info.start()
def _finish_show_info(self, pkg_info: dict):
self._finish_action(action_id=ACTION_INFO)
if pkg_info:
if len(pkg_info) > 1:
dialog_info = InfoDialog(pkg_info=pkg_info, icon_cache=self.icon_cache,
i18n=self.i18n, screen_size=self.screen_size)
dialog_info.exec_()
else:
dialog.show_message(title=self.i18n['warning'].capitalize(),
body=self.i18n['manage_window.info.no_info'].format(bold(pkg_info['__app__'].model.name)),
type_=MessageType.WARNING)
def begin_show_screenshots(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.screenshots'].format(bold(pkg.model.name)),
action_id=ACTION_SCREENSHOTS)
self.comp_manager.disable_visible()
self.thread_screenshots.pkg = pkg
self.thread_screenshots.start()
def _finish_show_screenshots(self, res: dict):
self._finish_action(ACTION_SCREENSHOTS)
if res.get('screenshots'):
diag = ScreenshotsDialog(pkg=res['pkg'],
http_client=self.http_client,
icon_cache=self.icon_cache,
logger=self.logger,
i18n=self.i18n,
screenshots=res['screenshots'])
diag.exec_()
else:
dialog.show_message(title=self.i18n['error'],
body=self.i18n['popup.screenshots.no_screenshot.body'].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def begin_show_history(self, pkg: PackageView):
self._begin_action(self.i18n['manage_window.status.history'], action_id=ACTION_HISTORY)
self.comp_manager.disable_visible()
self.thread_show_history.pkg = pkg
self.thread_show_history.start()
def _finish_show_history(self, res: dict):
self._finish_action(ACTION_HISTORY)
if res.get('error'):
self._handle_console_option(True)
self.textarea_details.appendPlainText(res['error'])
self.check_details.setChecked(True)
elif not res['history'].history:
dialog.show_message(title=self.i18n['action.history.no_history.title'],
body=self.i18n['action.history.no_history.body'].format(bold(res['history'].pkg.name)),
type_=MessageType.WARNING)
else:
dialog_history = HistoryDialog(res['history'], self.icon_cache, self.i18n)
dialog_history.exec_()
def _begin_search(self, word, action_id: int = None):
self.filter_updates = False
self._begin_action('{} {}'.format(self.i18n['manage_window.status.searching'], word if word else ''), action_id=action_id)
def search(self):
word = self.search_bar.text().strip()
if word:
self._handle_console(False)
self._begin_search(word, action_id=ACTION_SEARCH)
self.comp_manager.set_components_visible(False)
self.thread_search.word = word
self.thread_search.start()
def _finish_search(self, res: dict):
self._finish_action()
self.search_performed = True
if not res['error']:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
self.update_pkgs(res['pkgs_found'], as_installed=False, ignore_updates=True)
self._set_lower_buttons_visible(True)
self._update_bts_installed_and_suggestions()
self._hide_filters_no_packages()
self._reorganize()
else:
self.comp_manager.restore_state(ACTION_SEARCH)
dialog.show_message(title=self.i18n['warning'].capitalize(), body=self.i18n[res['error']], type_=MessageType.WARNING)
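    # The root password is only requested when the manager reports that the action requires it and
    # the current user is not already root; returns (password, proceed).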
def _ask_root_password(self, action: SoftwareAction, pkg: PackageView) -> Tuple[Optional[str], bool]:
pwd = None
requires_root = self.manager.requires_root(action, pkg.model)
if not user.is_root() and requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return pwd, False
return pwd, True
def install(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.INSTALL, pkg)
if not proceed:
return
self._begin_action('{} {}'.format(self.i18n['manage_window.status.installing'], pkg.model.name), action_id=ACTION_INSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_install.pkg = pkg
self.thread_install.root_pwd = pwd
self.thread_install.start()
def _finish_install(self, res: dict):
self._finish_action(action_id=ACTION_INSTALL)
console_output = self.textarea_details.toPlainText()
if console_output:
log_path = f"{LOGS_DIR}/install/{res['pkg'].model.get_type()}/{res['pkg'].model.name}"
try:
Path(log_path).mkdir(parents=True, exist_ok=True)
log_file = f'{log_path}/{int(time.time())}.log'
with open(log_file, 'w+') as f:
f.write(console_output)
self.textarea_details.appendPlainText(self.i18n['console.install_logs.path'].format('"{}"'.format(log_file)))
            except Exception:
self.textarea_details.appendPlainText("[warning] Could not write install log file to '{}'".format(log_path))
if res['success']:
if self._can_notify_user():
util.notify_user(msg='{} ({}) {}'.format(res['pkg'].model.name, res['pkg'].model.get_type(), self.i18n['installed']))
models_updated = []
for key in ('installed', 'removed'):
if res.get(key):
models_updated.extend(res[key])
if models_updated:
installed_available_idxs = []
for idx, available in enumerate(self.pkgs_available):
for pidx, model in enumerate(models_updated):
if available.model == model:
available.update_model(model)
if model.installed:
installed_available_idxs.append((idx, pidx, available))
                # re-indexing all installed packages so they will always be displayed when no filters are applied
if installed_available_idxs:
# removing from available
installed_available_idxs.sort(key=operator.itemgetter(0))
for decrement, data in enumerate(installed_available_idxs):
del self.pkgs_available[data[0] - decrement]
# re-inserting into the available
installed_available_idxs.sort(key=operator.itemgetter(1))
for new_idx, data in enumerate(installed_available_idxs):
self.pkgs_available.insert(new_idx, data[2])
# updating the respective table rows:
for displayed in self.pkgs:
for model in models_updated:
if displayed.model == model:
self.table_apps.update_package(displayed, change_update_col=True)
self.update_bt_upgrade()
# updating installed packages
if res['removed'] and self.pkgs_installed:
to_remove = []
for idx, installed in enumerate(self.pkgs_installed):
for removed in res['removed']:
if installed.model == removed:
to_remove.append(idx)
if to_remove:
to_remove.sort()
for decrement, idx in enumerate(to_remove):
del self.pkgs_installed[idx - decrement]
if res['installed']:
for idx, model in enumerate(res['installed']):
self.pkgs_installed.insert(idx, PackageView(model, self.i18n))
self.update_custom_actions()
self.table_apps.change_headers_policy(policy=QHeaderView.Stretch, maximized=self._maximized)
self.table_apps.change_headers_policy(policy=QHeaderView.ResizeToContents, maximized=self._maximized)
self._resize(accept_lower_width=False)
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.install.failed']))
def _update_progress(self, value: int):
self.progress_bar.setValue(value)
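    # Custom provider actions may ask for confirmation and/or the root password before the action
    # is handed to thread_custom_action for execution.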
def begin_execute_custom_action(self, pkg: Optional[PackageView], action: CustomSoftwareAction):
if pkg is None and action.requires_confirmation and \
not ConfirmationDialog(title=self.i18n['confirmation'].capitalize(),
body='<p>{}</p>'.format(self.i18n['custom_action.proceed_with'].capitalize().format(bold(self.i18n[action.i18n_label_key]))),
icon=QIcon(action.icon_path) if action.icon_path else QIcon(resource.get_path('img/logo.svg')),
i18n=self.i18n).ask():
return False
pwd = None
if not user.is_root() and action.requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return
self._begin_action(action_label='{}{}'.format(self.i18n[action.i18n_status_key], ' {}'.format(pkg.model.name) if pkg else ''),
action_id=ACTION_CUSTOM_ACTION)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_custom_action.pkg = pkg
self.thread_custom_action.root_pwd = pwd
self.thread_custom_action.custom_action = action
self.thread_custom_action.start()
def _finish_execute_custom_action(self, res: dict):
self._finish_action()
if res['success']:
if res['action'].refresh:
self.comp_manager.remove_saved_state(ACTION_CUSTOM_ACTION)
self.begin_refresh_packages(pkg_types={res['pkg'].model.__class__} if res['pkg'] else None)
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_checkbox_if_output()
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_errors()
if res['error']:
dialog.show_message(title=self.i18n['warning' if res['error_type'] == MessageType.WARNING else 'error'].capitalize(),
body=self.i18n[res['error']],
type_=res['error_type'])
def _show_console_checkbox_if_output(self):
if self.textarea_details.toPlainText():
self.comp_manager.set_component_visible(CHECK_DETAILS, True)
else:
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def show_settings(self):
if self.settings_window:
self.settings_window.handle_display()
else:
self.settings_window = SettingsWindow(self.manager, self.i18n, self.screen_size, self)
self.settings_window.setMinimumWidth(int(self.screen_size.width() / 4))
self.settings_window.resize(self.size())
self.settings_window.adjustSize()
qt_utils.centralize(self.settings_window)
self.settings_window.show()
def _map_custom_action(self, action: CustomSoftwareAction, parent: QWidget) -> QCustomMenuAction:
if action.icon_path:
try:
if action.icon_path.startswith('/'):
icon = QIcon(action.icon_path)
else:
icon = QIcon.fromTheme(action.icon_path)
            except Exception:
icon = None
else:
icon = None
return QCustomMenuAction(parent=parent,
label=self.i18n[action.i18n_label_key],
action=lambda: self.begin_execute_custom_action(None, action),
icon=icon)
def show_custom_actions(self):
if self.custom_actions:
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
actions = [self._map_custom_action(a, menu_row) for a in self.custom_actions]
menu_row.addActions(actions)
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def begin_ignore_updates(self, pkg: PackageView):
status_key = 'ignore_updates' if not pkg.model.is_update_ignored() else 'ignore_updates_reverse'
self._begin_action(action_label=self.i18n['manage_window.status.{}'.format(status_key)].format(pkg.model.name),
action_id=ACTION_IGNORE_UPDATES)
self.comp_manager.disable_visible()
self.thread_ignore_updates.pkg = pkg
self.thread_ignore_updates.start()
def finish_ignore_updates(self, res: dict):
self._finish_action(action_id=ACTION_IGNORE_UPDATES)
if res['success']:
hide_package = commons.is_package_hidden(res['pkg'], self._gen_filters())
if hide_package:
idx_to_remove = None
for pkg in self.pkgs:
if pkg == res['pkg']:
idx_to_remove = pkg.table_index
break
if idx_to_remove is not None:
del self.pkgs[idx_to_remove]
self.table_apps.removeRow(idx_to_remove)
self._update_table_indexes()
self.update_bt_upgrade()
else:
for pkg in self.pkgs:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
self.table_apps.update_package(pkg, change_update_col=not any([self.search_performed, self.suggestions_requested]))
self.update_bt_upgrade()
break
for pkg_list in (self.pkgs_available, self.pkgs_installed):
if pkg_list:
for pkg in pkg_list:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
break
self._add_pkg_categories(res['pkg'])
dialog.show_message(title=self.i18n['success'].capitalize(),
body=self.i18n['action.{}.success'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.INFO)
else:
dialog.show_message(title=self.i18n['fail'].capitalize(),
body=self.i18n['action.{}.fail'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def _add_pkg_categories(self, pkg: PackageView):
if pkg.model.categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c and c.strip()}
if pkg_categories:
current_categories = self._get_current_categories()
if current_categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c}
if pkg_categories:
categories_to_add = {c for c in pkg_categories if c and c not in current_categories}
if categories_to_add:
for cat in categories_to_add:
self.__add_category(cat)
else:
self._update_categories(pkg_categories)
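    # Theme menu entries apply the selected stylesheet immediately via set_theme and persist the
    # choice in the background through the SaveTheme thread.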
def _map_theme_action(self, theme: ThemeMetadata, menu: QMenu) -> QCustomMenuAction:
def _change_theme():
set_theme(theme_key=theme.key, app=QApplication.instance(), logger=self.context.logger)
self.thread_save_theme.theme_key = theme.key
self.thread_save_theme.start()
return QCustomMenuAction(label=theme.get_i18n_name(self.i18n),
action=_change_theme,
parent=menu,
tooltip=theme.get_i18n_description(self.i18n))
def show_themes(self):
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
menu_row.addActions(self._map_theme_actions(menu_row))
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def _map_theme_actions(self, menu: QMenu) -> List[QCustomMenuAction]:
core_config = CoreConfigManager().get_config()
current_theme_key, current_action = core_config['ui']['theme'], None
actions = []
for t in read_all_themes_metadata():
if not t.abstract:
action = self._map_theme_action(t, menu)
if current_action is None and current_theme_key is not None and current_theme_key == t.key:
action.button.setProperty('current', 'true')
current_action = action
else:
actions.append(action)
if not current_action:
invalid_action = QCustomMenuAction(label=self.i18n['manage_window.bt_themes.option.invalid'], parent=menu)
invalid_action.button.setProperty('current', 'true')
current_action = invalid_action
actions.sort(key=lambda a: a.get_label())
actions.insert(0, current_action)
return actions
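    # Full reload (triggered via thread_reload): refreshes custom actions, re-checks warnings and
    # reloads the package list.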
def reload(self):
self.thread_reload.start()
def _reload(self):
self.update_custom_actions()
self.verify_warnings()
self.types_changed = True
self.begin_refresh_packages()
| [
[
[
7,
14
],
[
3403,
3410
]
],
[
[
22,
30
],
[
61728,
61736
],
[
62010,
62018
]
],
[
[
38,
42
],
[
60259,
60263
]
],
[
[
50,
59
],
[
50867,
50876
]
],
[
[
80,
84
],
[
50297,
50301
],
[
60163,
60167
]
],
[
[
104,
108
],
[
27397,
27401
],
[
41326,
41330
],
[
72067,
72071
]
],
[
[
110,
114
],
[
31548,
31552
]
],
[
[
116,
119
],
[
31544,
31547
],
[
41386,
41389
],
[
46749,
46752
],
[
48336,
48339
]
],
[
[
121,
126
],
[
58940,
58945
]
],
[
[
128,
136
],
[
31535,
31543
],
[
41317,
41325
],
[
41377,
41385
],
[
58946,
58954
],
[
63907,
63915
]
],
[
[
163,
169
],
[
30869,
30875
]
],
[
[
171,
173
],
[
5777,
5779
],
[
6299,
6301
],
[
6959,
6961
],
[
7091,
7093
],
[
7358,
7360
],
[
7989,
7991
],
[
8146,
8148
],
[
8331,
8333
],
[
9713,
9715
],
[
10330,
10332
],
[
10950,
10952
],
[
12915,
12917
],
[
13454,
13456
],
[
14432,
14434
],
[
20465,
20467
],
[
23298,
23300
],
[
23640,
23642
],
[
24225,
24227
],
[
67691,
67693
],
[
71837,
71839
]
],
[
[
175,
185
],
[
3064,
3074
],
[
3108,
3118
],
[
3156,
3166
],
[
3197,
3207
]
],
[
[
210,
215
],
[
3425,
3430
],
[
46027,
46032
],
[
64317,
64322
],
[
64366,
64371
],
[
67098,
67103
],
[
67171,
67176
]
],
[
[
217,
240
],
[
30903,
30926
]
],
[
[
242,
249
],
[
5769,
5776
],
[
6291,
6298
],
[
6951,
6958
],
[
7083,
7090
],
[
7981,
7988
],
[
8138,
8145
],
[
9705,
9712
],
[
10322,
10329
],
[
10942,
10949
],
[
12907,
12914
],
[
13446,
13453
],
[
14424,
14431
],
[
20457,
20464
],
[
23290,
23297
],
[
23632,
23639
],
[
67683,
67690
],
[
67907,
67914
],
[
71829,
71836
],
[
71977,
71984
]
],
[
[
278,
285
],
[
3032,
3039
],
[
5365,
5372
],
[
11929,
11936
],
[
12501,
12508
],
[
17600,
17607
],
[
49202,
49209
],
[
66940,
66947
]
],
[
[
287,
298
],
[
4383,
4394
],
[
12100,
12111
]
],
[
[
300,
309
],
[
5662,
5671
],
[
6193,
6202
],
[
12800,
12809
],
[
40500,
40509
]
],
[
[
311,
322
],
[
37779,
37790
],
[
38399,
38410
],
[
63350,
63361
],
[
63455,
63466
]
],
[
[
324,
332
],
[
4464,
4472
],
[
14150,
14158
]
],
[
[
340,
346
],
[
4616,
4622
],
[
13337,
13343
],
[
14314,
14320
],
[
49362,
49368
]
],
[
[
348,
362
],
[
13760,
13774
]
],
[
[
364,
376
],
[
20347,
20359
]
],
[
[
378,
389
],
[
9575,
9586
],
[
10197,
10208
],
[
10790,
10801
]
],
[
[
391,
400
],
[
6838,
6847
],
[
7166,
7175
],
[
7865,
7874
],
[
8063,
8072
]
],
[
[
402,
414
],
[
71341,
71353
]
],
[
[
416,
425
],
[
7022,
7031
]
],
[
[
427,
438
],
[
5532,
5543
],
[
5553,
5564
],
[
12616,
12627
],
[
12637,
12648
],
[
17718,
17729
],
[
17739,
17750
],
[
20187,
20198
],
[
49277,
49288
],
[
49307,
49318
]
],
[
[
446,
451
],
[
67644,
67649
],
[
71236,
71241
],
[
71794,
71799
],
[
72057,
72062
]
],
[
[
453,
464
],
[
5474,
5485
],
[
12695,
12706
],
[
17798,
17809
],
[
49235,
49246
]
],
[
[
487,
491
],
[
59073,
59077
],
[
64531,
64535
]
],
[
[
528,
539
],
[
3258,
3269
]
],
[
[
578,
596
],
[
3350,
3368
]
],
[
[
638,
653
],
[
3280,
3295
]
],
[
[
655,
669
],
[
33520,
33534
],
[
53341,
53355
],
[
58903,
58917
],
[
59393,
59407
]
],
[
[
706,
721
],
[
31553,
31568
],
[
41331,
41346
]
],
[
[
757,
768
],
[
27563,
27574
],
[
55464,
55475
],
[
56601,
56612
],
[
57461,
57472
],
[
58840,
58851
],
[
65932,
65943
],
[
70082,
70093
],
[
70346,
70357
]
],
[
[
795,
805
],
[
3383,
3393
]
],
[
[
833,
841
],
[
60057,
60065
]
],
[
[
872,
876
],
[
55383,
55387
],
[
55635,
55639
],
[
56533,
56537
],
[
57391,
57395
],
[
64231,
64235
],
[
70014,
70018
],
[
70278,
70282
]
],
[
[
902,
911
],
[
71306,
71315
]
],
[
[
940,
964
],
[
72265,
72289
]
],
[
[
966,
979
],
[
71215,
71228
]
],
[
[
1014,
1031
],
[
72114,
72131
]
],
[
[
1071,
1082
],
[
36263,
36274
],
[
51268,
51279
],
[
54328,
54339
]
],
[
[
1108,
1114
],
[
27233,
27239
],
[
27442,
27448
],
[
55234,
55240
],
[
56393,
56399
],
[
57231,
57237
],
[
58743,
58749
],
[
65862,
65868
],
[
69856,
69862
],
[
70126,
70132
]
],
[
[
1116,
1123
],
[
41529,
41536
],
[
41927,
41934
],
[
41985,
41992
],
[
42204,
42211
],
[
42265,
42272
],
[
42406,
42413
],
[
42459,
42466
],
[
44845,
44852
],
[
68588,
68595
]
],
[
[
1125,
1133
],
[
20700,
20708
],
[
27747,
27755
],
[
44433,
44441
],
[
66786,
66794
]
],
[
[
1165,
1176
],
[
29364,
29375
]
],
[
[
1213,
1226
],
[
12211,
12224
]
],
[
[
1228,
1247
],
[
49453,
49472
]
],
[
[
1281,
1302
],
[
38105,
38126
]
],
[
[
1339,
1349
],
[
4573,
4583
],
[
5268,
5278
],
[
9516,
9526
],
[
13291,
13301
],
[
14268,
14278
],
[
14552,
14562
],
[
17933,
17943
]
],
[
[
1351,
1361
],
[
18015,
18025
],
[
18436,
18446
],
[
18863,
18873
],
[
19356,
19366
],
[
19731,
19741
]
],
[
[
1363,
1382
],
[
3550,
3569
]
],
[
[
1384,
1393
],
[
26017,
26026
]
],
[
[
1395,
1405
],
[
4803,
4813
],
[
8803,
8813
]
],
[
[
1413,
1430
],
[
66952,
66969
],
[
67307,
67324
],
[
71246,
71263
],
[
71510,
71527
],
[
72072,
72089
],
[
72725,
72742
]
],
[
[
1432,
1446
],
[
20147,
20161
]
],
[
[
1479,
1497
],
[
26129,
26147
],
[
49539,
49557
],
[
64043,
64061
]
],
[
[
1531,
1544
],
[
57525,
57538
]
],
[
[
1575,
1585
],
[
55019,
55029
]
],
[
[
1616,
1626
],
[
26949,
26959
],
[
59132,
59142
],
[
64597,
64607
]
],
[
[
1664,
1681
],
[
55997,
56014
]
],
[
[
1716,
1730
],
[
66527,
66541
]
],
[
[
1763,
1778
],
[
14715,
14730
],
[
50302,
50317
],
[
50422,
50437
],
[
50664,
50679
]
],
[
[
1780,
1791
],
[
14882,
14893
]
],
[
[
1793,
1809
],
[
15030,
15046
]
],
[
[
1811,
1827
],
[
15592,
15608
]
],
[
[
1829,
1844
],
[
15184,
15199
]
],
[
[
1852,
1870
],
[
15312,
15330
]
],
[
[
1872,
1886
],
[
15451,
15465
]
],
[
[
1888,
1902
],
[
16611,
16625
]
],
[
[
1904,
1919
],
[
16821,
16836
]
],
[
[
1921,
1940
],
[
16962,
16981
]
],
[
[
1942,
1957
],
[
15731,
15746
]
],
[
[
1965,
1977
],
[
21269,
21281
]
],
[
[
1985,
1996
],
[
25071,
25082
],
[
25011,
25022
]
],
[
[
1998,
2011
],
[
15884,
15897
]
],
[
[
2013,
2025
],
[
16310,
16322
]
],
[
[
2027,
2047
],
[
63938,
63958
],
[
66910,
66930
]
],
[
[
2049,
2064
],
[
16197,
16212
]
],
[
[
2066,
2078
],
[
16037,
16049
]
],
[
[
2086,
2107
],
[
21550,
21571
]
],
[
[
2115,
2135
],
[
17295,
17315
]
],
[
[
2137,
2146
],
[
21488,
21497
]
],
[
[
2148,
2164
],
[
17470,
17486
]
],
[
[
2201,
2212
],
[
33459,
33470
],
[
36720,
36731
],
[
41872,
41883
],
[
53280,
53291
],
[
55528,
55539
],
[
56659,
56670
],
[
58924,
58935
],
[
59332,
59343
],
[
63220,
63231
],
[
63916,
63927
],
[
67992,
68003
],
[
70405,
70416
]
],
[
[
2214,
2231
],
[
30340,
30357
]
],
[
[
2259,
2263
],
[
34241,
34245
],
[
36388,
36392
],
[
51155,
51159
],
[
51453,
51457
],
[
54040,
54044
],
[
54515,
54519
],
[
60727,
60731
],
[
63673,
63677
]
],
[
[
2265,
2273
],
[
64372,
64380
]
],
[
[
2313,
2317
],
[
3240,
3244
]
],
[
[
2319,
2330
]
],
[
[
2358,
2378
],
[
23859,
23879
],
[
24309,
24329
]
],
[
[
2383,
2396
],
[
58017,
58030
],
[
58716,
58729
]
],
[
[
2401,
2415
],
[
59582,
59596
],
[
59925,
59939
]
],
[
[
2420,
2436
],
[
33753,
33769
],
[
34106,
34122
]
],
[
[
2441,
2452
],
[
54696,
54707
],
[
54920,
54931
]
],
[
[
2457,
2471
],
[
56753,
56767
],
[
56974,
56988
]
],
[
[
2476,
2492
],
[
53573,
53589
],
[
53965,
53981
],
[
54400,
54416
]
],
[
[
2497,
2511
],
[
49857,
49871
],
[
50966,
50980
],
[
51340,
51354
]
],
[
[
2516,
2529
],
[
36881,
36894
],
[
37104,
37117
]
],
[
[
2534,
2554
],
[
64906,
64926
],
[
65425,
65445
],
[
65621,
65641
],
[
65754,
65774
]
],
[
[
2560,
2578
],
[
55695,
55713
],
[
55922,
55940
]
],
[
[
2584,
2605
],
[
68268,
68289
],
[
68510,
68531
]
],
[
[
2629,
2639
],
[
5152,
5162
],
[
32158,
32168
],
[
52336,
52346
],
[
53071,
53081
]
],
[
[
2644,
2656
],
[
10156,
10168
],
[
22214,
22226
],
[
22669,
22681
],
[
28947,
28959
]
],
[
[
2661,
2671
],
[
10743,
10753
],
[
22367,
22377
],
[
22699,
22709
]
],
[
[
2676,
2690
],
[
18390,
18404
],
[
22228,
22242
],
[
22683,
22697
],
[
22789,
22803
],
[
29091,
29105
]
],
[
[
2695,
2705
],
[
11406,
11416
],
[
22379,
22389
],
[
22711,
22721
],
[
39393,
39403
]
],
[
[
2710,
2723
],
[
6131,
6144
],
[
21883,
21896
],
[
22572,
22585
],
[
39774,
39787
],
[
39850,
39863
],
[
40436,
40449
]
],
[
[
2728,
2738
],
[
6696,
6706
],
[
21871,
21881
],
[
22560,
22570
]
],
[
[
2743,
2754
],
[
7754,
7765
],
[
21916,
21927
],
[
22139,
22150
],
[
22605,
22616
],
[
45276,
45287
],
[
46570,
46581
],
[
46683,
46694
]
],
[
[
2759,
2775
],
[
8735,
8751
],
[
21898,
21914
],
[
22121,
22137
],
[
22587,
22603
],
[
46879,
46895
],
[
47953,
47969
],
[
48052,
48068
]
],
[
[
2780,
2788
],
[
9440,
9448
],
[
21929,
21937
],
[
22152,
22160
],
[
22618,
22626
],
[
24019,
24027
]
],
[
[
2794,
2807
],
[
13207,
13220
],
[
31381,
31394
],
[
51804,
51817
],
[
66256,
66269
],
[
66343,
66356
]
],
[
[
2813,
2824
],
[
19685,
19696
],
[
22835,
22846
]
],
[
[
2830,
2847
],
[
19295,
19312
],
[
22816,
22833
],
[
52598,
52615
]
],
[
[
2853,
2861
],
[
20038,
20046
],
[
22848,
22856
]
],
[
[
2867,
2876
],
[
18740,
18749
],
[
22805,
22814
]
],
[
[
2906,
2919
],
[
21980,
21993
],
[
29244,
29257
]
],
[
[
2924,
2944
],
[
22297,
22317
],
[
28527,
28547
],
[
32376,
32396
]
],
[
[
2949,
2966
],
[
22054,
22071
],
[
32289,
32306
],
[
58361,
58378
]
],
[
[
2971,
2986
],
[
22495,
22510
],
[
23935,
23950
],
[
33823,
33838
],
[
59650,
59665
]
],
[
[
2991,
3006
],
[
22765,
22780
],
[
23952,
23967
],
[
33840,
33855
],
[
52499,
52514
],
[
59667,
59682
]
],
[
[
3019,
3031
],
[
3447,
3459
],
[
27619,
27631
]
]
] |
from fastapi import FastAPI
from fastapi.testclient import TestClient
app = FastAPI()
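# Two stub endpoints exercised by the tests below: a healthcheck and a query route
# that returns a fixed single-event payload.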
@app.get("/api/v1/healthcheck")
async def read_main():
return "OK"
@app.post("/api/v1/query")
async def query():
return [{"event_date": "20210105"}]
client = TestClient(app)
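# TestClient drives the ASGI app in-process, so no running server is needed for the tests.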
def test_read_main():
response = client.get("/api/v1/healthcheck")
assert response.status_code == 200
assert response.json() == "OK"
def test_query():
response = client.post(
"/api/v1/query",
json={
"type": "service_account",
"date": "20210105",
"project_id": "test-project",
"private_key_id": "test",
"private_key": "testkey",
"client_email": "test@test.com",
"client_id": "test",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "test",
"client_x509_cert_url": "test"
}
)
assert response.status_code == 200
assert response.json() == [{"event_date": "20210105"}]
| [
[
[
20,
27
],
[
77,
84
]
],
[
[
59,
69
],
[
257,
267
]
],
[
[
71,
74
],
[
89,
92
],
[
161,
164
],
[
268,
271
]
],
[
[
120,
158
]
],
[
[
187,
245
]
],
[
[
248,
254
],
[
312,
318
],
[
454,
460
]
],
[
[
279,
293
]
],
[
[
425,
435
]
]
] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from yandex.cloud.datasphere.v1 import app_token_service_pb2 as yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2
class AppTokenServiceStub(object):
"""A set of methods for managing app tokens.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Validate = channel.unary_unary(
'/yandex.cloud.datasphere.v1.AppTokenService/Validate',
request_serializer=yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class AppTokenServiceServicer(object):
"""A set of methods for managing app tokens.
"""
def Validate(self, request, context):
"""Validates app token.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AppTokenServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Validate': grpc.unary_unary_rpc_method_handler(
servicer.Validate,
request_deserializer=yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.datasphere.v1.AppTokenService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AppTokenService(object):
"""A set of methods for managing app tokens.
"""
@staticmethod
def Validate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.datasphere.v1.AppTokenService/Validate',
yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| [
[
[
153,
157
],
[
1165,
1169
],
[
1427,
1431
],
[
1791,
1795
],
[
2422,
2426
]
],
[
[
187,
234
],
[
884,
918
],
[
1688,
1722
],
[
2660,
2694
]
],
[
[
274,
366
],
[
735,
802
],
[
1544,
1611
],
[
2537,
2604
]
],
[
[
375,
394
]
],
[
[
963,
986
]
],
[
[
1318,
1355
]
],
[
[
2018,
2033
]
]
] |
import pytest
@pytest.mark.usefixtures("smart_setup")
class TestObjectValue:
def test_get_sheet_object_value(self, smart_setup):
smart = smart_setup['smart']
sheet = smart.Sheets.get_sheet(smart_setup['sheet'].id, include='objectValue')
assert isinstance(sheet.rows[0].cells[0].object_value, smart.models.StringObjectValue)
assert isinstance(sheet, smart.models.Sheet)
def test_predecessors(self, smart_setup):
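# End-to-end scenario: create a sheet from the Gantt template, add Task1, add Task2 and
# Task3 with predecessor links, clear Task3's predecessors, then delete the sheet.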
smart = smart_setup['smart']
templates = smart.Templates.list_public_templates(include_all=True)
for template in templates.data:
if template.name == 'Basic Project with Gantt & Dependencies':
break
sheet = smart.models.Sheet({
'name': 'example_project_python_sdk' + smart_setup['now'],
'fromId': template.id
})
action = smart.Home.create_sheet_from_template(sheet)
sheet = action.result
assert action.message == 'SUCCESS'
sheet = smart.Sheets.get_sheet(sheet.id)
# add 'Task1'
row = smart.models.Row()
row.to_bottom = True
for col in sheet.columns:
if col.primary:
row.cells.append({
'column_id': col.id,
'value': 'Task1'
})
break
action = smart.Sheets.add_rows(sheet.id, [row])
task1_row = action.result[0]
assert isinstance(task1_row, smart.models.row.Row)
assert action.request_response.status_code == 200
# add 'Task2' with 'Task1' predecessor
p1 = smart.models.Predecessor()
p1.type = 'FS'
p1.row_id = task1_row.id
predecessor_list = smart.models.PredecessorList()
predecessor_list.predecessors = [p1]
row = smart.models.Row()
row.to_bottom = True
for col in sheet.columns:
if col.primary:
row.cells.append({
'column_id': col.id,
'value': 'Task2'
})
if col.type == 'PREDECESSOR':
row.cells.append({
'column_id': col.id,
'object_value': predecessor_list
})
action = smart.Sheets.add_rows(sheet.id, [row])
task2_row = action.result[0]
assert isinstance(task2_row, smart.models.row.Row)
assert action.request_response.status_code == 200
# add 'Task3' with 'Task1','Task2' predecessors
p1 = smart.models.Predecessor()
p1.type = 'FS'
p1.row_id = task1_row.id
p2 = smart.models.Predecessor()
p2.type = 'FS'
p2.row_id = task2_row.id
predecessor_list = smart.models.PredecessorList()
predecessor_list.predecessors = [p1, p2]
row = smart.models.Row()
row.to_bottom = True
for col in sheet.columns:
if col.primary:
row.cells.append({
'column_id': col.id,
'value': 'Task3'
})
if col.type == 'PREDECESSOR':
row.cells.append({
'column_id': col.id,
'object_value': predecessor_list
})
action = smart.Sheets.add_rows(sheet.id, [row])
task3_row = action.result[0]
assert isinstance(task3_row, smart.models.row.Row)
assert action.request_response.status_code == 200
# clear the predecessor list from task 3
row = smart.models.Row()
row.id = task3_row.id
for col in sheet.columns:
if col.type == 'PREDECESSOR':
row.cells.append({
'column_id': col.id,
'value': smart.models.ExplicitNull()
})
break
action = smart.Sheets.update_rows(sheet.id, [row])
assert action.request_response.status_code == 200
for cell in action.data[0].cells:
if cell.column_id == col.id:
break
assert cell.object_value is None
# clean up
action = smart.Sheets.delete_sheet(sheet.id)
assert action.message == 'SUCCESS'
| [
[
[
7,
13
],
[
17,
23
]
],
[
[
62,
77
]
]
] |
import random
import time
class Athlete():
name = ""
health = 100
def __init__(self, newName):
self.name = newName
print("A new fighter enters the ring, his name is", self.name)
print()
def punch(self, other):
time.sleep(1)
print(self.name, "lands a punch on fighter", other.name)
other.health -= 20
print("Physical condition of fighter", other.name, "-", other.health)
print()
fighter1 = Athlete("Владимир")
fighter2 = Athlete("Николай")
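# Fight loop: a randomly chosen fighter punches the other (-20 health per hit)
# until one of them drops to zero.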
while (fighter1.health != 0) and (fighter2.health != 0):
fighters = [fighter1, fighter2]
if fighters[random.randint(0,1)] == fighter1:
fighter1.punch(fighter2)
else:
fighter2.punch(fighter1)
print("The winner of the bout is " + (fighter1.name if fighter1.health > 0 else fighter2.name) + "!")
| [
[
[
7,
13
],
[
580,
586
]
],
[
[
21,
25
],
[
231,
235
]
],
[
[
34,
41
],
[
424,
431
],
[
455,
462
]
],
[
[
413,
421
],
[
482,
490
],
[
546,
554
],
[
604,
612
],
[
616,
624
],
[
666,
674
],
[
755,
763
],
[
738,
746
]
],
[
[
444,
452
],
[
509,
517
],
[
556,
564
],
[
631,
639
],
[
651,
659
],
[
780,
788
]
],
[
[
534,
542
],
[
571,
579
]
]
] |
import bagel
import numpy as np
from sklearn.metrics import precision_recall_curve
from typing import Sequence, Tuple, Dict, Optional
def _adjust_scores(labels: np.ndarray,
scores: np.ndarray,
delay: Optional[int] = None,
inplace: bool = False) -> np.ndarray:
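# Point-adjust scoring with a detection delay: inside each true anomaly segment the first
# `delay` + 1 scores are replaced by their maximum, and the remaining scores of the segment
# are floored at that value, so a timely detection is credited to the whole segment.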
if np.shape(scores) != np.shape(labels):
raise ValueError('`labels` and `scores` must have same shape')
if delay is None:
delay = len(scores)
splits = np.where(labels[1:] != labels[:-1])[0] + 1
is_anomaly = labels[0] == 1
adjusted_scores = np.copy(scores) if not inplace else scores
pos = 0
for part in splits:
if is_anomaly:
ptr = min(pos + delay + 1, part)
adjusted_scores[pos: ptr] = np.max(adjusted_scores[pos: ptr])
adjusted_scores[ptr: part] = np.maximum(adjusted_scores[ptr: part], adjusted_scores[pos])
is_anomaly = not is_anomaly
pos = part
part = len(labels)
if is_anomaly:
ptr = min(pos + delay + 1, part)
adjusted_scores[pos: part] = np.max(adjusted_scores[pos: ptr])
return adjusted_scores
def _ignore_missing(series_list: Sequence, missing: np.ndarray) -> Tuple[np.ndarray, ...]:
ret = []
for series in series_list:
series = np.copy(series)
ret.append(series[missing != 1])
return tuple(ret)
def _best_f1score(labels: np.ndarray, scores: np.ndarray) -> Tuple[float, float, float, float]:
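# Sweep the thresholds produced by precision_recall_curve and return the one with the
# highest F1 score, together with its precision, recall and the best F1 value.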
precision, recall, thresholds = precision_recall_curve(y_true=labels, probas_pred=scores)
f1score = 2 * precision * recall / np.clip(precision + recall, a_min=1e-8, a_max=None)
best_threshold = thresholds[np.argmax(f1score)]
best_precision = precision[np.argmax(f1score)]
best_recall = recall[np.argmax(f1score)]
return best_threshold, best_precision, best_recall, np.max(f1score)
def get_test_results(labels: np.ndarray,
scores: np.ndarray,
missing: np.ndarray,
window_size: int,
delay: Optional[int] = None) -> Dict:
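# Drop the first window_size - 1 points (they have no complete window), apply the delay
# adjustment, exclude missing points, then report the best-F1 threshold and its metrics.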
labels = labels[window_size - 1:]
scores = scores[window_size - 1:]
missing = missing[window_size - 1:]
adjusted_scores = _adjust_scores(labels=labels, scores=scores, delay=delay)
adjusted_labels, adjusted_scores = _ignore_missing([labels, adjusted_scores], missing=missing)
threshold, precision, recall, f1score = _best_f1score(labels=adjusted_labels, scores=adjusted_scores)
return {'threshold': threshold,
'precision': precision,
'recall': recall,
'f1score': f1score}
class KPIStats:
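# Per-KPI summary: total points, missing/anomaly counts and the corresponding rates.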
def __init__(self, kpi: bagel.data.KPI):
self.num_points = len(kpi.values)
self.num_missing = len(kpi.missing[kpi.missing == 1])
self.num_anomaly = len(kpi.labels[kpi.labels == 1])
self.missing_rate = self.num_missing / self.num_points
self.anomaly_rate = self.num_anomaly / self.num_points
def get_kpi_stats(*kpis: bagel.data.KPI) -> Tuple[KPIStats, ...]:
ret = []
for kpi in kpis:
ret.append(KPIStats(kpi))
return tuple(ret)
| [
[
[
7,
12
],
[
2699,
2704
],
[
3033,
3038
]
],
[
[
20,
31
],
[
308,
310
],
[
164,
166
],
[
203,
205
],
[
327,
329
],
[
347,
349
],
[
499,
501
],
[
596,
598
],
[
783,
785
],
[
858,
860
],
[
1094,
1096
],
[
1230,
1232
],
[
1209,
1211
],
[
1309,
1311
],
[
1416,
1418
],
[
1436,
1438
],
[
1619,
1621
],
[
1704,
1706
],
[
1755,
1757
],
[
1800,
1802
],
[
1877,
1879
],
[
1924,
1926
],
[
1965,
1967
],
[
2007,
2009
]
],
[
[
61,
83
],
[
1522,
1544
]
],
[
[
103,
111
],
[
1190,
1198
]
],
[
[
113,
118
],
[
1224,
1229
],
[
1451,
1456
],
[
3052,
3057
]
],
[
[
120,
124
],
[
2111,
2115
]
],
[
[
126,
134
],
[
241,
249
],
[
2086,
2094
]
],
[
[
141,
155
],
[
2255,
2269
]
],
[
[
1161,
1176
],
[
2352,
2367
]
],
[
[
1394,
1407
],
[
2456,
2469
]
],
[
[
1899,
1915
]
],
[
[
2660,
2668
],
[
3058,
3066
],
[
3127,
3135
]
],
[
[
3012,
3025
]
]
] |
"""
This is a library for defining and using particle filters.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
from contextlib import contextmanager
from yt.fields.field_info_container import \
NullFunc, TranslationFunc
from yt.funcs import mylog
from yt.utilities.exceptions import YTIllDefinedFilter
# One to one mapping
filter_registry = {}
class DummyFieldInfo(object):
particle_type = True
dfi = DummyFieldInfo()
class ParticleFilter(object):
def __init__(self, name, function, requires, filtered_type):
self.name = name
self.function = function
self.requires = requires[:]
self.filtered_type = filtered_type
@contextmanager
def apply(self, dobj):
with dobj._chunked_read(dobj._current_chunk):
with dobj._field_type_state(self.filtered_type, dfi):
# We won't be storing the field data from the whole read, so we
# start by filtering now.
filter = self.function(self, dobj)
yield
# Retain a reference here, and we'll filter all appropriate fields
# later.
fd = dobj.field_data
for f, tr in fd.items():
if f[0] != self.filtered_type: continue
if tr.shape != filter.shape and tr.shape[0] != filter.shape[0]:
raise YTIllDefinedFilter(self, tr.shape, filter.shape)
else:
d = tr[filter]
dobj.field_data[self.name, f[1]] = d
def available(self, field_list):
# Note that this assumes that all the fields in field_list have the
# same form as the 'requires' attributes. This won't be true if the
# fields are implicitly "all" or something.
return all((self.filtered_type, field) in field_list for field in self.requires)
def missing(self, field_list):
return list((self.filtered_type, field) for field in self.requires if
(self.filtered_type, field) not in field_list)
def wrap_func(self, field_name, old_fi):
new_fi = copy.copy(old_fi)
new_fi.name = (self.name, field_name[1])
if old_fi._function == NullFunc:
new_fi._function = TranslationFunc(old_fi.name)
# Marking the field as inherited
new_fi._inherited_particle_filter = True
return new_fi
def add_particle_filter(name, function, requires=None, filtered_type="all"):
r"""Create a new particle filter in the global namespace of filters
A particle filter is a short name that corresponds to an algorithm for
filtering a set of particles into a subset. This is useful for creating new
particle types based on a cut on a particle field, such as particle mass, ID
or type. After defining a new filter, it still needs to be added to the
dataset by calling
:func:`~yt.data_objects.static_output.add_particle_filter`.
.. note::
Alternatively, you can make use of the
:func:`~yt.data_objects.particle_filters.particle_filter` decorator to
define a new particle filter.
Parameters
----------
name : string
The name of the particle filter. New particle fields with particle type
set by this name will be added to any dataset that enables this particle
filter.
function : reference to a function
The function that defines the particle filter. The function should
accept two arguments: a reference to a particle filter object and a
reference to an abstract yt data object. See the example below.
requires : a list of field names
A list of field names required by the particle filter definition.
filtered_type : string
The name of the particle type to be filtered.
Example
-------
>>> import yt
>>> def _stars(pfilter, data):
... return data[(pfilter.filtered_type, 'particle_type')] == 2
>>> yt.add_particle_filter("stars", function=_stars, filtered_type='all',
... requires=["particle_type"])
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> ds.add_particle_filter('stars')
>>> ad = ds.all_data()
>>> print (ad['stars', 'particle_mass'])
[ 1.68243760e+38 1.65690882e+38 1.65813321e+38 ..., 2.04238266e+38
2.04523901e+38 2.04770938e+38] g
"""
if requires is None:
requires = []
filter = ParticleFilter(name, function, requires, filtered_type)
if filter_registry.get(name, None) is not None:
mylog.warning('The %s particle filter already exists. Overriding.' % name)
filter_registry[name] = filter
def particle_filter(name=None, requires=None, filtered_type='all'):
r"""A decorator that adds a new particle filter
A particle filter is a short name that corresponds to an algorithm for
filtering a set of particles into a subset. This is useful for creating new
particle types based on a cut on a particle field, such as particle mass, ID
or type.
.. note::
Alternatively, you can make use of the
:func:`~yt.data_objects.particle_filters.add_particle_filter` function
to define a new particle filter using a more declarative syntax.
Parameters
----------
name : string
The name of the particle filter. New particle fields with particle type
set by this name will be added to any dataset that enables this particle
filter. If not set, the name will be inferred from the name of the
filter function.
function : reference to a function
The function that defines the particle filter. The function should
accept two arguments: a reference to a particle filter object and a
reference to an abstract yt data object. See the example below.
requires : a list of field names
A list of field names required by the particle filter definition.
filtered_type : string
The name of the particle type to be filtered.
Example
-------
>>> import yt
>>> # define a filter named "stars"
>>> @yt.particle_filter(requires=["particle_type"], filtered_type='all')
>>> def stars(pfilter, data):
... return data[(pfilter.filtered_type, 'particle_type')] == 2
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> ds.add_particle_filter('stars')
>>> ad = ds.all_data()
>>> print (ad['stars', 'particle_mass'])
[ 1.68243760e+38 1.65690882e+38 1.65813321e+38 ..., 2.04238266e+38
2.04523901e+38 2.04770938e+38] g
"""
def wrapper(function):
if name is None:
used_name = function.__name__
else:
used_name = name
return add_particle_filter(used_name, function, requires, filtered_type)
return wrapper
| [
[
[
423,
427
],
[
2393,
2397
]
],
[
[
451,
465
],
[
985,
999
]
],
[
[
516,
524
],
[
2491,
2499
]
],
[
[
526,
541
],
[
2532,
2547
]
],
[
[
563,
568
],
[
4852,
4857
]
],
[
[
605,
623
],
[
1670,
1688
]
],
[
[
646,
661
],
[
4799,
4814
],
[
4931,
4946
]
],
[
[
674,
688
],
[
729,
743
]
],
[
[
723,
726
],
[
1141,
1144
]
],
[
[
753,
767
],
[
4736,
4750
]
],
[
[
2679,
2698
],
[
7036,
7055
]
],
[
[
4968,
4983
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import dirlistproc
def proc_xml(input_fn: str, output_fn: str, _) -> bool:
print("Converting %s to %s" % (input_fn, output_fn))
return True
def main():
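# dlp.run applies proc_xml to every matching .xml input file and returns how many files
# were processed and how many conversions reported success.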
dlp = dirlistproc.DirectoryListProcessor(None, "Convert XML to Text", ".xml", ".txt")
nfiles, nsuccess = dlp.run(proc_xml)
print("Total=%d Successful=%d" % (nfiles, nsuccess))
if __name__ == '__main__':
main()
| [
[
[
1581,
1592
],
[
1748,
1759
]
],
[
[
1599,
1607
],
[
1859,
1867
]
],
[
[
1730,
1734
],
[
1959,
1963
]
]
] |
from .connection import Connection
| [
[
[
24,
34
]
]
] |
#Split one picture
import cv2
import numpy.random as random
import numpy as np
import os
import time
#borders
#mitochondria
#mitochondria borders
#PSD
#vesicles
def is_Img(name):
img_type = ('.png', '.jpg', '.jpeg')
if name.endswith((img_type)):
return True
else:
return False
file_dir_arr = ["axon", "mitochondria", "PSD", "vesicles", "boundaries","mitochondrial boundaries"]
name_list = []
mask_list = []
out_dir = "cutting data"
size_data = 256
size_step = 128
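# Cut the source image (and later each mask) into 256x256 tiles with a 128-pixel stride;
# windows that would extend past the image border are skipped.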
if not os.path.isdir(out_dir):
print("creating out_dir: " + out_dir)
os.makedirs(out_dir)
dir_input_img = "original data/original/"
dir_input_mask ="original data/"
###########################################################
img_name = "training075.png"
###########################################################
if is_Img(os.path.join(dir_input_img, img_name)):
count = 0
img = cv2.imread(os.path.join(dir_input_img, img_name), 0)
h,w = img.shape[0:2]
if not os.path.isdir(out_dir+"/original"):
print("creating out_dir: " + "original")
os.makedirs(out_dir+"/original")
for start_y in range(0,h, size_step):
if (h - start_y < size_data):
continue
for start_x in range(0,w, size_step):
if (w - start_x < size_data):
continue
cutting_img = img[start_y:start_y+size_data, start_x:start_x+size_data]
cv2.imwrite(out_dir + "/original/" + img_name + "_" + str(size_data) +"_" + str(size_step) +"_" +str(count)+".png", cutting_img)
count+=1
for i,dir_name in enumerate(file_dir_arr):
if is_Img(os.path.join(dir_input_mask + dir_name, img_name)):
img = cv2.imread(os.path.join(dir_input_mask +dir_name, img_name), 0)
img[img < 128] = 0
img[img > 127] = 255
if name_list.count(img_name) == 0:
name_list.append(img_name)
mask_list.append(np.zeros((len(file_dir_arr),)+ img.shape, np.uint8))
index = name_list.index(img_name)
mask_list[index][i] = img
print(name_list)
for index, mask_stack in enumerate(mask_list):
count = 0
for i,dir_name in enumerate(file_dir_arr):
local_count = count
mask_write = mask_stack[i]
h,w = mask_write.shape[0:2]
if not os.path.isdir(out_dir+"/"+dir_name):
print("creating out_dir: " + "mask")
os.makedirs(out_dir+"/"+dir_name )
for start_y in range(0,h, size_step):
if (h - start_y < size_data):
continue
for start_x in range(0,w, size_step):
if (w - start_x < size_data):
continue
cutting_mask = mask_write[start_y:start_y+size_data, start_x:start_x+size_data]
cv2.imwrite(out_dir+"/"+dir_name +"/" + name_list[index] + "_" + str(size_data) +"_" + str(size_step) +"_" +str(local_count)+".png", cutting_mask)
local_count+=1
| [
[
[
27,
30
],
[
866,
869
],
[
1341,
1344
],
[
1602,
1605
],
[
2537,
2540
]
],
[
[
38,
60
]
],
[
[
68,
79
],
[
1805,
1807
],
[
1847,
1849
]
],
[
[
87,
89
],
[
487,
489
],
[
548,
550
],
[
806,
808
],
[
877,
879
],
[
957,
959
],
[
1037,
1039
],
[
1539,
1541
],
[
1613,
1615
],
[
2149,
2151
],
[
2226,
2228
]
],
[
[
97,
101
]
],
[
[
168,
174
],
[
799,
805
],
[
1532,
1538
]
],
[
[
288,
300
],
[
1512,
1524
],
[
1819,
1831
],
[
2039,
2051
]
],
[
[
389,
398
],
[
1723,
1732
],
[
1758,
1767
],
[
1872,
1881
],
[
1939,
1948
],
[
2577,
2586
]
],
[
[
405,
414
],
[
1788,
1797
],
[
1900,
1909
],
[
1987,
1996
]
],
[
[
421,
428
],
[
501,
508
],
[
544,
551
],
[
560,
567
],
[
971,
978
],
[
1049,
1056
],
[
1353,
1360
],
[
2163,
2170
],
[
2238,
2245
],
[
2549,
2556
]
],
[
[
447,
456
],
[
1137,
1146
],
[
1225,
1234
],
[
1289,
1298
],
[
1316,
1325
],
[
1399,
1408
],
[
2325,
2334
],
[
2413,
2422
],
[
2485,
2494
],
[
2512,
2521
],
[
2606,
2615
]
],
[
[
463,
472
],
[
1104,
1113
],
[
1191,
1200
],
[
1421,
1430
],
[
2292,
2301
],
[
2379,
2388
],
[
2628,
2637
]
],
[
[
570,
583
],
[
819,
832
],
[
890,
903
]
],
[
[
612,
626
],
[
1552,
1566
],
[
1626,
1640
]
],
[
[
706,
714
],
[
834,
842
],
[
905,
913
],
[
1378,
1386
],
[
1579,
1587
],
[
1652,
1660
],
[
1739,
1747
],
[
1775,
1783
],
[
1888,
1896
]
],
[
[
848,
853
],
[
1442,
1447
],
[
1474,
1479
]
],
[
[
860,
863
],
[
927,
930
],
[
1269,
1272
]
],
[
[
921,
922
],
[
1101,
1102
],
[
1123,
1124
]
],
[
[
923,
924
],
[
1188,
1189
],
[
1211,
1212
]
],
[
[
1082,
1089
],
[
1127,
1134
],
[
1273,
1280
],
[
1281,
1288
]
],
[
[
1169,
1176
],
[
1215,
1222
],
[
1300,
1307
],
[
1308,
1315
]
],
[
[
1255,
1266
],
[
1457,
1468
]
],
[
[
1488,
1489
],
[
1917,
1918
]
],
[
[
1490,
1498
],
[
1569,
1577
],
[
1642,
1650
]
],
[
[
1596,
1599
],
[
1672,
1675
],
[
1676,
1679
],
[
1693,
1696
],
[
1697,
1700
],
[
1836,
1839
],
[
1922,
1925
]
],
[
[
1864,
1869
],
[
1910,
1915
]
],
[
[
1956,
1961
],
[
2587,
2592
]
],
[
[
1963,
1973
],
[
2091,
2101
]
],
[
[
2000,
2005
],
[
2070,
2075
]
],
[
[
2015,
2016
],
[
2102,
2103
]
],
[
[
2017,
2025
],
[
2175,
2183
],
[
2250,
2258
],
[
2561,
2569
]
],
[
[
2056,
2067
],
[
2649,
2660
],
[
2688,
2699
]
],
[
[
2078,
2088
],
[
2116,
2126
],
[
2458,
2468
]
],
[
[
2110,
2111
],
[
2289,
2290
],
[
2311,
2312
]
],
[
[
2112,
2113
],
[
2376,
2377
],
[
2399,
2400
]
],
[
[
2270,
2277
],
[
2315,
2322
],
[
2469,
2476
],
[
2477,
2484
]
],
[
[
2357,
2364
],
[
2403,
2410
],
[
2496,
2503
],
[
2504,
2511
]
],
[
[
2443,
2455
],
[
2670,
2682
]
]
] |
"""payu.cli
========
Command line interface tools
:copyright: Copyright 2011 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details
"""
import argparse
from distutils import sysconfig
import importlib
import os
import pkgutil
import shlex
import subprocess
import sys
import payu
import payu.envmod as envmod
from payu.models import index as supported_models
import payu.subcommands
# Default configuration
DEFAULT_CONFIG = 'config.yaml'
def parse():
"""Parse the command line inputs and execute the subcommand."""
# Build the list of subcommand modules
modnames = [mod for (_, mod, _)
in pkgutil.iter_modules(payu.subcommands.__path__,
prefix=payu.subcommands.__name__ + '.')
if mod.endswith('_cmd')]
subcmds = [importlib.import_module(mod) for mod in modnames]
# Construct the subcommand parser
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='payu {0}'.format(payu.__version__))
subparsers = parser.add_subparsers()
for cmd in subcmds:
cmd_parser = subparsers.add_parser(cmd.title, **cmd.parameters)
cmd_parser.set_defaults(run_cmd=cmd.runcmd)
for arg in cmd.arguments:
cmd_parser.add_argument(*arg['flags'], **arg['parameters'])
# Display help if no arguments are provided
if len(sys.argv) == 1:
parser.print_help()
else:
args = vars(parser.parse_args())
run_cmd = args.pop('run_cmd')
run_cmd(**args)
def get_model_type(model_type, config):
"""Determine and validate the active model type."""
# If no model type is given, then check the config file
if not model_type:
model_type = config.get('model')
# If there is still no model type, try the parent directory
if not model_type:
model_type = os.path.basename(os.path.abspath(os.pardir))
print('payu: warning: Assuming model is {0} based on parent directory '
'name.'.format(model_type))
if model_type not in supported_models:
print('payu: error: Unknown model {0}'.format(model_type))
sys.exit(-1)
def set_env_vars(init_run=None, n_runs=None, lab_path=None, dir_path=None,
reproduce=None):
"""Construct the environment variables used by payu for resubmissions."""
payu_env_vars = {}
# Setup Python dynamic library link
lib_paths = sysconfig.get_config_vars('LIBDIR')
payu_env_vars['LD_LIBRARY_PATH'] = ':'.join(lib_paths)
if 'PYTHONPATH' in os.environ:
payu_env_vars['PYTHONPATH'] = os.environ['PYTHONPATH']
# Set (or import) the path to the PAYU scripts (PAYU_PATH)
# NOTE: We may be able to use sys.path[0] here.
payu_binpath = os.environ.get('PAYU_PATH')
if not payu_binpath or not os.path.isdir(payu_binpath):
payu_binpath = os.path.dirname(sys.argv[0])
payu_env_vars['PAYU_PATH'] = payu_binpath
# Set the run counters
if init_run:
init_run = int(init_run)
assert init_run >= 0
payu_env_vars['PAYU_CURRENT_RUN'] = init_run
if n_runs:
n_runs = int(n_runs)
assert n_runs > 0
payu_env_vars['PAYU_N_RUNS'] = n_runs
# Import explicit project paths
if lab_path:
payu_env_vars['PAYU_LAB_PATH'] = os.path.normpath(lab_path)
if dir_path:
payu_env_vars['PAYU_DIR_PATH'] = os.path.normpath(dir_path)
if reproduce:
payu_env_vars['PAYU_REPRODUCE'] = reproduce
return payu_env_vars
def submit_job(pbs_script, pbs_config, pbs_vars=None):
"""Submit a userscript the scheduler."""
# Initialisation
if pbs_vars is None:
pbs_vars = {}
pbs_flags = []
pbs_queue = pbs_config.get('queue', 'normal')
pbs_flags.append('-q {queue}'.format(queue=pbs_queue))
pbs_project = pbs_config.get('project', os.environ['PROJECT'])
pbs_flags.append('-P {project}'.format(project=pbs_project))
pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs']
for res_key in pbs_resources:
res_flags = []
res_val = pbs_config.get(res_key)
if res_val:
res_flags.append('{key}={val}'.format(key=res_key, val=res_val))
if res_flags:
pbs_flags.append('-l {res}'.format(res=','.join(res_flags)))
# TODO: Need to pass lab.config_path somehow...
pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd()))
if pbs_jobname:
# PBSPro has a 15-character jobname limit
pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15]))
pbs_priority = pbs_config.get('priority')
if pbs_priority:
pbs_flags.append('-p {priority}'.format(priority=pbs_priority))
pbs_flags.append('-l wd')
pbs_join = pbs_config.get('join', 'n')
if pbs_join not in ('oe', 'eo', 'n'):
print('payu: error: unknown qsub IO stream join setting.')
sys.exit(-1)
else:
pbs_flags.append('-j {join}'.format(join=pbs_join))
# Append environment variables to qsub command
# TODO: Support full export of environment variables: `qsub -V`
pbs_vstring = ','.join('{0}={1}'.format(k, v)
for k, v in pbs_vars.items())
pbs_flags.append('-v ' + pbs_vstring)
# Append any additional qsub flags here
pbs_flags_extend = pbs_config.get('qsub_flags')
if pbs_flags_extend:
pbs_flags.append(pbs_flags_extend)
if not os.path.isabs(pbs_script):
# NOTE: PAYU_PATH is always set if `set_env_vars` was always called.
# This is currently always true, but is not explicitly enforced.
# So this conditional check is a bit redundant.
payu_bin = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0]))
pbs_script = os.path.join(payu_bin, pbs_script)
assert os.path.isfile(pbs_script)
# Set up environment modules here for PBS.
envmod.setup()
envmod.module('load', 'pbs')
# Construct job submission command
cmd = 'qsub {flags} -- {python} {script}'.format(
flags=' '.join(pbs_flags),
python=sys.executable,
script=pbs_script
)
print(cmd)
subprocess.check_call(shlex.split(cmd))
| [
[
[
206,
214
],
[
972,
980
]
],
[
[
237,
246
],
[
2537,
2546
]
],
[
[
254,
263
],
[
870,
879
]
],
[
[
271,
273
],
[
1969,
1971
],
[
1986,
1988
],
[
2002,
2004
],
[
2655,
2657
],
[
2705,
2707
],
[
2865,
2867
],
[
2925,
2927
],
[
2977,
2979
],
[
3425,
3427
],
[
3511,
3513
],
[
3981,
3983
],
[
4518,
4520
],
[
4535,
4537
],
[
5548,
5550
],
[
5838,
5840
],
[
5889,
5891
],
[
5939,
5941
]
],
[
[
281,
288
],
[
685,
692
]
],
[
[
296,
301
],
[
6300,
6305
]
],
[
[
309,
319
],
[
6278,
6288
]
],
[
[
327,
330
],
[
1480,
1483
],
[
2255,
2258
],
[
2993,
2996
],
[
5019,
5022
],
[
5854,
5857
],
[
6210,
6213
]
],
[
[
339,
343
]
],
[
[
351,
372
],
[
6018,
6024
],
[
6037,
6043
]
],
[
[
397,
422
],
[
2162,
2178
]
],
[
[
430,
446
],
[
706,
710
],
[
780,
784
],
[
1103,
1107
]
],
[
[
472,
486
]
],
[
[
509,
514
]
],
[
[
1643,
1657
]
],
[
[
2274,
2286
]
],
[
[
3641,
3651
]
]
] |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
""" Create a new user profile"""
if not email:
raise ValueError('Users must have an email address')
email =self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
""" Create a new superuser profile"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name"""
return self.name
def get_short_name(self):
"""Retrieve shot name of user"""
return self.name
def __str__(self):
""" Return string representation for our users"""
return self.email
class ProfileFeedItem(models.Model):
""" Profile status update """
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete = models.CASCADE,
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
""" Return the model as string """
return self.status_text
| [
[
[
22,
28
],
[
993,
999
],
[
1051,
1057
],
[
1100,
1106
],
[
1149,
1155
],
[
1602,
1608
],
[
1670,
1676
],
[
1735,
1741
],
[
1776,
1782
],
[
1826,
1832
]
],
[
[
68,
84
],
[
898,
914
]
],
[
[
86,
102
],
[
916,
932
]
],
[
[
104,
119
],
[
179,
194
]
],
[
[
144,
152
],
[
1693,
1701
]
],
[
[
160,
178
],
[
1200,
1218
]
],
[
[
886,
897
]
],
[
[
1586,
1601
]
]
] |
import dash
import dash_bootstrap_components as dbc
# bootstrap theme
# https://bootswatch.com/lux/
external_stylesheets = [dbc.themes.YETI]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets,
suppress_callback_exceptions=True)
server = app.server
| [
[
[
7,
11
],
[
149,
153
]
],
[
[
19,
51
],
[
125,
128
]
],
[
[
101,
121
],
[
190,
210
]
],
[
[
143,
146
],
[
272,
275
]
],
[
[
263,
269
]
]
] |
# coding: utf-8
__author__ = 'cleardusk'
import os.path as osp
import time
import numpy as np
import cv2
import torch
from torchvision.transforms import Compose
import torch.backends.cudnn as cudnn
import _3DDFA_V2.models as models
from _3DDFA_V2.bfm import BFMModel
from _3DDFA_V2.utils.io import _load
from _3DDFA_V2.utils.functions import (
crop_img, parse_roi_box_from_bbox, parse_roi_box_from_landmark,
)
from _3DDFA_V2.utils.tddfa_util import (
load_model, _parse_param, similar_transform,
ToTensorGjz, NormalizeGjz
)
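# Resolve bundled config paths relative to this file so they are found regardless of the
# current working directory.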
make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)
class TDDFA(object):
"""TDDFA: named Three-D Dense Face Alignment (TDDFA)"""
def __init__(self, **kvs):
torch.set_grad_enabled(False)
print(make_abs_path('configs/bfm_noneck_v3.pkl'))
# load BFM
self.bfm = BFMModel(
bfm_fp=kvs.get('bfm_fp', make_abs_path('configs/bfm_noneck_v3.pkl')),
shape_dim=kvs.get('shape_dim', 40),
exp_dim=kvs.get('exp_dim', 10)
)
self.tri = self.bfm.tri
# config
self.gpu_mode = kvs.get('gpu_mode', False)
self.gpu_id = kvs.get('gpu_id', 0)
self.size = kvs.get('size', 120)
param_mean_std_fp = kvs.get(
'param_mean_std_fp', make_abs_path(f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl')
)
# load model, default output is dimension with length 62 = 12(pose) + 40(shape) +10(expression)
model = getattr(models, kvs.get('arch'))(
num_classes=kvs.get('num_params', 62),
widen_factor=kvs.get('widen_factor', 1),
size=self.size,
mode=kvs.get('mode', 'small')
)
model = load_model(model, kvs.get('checkpoint_fp'))
if self.gpu_mode:
cudnn.benchmark = True
model = model.cuda(device=self.gpu_id)
self.model = model
self.model.eval() # eval mode, fix BN
# data normalization
transform_normalize = NormalizeGjz(mean=127.5, std=128)
transform_to_tensor = ToTensorGjz()
transform = Compose([transform_to_tensor, transform_normalize])
self.transform = transform
# params normalization config
r = _load(param_mean_std_fp)
self.param_mean = r.get('mean')
self.param_std = r.get('std')
# print('param_mean and param_srd', self.param_mean, self.param_std)
def __call__(self, img_ori, objs, **kvs):
"""The main call of TDDFA, given image and box / landmark, return 3DMM params and roi_box
:param img_ori: the input image
:param objs: the list of box or landmarks
:param kvs: options
:return: param list and roi_box list
"""
# Crop image, forward to get the param
param_lst = []
roi_box_lst = []
crop_policy = kvs.get('crop_policy', 'box')
for obj in objs:
if crop_policy == 'box':
# by face box
roi_box = parse_roi_box_from_bbox(obj)
elif crop_policy == 'landmark':
# by landmarks
roi_box = parse_roi_box_from_landmark(obj)
else:
raise ValueError(f'Unknown crop policy {crop_policy}')
roi_box_lst.append(roi_box)
img = crop_img(img_ori, roi_box)
img = cv2.resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_LINEAR)
inp = self.transform(img).unsqueeze(0)
if self.gpu_mode:
inp = inp.cuda(device=self.gpu_id)
if kvs.get('timer_flag', False):
end = time.time()
param = self.model(inp)
elapse = f'Inference: {(time.time() - end) * 1000:.1f}ms'
print(elapse)
else:
param = self.model(inp)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
param = param * self.param_std + self.param_mean # re-scale
# print('output', param)
param_lst.append(param)
return param_lst, roi_box_lst
def recon_vers(self, param_lst, roi_box_lst, **kvs):
dense_flag = kvs.get('dense_flag', False)
size = self.size
ver_lst = []
for param, roi_box in zip(param_lst, roi_box_lst):
if dense_flag:
R, offset, alpha_shp, alpha_exp = _parse_param(param)
pts3d = R @ (self.bfm.u + self.bfm.w_shp @ alpha_shp + self.bfm.w_exp @ alpha_exp). \
reshape(3, -1, order='F') + offset
pts3d = similar_transform(pts3d, roi_box, size)
else:
R, offset, alpha_shp, alpha_exp = _parse_param(param)
pts3d = R @ (self.bfm.u_base + self.bfm.w_shp_base @ alpha_shp + self.bfm.w_exp_base @ alpha_exp). \
reshape(3, -1, order='F') + offset
pts3d = similar_transform(pts3d, roi_box, size)
ver_lst.append(pts3d)
return ver_lst
| [
[
[
17,
27
]
],
[
[
50,
64
],
[
567,
570
],
[
576,
579
],
[
588,
591
]
],
[
[
72,
76
],
[
3683,
3687
],
[
3775,
3779
]
],
[
[
84,
95
],
[
3965,
3967
]
],
[
[
103,
106
],
[
3404,
3407
],
[
3464,
3467
]
],
[
[
114,
119
],
[
740,
745
]
],
[
[
155,
162
],
[
2143,
2150
]
],
[
[
170,
199
],
[
1836,
1841
]
],
[
[
208,
234
],
[
1527,
1533
]
],
[
[
261,
269
],
[
866,
874
]
],
[
[
301,
306
],
[
2281,
2286
]
],
[
[
351,
359
],
[
3359,
3367
]
],
[
[
361,
384
],
[
3048,
3071
]
],
[
[
386,
413
],
[
3178,
3205
]
],
[
[
462,
472
],
[
1753,
1763
]
],
[
[
474,
486
],
[
4453,
4465
],
[
4762,
4774
]
],
[
[
488,
505
],
[
4654,
4671
],
[
4978,
4995
]
],
[
[
511,
522
],
[
2109,
2120
]
],
[
[
524,
536
],
[
2045,
2057
]
],
[
[
540,
553
],
[
784,
797
],
[
913,
926
],
[
1315,
1328
]
],
[
[
625,
630
]
]
] |
import sys
def sol():
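# Read an NxN adjacency matrix from stdin and, for every start node, print a row of
# reachability flags computed by the recursive dfs below.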
input = sys.stdin.readline
N = int(input())
node = [[] for i in range(N)]
for i in range(N):
vector = list(map(int, input().split(" ")))
for j in range(N):
if vector[j] == 1:
node[i].append(j)
for i in range(N):
visited = ["0"] * N
dfs(node, visited, i)
print(" ".join(visited))
def bfs(N, node, i):
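# Iterative BFS alternative (not called by sol): returns the reachability flags as a list.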
queue = []
visited = [False] * N
queue.append(i)
while len(queue) > 0:
v = queue.pop(0)
for w in node[v]:
if not visited[w]:
visited[w] = True
queue.append(w)
result = []
for check in visited:
if check:
result.append("1")
else:
result.append("0")
return result
def dfs(node, visited, v):
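# Recursive DFS that marks every node reachable from v with "1".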
for w in node[v]:
if visited[w] == "0":
visited[w] = "1"
dfs(node, visited, w)
if __name__ == "__main__":
sol()
| [
[
[
7,
10
],
[
36,
39
]
],
[
[
17,
20
],
[
980,
983
]
],
[
[
397,
400
]
],
[
[
809,
812
],
[
336,
339
],
[
925,
928
]
]
] |
from .._sign.sphincs_sha256_128f_robust import ffi as __ffi, lib as __lib
from .common import _sign_generate_keypair_factory, _sign_sign_factory, _sign_verify_factory
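# Bind the SPHINCS+ SHA-256 128f-robust implementation: the key and signature sizes come
# straight from the compiled library, and keypair/sign/verify are built by the shared
# factory helpers around the ffi/lib handles.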
PUBLIC_KEY_SIZE = __lib.CRYPTO_PUBLICKEYBYTES
SECRET_KEY_SIZE = __lib.CRYPTO_SECRETKEYBYTES
SIGNATURE_SIZE = __lib.CRYPTO_BYTES
generate_keypair = _sign_generate_keypair_factory(__ffi, __lib)
sign = _sign_sign_factory(__ffi, __lib)
verify = _sign_verify_factory(__ffi, __lib)
| [
[
[
47,
59
],
[
347,
352
],
[
387,
392
],
[
431,
436
]
],
[
[
61,
73
],
[
186,
191
],
[
232,
237
],
[
277,
282
],
[
354,
359
],
[
394,
399
],
[
438,
443
]
],
[
[
94,
124
],
[
316,
346
]
],
[
[
126,
144
],
[
368,
386
]
],
[
[
146,
166
],
[
410,
430
]
],
[
[
168,
183
]
],
[
[
214,
229
]
],
[
[
260,
274
]
],
[
[
297,
313
]
],
[
[
361,
365
]
],
[
[
401,
407
]
]
] |
from n0s3p4ss.domain_list import SubdomainList
from n0s3p4ss.attack_surface_discoverer import discover
from n0s3p4ss.sniffer_switcher_http_status_based import apply_flow_for
def sniff(target_domains):
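# Pipeline: enumerate subdomains for every target domain, discover the attack surface of
# each subdomain, then run the HTTP-status-based sniffing flow on every surface.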
subdomains = SubdomainList().list_each_domain_subdomains(target_domains)
attack_surfaces = [discover(subdomain) for subdomain in subdomains]
return [
apply_flow_for(attack_surface) for attack_surface in attack_surfaces
]
| [
[
[
33,
46
],
[
220,
233
]
],
[
[
94,
102
],
[
303,
311
]
],
[
[
159,
173
],
[
373,
387
]
],
[
[
180,
185
]
]
] |
import DBinterface as DB
import random
import datetime as dt
def print_ranking(my_ranking,ranking_size,top_or_bottom):
Tweet=""
if top_or_bottom == True:
Tweet += ("The first " + ranking_size + " cities with more CO2 emissions due to traffic are: \r\n ")
else:
Tweet += ("The first " + ranking_size + " cities with less CO2 emissions due to traffic are: \r\n" +
"Congratulations!!!!! The Earth loves you :D \r\n")
for i in range(ranking_size):
Tweet += (str((i+1)) + "º " + str(my_ranking[i][0]) + " with a CO2 value of " + str(my_ranking[i][1]) + "\r\n")
return(Tweet)
def rank(api):
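# Pick a random ranking size and direction, fetch the ranking from the database and
# tweet the formatted result.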
interface = DB.nasaDBinterface()
ranking_size = random.randint(2,10)
top_or_bottom = random.choice([True, False])
my_ranking = interface.getranking(ranking_size, top_or_bottom)
Tweet=print_ranking(my_ranking,ranking_size,top_or_bottom)
api.update_status(status=Tweet)
def leer_hashtag(T):
L=list(T)
L.append(" ")
for a in range(len(L)):
if L[a]=="#":
a=a+1
ht=[]
while L[a]!=" ":
ht.append(L[a])
a=a+1
ht_salida= ""
for e in ht:
ht_salida += e
return ht_salida
def get_city(TEXT):
L=TEXT.split()
c=""
ciudad=""
for a in range(len(L)):
if L[a]=="#consulta":
break
if L[a]=="City:":
for i in range(len(L)-a-2):
c += L[a+i+1] + " "
x=c.split()
for i in range(len(x)-1):
ciudad += x[i]+" "
if len(x) != 1:
ciudad += x[len(x)-1]
return ciudad.lower()
| [
[
[
7,
24
],
[
666,
668
]
],
[
[
32,
38
],
[
706,
712
],
[
748,
754
]
],
[
[
47,
61
]
],
[
[
67,
80
],
[
855,
868
]
],
[
[
638,
642
]
],
[
[
951,
963
]
],
[
[
1254,
1262
]
]
] |
from experiments.experiments.PubIntegBackground import PubIntegBackground
import numpy as np
if __name__ == "__main__":
for i in np.arange(0.0, 10.0, 0.1):
PubIntegBackground(correlation=False, listing=True, pub='None', intensity=i)
| [
[
[
55,
73
],
[
169,
187
]
],
[
[
81,
92
],
[
134,
136
]
],
[
[
129,
130
],
[
243,
244
]
]
] |
import logging
from typing import Any, Dict, List, Union
import bleach
import cssutils
import markdown
from django.conf import settings
from django.core.mail import EmailMultiAlternatives, get_connection
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from i18nfield.strings import LazyI18nString
from inlinestyler.utils import inline_css
from pretix.base.i18n import language
from pretix.base.models import Event, InvoiceAddress, Order
from pretix.celery_app import app
from pretix.multidomain.urlreverse import build_absolute_uri
logger = logging.getLogger('pretix.base.mail')
INVALID_ADDRESS = 'invalid-pretix-mail-address'
cssutils.log.setLevel(logging.CRITICAL)
class TolerantDict(dict):
def __missing__(self, key):
return key
class SendMailException(Exception):
pass
def mail(email: str, subject: str, template: Union[str, LazyI18nString],
context: Dict[str, Any]=None, event: Event=None, locale: str=None,
order: Order=None, headers: dict=None, sender: str=None):
"""
Sends out an email to a user. The mail will be sent synchronously or asynchronously depending on the installation.
:param email: The email address of the recipient
:param subject: The email subject. Should be localized to the recipients's locale or a lazy object that will be
localized by being casted to a string.
:param template: The filename of a template to be used. It will be rendered with the locale given in the locale
argument and the context given in the next argument. Alternatively, you can pass a LazyI18nString and
``context`` will be used as the argument to a Python ``.format_map()`` call on the template.
:param context: The context for rendering the template (see ``template`` parameter)
:param event: The event this email is related to (optional). If set, this will be used to determine the sender,
a possible prefix for the subject and the SMTP server that should be used to send this email.
:param order: The order this email is related to (optional). If set, this will be used to include a link to the
order below the email.
:param headers: A dict of custom mail headers to add to the mail
:param locale: The locale to be used while evaluating the subject and the template
:param sender: Set the sender email address. If not set and ``event`` is set, the event's default will be used,
otherwise the system default.
:raises MailOrderException: on obvious, immediate failures. Not raising an exception does not necessarily mean
that the email has been sent, just that it has been queued by the email backend.
"""
if email == INVALID_ADDRESS:
return
headers = headers or {}
with language(locale):
if isinstance(context, dict) and order:
try:
context.update({
'invoice_name': order.invoice_address.name,
'invoice_company': order.invoice_address.company
})
except InvoiceAddress.DoesNotExist:
context.update({
'invoice_name': '',
'invoice_company': ''
})
body, body_md = render_mail(template, context)
sender = sender or (event.settings.get('mail_from') if event else settings.MAIL_FROM)
subject = str(subject)
body_plain = body
htmlctx = {
'site': settings.PRETIX_INSTANCE_NAME,
'site_url': settings.SITE_URL,
'body': body_md,
'color': '#8E44B3'
}
if event:
htmlctx['event'] = event
htmlctx['color'] = event.settings.primary_color
if event.settings.mail_from == settings.DEFAULT_FROM_EMAIL and event.settings.contact_mail:
headers['Reply-To'] = event.settings.contact_mail
prefix = event.settings.get('mail_prefix')
if prefix:
subject = "[%s] %s" % (prefix, subject)
body_plain += "\r\n\r\n-- \r\n"
signature = str(event.settings.get('mail_text_signature'))
if signature:
signature = signature.format(event=event.name)
signature_md = signature.replace('\n', '<br>\n')
signature_md = bleach.linkify(bleach.clean(markdown.markdown(signature_md), tags=bleach.ALLOWED_TAGS + ['p', 'br']))
htmlctx['signature'] = signature_md
body_plain += signature
body_plain += "\r\n\r\n-- \r\n"
if order:
body_plain += _(
"You are receiving this email because you placed an order for {event}."
).format(event=event.name)
htmlctx['order'] = order
body_plain += "\r\n"
body_plain += _(
"You can view your order details at the following URL:\n{orderurl}."
).replace("\n", "\r\n").format(
event=event.name, orderurl=build_absolute_uri(
order.event, 'presale:event.order', kwargs={
'order': order.code,
'secret': order.secret
}
)
)
body_plain += "\r\n"
tpl = get_template('pretixbase/email/plainwrapper.html')
body_html = tpl.render(htmlctx)
return mail_send([email], subject, body_plain, body_html, sender, event.id if event else None, headers)
@app.task
def mail_send_task(to: List[str], subject: str, body: str, html: str, sender: str,
event: int=None, headers: dict=None, bcc: List[str]=None) -> bool:
email = EmailMultiAlternatives(subject, body, sender, to=to, bcc=bcc, headers=headers)
if html is not None:
email.attach_alternative(inline_css(html), "text/html")
if event:
event = Event.objects.get(id=event)
backend = event.get_mail_backend()
else:
backend = get_connection(fail_silently=False)
try:
backend.send_messages([email])
except Exception:
logger.exception('Error sending email')
raise SendMailException('Failed to send an email to {}.'.format(to))
def mail_send(*args, **kwargs):
mail_send_task.apply_async(args=args, kwargs=kwargs)
def render_mail(template, context):
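# LazyI18nString templates are formatted directly (unknown placeholders resolve to their
# key name via TolerantDict); plain template names are rendered through the Django template
# engine. Either way the body is also converted to HTML via markdown and linkified with bleach.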
if isinstance(template, LazyI18nString):
body = str(template)
if context:
body = body.format_map(TolerantDict(context))
body_md = bleach.linkify(bleach.clean(markdown.markdown(body), tags=bleach.ALLOWED_TAGS + [
'p', 'pre'
]))
else:
tpl = get_template(template)
body = tpl.render(context)
body_md = bleach.linkify(markdown.markdown(body))
return body, body_md
| [
[
[
7,
14
],
[
595,
602
],
[
703,
710
]
],
[
[
34,
37
],
[
951,
954
]
],
[
[
39,
43
],
[
941,
945
]
],
[
[
45,
49
],
[
5644,
5648
],
[
5765,
5769
]
],
[
[
51,
56
],
[
895,
900
]
],
[
[
65,
71
],
[
4375,
4381
],
[
4390,
4396
],
[
4441,
4447
],
[
6630,
6636
],
[
6645,
6651
],
[
6688,
6694
],
[
6847,
6853
]
],
[
[
79,
87
],
[
681,
689
]
],
[
[
95,
103
],
[
4403,
4411
],
[
6658,
6666
],
[
6862,
6870
]
],
[
[
128,
136
],
[
3388,
3396
],
[
3507,
3515
],
[
3562,
3570
],
[
3811,
3819
]
],
[
[
166,
188
],
[
5802,
5824
]
],
[
[
190,
204
],
[
6099,
6113
]
],
[
[
240,
252
],
[
5406,
5418
],
[
6771,
6783
]
],
[
[
290,
303
],
[
4670,
4671
],
[
4916,
4917
]
],
[
[
334,
348
],
[
906,
920
],
[
6488,
6502
]
],
[
[
380,
390
],
[
5939,
5949
]
],
[
[
421,
429
],
[
2809,
2817
]
],
[
[
461,
466
],
[
969,
974
],
[
6000,
6005
]
],
[
[
468,
482
],
[
3096,
3110
]
],
[
[
484,
489
],
[
1015,
1020
]
],
[
[
520,
523
],
[
5612,
5615
]
],
[
[
566,
584
],
[
5103,
5121
]
],
[
[
586,
592
],
[
6214,
6220
]
],
[
[
633,
648
],
[
2738,
2753
]
],
[
[
729,
741
],
[
6589,
6601
]
],
[
[
809,
826
],
[
6268,
6285
]
],
[
[
854,
858
]
],
[
[
5625,
5639
],
[
6369,
6383
]
],
[
[
6337,
6346
],
[
5512,
5521
]
],
[
[
6428,
6439
],
[
3283,
3294
]
]
] |
# Time: O(n)
# Space: O(1)
class Solution(object):
# @param {integer[]} nums
# @return {integer[]}
def productExceptSelf(self, nums):
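# Two-pass approach (Python 2 style, hence xrange): left_product[i] first holds the product
# of everything before i, then a running right-side product is multiplied in from the end,
# yielding the product of all elements except nums[i] without using division.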
if not nums:
return []
left_product = [1 for _ in xrange(len(nums))]
for i in xrange(1, len(nums)):
left_product[i] = left_product[i - 1] * nums[i - 1]
right_product = 1
for i in xrange(len(nums) - 2, -1, -1):
right_product *= nums[i + 1]
left_product[i] = left_product[i] * right_product
return left_product
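# Illustrative trace (added comment, not part of the original solution), assuming
# nums = [1, 2, 3, 4]:
#   after the left pass,  left_product == [1, 1, 2, 6]
#   after the right pass, the result is [24, 12, 8, 6] (each entry is the product of all other values)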
| [
[
[
35,
43
]
]
] |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron_lib.api import converters
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as provider
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.services.trunk import constants
from neutron._i18n import _
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils
# This layer is introduced for keeping business logic and
# data persistence decoupled.
def trunk_can_be_managed(context, trunk):
"""Validate that the trunk can be managed."""
if not trunk.admin_state_up:
raise trunk_exc.TrunkDisabled(trunk_id=trunk.id)
def enforce_port_deletion_rules(resource, event, trigger, payload=None):
"""Prohibit the deletion of a port that's used in a trunk."""
# NOTE: the ML2 plugin properly catches these exceptions when raised, but
# non-ML2 plugins might not. To address this we should move the callback
# registry notification emitted in the ML2 plugin's delete_port() higher
# up in the plugin hierarchy.
context = payload.context
port_id = payload.resource_id
subport_obj = trunk_objects.SubPort.get_object(context, port_id=port_id)
if subport_obj:
raise trunk_exc.PortInUseAsSubPort(port_id=port_id,
trunk_id=subport_obj.trunk_id)
trunk_obj = trunk_objects.Trunk.get_object(context, port_id=port_id)
if trunk_obj:
raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id,
trunk_id=trunk_obj.id)
class TrunkPortValidator(object):
def __init__(self, port_id):
self.port_id = port_id
self._port = None
def validate(self, context, parent_port=True):
"""Validate that the port can be used in a trunk.
:param parent_port: True if the port is intended for use
as parent in a trunk.
"""
# TODO(tidwellr): there is a chance of a race between the
# time these checks are performed and the time the trunk
# creation is executed. To be revisited, if it bites.
# Validate that the given port_id is not used by a subport.
subports = trunk_objects.SubPort.get_objects(
context, port_id=self.port_id)
if subports:
raise trunk_exc.TrunkPortInUse(port_id=self.port_id)
# Validate that the given port_id is not used by a trunk.
trunks = trunk_objects.Trunk.get_objects(context, port_id=self.port_id)
if trunks:
raise trunk_exc.ParentPortInUse(port_id=self.port_id)
if parent_port:
# if the port is being used as a parent in a trunk, check if
# it can be trunked, i.e. if it is already associated to physical
# resources (namely it is bound). Bound ports may be used as
# trunk parents, but that depends on the underlying driver in
# charge.
if not self.can_be_trunked_or_untrunked(context):
raise trunk_exc.ParentPortInUse(port_id=self.port_id)
else:
# if the port is being used as subport in a trunk, check if it is a
# port that is not actively used for other purposes, e.g. a router
# port, compute port, DHCP port etc. We have no clue what the side
# effects of connecting the port to a trunk would be, and it is
# better to err on the side of caution and prevent the operation.
self.check_not_in_use(context)
return self.port_id
def is_bound(self, context):
"""Return true if the port is bound, false otherwise."""
# Validate that the given port_id does not have a port binding.
core_plugin = directory.get_plugin()
self._port = core_plugin.get_port(context, self.port_id)
return bool(self._port.get(portbindings.HOST_ID))
def can_be_trunked_or_untrunked(self, context):
""""Return true if a port can be trunked."""
if not self.is_bound(context):
# An unbound port can be trunked, always.
return True
trunk_plugin = directory.get_plugin('trunk')
vif_type = self._port.get(portbindings.VIF_TYPE)
binding_host = self._port.get(portbindings.HOST_ID)
# Determine the driver that will be in charge of the trunk: this
# can be determined based on the vif type, whether or not the
# driver is agent-based, and whether the host is running the agent
# associated to the driver itself.
host_agent_types = utils.get_agent_types_by_host(context, binding_host)
drivers = [
driver for driver in trunk_plugin.registered_drivers
if utils.is_driver_compatible(
context, driver, vif_type, host_agent_types)
]
if len(drivers) > 1:
raise trunk_exc.TrunkPluginDriverConflict()
elif len(drivers) == 1:
return drivers[0].can_trunk_bound_port
else:
return False
def check_not_in_use(self, context):
"""Raises PortInUse for ports assigned for device purposes."""
core_plugin = directory.get_plugin()
self._port = core_plugin.get_port(context, self.port_id)
# NOTE(armax): the trunk extension itself does not make use of the
# device_id field, because it has no reason to. If need be, this
# check can be altered to accommodate the change in logic.
if self._port['device_id']:
raise n_exc.PortInUse(net_id=self._port['network_id'],
port_id=self._port['id'],
device_id=self._port['device_id'])
class SubPortsValidator(object):
def __init__(self, segmentation_types, subports, trunk_port_id=None):
self._segmentation_types = segmentation_types
self.subports = subports
self.trunk_port_id = trunk_port_id
def validate(self, context,
basic_validation=False, trunk_validation=True):
"""Validate that subports can be used in a trunk."""
# Perform basic validation on subports, in case subports
# are not automatically screened by the API layer.
if basic_validation:
msg = validators.validate_subports(self.subports)
if msg:
raise n_exc.InvalidInput(error_message=msg)
if trunk_validation:
trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id)
subport_mtus = self._prepare_subports(context)
return [self._validate(context, s, trunk_port_mtu, subport_mtus)
for s in self.subports]
else:
return self.subports
def _prepare_subports(self, context):
"""Utility method to parse subports in the request
The objective of this method is two-fold:
* Update subports segmentation details if INHERIT is requested;
* Return the MTU for each of the subport in the request.
This method does two things rather than one to allow us to hit the DB
once, and thus minimize the number of lookups required to learn about
the segmentation type and the MTU of the networks on which subports
are plugged.
"""
InheritIndex = (
collections.namedtuple("InheritIndex", "index has_inherit"))
port_ids = {}
any_has_inherit = False
for i, s in enumerate(self.subports):
has_inherit = (s.get('segmentation_type') ==
constants.SEGMENTATION_TYPE_INHERIT)
any_has_inherit |= has_inherit
port_ids[s['port_id']] = (
InheritIndex(index=i, has_inherit=has_inherit))
core_plugin = directory.get_plugin()
if (any_has_inherit and
not extensions.is_extension_supported(
core_plugin, provider.ALIAS)):
msg = (_("Cannot accept segmentation type %s") %
constants.SEGMENTATION_TYPE_INHERIT)
raise n_exc.InvalidInput(error_message=msg)
ports = core_plugin.get_ports(context, filters={'id': port_ids})
network_port_map = collections.defaultdict(list)
for p in ports:
network_port_map[p['network_id']].append({'port_id': p['id']})
networks = core_plugin.get_networks(
context.elevated(), filters={'id': network_port_map})
subport_mtus = {}
for net in networks:
for port in network_port_map[net['id']]:
if port_ids[port['port_id']].has_inherit:
port.update(
{'segmentation_id': net[provider.SEGMENTATION_ID],
'segmentation_type': net[provider.NETWORK_TYPE]})
self.subports[port_ids[port['port_id']].index] = port
# To speed up the request, record the network MTU for each
# subport to avoid hitting the DB more than necessary. Do
# that only if the extension is available.
if extensions.is_extension_supported(core_plugin, 'net-mtu'):
subport_mtus[port['port_id']] = net[api.MTU]
return subport_mtus
def _get_port_mtu(self, context, port_id):
"""Get port MTU
Return MTU for the network where the given port belongs to.
If the network or port cannot be obtained, or if MTU is not defined,
returns None.
"""
core_plugin = directory.get_plugin()
if not extensions.is_extension_supported(core_plugin, 'net-mtu'):
return
try:
port = core_plugin.get_port(context, port_id)
return core_plugin.get_network(
context, port['network_id'])[api.MTU]
except (n_exc.PortNotFound, n_exc.NetworkNotFound):
# A concurrent request might have made the port or network
# disappear; though during DB insertion, the subport request
# will fail on integrity constraint, it is safer to return
# a None MTU here.
return
def _raise_subport_is_parent_port(self, context, subport):
if subport['port_id'] == self.trunk_port_id:
raise trunk_exc.ParentPortInUse(port_id=subport['port_id'])
def _raise_subport_invalid_mtu(self, context, subport, trunk_port_mtu,
subport_mtus):
# Check MTU sanity - subport MTU must not exceed trunk MTU.
# If for whatever reason trunk_port_mtu is not available,
# the MTU sanity check cannot be enforced.
if trunk_port_mtu:
# missing MTUs for subports is not an error condition: the
            # subport UUID may be invalid or non-existent.
subport_mtu = subport_mtus.get(subport['port_id'])
if subport_mtu and subport_mtu > trunk_port_mtu:
raise trunk_exc.SubPortMtuGreaterThanTrunkPortMtu(
port_id=subport['port_id'],
port_mtu=subport_mtu,
trunk_id=self.trunk_port_id,
trunk_mtu=trunk_port_mtu
)
def _raise_if_segmentation_details_missing(self, subport):
try:
segmentation_type = subport["segmentation_type"]
segmentation_id = (
converters.convert_to_int(subport["segmentation_id"]))
return (segmentation_type, segmentation_id)
except KeyError:
msg = _("Invalid subport details '%s': missing segmentation "
"information. Must specify both segmentation_id and "
"segmentation_type") % subport
raise n_exc.InvalidInput(error_message=msg)
except n_exc.InvalidInput:
msg = _("Invalid subport details: segmentation_id '%s' is "
"not an integer") % subport["segmentation_id"]
raise n_exc.InvalidInput(error_message=msg)
def _raise_if_segmentation_details_invalid(self,
segmentation_type,
segmentation_id):
if segmentation_type not in self._segmentation_types:
msg = _("Unknown segmentation_type '%s'") % segmentation_type
raise n_exc.InvalidInput(error_message=msg)
if not self._segmentation_types[segmentation_type](segmentation_id):
msg = _("Segmentation ID '%s' is not in range") % segmentation_id
raise n_exc.InvalidInput(error_message=msg)
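    # Added note (illustrative, not from the original module): a well-formed subport
    # dict passed through the checks above looks like
    # {'port_id': '<uuid>', 'segmentation_type': 'vlan', 'segmentation_id': 100};
    # both segmentation fields must be present and segmentation_id must be an integer
    # accepted by the validator registered for that segmentation type.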
def _raise_if_subport_is_used_in_other_trunk(self, context, subport):
trunk_validator = TrunkPortValidator(subport['port_id'])
trunk_validator.validate(context, parent_port=False)
def _validate(self, context, subport, trunk_port_mtu, subport_mtus):
self._raise_subport_is_parent_port(context, subport)
self._raise_subport_invalid_mtu(
context, subport, trunk_port_mtu, subport_mtus)
segmentation_type, segmentation_id = (
self._raise_if_segmentation_details_missing(subport))
self._raise_if_segmentation_details_invalid(
segmentation_type, segmentation_id)
self._raise_if_subport_is_used_in_other_trunk(context, subport)
return subport
| [
[
[
605,
616
],
[
8187,
8198
],
[
9073,
9084
]
],
[
[
646,
656
],
[
12238,
12248
]
],
[
[
697,
709
],
[
4722,
4734
],
[
5056,
5068
],
[
5117,
5129
]
],
[
[
750,
774
],
[
8781,
8789
],
[
9561,
9569
],
[
9638,
9646
]
],
[
[
803,
813
],
[
8713,
8723
],
[
9964,
9974
],
[
10429,
10439
]
],
[
[
842,
852
],
[
7127,
7137
]
],
[
[
877,
896
],
[
6379,
6384
],
[
7213,
7218
],
[
8934,
8939
],
[
10693,
10698
],
[
10713,
10718
],
[
12591,
12596
],
[
12644,
12649
],
[
12821,
12826
],
[
13198,
13203
],
[
13410,
13415
]
],
[
[
929,
938
],
[
4599,
4608
],
[
4992,
5001
],
[
6022,
6031
],
[
8638,
8647
],
[
10390,
10399
]
],
[
[
975,
978
],
[
10079,
10082
],
[
10668,
10671
]
],
[
[
1018,
1027
],
[
8432,
8441
],
[
8879,
8888
]
],
[
[
1055,
1056
],
[
8818,
8819
],
[
12392,
12393
],
[
12682,
12683
],
[
13124,
13125
],
[
13332,
13333
]
],
[
[
1085,
1107
],
[
1971,
1984
],
[
2200,
2213
],
[
3055,
3068
],
[
3303,
3316
]
],
[
[
1143,
1166
],
[
1439,
1448
],
[
2064,
2073
],
[
2289,
2298
],
[
3172,
3181
],
[
3403,
3412
],
[
3880,
3889
],
[
5727,
5736
],
[
11137,
11146
],
[
11805,
11814
]
],
[
[
1202,
1207
],
[
5428,
5433
],
[
5581,
5586
]
],
[
[
1304,
1324
]
],
[
[
1488,
1515
]
],
[
[
2417,
2435
],
[
13549,
13567
]
],
[
[
6565,
6582
]
]
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestChangeLanguage(GaiaTestCase):
def test_change_language_settings(self):
lang_name = self.marionette.execute_script("""
var qps = window.wrappedJSObject.navigator.mozL10n.qps;
return qps['qps-ploc'].name;
""")
header = self.marionette.execute_script("""
var qps = window.wrappedJSObject.navigator.mozL10n.qps;
return qps['qps-ploc'].translate('Settings');
""")
self.data_layer.set_setting('devtools.qps.enabled', True)
settings = Settings(self.marionette)
settings.launch()
language_settings = settings.open_language_settings()
language_settings.select_language(lang_name)
self.wait_for_condition(lambda m: language_settings.current_language == 'qps-ploc')
language_settings.go_back()
# Verify that language has changed
self.wait_for_condition(lambda m: settings.header_text == header)
self.assertEqual(self.data_layer.get_setting('language.current'), "qps-ploc")
| [
[
[
221,
233
],
[
309,
321
]
],
[
[
273,
281
],
[
824,
832
]
],
[
[
290,
308
]
]
] |
# else stmt
# using else block after for loop
s = 0
for i in range(1, 6):
s += i
else:
print("end of for loop!")
print("sum =",s)
# using else block after while loop
r = n = 1
while n <= 5:
r *= n
n += 1
else:
print("end of while loop!")
print("5! = " + str(r))
if r==3:
pass
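# Added note: the else branch of a for/while loop runs only when the loop finishes
# without hitting a break. A minimal (hypothetical) counter-example:
# for i in range(3):
#     if i == 1:
#         break
# else:
#     print("never printed, because the loop was stopped by break")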
| [
[
[
48,
49
],
[
82,
83
]
],
[
[
59,
60
],
[
87,
88
]
],
[
[
189,
190
],
[
303,
304
],
[
219,
220
],
[
313,
314
]
],
[
[
193,
194
],
[
206,
207
],
[
224,
225
],
[
231,
232
]
]
] |
# Generated by Django 3.0.3 on 2020-04-22 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0018_auto_20200422_1314'),
]
operations = [
migrations.AlterField(
model_name='user_movie',
name='insert_date',
field=models.DateTimeField(default='2020-04-22T13:20:19.335148', editable=False),
),
]
| [
[
[
71,
81
],
[
108,
118
],
[
232,
242
]
],
[
[
83,
89
],
[
342,
348
]
],
[
[
98,
107
]
]
] |
import os
import re
import subprocess
import logging
"""
Uses command line pdfinfo utility (from the poppler package) for various
small operations (e.g. get pdf page count).
"""
logger = logging.getLogger(__name__)
def get_pagecount(filepath):
"""
Returns the number of pages in a PDF document as integer.
filepath - is filesystem path to a PDF document
"""
if not os.path.isfile(filepath):
raise ValueError("Filepath %s is not a file" % filepath)
if os.path.isdir(filepath):
raise ValueError("Filepath %s is a directory!" % filepath)
base, ext = os.path.splitext(filepath)
# pure images (png, jpeg) have only one page :)
if ext and ext.lower() in ('.jpeg', '.png', '.jpg'):
# whatever png/jpg image is there - it is
# considered by default one page document.
return 1
if ext and ext.lower() not in ('.pdf',):
raise ValueError(
"Only jpeg, png and pdf are handlerd by this"
" method"
)
# pdfinfo "${PDFFILE}" | grep Pages
cmd = ["/usr/bin/pdfinfo", filepath]
compl = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
if compl.returncode:
logger.error(
"get_pagecount: cmd=%s args=%s stdout=%s stderr=%s code=%s",
cmd,
compl.args,
compl.stdout,
compl.stderr,
compl.returncode,
stack_info=True
)
raise Exception("Error occured while getting document page count.")
lines = compl.stdout.decode('utf-8').split('\n')
# look up for the line containing "Pages: 11"
for line in lines:
x = re.match("Pages:\W+(\d+)$", line.strip())
if x:
return int(x.group(1))
return 0
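# Illustrative sketch (not part of the original module): how the "Pages:" line of
# pdfinfo output is matched by the regex above. The sample output is hypothetical.
if __name__ == '__main__':
    sample_output = "Producer:  GPL Ghostscript\nPages:          11\nEncrypted:      no"
    for sample_line in sample_output.split('\n'):
        match = re.match(r"Pages:\W+(\d+)$", sample_line.strip())
        if match:
            print(match.group(1))  # prints "11"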
| [
[
[
7,
9
],
[
385,
387
],
[
484,
486
],
[
593,
595
]
],
[
[
17,
19
],
[
1702,
1704
]
],
[
[
27,
37
],
[
1105,
1115
],
[
1149,
1159
],
[
1181,
1191
]
],
[
[
45,
52
],
[
184,
191
]
],
[
[
175,
181
],
[
1238,
1244
]
],
[
[
218,
231
]
]
] |
from .models import Restriction
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=Restriction)
def post_save_restriction(sender, **kwargs):
msg = "worked"
pass
| [
[
[
20,
31
],
[
146,
157
]
],
[
[
69,
78
],
[
128,
137
]
],
[
[
107,
115
],
[
119,
127
]
],
[
[
163,
184
]
]
] |
#!/usr/bin/python
from flask import Flask, request, flash, redirect, render_template, jsonify
from flaskext.mysql import MySQL
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
import twilio.twiml
import random
import requests
import json
import omdb
from googleplaces import GooglePlaces, types, lang
from microsofttranslator import Translator
from yahoo_finance import Share
from twilio.rest import TwilioRestClient
gp_api_key = 'AIzaSyAX_75N29J--rh3Qj9gXjMBVx9IuD_Um74'
google_places = GooglePlaces(gp_api_key)
bing_api_key = 'oeToVPEyRZIASRK2n2byOU1x0EMatLIpd8kCIvwXmMw'
# Credentials owner: avikantsainidbz@gmail.com
# Find these values at https://twilio.com/user/account
twilio_account_sid = "ACab3e465e67051257d227bf49a3c9a58e"
twilio_auth_token = "ca96731e12b0442bcf5b1c8f7dedc58d"
admin_phone = "+918095138333"
# admin_phone = "+918095718111"
# Returns a JSON formatted data with a HTTP status code
def dataFormatter(code, message, data):
resp = jsonify({
'code': code,
'message': message,
'data': data
})
resp.status_code = code
return resp
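# Example (added for clarity; values hypothetical): dataFormatter(200, "OK", {"id": 1})
# returns a Flask JSON response whose body carries code/message/data and whose
# HTTP status code is set to 200.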
def get_verify_name(id, s, e):
verify_url = "http://api.tvmaze.com/shows/" + str(id) + "/episodebynumber?season=" + str(s) + "&number=" + str(e)
resp = requests.get(verify_url)
j = json.loads(resp.text)
name = j['name']
return name
test_mode = False
app = Flask(__name__)
app.config.from_object('config')
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'b4dea37336a229'
app.config['MYSQL_DATABASE_PASSWORD'] = '423dbfab'
app.config['MYSQL_DATABASE_DB'] = 'heroku_d5dd20eac082bba'
app.config['MYSQL_DATABASE_HOST'] = 'us-cdbr-iron-east-03.cleardb.net'
mysql.init_app(app)
# Main route
class SMSForm(Form):
phone_number = StringField('phone_number', validators=[DataRequired()])
query_string = StringField('query_string', validators=[DataRequired()])
# password_field = PasswordField('password_field', validators=[DataRequired()])
@app.route("/", methods=['GET', 'POST'])
def home_page():
form = SMSForm()
if form.validate_on_submit():
query = str(form.query_string.data)
number = str(form.phone_number.data)
# password = str(form.password_field.data)
# if password == get_verify_name(2, 4, 2):
print("Sending sms to " + number + " with query \'" + query + "\'.")
# message = process_query(query)
message = ""
if query.lower().startswith('subscribe'):
print("Subscribing...")
words = query.split()
ph_no = words[1]
city = words[2]
state = ""
for w in words[3:]:
state = state + w
subscriptions(ph_no, city.lower(), state.lower())
message = "Successfully subscribed to emergency services. Thank you for using hello_friend."
else:
message = process_query(query)
send_sms_to_number(message, number)
flash("Sent SMS to " + number + ": \'" + message + "\'.")
# else:
# flash("Invalid secret code, admins are not pleased.")
return render_template('index.html', form=form, number=number, query=query, showdetails=False)
return render_template('index.html', form=form, showdetails=True)
class EmergencyForm(Form):
message_field = StringField('message_field', validators=[DataRequired()])
location_field = StringField('location_field', validators=[DataRequired()])
class EmergencyForm2(Form):
phone_field = StringField('phone_field')
city_field = StringField('city_field')
state_field = StringField('state_field')
@app.route("/emergency/", methods=['GET', 'POST'])
def emergency_page():
form = EmergencyForm()
if form.validate_on_submit():
message = str(form.message_field.data)
state = str(form.location_field.data)
print("Broadcasting SMSs to people in state " + str(state))
# Send SMS to people here...
conn = mysql.connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT ph_no FROM subscribers WHERE state = %s", (state))
data = cursor.fetchall()
for value in data:
phone_no = value[0]
print("Sending Broadcast message to " + str(phone_no));
send_sms_to_number(message, str(phone_no))
cursor.close()
conn.close()
except:
cursor.close()
conn.close()
return render_template('emergency.html', form=form, showdetails=False)
form2 = EmergencyForm2()
if form2.validate_on_submit():
phone = str(form2.phone_field.data)
city = str(form2.city_field.data)
state = str(form2.state_field.data)
print("Adding subscription")
subscriptions(phone, city, state)
flash("Successfully subscribed to emergency services. Thank you for using hello_friend.")
return render_template('emergency.html', form=form, showdetails=True)
@app.route("/emergency_list/", methods=['GET'])
def emergency_list():
conn = mysql.connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM subscribers")
values = cursor.fetchall()
data = []
for value in values:
d = [value[0], value[1], value[2]]
data.append(d)
return dataFormatter(200, "LEL", data)
except:
return dataFormatter(400, "LEL", [])
@app.route("/add_s", methods=['GET', 'POST'])
def add_subscription():
form2 = EmergencyForm2()
if form2.validate_on_submit():
phone = str(form2.phone_field.data)
city = str(form2.city_field.data)
state = str(form2.state_field.data)
print("Adding subscription")
subscriptions(phone, city, state)
flash("Successfully subscribed to emergency services. Thank you for using hello_friend.")
return render_template('add.html', form2=form2, showdetails=True)
# Test routes
def send_sms_to_number(message, number):
client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
message = client.messages.create(to=number, from_="+13609001701", body=message)
def send_sms_to_admin(message):
send_sms_to_number(message, admin_phone)
# Test routing to specific phone number
@app.route("/test_phone/<phone>", methods=['POST'])
def test_method(phone):
try:
query = request.form.get('query')
msg = process_query(query)
send_sms_to_number(str(msg), phone)
return "Message \'\'\'" + str(msg) + "\'\'\' sent to " + str(phone) + ".\n"
except:
return "Failed to send message. :(\n"
# Main routes
noIntent = [
"I'm having trouble understanding you, could you rephrase your question?",
"I didn't catch that, could you rephrase your query?",
"Sorry, I didn't understand that. Try rephrasing your request."
]
examples = [
"Navigate from Lucknow to Kanpur",
"Will it rain in New York today",
"SOS Whitefield, Bangalore",
"Translate \'Have you gone crazy\'' to german",
"How do you say Madrid I'm finally here in spanish",
"imdb inception",
"stocks AAPL",
"atm near rajendra nagar hyderabad",
"Define Hitler",
"Show me sports news",
"Directions from Lucknow to Kanpur",
]
technicalIssues = [
"Looks like we are facing technical difficulties, please try again in sometime.",
"Looks like the server is taking to long to respond, please try again in sometime.",
"Looks like we have too many requests to handle at the moment, please try again in sometime.",
"Our monkeys are fixing some bugs in the server, please try again in sometime."
]
@app.route("/no_intent", methods=['POST'])
def no_intent():
message = random.choice(noIntent)
message += "\n\nDid you know you can try something like: \"" + random.choice(examples) + "\"\n\n- hello_friend."
return message
@app.route("/network_error", methods=['POST'])
def technical_issues():
message = random.choice(technicalIssues)
message += "\n\nDid you know you can try something like: \"" + random.choice(examples) + "\"\n\n- hello_friend."
return message
@app.route("/sos", methods=["POST"])
def sos(dict_response):
message = ""
# try:
query_text = dict_response["_text"].lower()
# remove sos prefix and clean location string
issos = False
if query_text.find("sos ") != -1:
query_text = query_text[4:]
issos = True
if query_text.find(" sos") != -1:
query_text = query_text[:-4]
issos = True
if query_text.find("help ") != -1:
query_text = query_text[5:]
if query_text.find(" help") != -1:
query_text = query_text[:-5]
query_result = google_places.nearby_search(location=query_text, keyword='hospital', radius=5000, types=[types.TYPE_HOSPITAL])
number_of_places = 0
message = "Nearby hospitals: \n"
for place in query_result.places:
if number_of_places < 3:
number_of_places += 1
message += place.name
place_info = place.get_details()
message += ", Ph: " + place.local_phone_number + "\n"
else:
break
if issos:
query_result = google_places.nearby_search(location=query_text, keyword='police', radius=5000, types=[types.TYPE_POLICE])
if len(query_result.places) > 0:
place = query_result.places[0]
place.get_details()
message += "\nNearest police station: " + place.name
message += ", Ph: " + place.local_phone_number + "\n"
# except:
# message = technical_issues()
return message
@app.route("/weather", methods=['POST'])
def weather(entities):
message = ""
try:
try:
location = entities['location'][0]['value'].lower()
except:
location = entities['local_search_query'][0]['value']
response = requests.get(url="http://api.openweathermap.org/data/2.5/weather?q=" + location + "&APPID=500d01a6ece6498b1cbf94ed23519119")
dict_response = json.loads(response.text)
temperature_in_celsius = round(dict_response['main']['temp'] - 273.15, 2)
humidity = dict_response['main']['humidity']
weather_description = dict_response['weather'][0]['description']
message = "The weather in " + location + ": " + weather_description + ". "
message += "Average: " + str(temperature_in_celsius) + " C, "
message += "Humidity: " + str(humidity) + "%"
try:
wind_speed = dict_response['wind']['speed']
message += ", Wind: " + str(wind_speed) + " km/h"
except:
message += "."
except:
message = technical_issues()
return message
@app.route("/navigate", methods=['POST'])
def navigate(entities):
try:
try:
destination = entities['to'][0]['value']
except:
destination = entities['search_query'][0]['value']
try:
origin = entities['from'][0]['value']
except:
try:
origin = entities['local_search_query'][0]['value']
except:
origin = entities['location'][0]['value']
print("Navigating from " + origin + " to " + destination + ".")
message = "Directions from " + origin + " to " + destination + ":\n\n"
key = "GSC5hkB0CEmUyk4nI2MY~HxNEzo1P1bHB1sX8EzDJpA~AmYeCHqvBerEI06DBSKWfo4pgB1w9Krgk7EH6lhGqqf3s5RaJArOzWJ-SL6AYVVw"
try:
try:
bingMapsResponse = requests.get(url="http://dev.virtualearth.net/REST/V1/Routes/Driving?wp.0=" + origin + "&wp.1=" + destination + "&avoid=minimizeTolls&key="+key)
bingMaps_dict = json.loads(bingMapsResponse.text)
except:
                message = technical_issues()
return message
print(bingMaps_dict)
resources = bingMaps_dict.get('resourceSets')[0].get('resources')
routeLegs = resources[0].get('routeLegs')
distance = routeLegs[0].get('routeSubLegs')[0].get('travelDistance')
message += "Total Trip Distance: " + str(distance) + " km\n"
duration = routeLegs[0].get('routeSubLegs')[0].get('travelDuration')
message += "Total Trip Duration: " + str(duration/60) + " min \n"
itineraryItems = routeLegs[0].get('itineraryItems')
count = 1
for item in itineraryItems:
message += str(count) + ". " + item.get('instruction').get('text') + " ("
message += str(item.get('travelDistance')) + " km, "
message += str(item.get('travelDuration') / 60 ) + " min)"
message += "\n"
count +=1
except:
message = "We could not find a route from " + origin + " to " + destination + ". Please bear with us as we try to resolve this issue."
# Precaution
if (len(message) > 1536):
message = message[:1533] + "...";
except:
message = technical_issues()
return message
@app.route("/translate", methods=['POST'])
def translate(entities):
message = ""
try:
text_for_translation = entities['phrase_to_translate'][0]['value']
lang = entities['language'][0]['value'].lower()
language = ""
if lang == "spanish":
language = "es"
elif lang == "french":
language = "fr"
elif lang == "german":
language = "de"
elif lang == "chinese":
language = "zh-CHS"
else:
message = "We don't support translation to " + lang + " as of now. Check back later as support is being added."
return message
message = "\"" + text_for_translation + "\" in " + lang + " is \'"
translator = Translator('SMSAssistant', 'fhV+AdYFiK0QfQ4PFys+oQ/T0xiBBVQa32kxxbP55Ks=')
message += translator.translate(text_for_translation, language) + "\'"
if test_mode:
send_sms_to_admin(message)
except:
message = technical_issues()
return message
@app.route("/news", methods=['POST'])
def getNews(entities):
message = ""
try:
try:
newstopic = entities['news_topic'][0]['value'].lower()
# default topic
if newstopic is None:
newstopic = "world"
except:
newstopic = "world"
response = requests.get(url='https://api.datamarket.azure.com/Bing/Search/News?$format=json&Query=%27' + newstopic + "%27", \
auth=(bing_api_key, bing_api_key))
news_dict = json.loads(response.text)
news = news_dict.get('d').get('results')
message = ""
if len(news) >= 5:
message = "Here are the top 5 stories about " + newstopic + ":\n"
for x in range(0, 5):
message += str(x+1) + ". " + news[x].get('Title') + ".\n"
else:
message = "Here are the top news stories about " + newstopic + ":\n"
for item in news:
message += "- " + item.get('Title') + "\n"
if test_mode:
send_sms_to_admin(message)
except:
message = technical_issues()
return message
@app.route("/imdb", methods=['POST'])
def imdb(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("imdb ") != -1:
query_text = query_text[5:]
response = omdb.request(t='' + query_text + '', r='json')
data = json.loads(response.text)
mediatype = data["Type"]
year = data["Year"]
title = data["Title"]
if mediatype == "movie":
message += "Found a Movie, \"" + title + "\" (" + year + ")\n"
elif mediatype == "series":
message += "Found a TV show, \"" + title + "\" (" + year + ")\n"
for key in data:
if key in ["Rated", "Runtime", "Genre", "Director", "Writer"]:
if data[key] != "N/A":
message += key + ": " + data[key] + "\n"
if key == "imdbRating":
message += "IMDB: " + data[key] + "\n"
if data["Plot"] != "N/A":
message += "Plot: " + data["Plot"]
except:
message = technical_issues()
return message
@app.route("/stocks", methods=['POST'])
def stocks(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("stocks ") != -1:
query_text = query_text[7:]
y = Share(query_text)
message += "Trading information for " + y.get_name() + " (" + query_text + ") :\n"
message += "Opened: " + y.get_open() + "\n"
message += "Current: " + y.get_price() + "\n"
message += "Earnings share: " + y.get_earnings_share() + "\n"
message += "Short ratio: " + y.get_short_ratio() + "\n"
message += "Previous close: " + y.get_prev_close() + "\n"
except:
message = technical_issues()
return message
@app.route("/atm", methods=['POST'])
def atm(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("atm near ") != -1:
query_text = query_text[9:]
query_result = google_places.nearby_search(location=query_text, keyword='atm', radius=5000, types=[types.TYPE_ATM])
number_of_places = 0
message = "ATM's near \'" + query_text + "\':\n"
for place in query_result.places:
if number_of_places < 5:
number_of_places += 1
message = message + place.name
place_info = place.get_details()
if place.local_phone_number != None:
message = message + " " + place.local_phone_number
message = message + "\n"
else:
break
except:
message = technical_issues()
return message
@app.route("/define", methods=['POST'])
def define(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("define ") != -1:
topic = query_text[7:]
r = requests.get(url='http://api.duckduckgo.com/?q=' + topic + '&format=json&pretty=1')
message = ""
topic_response = json.loads(r.text)
all_definitions = topic_response['RelatedTopics']
if len(all_definitions) > 0:
top_definitions = all_definitions[0]
definition = top_definitions['Text']
message = "\"" + topic + "\": " + definition
else:
message = "Definition for " + topic + " was not found. We're working on this."
except:
message = technical_issues()
return message
def subscriptions(ph_no, city, state):
conn = mysql.connect()
try:
cursor = conn.cursor()
cursor.execute("INSERT INTO subscribers VALUES (%s, %s, %s)", (ph_no, city, state))
conn.commit()
cursor.close()
conn.close()
except:
cursor.close()
conn.close()
# Main SMS webhook
def process_query(query):
msg = ""
try:
response = requests.get(url='https://api.wit.ai/message?v=20161022&q='+query,headers={'Authorization': 'Bearer TUDKLORVVMITDT4FCJFMAARQAWB2NLJ2'})
except:
msg = technical_issues()
return msg
dict_response = json.loads(response.text)
print(dict_response);
intent = None
confidence = None
entities = None
msg = None
try:
if dict_response['entities']['intent']:
intent = dict_response['entities']['intent'][0]['value']
confidence = dict_response['entities']['intent'][0]['confidence']
entities = dict_response['entities']
print("Entities: ")
print(entities)
except:
msg = no_intent()
return msg
if intent is None or confidence < 0.2:
msg = no_intent()
elif intent == "weather":
msg = weather(entities)
elif intent == "navigate":
msg = navigate(entities)
elif intent == "sos":
msg = sos(dict_response)
elif intent == "translate":
msg = translate(entities)
elif intent == "news":
msg = getNews(entities)
elif intent == "imdb":
msg = imdb(dict_response)
elif intent == "atm":
msg = atm(dict_response)
elif intent == "stocks":
msg = stocks(dict_response)
elif intent == "define":
msg = define(dict_response)
else:
msg = "Feature not supported"
return msg
@app.route("/sms", methods=['POST'])
def sms():
query = request.values.get('Body', None)
resp = twilio.twiml.Response()
msg = ""
if query.lower().startswith('subscribe'):
print("Subscribing...")
words = query.split()
ph_no = words[1]
city = words[2]
state = ""
for w in words[3:]:
state = state + w
subscriptions(ph_no, city.lower(), state.lower())
msg = "Successfully subscribed to emergency services. Thank you for using hello_friend."
else:
msg = process_query(query)
if test_mode:
send_sms_to_admin(query)
resp.message(msg)
return str(resp)
# ------
# Update the json file.
def saveFile():
with open('data/voice.json', 'w') as outfile:
json.dump(data, outfile)
# Open the given json file in data.
try:
with open('data/voice.json') as data_file:
data = json.load(data_file)
except:
data = []
saveFile();
class VoiceForm(Form):
phone_number = StringField('phone_number', validators=[DataRequired()])
title_field = StringField('title_field', validators=[DataRequired()])
password_field = PasswordField('password_field', validators=[DataRequired()])
@app.route("/voice/", methods=['GET', 'POST'])
def voice_page():
form = VoiceForm()
if form.validate_on_submit():
title = str(form.title_field.data)
number = str(form.phone_number.data)
password = str(form.password_field.data)
if password == get_verify_name(2, 4, 2):
client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
routex = "http://hello-frrriend.herokuapp.com/voice/" + str(title)
call = client.calls.create(url=routex, to=number, from_="+13609001701")
flash("Rung " + number + ".")
else:
flash("Invalid secret code, admins are not pleased.")
return render_template('voice.html', form=form, number=number, title=title, showdetails=False, data=data)
return render_template('voice.html', form=form, showdetails=True, data=data)
@app.route('/voice/list', methods=['GET'])
def voice_list():
return dataFormatter(200, "Listing data", data)
@app.route('/voice/add', methods=['POST'])
def voice_add():
d = {}
title = request.values.get('title')
if title is None:
return dataFormatter(404, "Bad request, need title.", [])
d['title'] = title
message = request.values.get('message')
if message is not None:
d['message'] = message
url = request.values.get('url')
if url is not None:
d['url'] = url
p = None
for x in data:
if x['title'] == title:
p = x
if p is not None:
data.remove(p)
data.append(d)
saveFile()
return dataFormatter(201, "Updated", data)
data.append(d)
saveFile()
return dataFormatter(201, "Appended", data)
def voice_add_util(title, message, url):
d = {}
d['title'] = title
if len(message) > 0:
d['message'] = message
if len(url) > 0:
d['url'] = url
p = None
for x in data:
if x['title'] == title:
p = x
if p is not None:
data.remove(p)
data.append(d)
saveFile()
return;
data.append(d)
saveFile()
@app.route('/voice/<title>', methods=['POST', 'GET'])
def voice_title(title):
d = None
for x in data:
if x['title'] == title:
d = x
break
resp = twilio.twiml.Response()
print(d)
if d is None:
resp.say("Don't talk please")
else:
try:
message = d['message']
resp.say(d['message'], voice='Alice', language='en-IN')
except:
print("No message ofr the ")
try:
url = d['url']
resp.play(d['url'])
except:
print("No url in the query")
return str(resp)
if __name__ == "__main__":
app.run(debug=True)
| [
[
[
37,
42
],
[
1460,
1465
]
],
[
[
44,
51
],
[
6471,
6478
],
[
20789,
20796
],
[
23016,
23023
],
[
23169,
23176
],
[
23268,
23275
]
],
[
[
53,
58
],
[
3039,
3044
],
[
4915,
4920
],
[
5881,
5886
],
[
22513,
22518
],
[
22569,
22574
]
],
[
[
60,
68
]
],
[
[
70,
85
],
[
3196,
3211
],
[
3295,
3310
],
[
4570,
4585
],
[
5016,
5031
],
[
5982,
5997
],
[
22638,
22653
],
[
22748,
22763
]
],
[
[
87,
94
],
[
1033,
1040
]
],
[
[
122,
127
],
[
1518,
1523
]
],
[
[
150,
154
],
[
1810,
1814
],
[
3375,
3379
],
[
3562,
3566
],
[
21713,
21717
]
],
[
[
175,
186
],
[
1836,
1847
],
[
1912,
1923
],
[
3402,
3413
],
[
3481,
3492
],
[
3587,
3598
],
[
3631,
3642
],
[
3675,
3686
],
[
21739,
21750
],
[
21814,
21825
]
],
[
[
188,
201
],
[
21891,
21904
]
],
[
[
233,
245
],
[
1876,
1888
],
[
1952,
1964
],
[
3443,
3455
],
[
3523,
3535
],
[
21779,
21791
],
[
21853,
21865
],
[
21935,
21947
]
],
[
[
253,
265
],
[
20833,
20839
],
[
24229,
24235
]
],
[
[
273,
279
],
[
7821,
7827
],
[
7912,
7918
],
[
8067,
8073
],
[
8165,
8171
]
],
[
[
287,
295
],
[
1342,
1350
],
[
9994,
10002
],
[
11635,
11643
],
[
14542,
14550
],
[
18330,
18338
],
[
19313,
19321
]
],
[
[
303,
307
],
[
21637,
21641
],
[
1375,
1379
],
[
10143,
10147
],
[
11812,
11816
],
[
14722,
14726
],
[
15662,
15666
],
[
18462,
18466
],
[
19533,
19537
],
[
21508,
21512
]
],
[
[
315,
319
],
[
15600,
15604
]
],
[
[
345,
357
],
[
559,
571
]
],
[
[
359,
364
],
[
8892,
8897
],
[
9384,
9389
],
[
17501,
17506
]
],
[
[
366,
370
]
],
[
[
403,
413
],
[
13922,
13932
]
],
[
[
440,
445
],
[
16684,
16689
]
],
[
[
470,
486
],
[
6111,
6127
],
[
22282,
22298
]
],
[
[
488,
498
],
[
572,
582
]
],
[
[
543,
556
],
[
8803,
8816
],
[
9297,
9310
],
[
17417,
17430
]
],
[
[
585,
597
],
[
14672,
14684
],
[
14686,
14698
]
],
[
[
749,
767
],
[
6128,
6146
],
[
22299,
22317
]
],
[
[
807,
824
],
[
6148,
6165
],
[
22319,
22336
]
],
[
[
863,
874
],
[
6316,
6327
]
],
[
[
986,
999
],
[
5440,
5453
],
[
5499,
5512
],
[
22891,
22904
],
[
23081,
23094
],
[
23525,
23538
],
[
23606,
23619
]
],
[
[
1186,
1201
],
[
22235,
22250
]
],
[
[
1435,
1444
],
[
14088,
14097
],
[
15236,
15245
],
[
21311,
21320
]
],
[
[
1454,
1457
],
[
1476,
1479
],
[
1527,
1530
],
[
1580,
1583
],
[
1631,
1634
],
[
1690,
1693
],
[
1776,
1779
],
[
2055,
2058
],
[
3704,
3707
],
[
5081,
5084
],
[
5531,
5534
],
[
6371,
6374
],
[
7748,
7751
],
[
7983,
7986
],
[
8236,
8239
],
[
9727,
9730
],
[
10832,
10835
],
[
13172,
13175
],
[
14210,
14213
],
[
15357,
15360
],
[
16443,
16446
],
[
17169,
17172
],
[
18094,
18097
],
[
20730,
20733
],
[
21954,
21957
],
[
22820,
22823
],
[
22934,
22937
],
[
24041,
24044
],
[
24687,
24690
]
],
[
[
1510,
1515
],
[
1761,
1766
],
[
4050,
4055
],
[
5161,
5166
],
[
18955,
18960
]
],
[
[
1802,
1809
],
[
2123,
2130
]
],
[
[
2099,
2108
]
],
[
[
3361,
3374
],
[
3787,
3800
]
],
[
[
3547,
3561
],
[
4646,
4660
],
[
5612,
5626
]
],
[
[
3758,
3772
]
],
[
[
5132,
5146
]
],
[
[
5580,
5596
]
],
[
[
6061,
6079
],
[
2995,
3013
],
[
4392,
4410
],
[
6288,
6306
],
[
6540,
6558
]
],
[
[
6256,
6273
],
[
14111,
14128
],
[
15259,
15276
],
[
21330,
21347
]
],
[
[
6426,
6437
]
],
[
[
6738,
6746
],
[
7835,
7843
]
],
[
[
6960,
6968
],
[
7926,
7934
],
[
8179,
8187
]
],
[
[
7366,
7381
],
[
8081,
8096
]
],
[
[
7794,
7803
],
[
20002,
20011
],
[
20092,
20101
]
],
[
[
8033,
8049
],
[
10791,
10807
],
[
13131,
13147
],
[
14169,
14185
],
[
15316,
15332
],
[
16403,
16419
],
[
17129,
17145
],
[
18054,
18070
],
[
18866,
18882
],
[
19475,
19491
]
],
[
[
8276,
8279
],
[
20270,
20273
]
],
[
[
9771,
9778
],
[
20148,
20155
]
],
[
[
10877,
10885
],
[
20211,
20219
]
],
[
[
13218,
13227
],
[
20335,
20344
]
],
[
[
14251,
14258
],
[
20396,
20403
]
],
[
[
15398,
15402
],
[
20455,
20459
]
],
[
[
16486,
16492
],
[
20577,
20583
]
],
[
[
17209,
17212
],
[
20515,
20518
]
],
[
[
18137,
18143
],
[
20642,
20648
]
],
[
[
18909,
18922
],
[
2775,
2788
],
[
4873,
4886
],
[
5839,
5852
],
[
21112,
21125
]
],
[
[
19250,
19263
],
[
2966,
2979
],
[
6511,
6524
],
[
21283,
21296
]
],
[
[
20770,
20773
]
],
[
[
21438,
21446
],
[
21684,
21692
],
[
23499,
23507
],
[
23584,
23592
],
[
23977,
23985
],
[
24027,
24035
]
],
[
[
21611,
21620
],
[
21647,
21656
]
],
[
[
21630,
21634
],
[
21518,
21522
],
[
22731,
22735
],
[
22812,
22816
],
[
22926,
22930
],
[
23367,
23371
],
[
23453,
23457
],
[
23476,
23480
],
[
23555,
23559
],
[
23565,
23569
],
[
23637,
23641
],
[
23845,
23849
],
[
23931,
23935
],
[
23954,
23958
],
[
24008,
24012
],
[
24144,
24148
]
],
[
[
21670,
21674
],
[
21518,
21522
],
[
22731,
22735
],
[
22812,
22816
],
[
22926,
22930
],
[
23367,
23371
],
[
23453,
23457
],
[
23476,
23480
],
[
23555,
23559
],
[
23565,
23569
],
[
23637,
23641
],
[
23845,
23849
],
[
23931,
23935
],
[
23954,
23958
],
[
24008,
24012
],
[
24144,
24148
]
],
[
[
21703,
21712
],
[
22029,
22038
]
],
[
[
22004,
22014
]
],
[
[
22866,
22876
]
],
[
[
22980,
22989
]
],
[
[
23648,
23662
]
],
[
[
24098,
24109
]
]
] |
AMOUNTS = [
99999999999999999999999999999,
0x0,
0x1,
0x1000000000000000000000000,
0x30000000000000,
1000000000000000000,
0x180000000000000,
100000000000000000,
10000000000000000,
1000000000000000,
0x2,
5000000000000000,
0x20,
0x700000000000000,
0x8,
0x3c00000000000,
0xe00000000000000,
0x400000000000000000000000,
50000000000000000,
500000000000000000,
0x18000000000000,
0x3,
0x80,
0x300000000000000,
0x1000000000000000000000001,
5000000000000000000,
0x1c00000000000000,
0x4,
10000000000000000000,
0xc000000000000,
0x2000,
20000000000000000,
0x40,
200000000000000000,
2000000000000000,
0x800000000000000000000,
0x800000000000000000000000,
0x1000000000000000000000002,
0x400,
0x80000000000000,
0x100000000000000,
0xc00000000000,
0x1800000000000000000,
0x800000000000000000,
0x70000000000000,
250000000000000,
0x380000000000000,
0x8000000000000000000,
0x8000000000000000,
0x1000,
] | [
[
[
0,
7
]
]
] |
"""
Collection of functions to assist PyDoof modules.
"""
from collections.abc import Iterable
from datetime import date
from enum import Enum
def parse_query_params(params):
"""
    Parses a query-parameters dictionary into its proper parameter schema.
    Each key-value pair of the dictionary represents a parameter and its value. The
    function parses each pair based on the value type.
* Parses dates into a string following the "YYYYMMDD" format.
* Parses dictionaries like `parameter: {key: value}` into parameter
`parameter[key]: value`.
* Parses lists like `parameter: [val0, val1]` into parameter
`parameter[]: [val0, val1]`.
* Excludes parameters where its value is `None`.
"""
query_params = {}
for param, value in params.items():
query_params.update(
_parse_param(param, value)
)
return query_params
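# Illustrative example (not part of the original module): expected behaviour of
# parse_query_params for a mixed dictionary, following the rules documented above.
# The keys and values below are hypothetical.
def _example_parse_query_params():
    result = parse_query_params({
        'published': date(2024, 1, 31),  # -> {'published': '20240131'}
        'filter': {'brand': 'acme'},     # -> {'filter[brand]': 'acme'}
        'ids': [1, 2],                   # -> {'ids[]': [1, 2]}
        'missing': None,                 # excluded from the result
    })
    assert result == {
        'published': '20240131',
        'filter[brand]': 'acme',
        'ids[]': [1, 2],
    }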
def _parse_param(param, value):
query_params = {}
if isinstance(value, date):
query_params[param] = value.strftime("%Y%m%d")
elif isinstance(value, dict):
for k, v in value.items():
query_params.update(
_parse_param(f'{param}[{k}]', v)
)
elif isinstance(value, Enum):
query_params[param] = value.value
elif not isinstance(value, str) and isinstance(value, Iterable):
query_params.update(
_dicts_appends(_parse_param(f'{param}[]', v) for v in value)
)
elif value is not None:
query_params[param] = value
return query_params
def _dicts_appends(dicts):
dict_join = {}
for dict_ in dicts:
for key, value in dict_.items():
if key in dict_join:
try:
dict_join[key].append(value)
except AttributeError:
dict_join[key] = [dict_join[key], value]
else:
dict_join[key] = value
return dict_join
| [
[
[
82,
90
],
[
1337,
1345
]
],
[
[
112,
116
],
[
976,
980
]
],
[
[
134,
138
],
[
1230,
1234
]
],
[
[
145,
163
]
],
[
[
900,
912
],
[
833,
845
],
[
1156,
1168
],
[
1404,
1416
]
],
[
[
1555,
1569
],
[
1389,
1403
]
]
] |
# coding=utf-8
# flake8: noqa E302
"""
Test plugin infrastructure and hooks.
"""
import sys
import pytest
# Python 3.5 had some regressions in the unittest.mock module, so use 3rd party mock if available
try:
import mock
except ImportError:
from unittest import mock
import cmd2
from cmd2 import plugin
class Plugin:
"""A mixin class for testing hook registration and calling"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_counters()
def reset_counters(self):
self.called_preparse = 0
self.called_postparsing = 0
self.called_precmd = 0
self.called_postcmd = 0
self.called_cmdfinalization = 0
###
#
# preloop and postloop hooks
    # which share the same signature and are thus interchangeable
#
###
def prepost_hook_one(self) -> None:
"""Method used for preloop or postloop hooks"""
self.poutput("one")
def prepost_hook_two(self) -> None:
"""Another method used for preloop or postloop hooks"""
self.poutput("two")
def prepost_hook_too_many_parameters(self, param) -> None:
"""A preloop or postloop hook with too many parameters"""
pass
def prepost_hook_with_wrong_return_annotation(self) -> bool:
"""A preloop or postloop hook with incorrect return type"""
pass
###
#
# preparse hook
#
###
def preparse(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Preparsing hook"""
self.called_preparse += 1
return data
###
#
# Postparsing hooks
#
###
def postparse_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook"""
self.called_postparsing += 1
return data
def postparse_hook_stop(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with requests application exit"""
self.called_postparsing += 1
data.stop = True
return data
def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with raises an EmptyStatement exception"""
self.called_postparsing += 1
raise cmd2.EmptyStatement
def postparse_hook_exception(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook which raises an exception"""
self.called_postparsing += 1
raise ValueError
def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with too many parameters"""
pass
def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with an undeclared parameter type"""
pass
def postparse_hook_wrong_parameter_annotation(self, data: str) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with the wrong parameter type"""
pass
def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):
"""A postparsing hook with an undeclared return type"""
pass
def postparse_hook_wrong_return_annotation(self, data: cmd2.plugin.PostparsingData) -> str:
"""A postparsing hook with the wrong return type"""
pass
###
#
# precommand hooks, some valid, some invalid
#
###
def precmd(self, statement: cmd2.Statement) -> cmd2.Statement:
"""Override cmd.Cmd method"""
self.called_precmd += 1
return statement
def precmd_hook(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook"""
self.called_precmd += 1
return data
def precmd_hook_emptystatement(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an EmptyStatement exception"""
self.called_precmd += 1
raise cmd2.EmptyStatement
def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an exception"""
self.called_precmd += 1
raise ValueError
def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:
"""A precommand hook with no parameters"""
pass
def precmd_hook_too_many_parameters(self, one: plugin.PrecommandData, two: str) -> plugin.PrecommandData:
"""A precommand hook with too many parameters"""
return one
def precmd_hook_no_parameter_annotation(self, data) -> plugin.PrecommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def precmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PrecommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def precmd_hook_no_return_annotation(self, data: plugin.PrecommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def precmd_hook_wrong_return_annotation(self, data: plugin.PrecommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
###
#
# postcommand hooks, some valid, some invalid
#
###
def postcmd(self, stop: bool, statement: cmd2.Statement) -> bool:
"""Override cmd.Cmd method"""
self.called_postcmd += 1
return stop
def postcmd_hook(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook"""
self.called_postcmd += 1
return data
def postcmd_hook_exception(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook with raises an exception"""
self.called_postcmd += 1
raise ZeroDivisionError
def postcmd_hook_not_enough_parameters(self) -> plugin.PostcommandData:
"""A precommand hook with no parameters"""
pass
def postcmd_hook_too_many_parameters(self, one: plugin.PostcommandData, two: str) -> plugin.PostcommandData:
"""A precommand hook with too many parameters"""
return one
def postcmd_hook_no_parameter_annotation(self, data) -> plugin.PostcommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def postcmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PostcommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def postcmd_hook_no_return_annotation(self, data: plugin.PostcommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def postcmd_hook_wrong_return_annotation(self, data: plugin.PostcommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
###
#
# command finalization hooks, some valid, some invalid
#
###
def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
"""A command finalization hook."""
self.called_cmdfinalization += 1
return data
def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which requests application exit"""
self.called_cmdfinalization += 1
data.stop = True
return data
def cmdfinalization_hook_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises an exception"""
self.called_cmdfinalization += 1
raise ValueError
def cmdfinalization_hook_not_enough_parameters(self) -> plugin.CommandFinalizationData:
"""A command finalization hook with no parameters."""
pass
def cmdfinalization_hook_too_many_parameters(self, one: plugin.CommandFinalizationData, two: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with too many parameters."""
return one
def cmdfinalization_hook_no_parameter_annotation(self, data) -> plugin.CommandFinalizationData:
"""A command finalization hook with no type annotation on the parameter."""
return data
def cmdfinalization_hook_wrong_parameter_annotation(self, data: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with the incorrect type annotation on the parameter."""
return data
def cmdfinalization_hook_no_return_annotation(self, data: plugin.CommandFinalizationData):
"""A command finalizationhook with no type annotation on the return value."""
return data
def cmdfinalization_hook_wrong_return_annotation(self, data: plugin.CommandFinalizationData) -> cmd2.Statement:
"""A command finalization hook with the wrong return type annotation."""
return self.statement_parser.parse('hi there')
class PluggedApp(Plugin, cmd2.Cmd):
"""A sample app with a plugin mixed in"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_say(self, statement):
"""Repeat back the arguments"""
self.poutput(statement)
###
#
# test pre and postloop hooks
#
###
def test_register_preloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_too_many_parameters)
def test_register_preloop_hook_with_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_preloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\nhello\n'
assert not err
def test_preloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.register_preloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\ntwo\nhello\n'
assert not err
def test_register_postloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_too_many_parameters)
def test_register_postloop_hook_with_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_postloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\n'
assert not err
def test_postloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.register_postloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\ntwo\n'
assert not err
###
#
# test preparse hook
#
###
def test_preparse(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.preparse)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_preparse == 1
###
#
# test postparsing hooks
#
###
def test_postparsing_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_too_many_parameters)
def test_postparsing_hook_undeclared_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_parameter_annotation)
def test_postparsing_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_parameter_annotation)
def test_postparsing_hook_undeclared_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_return_annotation)
def test_postparsing_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_return_annotation)
def test_postparsing_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert not app.called_postparsing
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_stop_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
# register another function but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
def test_postparsing_hook_stop_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert not stop
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
def test_postparsing_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
# register another function but it shouldn't be called
app.reset_counters()
    app.register_postparsing_hook(app.postparse_hook)
    stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
def test_postparsing_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_exception(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
###
#
# test precmd hooks
#
###
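# Note: registered precmd hooks run before precmd() itself, so an exception
# raised by a registered hook prevents later hooks and precmd() from running
# (see the per-test comments below).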
def test_register_precmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_too_many_parameters)
def test_register_precmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_parameter_annotation)
def test_register_precmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_parameter_annotation)
def test_register_precmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_return_annotation)
def test_register_precmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_return_annotation)
def test_precmd_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, precmd() should be called
assert app.called_precmd == 1
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with two hooks registered, we should get precmd() and both hooks
assert app.called_precmd == 3
def test_precmd_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 1
# register another function but it shouldn't be called
app.reset_counters()
    app.register_precmd_hook(app.precmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents precmd() from being
# called
assert app.called_precmd == 1
def test_precmd_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register another function and make sure it gets called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the second hook should prevent the third
# hook from being called. since the registered hooks are called before precmd(),
# if a registered hook throws an exception, precmd() is never called
assert app.called_precmd == 2
###
#
# test postcmd hooks
#
###
def test_register_postcmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_too_many_parameters)
def test_register_postcmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_parameter_annotation)
def test_register_postcmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_parameter_annotation)
def test_register_postcmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_return_annotation)
def test_register_postcmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_return_annotation)
def test_postcmd(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, postcmd() should be called
assert app.called_postcmd == 1
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
    # with one hook registered, we should get postcmd() and the hook
assert app.called_postcmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
    # with two hooks registered, we should get postcmd() and both hooks
assert app.called_postcmd == 3
def test_postcmd_exception_first(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# since the registered hooks are called before postcmd(), if a registered
# hook throws an exception, postcmd() is never called. So we should have
# a count of one because we called the hook that raised the exception
assert app.called_postcmd == 1
# register another function but it shouldn't be called
app.reset_counters()
    app.register_postcmd_hook(app.postcmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called
assert app.called_postcmd == 1
def test_postcmd_exception_second(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get the hook and postcmd()
assert app.called_postcmd == 2
# register another function which should be called
app.reset_counters()
    app.register_postcmd_hook(app.postcmd_hook_exception)
    stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
    # the exception raised by the second hook prevents postcmd() from being
    # called. So we have the first hook, and the second hook, which raised
    # the exception
assert app.called_postcmd == 2
###
#
# command finalization
#
###
def test_register_cmdfinalization_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_too_many_parameters)
def test_register_cmdfinalization_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_parameter_annotation)
def test_register_cmdfinalization_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_parameter_annotation)
def test_register_cmdfinalization_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_return_annotation)
def test_register_cmdfinalization_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_return_annotation)
def test_cmdfinalization(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 0
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
def test_cmdfinalization_stop_first(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_stop_second(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_hook_exception(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
| [
[
[
88,
91
],
[
10017,
10020
],
[
10438,
10441
],
[
11308,
11311
],
[
11731,
11734
]
],
[
[
100,
106
],
[
9511,
9517
],
[
9699,
9705
],
[
10792,
10798
],
[
10988,
10994
],
[
12407,
12413
],
[
12605,
12611
],
[
12810,
12816
],
[
13012,
13018
],
[
13211,
13217
],
[
17620,
17626
],
[
17727,
17733
],
[
17913,
17919
],
[
18106,
18112
],
[
18296,
18302
],
[
18483,
18489
],
[
21890,
21896
],
[
21999,
22005
],
[
22188,
22194
],
[
22384,
22390
],
[
22577,
22583
],
[
22767,
22773
],
[
25850,
25856
],
[
25975,
25981
],
[
26188,
26194
],
[
26408,
26414
],
[
26625,
26631
],
[
26839,
26845
]
],
[
[
221,
225
],
[
9999,
10003
],
[
10420,
10424
],
[
11290,
11294
],
[
11713,
11717
]
],
[
[
271,
275
],
[
9999,
10003
],
[
10420,
10424
],
[
11290,
11294
],
[
11713,
11717
]
],
[
[
284,
288
],
[
9136,
9140
],
[
1495,
1499
],
[
1463,
1467
],
[
1728,
1732
],
[
1696,
1700
],
[
1920,
1924
],
[
1888,
1892
],
[
2178,
2182
],
[
2146,
2150
],
[
2331,
2335
],
[
2429,
2433
],
[
2397,
2401
],
[
2646,
2650
],
[
2817,
2821
],
[
2997,
3001
],
[
3167,
3171
],
[
3334,
3338
],
[
3573,
3577
],
[
3554,
3558
],
[
4066,
4070
],
[
5247,
5251
],
[
5442,
5446
],
[
6914,
6918
],
[
7372,
7376
],
[
7332,
7336
],
[
7661,
7665
],
[
7621,
7625
],
[
8957,
8961
]
],
[
[
306,
312
],
[
3743,
3749
],
[
3717,
3723
],
[
3924,
3930
],
[
3898,
3904
],
[
4155,
4161
],
[
4129,
4135
],
[
4345,
4351
],
[
4520,
4526
],
[
4484,
4490
],
[
4679,
4685
],
[
4863,
4869
],
[
5044,
5050
],
[
5221,
5227
],
[
5619,
5625
],
[
5592,
5598
],
[
5800,
5806
],
[
5773,
5779
],
[
6000,
6006
],
[
6178,
6184
],
[
6141,
6147
],
[
6339,
6345
],
[
6525,
6531
],
[
6708,
6714
],
[
6887,
6893
],
[
7149,
7155
],
[
7114,
7120
],
[
7893,
7899
],
[
8106,
8112
],
[
8061,
8067
],
[
8294,
8300
],
[
8507,
8513
],
[
8717,
8723
],
[
8922,
8928
]
],
[
[
321,
327
],
[
9128,
9134
]
],
[
[
9117,
9127
],
[
9489,
9499
],
[
9677,
9687
],
[
10055,
10065
],
[
10476,
10486
],
[
10770,
10780
],
[
10966,
10976
],
[
11346,
11356
],
[
11769,
11779
],
[
12070,
12080
],
[
12385,
12395
],
[
12583,
12593
],
[
12788,
12798
],
[
12990,
13000
],
[
13189,
13199
],
[
13365,
13375
],
[
14138,
14148
],
[
14609,
14619
],
[
15334,
15344
],
[
15979,
15989
],
[
16939,
16949
],
[
17598,
17608
],
[
17891,
17901
],
[
18084,
18094
],
[
18274,
18284
],
[
18461,
18471
],
[
18624,
18634
],
[
19574,
19584
],
[
20477,
20487
],
[
21868,
21878
],
[
22166,
22176
],
[
22362,
22372
],
[
22555,
22565
],
[
22745,
22755
],
[
22906,
22916
],
[
23855,
23865
],
[
24856,
24866
],
[
25828,
25838
],
[
26166,
26176
],
[
26386,
26396
],
[
26603,
26613
],
[
26817,
26827
],
[
27002,
27012
],
[
27782,
27792
],
[
28172,
28182
],
[
28565,
28575
]
],
[
[
9429,
9475
]
],
[
[
9614,
9663
]
],
[
[
9811,
9828
]
],
[
[
10231,
10249
]
],
[
[
10709,
10756
]
],
[
[
10896,
10952
]
],
[
[
11101,
11119
]
],
[
[
11523,
11542
]
],
[
[
12037,
12050
]
],
[
[
12330,
12371
]
],
[
[
12516,
12569
]
],
[
[
12726,
12774
]
],
[
[
12926,
12976
]
],
[
[
13130,
13175
]
],
[
[
13324,
13345
]
],
[
[
14086,
14118
]
],
[
[
14556,
14589
]
],
[
[
15272,
15314
]
],
[
[
15916,
15959
]
],
[
[
16888,
16919
]
],
[
[
17543,
17584
]
],
[
[
17828,
17877
]
],
[
[
18018,
18070
]
],
[
[
18214,
18260
]
],
[
[
18398,
18447
]
],
[
[
18588,
18604
]
],
[
[
19517,
19554
]
],
[
[
20419,
20457
]
],
[
[
21812,
21854
]
],
[
[
22102,
22152
]
],
[
[
22295,
22348
]
],
[
[
22494,
22541
]
],
[
[
22681,
22731
]
],
[
[
22874,
22886
]
],
[
[
23807,
23835
]
],
[
[
24807,
24836
]
],
[
[
25764,
25814
]
],
[
[
26094,
26152
]
],
[
[
26311,
26372
]
],
[
[
26534,
26589
]
],
[
[
26745,
26803
]
],
[
[
26962,
26982
]
],
[
[
27731,
27762
]
],
[
[
28120,
28152
]
],
[
[
28510,
28545
]
]
] |
import json
import os
from FastAutoAugment.common.common import get_logger, common_init, expdir_abspath
from FastAutoAugment.data_aug.train import train_and_eval
if __name__ == '__main__':
conf = common_init(config_filepath='confs/aug_train_cifar.yaml',
param_args=["--autoaug.loader.aug", "fa_reduced_cifar10",
"--common.experiment_name", "autoaug_train"])
logger = get_logger()
import time
t = time.time()
save_path = expdir_abspath('model.pth')
# result = train_and_eval(conf, val_ratio=conf['val_ratio'], val_fold=conf['val_fold'],
# save_path=save_path, only_eval=conf['only_eval'], metric='test')
# TODO: Will fail if val_ratio=0 since we are not using latest training infrastructure
# TODO: Move val_ratio, val_fold, metric to config file
result = train_and_eval(conf, val_ratio=0.2, val_fold=0,
save_path=save_path, only_eval=False, metric='test')
elapsed = time.time() - t
logger.info('training done.')
logger.info('model: %s' % conf['autoaug']['model'])
logger.info('augmentation: %s' % conf['autoaug']['loader']['aug'])
logger.info('\n' + json.dumps(result, indent=4))
logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
logger.info('Save path: %s' % save_path)
| [
[
[
7,
11
],
[
1226,
1230
]
],
[
[
19,
21
]
],
[
[
64,
74
],
[
435,
445
]
],
[
[
76,
87
],
[
202,
213
]
],
[
[
89,
103
],
[
501,
515
]
],
[
[
147,
161
],
[
882,
896
]
],
[
[
195,
199
],
[
897,
901
],
[
1106,
1110
],
[
1169,
1173
]
],
[
[
426,
432
],
[
1046,
1052
],
[
1080,
1086
],
[
1136,
1142
],
[
1207,
1213
],
[
1260,
1266
],
[
1324,
1330
],
[
1400,
1406
]
],
[
[
460,
464
],
[
473,
477
],
[
1025,
1029
]
],
[
[
469,
470
],
[
1039,
1040
]
],
[
[
489,
498
],
[
968,
977
],
[
1430,
1439
]
],
[
[
873,
879
],
[
1237,
1243
],
[
1374,
1380
]
],
[
[
1015,
1022
],
[
1302,
1309
]
]
] |
# coding: utf-8
"""
ESPER API REFERENCE
OpenAPI spec version: 1.0.0
Contact: developer@esper.io
---------------------------------------------------------
Copyright 2019 Shoonya Enterprises Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pprint
import re
import six
from esperclient.models.app_install import AppInstall
class InlineResponse2005(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'int',
'next': 'str',
'previous': 'str',
'results': 'list[AppInstall]'
}
attribute_map = {
'count': 'count',
'next': 'next',
'previous': 'previous',
'results': 'results'
}
def __init__(self, count=None, next=None, previous=None, results=None):
"""InlineResponse2005 - a model defined in Swagger"""
self._count = None
self._next = None
self._previous = None
self._results = None
self.discriminator = None
self.count = count
if next is not None:
self.next = next
if previous is not None:
self.previous = previous
self.results = results
@property
def count(self):
"""Gets the count of this InlineResponse2005.
:return: The count of this InlineResponse2005.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this InlineResponse2005.
:param count: The count of this InlineResponse2005.
:type: int
"""
if count is None:
raise ValueError("Invalid value for `count`, must not be `None`")
self._count = count
@property
def next(self):
"""Gets the next of this InlineResponse2005.
:return: The next of this InlineResponse2005.
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this InlineResponse2005.
:param next: The next of this InlineResponse2005.
:type: str
"""
self._next = next
@property
def previous(self):
"""Gets the previous of this InlineResponse2005.
:return: The previous of this InlineResponse2005.
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this InlineResponse2005.
:param previous: The previous of this InlineResponse2005.
:type: str
"""
self._previous = previous
@property
def results(self):
"""Gets the results of this InlineResponse2005.
:return: The results of this InlineResponse2005.
:rtype: list[AppInstall]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this InlineResponse2005.
:param results: The results of this InlineResponse2005.
:type: list[AppInstall]
"""
if results is None:
raise ValueError("Invalid value for `results`, must not be `None`")
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
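        # Recursively convert nested models, lists and dicts to plain Python
        # values so the result can be serialized (e.g. to JSON).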
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse2005, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2005):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
[
[
736,
742
],
[
5062,
5068
]
],
[
[
750,
752
]
],
[
[
761,
764
],
[
4129,
4132
]
],
[
[
809,
819
]
],
[
[
828,
846
],
[
4833,
4851
],
[
5302,
5320
]
]
] |
from . import Link
def iterate_words(lines):
for line in lines:
words = line.split()
if len(words) == 0:
continue
for word in words[:-1]:
yield word, is_stop_word(word)
yield words[-1], True # EOL is considered a stop word
def is_stop_word(word):
return any(word.endswith(stopchar) for stopchar in '.;?!')
def tokenize(source, link_length):
head = []
end = []
is_start = True
words_iter = iterate_words(source)
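    # Prime the sliding window with the first link_length - 1 words (and their
    # stop-word flags) before any links are emitted.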
while len(head) < link_length - 1:
word, is_end = next(words_iter)
head += [word]
end += [is_end]
for word, is_end in iterate_words(source):
yield Link(head, word, is_start, is_end)
head = head[1:] + [word]
# If the start of the current link is a stop word, the next link
# is a starting link
is_start = end[0]
end = end[1:] + [is_end]
| [
[
[
14,
18
],
[
684,
688
]
],
[
[
24,
37
],
[
473,
486
],
[
647,
660
]
],
[
[
290,
302
],
[
204,
216
]
],
[
[
378,
386
]
]
] |
from django.shortcuts import render
from catalog.models import Book, Author, BookInstance, Genre
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available = BookInstance.objects.filter(status__exact='a').count()
# The 'all()' is implied by default.
num_authors = Author.objects.count()
# Number of visits to this view, as counted in the session variable.
num_visits = request.session.get('num_visits', 1)
request.session['num_visits'] = num_visits + 1
context = {
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors,
'num_visits': num_visits,
}
# Render the HTML template index.html with the data in the context variable
return render(request, 'index.html', context=context)
from django.views import generic
class BookListView(generic.ListView):
model = Book
paginate_by = 2
class BookDetailView(generic.DetailView):
model = Book
def book_detail_view(request, primary_key):
try:
book = Book.objects.get(pk=primary_key)
except Book.DoesNotExist:
raise Http404('Book does not exist')
return render(request, 'catalog/book_detail.html', context={'book': book})
class AuthorListView(generic.ListView):
model = Author
paginate_by = 2
class AuthorDetailView(generic.DetailView):
model = Author
class LoanedBooksByUserListView(LoginRequiredMixin,generic.ListView):
"""Generic class-based view listing books on loan to current user."""
model = BookInstance
template_name ='catalog/bookinstance_list_borrowed_user.html'
paginate_by = 2
def get_queryset(self):
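        # Only BookInstance records borrowed by the current user that are
        # still on loan (status 'o'), ordered by due date.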
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
# Added as part of challenge!
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
"""Generic class-based view listing all books on loan. Only visible to users with can_mark_returned permission."""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by('due_back')
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from django.contrib.auth.decorators import login_required, permission_required
# from .forms import RenewBookForm
from catalog.forms import RenewBookForm
@login_required
@permission_required('catalog.can_mark_returned', raise_exception=True)
def renew_book_librarian(request, pk):
"""View function for renewing a specific BookInstance by librarian."""
book_instance = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
book_instance.due_back = form.cleaned_data['renewal_date']
book_instance.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed'))
# If this is a GET (or any other method) create the default form
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date})
context = {
'form': form,
'book_instance': book_instance,
}
return render(request, 'catalog/book_renew_librarian.html', context)
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from catalog.models import Author
class AuthorCreate(CreateView):
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
initial = {'date_of_death': '11/06/2020'}
class AuthorUpdate(UpdateView):
model = Author
fields = '__all__' # Not recommended (potential security issue if more fields added)
class AuthorDelete(DeleteView):
model = Author
success_url = reverse_lazy('authors')
class BookCreate(CreateView):
model = Book
fields = ['title', 'author', 'summary', 'isbn', 'genre', 'language']
# initial = {'date_of_death': '11/06/2020'} | [
[
[
29,
35
],
[
1077,
1083
],
[
1485,
1491
],
[
4182,
4188
]
],
[
[
63,
67
],
[
1209,
1213
],
[
1290,
1294
],
[
4842,
4846
],
[
292,
296
],
[
1365,
1369
],
[
1409,
1413
]
],
[
[
69,
75
],
[
1606,
1612
],
[
1691,
1697
]
],
[
[
77,
89
],
[
1856,
1868
],
[
2403,
2415
],
[
339,
351
],
[
442,
454
],
[
1999,
2011
],
[
2601,
2613
],
[
3208,
3220
]
],
[
[
91,
96
]
],
[
[
136,
154
],
[
1732,
1750
]
],
[
[
162,
167
]
],
[
[
1150,
1157
],
[
1178,
1185
],
[
1257,
1264
],
[
1575,
1582
],
[
1658,
1665
],
[
1751,
1758
],
[
2253,
2260
]
],
[
[
1165,
1177
]
],
[
[
1242,
1256
]
],
[
[
1301,
1317
]
],
[
[
1560,
1574
]
],
[
[
1641,
1657
]
],
[
[
1706,
1731
]
],
[
[
2173,
2196
],
[
2228,
2251
]
],
[
[
2205,
2227
]
],
[
[
2700,
2717
],
[
3190,
3207
]
],
[
[
2742,
2762
],
[
3797,
3817
]
],
[
[
2787,
2794
],
[
3818,
3825
]
],
[
[
2802,
2810
],
[
3955,
3963
],
[
3979,
3987
]
],
[
[
2854,
2868
],
[
2969,
2983
]
],
[
[
2870,
2889
],
[
2985,
3004
]
],
[
[
2952,
2965
],
[
3425,
3438
],
[
4022,
4035
]
],
[
[
3060,
3080
]
],
[
[
4284,
4294
],
[
4411,
4421
],
[
4817,
4827
]
],
[
[
4296,
4306
],
[
4584,
4594
]
],
[
[
4308,
4318
],
[
4725,
4735
]
],
[
[
4343,
4355
],
[
4775,
4787
]
],
[
[
4384,
4390
],
[
4436,
4442
],
[
4609,
4615
],
[
4750,
4756
],
[
557,
563
]
],
[
[
4398,
4410
]
],
[
[
4571,
4583
]
],
[
[
4712,
4724
]
],
[
[
4806,
4816
]
]
] |
import enum
@enum.unique
class Flag(enum.IntEnum):
NOT_NULL = 1
PRI_KEY = 2
UNIQUE_KEY = 4
MULTIPLE_KEY = 8
BLOB = 16
UNSIGNED = 32
ZEROFILL = 64
BINARY = 128
ENUM = 256
AUTO_INCREMENT = 512
TIMESTAMP = 1024
SET = 2048
PART_KEY = 16384
GROUP = 32767
UNIQUE = 65536
| [
[
[
7,
11
],
[
38,
42
],
[
15,
19
]
],
[
[
33,
37
]
]
] |
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
from distutils.version import StrictVersion
import logging as log
import os
import re
import shlex
import subprocess
import sys
# Display INFO log messages and up.
log.basicConfig(level=log.INFO, format="%(levelname)s: %(message)s")
def get_tool_requirements_path():
'''Return the path to tool_requirements.py, at the top of the repo'''
# top_src_dir is the top of the repository
top_src_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'..'))
return os.path.join(top_src_dir, 'tool_requirements.py')
class ReqErr(Exception):
def __init__(self, path, msg):
self.path = path
self.msg = msg
def __str__(self):
return ('Error parsing tool requirements from {!r}: {}'
.format(self.path, self.msg))
class ToolReq:
# A subclass can set this to configure the command that's run to get the
# version of a tool. If tool_cmd is None, get_version will call "self.tool
# --version".
tool_cmd = None
# Used by get_version. If not None, this is a dictionary that's added to
# the environment when running the command.
tool_env = None
# A subclass can set this to configure _parse_version_output. If set, it
# should be a Regex object with a single capturing group that captures the
# version.
version_regex = None
def __init__(self, tool, min_version):
self.tool = tool
self.min_version = min_version
self.optional = False
def _get_tool_cmd(self):
'''Return the command to run to get the installed version'''
return self.tool_cmd or [self.tool, '--version']
def _get_version(self):
'''Run the tool to get the installed version.
Raises a RuntimeError on failure. The default version uses the class
variable tool_cmd to figure out what to run.
'''
def _parse_version_output(self, stdout):
'''Parse the nonempty stdout to get a version number
Raises a ValueError on failure. The default implementation returns the
last word of the first line if version_regex is None or the first match
for version_regex if it is not None.
'''
if self.version_regex is None:
line0 = stdout.split('\n', 1)[0]
words = line0.rsplit(None, 1)
if not words:
raise ValueError('Empty first line.')
return words[-1]
for line in stdout.split('\n'):
match = self.version_regex.match(line.rstrip())
if match is not None:
return match.group(1)
raise ValueError('No line matched version regex.')
def get_version(self):
'''Run the tool to get a version.
Returns a version string on success. Raises a RuntimeError on failure.
The default version uses the class variable tool_cmd to figure out what
to run.
'''
cmd = self._get_tool_cmd()
cmd_txt = ' '.join(shlex.quote(w) for w in cmd)
env = None
if self.tool_env is not None:
env = os.environ.copy()
env.update(self.tool_env)
try:
proc = subprocess.run(cmd,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
env=env)
except (subprocess.CalledProcessError, FileNotFoundError) as err:
env_msg = ('' if not self.tool_env else
' (with environment overrides: {})'
.format(', '.join('{}={}'.format(k, v)
for k, v in self.tool_env.items())))
raise RuntimeError('Failed to run {!r}{} to check version: {}'
.format(cmd_txt, env_msg, err))
if not proc.stdout:
raise RuntimeError('No output from running {!r} to check version.'
.format(cmd_txt))
try:
return self._parse_version_output(proc.stdout)
except ValueError as err:
raise RuntimeError('Bad output from running {!r} '
'to check version: {}'
.format(cmd_txt, err))
def to_semver(self, version, from_req):
'''Convert a tool version to semantic versioning format
If from_req is true, this version comes from the requirements file
(rather than being reported from an installed application). That might
mean stricter checking. If version is not a known format, raises a
ValueError.
'''
return version
def check(self):
'''Get the installed version and check it matches the requirements
Returns (is_good, msg). is_good is true if we matched the requirements
and false otherwise. msg describes what happened (an error message on
failure, or extra information on success).
'''
try:
min_semver = self.to_semver(self.min_version, True)
except ValueError as err:
return (False,
'Failed to convert requirement to semantic version: {}'
.format(err))
try:
min_sv = StrictVersion(min_semver)
except ValueError as err:
return (False,
'Bad semver inferred from required version ({}): {}'
.format(min_semver, err))
try:
actual_version = self.get_version()
except RuntimeError as err:
return (False, str(err))
try:
actual_semver = self.to_semver(actual_version, False)
except ValueError as err:
return (False,
'Failed to convert installed to semantic version: {}'
.format(err))
try:
actual_sv = StrictVersion(actual_semver)
except ValueError as err:
return (False,
'Bad semver inferred from installed version ({}): {}'
.format(actual_semver, err))
if actual_sv < min_sv:
return (False,
'Installed version is too old: '
'found version {}, but need at least {}'
.format(actual_version, self.min_version))
return (True,
'Sufficiently recent version (found {}; needed {})'
.format(actual_version, self.min_version))
class VerilatorToolReq(ToolReq):
def get_version(self):
try:
# Note: "verilator" needs to be called through a shell and with all
# arguments in a string, as it doesn't have a shebang, but instead
# relies on perl magic to parse command line arguments.
version_str = subprocess.run('verilator --version', shell=True,
check=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise RuntimeError('Unable to call Verilator to check version: {}'
.format(err)) from None
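        # The output starts with something like "Verilator 4.210 ...", so the
        # second whitespace-separated token is the version number.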
return version_str.stdout.split(' ')[1].strip()
class VeribleToolReq(ToolReq):
tool_cmd = ['verible-verilog-lint', '--version']
def to_semver(self, version, from_req):
# Drop the hash suffix and convert into version string that
# is compatible with StrictVersion in check_version below.
# Example: v0.0-808-g1e17daa -> 0.0.808
m = re.fullmatch(r'v([0-9]+)\.([0-9]+)-([0-9]+)-g[0-9a-f]+$', version)
if m is None:
raise ValueError("{} has invalid version string format."
.format(version))
return '.'.join(m.group(1, 2, 3))
class VivadoToolReq(ToolReq):
tool_cmd = ['vivado', '-version']
version_regex = re.compile(r'Vivado v(.*)\s')
def to_semver(self, version, from_req):
# Regular Vivado releases just have a major and minor version.
# In this case, we set the patch level to 0.
m = re.fullmatch(r'([0-9]+)\.([0-9]+)(?:\.([0-9]+))?', version)
if m is None:
raise ValueError("{} has invalid version string format."
.format(version))
return '.'.join((m.group(1), m.group(2), m.group(3) or '0'))
class VcsToolReq(ToolReq):
tool_cmd = ['vcs', '-full64', '-ID']
tool_env = {'VCS_ARCH_OVERRIDE': 'linux'}
version_regex = re.compile(r'Compiler version = VCS [A-Z]-(.*)')
def to_semver(self, version, from_req):
# VCS has a rather strange numbering scheme, where the most general
# versions look something like this:
#
# Q-2020.03-SP1-2
#
# Our version_regex strips out the "Q" part (a "platform prefix")
# already. A version always has the "2020.03" (year and month) part,
# and may also have an -SP<n> and/or -<patch> suffix.
#
# Since StrictVersion expects a 3 digit versioning scheme, we multiply
# any SP number by 100, which should work as long as the patch version
# isn't greater than 99.
#
# Some VCS builds also report other cruft (like _Full64) after this
# version number. If from_req is False, allow (and ignore) that too.
regex = r'([0-9]+).([0-9]+)(?:-SP([0-9]+))?(?:-([0-9]+))?'
if from_req:
regex += '$'
match = re.match(regex, version)
if match is None:
raise ValueError("{!r} is not a recognised VCS version string."
.format(version))
major = match.group(1)
minor = match.group(2)
sp = int(match.group(3) or 0)
patch = int(match.group(4) or 0)
comb = str(sp * 100 + patch)
return '{}.{}{}'.format(major, minor, comb)
class PyModuleToolReq(ToolReq):
'''A tool in a Python module (its version can be found by running pip)'''
version_regex = re.compile(r'Version: (.*)')
def _get_tool_cmd(self):
return ['pip3', 'show', self.tool]
def dict_to_tool_req(path, tool, raw):
'''Parse a dict (as read from Python) as a ToolReq
    Required keys: min_version. Optional keys: as_needed.
'''
where = 'Dict for {} in __TOOL_REQUIREMENTS__'.format(tool)
# We operate in place on the dictionary. Take a copy to avoid an
# obnoxious API.
raw = raw.copy()
if 'min_version' not in raw:
raise ReqErr(path,
'{} is missing required key: "min_version".'
.format(where))
min_version = raw['min_version']
if not isinstance(min_version, str):
raise ReqErr(path,
'{} has min_version that is not a string.'
.format(where))
del raw['min_version']
as_needed = False
if 'as_needed' in raw:
as_needed = raw['as_needed']
if not isinstance(as_needed, bool):
raise ReqErr(path,
'{} has as_needed that is not a bool.'
.format(where))
del raw['as_needed']
if raw:
raise ReqErr(path,
'{} has unexpected keys: {}.'
.format(where, ', '.join(raw.keys())))
classes = {
'edalize': PyModuleToolReq,
'vcs': VcsToolReq,
'verible': VeribleToolReq,
'verilator': VerilatorToolReq,
'vivado': VivadoToolReq,
}
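    # Tools without a specialised subclass fall back to the generic ToolReq.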
cls = classes.get(tool, ToolReq)
ret = cls(tool, min_version)
ret.as_needed = as_needed
return ret
def read_tool_requirements(path=None):
'''Read tool requirements from a Python file'''
if path is None:
path = get_tool_requirements_path()
with open(path, 'r') as pyfile:
globs = {}
exec(pyfile.read(), globs)
# We expect the exec call to have populated globs with a
# __TOOL_REQUIREMENTS__ dictionary.
raw = globs.get('__TOOL_REQUIREMENTS__')
if raw is None:
raise ReqErr(path,
                         'The Python file did not define '
'__TOOL_REQUIREMENTS__.')
# raw should be a dictionary (keyed by tool name)
if not isinstance(raw, dict):
raise ReqErr(path, '__TOOL_REQUIREMENTS__ is not a dict.')
reqs = {}
for tool, raw_val in raw.items():
if not isinstance(tool, str):
raise ReqErr(path,
'Invalid key in __TOOL_REQUIREMENTS__: {!r}'
.format(tool))
if isinstance(raw_val, str):
# Shorthand notation: value is just a string, which we
# interpret as a minimum version
raw_val = {'min_version': raw_val}
if not isinstance(raw_val, dict):
raise ReqErr(path,
'Value for {} in __TOOL_REQUIREMENTS__ '
'is not a string or dict.'.format(tool))
reqs[tool] = dict_to_tool_req(path, tool, raw_val)
return reqs
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tool', nargs='*')
args = parser.parse_args()
# Get tool requirements
try:
tool_requirements = read_tool_requirements()
except ReqErr as err:
log.error(str(err))
return 1
pending_tools = set(args.tool)
missing_tools = []
for tool, req in tool_requirements.items():
if req.as_needed and tool not in pending_tools:
continue
pending_tools.discard(tool)
good, msg = req.check()
if not good:
log.error('Failed tool requirement for {}: {}'
.format(tool, msg))
missing_tools.append(tool)
else:
log.info('Tool {} present: {}'
.format(tool, msg))
all_good = True
if missing_tools:
log.error("Tool requirements not fulfilled. "
"Please update tools ({}) and retry."
.format(', '.join(missing_tools)))
all_good = False
if pending_tools:
log.error("Some tools specified on command line don't appear in "
"tool requirements file: {}"
.format(', '.join(sorted(pending_tools))))
all_good = False
return 0 if all_good else 1
if __name__ == "__main__":
sys.exit(main())
| [
[
[
178,
186
],
[
13498,
13506
]
],
[
[
217,
230
],
[
5517,
5530
],
[
6144,
6157
]
],
[
[
238,
252
],
[
352,
355
],
[
374,
377
],
[
13723,
13726
],
[
14046,
14049
],
[
14200,
14203
],
[
14323,
14326
],
[
14534,
14537
]
],
[
[
260,
262
],
[
596,
598
],
[
613,
615
],
[
626,
628
],
[
720,
722
],
[
3308,
3310
]
],
[
[
270,
272
],
[
8239,
8241
],
[
8854,
8856
],
[
10361,
10363
],
[
7901,
7903
],
[
8450,
8452
],
[
9825,
9827
]
],
[
[
280,
285
],
[
3203,
3208
]
],
[
[
293,
303
],
[
3397,
3407
],
[
3504,
3514
],
[
3639,
3649
],
[
7071,
7081
],
[
7181,
7191
],
[
7246,
7256
],
[
7346,
7356
]
],
[
[
311,
314
],
[
14799,
14802
]
],
[
[
427,
453
],
[
12080,
12106
]
],
[
[
778,
784
],
[
10846,
10852
],
[
11054,
11060
],
[
11344,
11350
],
[
11518,
11524
],
[
12401,
12407
],
[
12642,
12648
],
[
12820,
12826
],
[
13233,
13239
],
[
13700,
13706
]
],
[
[
1022,
1029
],
[
6768,
6775
],
[
7598,
7605
],
[
8171,
8178
],
[
8737,
8744
],
[
10253,
10260
],
[
11863,
11870
]
],
[
[
6751,
6767
],
[
11778,
11794
]
],
[
[
7583,
7597
],
[
11741,
11755
]
],
[
[
8157,
8170
],
[
11814,
11827
]
],
[
[
8726,
8736
],
[
11710,
11720
]
],
[
[
10237,
10252
],
[
11678,
11693
]
],
[
[
10469,
10485
],
[
13412,
13428
]
],
[
[
11957,
11979
],
[
13664,
13686
]
],
[
[
13477,
13481
],
[
14808,
14812
]
]
] |
# -*- coding: utf-8 -*-
# @Time : 2020/10/11 10:58 AM
# @Author : TaoWang
# @Description : Parameter configuration
import argparse
def ArgumentParser():
parser = argparse.ArgumentParser()
parser.add_argument('--embed_size', type=int, default=300, help="embedding size of word embedding")
parser.add_argument("--epoch",type=int,default=1,help="epoch of training")
parser.add_argument("--cuda",type=bool,default=True,help="whether use gpu")
parser.add_argument("--gpu",type=int,default=0,help="whether use gpu")
parser.add_argument("--learning_rate",type=float,default=0.001,help="learning rate during training")
parser.add_argument("--batch_size",type=int,default=32,help="batch size during training")
parser.add_argument("--min_count",type=int,default=20,help="min count of words")
parser.add_argument("--window_size",type=int,default=2,help="min count of words")
parser.add_argument("--x_max",type=int,default=100,help="x_max of glove")
parser.add_argument("--alpha",type=float,default=0.75,help="alpha of glove")
return parser.parse_args(args=[])
| [
[
[
104,
112
],
[
150,
158
]
],
[
[
119,
133
]
]
] |
"""
The ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module
exports PyTorch models with the following flavors:
PyTorch (native) format
This is the main flavor that can be loaded back into PyTorch.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import logging
import os
import yaml
import cloudpickle
import numpy as np
import pandas as pd
from distutils.version import LooseVersion
import posixpath
import mlflow
import shutil
import mlflow.pyfunc.utils as pyfunc_utils
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, ModelSignature
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST
from mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.annotations import experimental
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import _copy_file_or_tree, TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.utils.autologging_utils import autologging_integration, safe_patch
FLAVOR_NAME = "pytorch"
_SERIALIZED_TORCH_MODEL_FILE_NAME = "model.pth"
_PICKLE_MODULE_INFO_FILE_NAME = "pickle_module_info.txt"
_EXTRA_FILES_KEY = "extra_files"
_REQUIREMENTS_FILE_KEY = "requirements_file"
_logger = logging.getLogger(__name__)
def get_default_conda_env():
"""
:return: The default Conda environment as a dictionary for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
.. code-block:: python
:caption: Example
import mlflow.pytorch
# Log PyTorch model
with mlflow.start_run() as run:
mlflow.pytorch.log_model(model, "model")
# Fetch the associated conda environment
env = mlflow.pytorch.get_default_conda_env()
print("conda env: {}".format(env))
.. code-block:: text
:caption: Output
conda env {'name': 'mlflow-env',
'channels': ['defaults', 'conda-forge', 'pytorch'],
'dependencies': ['python=3.7.5', 'pytorch=1.5.1',
'torchvision=0.6.1',
'pip', {'pip': ['mlflow', 'cloudpickle==1.6.0']}]}
"""
import torch
import torchvision
return _mlflow_conda_env(
additional_conda_deps=[
"pytorch={}".format(torch.__version__),
"torchvision={}".format(torchvision.__version__),
],
additional_pip_deps=[
# We include CloudPickle in the default environment because
# it's required by the default pickle module used by `save_model()`
# and `log_model()`: `mlflow.pytorch.pickle_module`.
"cloudpickle=={}".format(cloudpickle.__version__)
],
additional_conda_channels=["pytorch"],
)
def log_model(
pytorch_model,
artifact_path,
conda_env=None,
code_paths=None,
pickle_module=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
requirements_file=None,
extra_files=None,
**kwargs
):
"""
Log a PyTorch model as an MLflow artifact for the current run.
:param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of
``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``
or ``torch.jit.trace``.
                          The model accepts a single ``torch.FloatTensor`` as
                          input and produces a single output tensor.
If saving an eager model, any code dependencies of the
model's class, including the class definition itself, should be
included in one of the following locations:
- The package(s) listed in the model's Conda environment, specified
by the ``conda_env`` parameter.
- One or more of the files specified by the ``code_paths`` parameter.
:param artifact_path: Run-relative artifact path.
    :param conda_env: Path to a Conda environment file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model. The
following is an *example* dictionary representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'pytorch=0.4.1',
'torchvision=0.2.1'
]
}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
``pytorch_model``. This is passed as the ``pickle_module`` parameter
to ``torch.save()``. By default, this module is also used to
deserialize ("unpickle") the PyTorch model at load time.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
                                   being created and to be in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param requirements_file: A string containing the path to requirements file. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``requirements_file`` string -
requirements_file = "s3://my-bucket/path/to/my_file"
In this case, the ``"my_file"`` requirements file is downloaded from S3.
If ``None``, no requirements file is added to the model.
:param extra_files: A list containing the paths to corresponding extra files. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``extra_files`` list -
extra_files = ["s3://my-bucket/path/to/my_file1",
"s3://my-bucket/path/to/my_file2"]
                        In this case, the ``"my_file1 & my_file2"`` extra files are downloaded from S3.
If ``None``, no extra files are added to the model.
:param kwargs: kwargs to pass to ``torch.save`` method.
.. code-block:: python
:caption: Example
import numpy as np
import torch
import mlflow.pytorch
class LinearNNModel(torch.nn.Module):
def __init__(self):
super(LinearNNModel, self).__init__()
self.linear = torch.nn.Linear(1, 1) # One in and one out
def forward(self, x):
y_pred = self.linear(x)
return y_pred
def gen_data():
# Example linear model modified to use y = 2x
# from https://github.com/hunkim/PyTorchZeroToAll
# X training data, y labels
X = torch.arange(1.0, 25.0).view(-1, 1)
y = torch.from_numpy(np.array([x * 2 for x in X])).view(-1, 1)
return X, y
# Define model, loss, and optimizer
model = LinearNNModel()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
# Training loop
epochs = 250
X, y = gen_data()
for epoch in range(epochs):
# Forward pass: Compute predicted y by passing X to the model
y_pred = model(X)
# Compute the loss
loss = criterion(y_pred, y)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Log the model
with mlflow.start_run() as run:
mlflow.pytorch.log_model(model, "model")
# convert to scripted model and log the model
scripted_pytorch_model = torch.jit.script(model)
mlflow.pytorch.log_model(scripted_pytorch_model, "scripted_model")
# Fetch the logged model artifacts
print("run_id: {}".format(run.info.run_id))
for artifact_path in ["model/data", "scripted_model/data"]:
artifacts = [f.path for f in MlflowClient().list_artifacts(run.info.run_id,
artifact_path)]
print("artifacts: {}".format(artifacts))
.. code-block:: text
:caption: Output
run_id: 1a1ec9e413ce48e9abf9aec20efd6f71
artifacts: ['model/data/model.pth',
'model/data/pickle_module_info.txt']
artifacts: ['scripted_model/data/model.pth',
'scripted_model/data/pickle_module_info.txt']
.. figure:: ../_static/images/pytorch_logged_models.png
PyTorch logged models
"""
pickle_module = pickle_module or mlflow_pytorch_pickle_module
Model.log(
artifact_path=artifact_path,
flavor=mlflow.pytorch,
pytorch_model=pytorch_model,
conda_env=conda_env,
code_paths=code_paths,
pickle_module=pickle_module,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
requirements_file=requirements_file,
extra_files=extra_files,
**kwargs,
)
def save_model(
pytorch_model,
path,
conda_env=None,
mlflow_model=None,
code_paths=None,
pickle_module=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
requirements_file=None,
extra_files=None,
**kwargs
):
"""
Save a PyTorch model to a path on the local file system.
:param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of
``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``
or ``torch.jit.trace``.
                          The model accepts a single ``torch.FloatTensor`` as
                          input and produces a single output tensor.
If saving an eager model, any code dependencies of the
model's class, including the class definition itself, should be
included in one of the following locations:
- The package(s) listed in the model's Conda environment, specified
by the ``conda_env`` parameter.
- One or more of the files specified by the ``code_paths`` parameter.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model. The
following is an *example* dictionary representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'pytorch=0.4.1',
'torchvision=0.2.1'
]
}
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
``pytorch_model``. This is passed as the ``pickle_module`` parameter
to ``torch.save()``. By default, this module is also used to
deserialize ("unpickle") the PyTorch model at load time.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param requirements_file: A string containing the path to requirements file. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``requirements_file`` string -
requirements_file = "s3://my-bucket/path/to/my_file"
In this case, the ``"my_file"`` requirements file is downloaded from S3.
If ``None``, no requirements file is added to the model.
:param extra_files: A list containing the paths to corresponding extra files. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``extra_files`` list -
extra_files = ["s3://my-bucket/path/to/my_file1",
"s3://my-bucket/path/to/my_file2"]
                        In this case, the ``my_file1`` and ``my_file2`` extra files are downloaded from S3.
If ``None``, no extra files are added to the model.
:param kwargs: kwargs to pass to ``torch.save`` method.
.. code-block:: python
:caption: Example
import os
import torch
import mlflow.pytorch
# Class defined here
class LinearNNModel(torch.nn.Module):
...
# Initialize our model, criterion and optimizer
...
# Training loop
...
# Save PyTorch models to current working directory
with mlflow.start_run() as run:
mlflow.pytorch.save_model(model, "model")
# Convert to a scripted model and save it
scripted_pytorch_model = torch.jit.script(model)
mlflow.pytorch.save_model(scripted_pytorch_model, "scripted_model")
# Load each saved model for inference
for model_path in ["model", "scripted_model"]:
model_uri = "{}/{}".format(os.getcwd(), model_path)
loaded_model = mlflow.pytorch.load_model(model_uri)
print("Loaded {}:".format(model_path))
for x in [6.0, 8.0, 12.0, 30.0]:
X = torch.Tensor([[x]])
y_pred = loaded_model(X)
print("predict X: {}, y_pred: {:.2f}".format(x, y_pred.data.item()))
print("--")
.. code-block:: text
:caption: Output
Loaded model:
predict X: 6.0, y_pred: 11.90
predict X: 8.0, y_pred: 15.92
predict X: 12.0, y_pred: 23.96
predict X: 30.0, y_pred: 60.13
--
Loaded scripted_model:
predict X: 6.0, y_pred: 11.90
predict X: 8.0, y_pred: 15.92
predict X: 12.0, y_pred: 23.96
predict X: 30.0, y_pred: 60.13
"""
import torch
pickle_module = pickle_module or mlflow_pytorch_pickle_module
if not isinstance(pytorch_model, torch.nn.Module):
raise TypeError("Argument 'pytorch_model' should be a torch.nn.Module")
if code_paths is not None:
if not isinstance(code_paths, list):
raise TypeError("Argument code_paths should be a list, not {}".format(type(code_paths)))
path = os.path.abspath(path)
if os.path.exists(path):
raise RuntimeError("Path '{}' already exists".format(path))
if mlflow_model is None:
mlflow_model = Model()
os.makedirs(path)
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
model_data_subpath = "data"
model_data_path = os.path.join(path, model_data_subpath)
os.makedirs(model_data_path)
# Persist the pickle module name as a file in the model's `data` directory. This is necessary
# because the `data` directory is the only available parameter to `_load_pyfunc`, and it
# does not contain the MLmodel configuration; therefore, it is not sufficient to place
# the module name in the MLmodel
#
# TODO: Stop persisting this information to the filesystem once we have a mechanism for
# supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc`
pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME)
with open(pickle_module_path, "w") as f:
f.write(pickle_module.__name__)
# Save pytorch model
model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME)
if isinstance(pytorch_model, torch.jit.ScriptModule):
torch.jit.ScriptModule.save(pytorch_model, model_path)
else:
torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs)
torchserve_artifacts_config = {}
if requirements_file:
if not isinstance(requirements_file, str):
raise TypeError("Path to requirements file should be a string")
with TempDir() as tmp_requirements_dir:
_download_artifact_from_uri(
artifact_uri=requirements_file, output_path=tmp_requirements_dir.path()
)
rel_path = os.path.basename(requirements_file)
torchserve_artifacts_config[_REQUIREMENTS_FILE_KEY] = {"path": rel_path}
shutil.move(tmp_requirements_dir.path(rel_path), path)
if extra_files:
torchserve_artifacts_config[_EXTRA_FILES_KEY] = []
if not isinstance(extra_files, list):
raise TypeError("Extra files argument should be a list")
with TempDir() as tmp_extra_files_dir:
for extra_file in extra_files:
_download_artifact_from_uri(
artifact_uri=extra_file, output_path=tmp_extra_files_dir.path()
)
rel_path = posixpath.join(_EXTRA_FILES_KEY, os.path.basename(extra_file),)
torchserve_artifacts_config[_EXTRA_FILES_KEY].append({"path": rel_path})
shutil.move(
tmp_extra_files_dir.path(), posixpath.join(path, _EXTRA_FILES_KEY),
)
conda_env_subpath = "conda.yaml"
if conda_env is None:
conda_env = get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
if code_paths is not None:
code_dir_subpath = "code"
for code_path in code_paths:
_copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)
else:
code_dir_subpath = None
mlflow_model.add_flavor(
FLAVOR_NAME,
model_data=model_data_subpath,
pytorch_version=torch.__version__,
**torchserve_artifacts_config,
)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.pytorch",
data=model_data_subpath,
pickle_module_name=pickle_module.__name__,
code=code_dir_subpath,
env=conda_env_subpath,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
def _load_model(path, **kwargs):
"""
:param path: The path to a serialized PyTorch model.
:param kwargs: Additional kwargs to pass to the PyTorch ``torch.load`` function.
"""
import torch
if os.path.isdir(path):
# `path` is a directory containing a serialized PyTorch model and a text file containing
# information about the pickle module that should be used by PyTorch to load it
model_path = os.path.join(path, "model.pth")
pickle_module_path = os.path.join(path, _PICKLE_MODULE_INFO_FILE_NAME)
with open(pickle_module_path, "r") as f:
pickle_module_name = f.read()
if "pickle_module" in kwargs and kwargs["pickle_module"].__name__ != pickle_module_name:
_logger.warning(
"Attempting to load the PyTorch model with a pickle module, '%s', that does not"
" match the pickle module that was used to save the model: '%s'.",
kwargs["pickle_module"].__name__,
pickle_module_name,
)
else:
try:
kwargs["pickle_module"] = importlib.import_module(pickle_module_name)
except ImportError as exc:
raise MlflowException(
message=(
"Failed to import the pickle module that was used to save the PyTorch"
" model. Pickle module name: `{pickle_module_name}`".format(
pickle_module_name=pickle_module_name
)
),
error_code=RESOURCE_DOES_NOT_EXIST,
) from exc
else:
model_path = path
if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
return torch.load(model_path, **kwargs)
else:
try:
# load the model as an eager model.
return torch.load(model_path, **kwargs)
except Exception: # pylint: disable=broad-except
# If fails, assume the model as a scripted model
return torch.jit.load(model_path)
def load_model(model_uri, **kwargs):
"""
Load a PyTorch model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model, for example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param kwargs: kwargs to pass to ``torch.load`` method.
:return: A PyTorch model.
.. code-block:: python
:caption: Example
import torch
import mlflow.pytorch
# Class defined here
class LinearNNModel(torch.nn.Module):
...
# Initialize our model, criterion and optimizer
...
# Training loop
...
# Log the model
with mlflow.start_run() as run:
mlflow.pytorch.log_model(model, "model")
# Inference after loading the logged model
model_uri = "runs:/{}/model".format(run.info.run_id)
loaded_model = mlflow.pytorch.load_model(model_uri)
for x in [4.0, 6.0, 30.0]:
X = torch.Tensor([[x]])
y_pred = loaded_model(X)
print("predict X: {}, y_pred: {:.2f}".format(x, y_pred.data.item()))
.. code-block:: text
:caption: Output
predict X: 4.0, y_pred: 7.57
predict X: 6.0, y_pred: 11.64
predict X: 30.0, y_pred: 60.48
"""
import torch
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
try:
pyfunc_conf = _get_flavor_configuration(
model_path=local_model_path, flavor_name=pyfunc.FLAVOR_NAME
)
except MlflowException:
pyfunc_conf = {}
code_subpath = pyfunc_conf.get(pyfunc.CODE)
if code_subpath is not None:
pyfunc_utils._add_code_to_system_path(
code_path=os.path.join(local_model_path, code_subpath)
)
pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
if torch.__version__ != pytorch_conf["pytorch_version"]:
_logger.warning(
"Stored model version '%s' does not match installed PyTorch version '%s'",
pytorch_conf["pytorch_version"],
torch.__version__,
)
torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf["model_data"])
return _load_model(path=torch_model_artifacts_path, **kwargs)
def _load_pyfunc(path, **kwargs):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor.
"""
return _PyTorchWrapper(_load_model(path, **kwargs))
class _PyTorchWrapper(object):
"""
Wrapper class that creates a predict function such that
predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)
"""
def __init__(self, pytorch_model):
self.pytorch_model = pytorch_model
def predict(self, data, device="cpu"):
import torch
if not isinstance(data, pd.DataFrame):
raise TypeError("Input data should be pandas.DataFrame")
self.pytorch_model.to(device)
self.pytorch_model.eval()
with torch.no_grad():
input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device)
preds = self.pytorch_model(input_tensor)
if not isinstance(preds, torch.Tensor):
raise TypeError(
"Expected PyTorch model to output a single output tensor, "
"but got output of type '{}'".format(type(preds))
)
predicted = pd.DataFrame(preds.numpy())
predicted.index = data.index
return predicted
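# Minimal usage sketch of the wrapper above (illustrative only; ``torch.nn.Identity``
# stands in for a real trained model and is not part of the MLflow API surface):
#
#     import pandas as pd
#     import torch
#
#     wrapper = _PyTorchWrapper(torch.nn.Identity())
#     print(wrapper.predict(pd.DataFrame([[1.0], [2.0]])))  # echoes the input values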
@experimental
@autologging_integration(FLAVOR_NAME)
def autolog(log_every_n_epoch=1, log_models=True, disable=False): # pylint: disable=unused-argument
"""
Enables (or disables) and configures autologging from `PyTorch Lightning
<https://pytorch-lightning.readthedocs.io/en/latest>`_ to MLflow.
Autologging is performed when you call the `fit` method of
`pytorch_lightning.Trainer() \
<https://pytorch-lightning.readthedocs.io/en/latest/trainer.html#>`_.
Explore the complete `PyTorch MNIST \
<https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST/example1>`_ for
    an expansive example that implements additional Lightning steps.
**Note**: Autologging is only supported for PyTorch Lightning models,
i.e., models that subclass
`pytorch_lightning.LightningModule \
<https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html>`_.
In particular, autologging support for vanilla PyTorch models that only subclass
`torch.nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_
is not yet available.
:param log_every_n_epoch: If specified, logs metrics once every `n` epochs. By default, metrics
are logged after every epoch.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
:param disable: If ``True``, disables all supported autologging integrations. If ``False``,
enables all supported autologging integrations.
.. code-block:: python
:caption: Example
import os
import pytorch_lightning as pl
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from pytorch_lightning.metrics.functional import accuracy
import mlflow.pytorch
from mlflow.tracking import MlflowClient
        # For brevity, here is the simplest, most minimal example with just a training
        # loop step (no validation, no testing). It illustrates how you can use MLflow
        # to auto log parameters, metrics, and models.
class MNISTModel(pl.LightningModule):
def __init__(self):
super(MNISTModel, self).__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
x, y = batch
                logits = self(x)
                loss = F.cross_entropy(logits, y)
                acc = accuracy(torch.argmax(logits, dim=1), y)
                # Log metrics with the LightningModule's built-in logger
self.log("train_loss", loss, on_epoch=True)
self.log("acc", acc, on_epoch=True)
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
print("run_id: {}".format(r.info.run_id))
print("artifacts: {}".format(artifacts))
print("params: {}".format(r.data.params))
print("metrics: {}".format(r.data.metrics))
print("tags: {}".format(tags))
# Initialize our model
mnist_model = MNISTModel()
# Initialize DataLoader from MNIST Dataset
train_ds = MNIST(os.getcwd(), train=True,
download=True, transform=transforms.ToTensor())
train_loader = DataLoader(train_ds, batch_size=32)
# Initialize a trainer
trainer = pl.Trainer(max_epochs=20, progress_bar_refresh_rate=20)
# Auto log all MLflow entities
mlflow.pytorch.autolog()
# Train the model
with mlflow.start_run() as run:
trainer.fit(mnist_model, train_loader)
# fetch the auto logged parameters and metrics
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
.. code-block:: text
:caption: Output
run_id: 42caa17b60cb489c8083900fb52506a7
artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/data']
params: {'betas': '(0.9, 0.999)',
'weight_decay': '0',
'epochs': '20',
'eps': '1e-08',
'lr': '0.02',
                 'optimizer_name': 'Adam',
                 'amsgrad': 'False'}
metrics: {'acc_step': 0.0,
'train_loss_epoch': 1.0917967557907104,
'train_loss_step': 1.0794280767440796,
'train_loss': 1.0794280767440796,
'acc_epoch': 0.0033333334140479565,
'acc': 0.0}
tags: {'Mode': 'training'}
.. figure:: ../_static/images/pytorch_lightening_autolog.png
PyTorch autologged MLflow entities
"""
import pytorch_lightning as pl
from mlflow.pytorch._pytorch_autolog import _create_patch_fit
fit = _create_patch_fit(log_every_n_epoch=log_every_n_epoch, log_models=log_models)
safe_patch(FLAVOR_NAME, pl.Trainer, "fit", fit, manage_run=True)
| [
[
[
362,
371
],
[
24519,
24528
]
],
[
[
379,
386
],
[
1651,
1658
]
],
[
[
394,
396
],
[
19511,
19513
],
[
19540,
19542
],
[
19696,
19698
],
[
19933,
19935
],
[
19976,
19978
],
[
20522,
20524
],
[
20710,
20712
],
[
21398,
21400
],
[
22079,
22081
],
[
22568,
22570
],
[
23354,
23356
],
[
23611,
23613
],
[
23838,
23840
],
[
23899,
23901
],
[
27749,
27751
],
[
28196,
28198
]
],
[
[
404,
408
],
[
22536,
22540
],
[
22626,
22630
]
],
[
[
417,
428
],
[
3125,
3136
]
],
[
[
436,
447
],
[
29208,
29210
]
],
[
[
455,
467
],
[
28959,
28961
],
[
29562,
29564
]
],
[
[
498,
510
],
[
25094,
25106
],
[
25129,
25141
]
],
[
[
518,
527
],
[
22046,
22055
],
[
22268,
22277
]
],
[
[
536,
542
],
[
11669,
11675
]
],
[
[
550,
556
],
[
21531,
21537
],
[
22211,
22217
]
],
[
[
564,
599
],
[
27688,
27700
]
],
[
[
619,
625
],
[
23097,
23103
],
[
27517,
27523
],
[
27634,
27640
]
],
[
[
656,
671
],
[
24624,
24639
],
[
27557,
27572
]
],
[
[
698,
703
],
[
11606,
11611
],
[
19683,
19688
]
],
[
[
705,
719
],
[
3381,
3395
],
[
12245,
12259
]
],
[
[
752,
769
],
[
23373,
23390
]
],
[
[
802,
819
],
[
3423,
3440
],
[
12287,
12304
]
],
[
[
821,
834
],
[
19829,
19842
]
],
[
[
876,
899
],
[
24997,
25020
]
],
[
[
927,
972
],
[
11573,
11601
],
[
19158,
19186
]
],
[
[
1016,
1043
],
[
21244,
21271
],
[
21888,
21915
],
[
27354,
27381
]
],
[
[
1081,
1093
],
[
29663,
29675
]
],
[
[
1131,
1148
],
[
2665,
2682
]
],
[
[
1185,
1203
],
[
22803,
22821
]
],
[
[
1205,
1212
],
[
21197,
21204
],
[
21795,
21802
]
],
[
[
1250,
1275
],
[
27437,
27462
],
[
27824,
27849
]
],
[
[
1320,
1351
],
[
3476,
3507
]
],
[
[
1395,
1418
],
[
29677,
29700
]
],
[
[
1420,
1430
],
[
34976,
34986
]
],
[
[
1432,
1443
],
[
29701,
29712
],
[
22953,
22964
],
[
27891,
27902
],
[
34987,
34998
]
],
[
[
1457,
1490
],
[
20740,
20773
]
],
[
[
1505,
1534
],
[
20552,
20581
],
[
23918,
23947
]
],
[
[
1562,
1578
],
[
21643,
21659
],
[
22061,
22077
],
[
22154,
22170
],
[
22289,
22305
]
],
[
[
1595,
1617
],
[
21474,
21496
]
],
[
[
1641,
1648
],
[
24149,
24156
],
[
27973,
27980
]
],
[
[
1685,
1706
],
[
22406,
22427
]
],
[
[
3220,
3229
]
],
[
[
12101,
12111
]
],
[
[
23399,
23410
],
[
28266,
28277
],
[
28555,
28566
]
],
[
[
25494,
25504
]
],
[
[
28327,
28339
]
],
[
[
28592,
28607
],
[
28539,
28554
]
],
[
[
29718,
29725
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Placeholder Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from feature_segwit import send_to_witness
from test_framework.test_framework import PlacehTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
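# Why 0xfffffffd: BIP 125 treats a transaction as opt-in replaceable when any input's
# nSequence is below 0xfffffffe, and BIP 68 relative lock-times only apply when the
# high bit (1 << 31) is unset, so this value signals RBF without imposing a relative
# lock-time (a reading of the BIPs, noted here for clarity).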
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(PlacehTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
["-prematurewitness", "-walletprematurewitness", "-mempoolreplacement", "-walletrbf={}".format(i)] for i in
range(self.num_nodes)]
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.001 btc (100,000 satoshis)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    # cannot replace a non-RBF transaction (from a node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
| [
[
[
830,
845
],
[
4776,
4791
]
],
[
[
888,
907
],
[
1220,
1239
]
],
[
[
935,
945
],
[
13204,
13214
],
[
13242,
13252
]
],
[
[
982,
994
],
[
12990,
13002
]
],
[
[
1027,
1028
],
[
1787,
1803
],
[
2284,
2296
],
[
2320,
2327
],
[
3416,
3429
],
[
3581,
3593
],
[
3764,
3777
],
[
4231,
4243
],
[
4295,
4307
],
[
4621,
4628
],
[
4944,
4951
],
[
5140,
5147
],
[
5200,
5207
],
[
5714,
5721
],
[
5741,
5764
],
[
6522,
6529
],
[
6789,
6812
],
[
7431,
7454
],
[
7792,
7815
],
[
8319,
8331
],
[
8350,
8357
],
[
8377,
8389
],
[
8418,
8430
],
[
8691,
8698
],
[
9033,
9052
],
[
9053,
9060
],
[
9139,
9146
],
[
9422,
9445
],
[
9828,
9851
],
[
10556,
10568
],
[
11428,
11440
],
[
11643,
11655
],
[
11931,
11938
],
[
12087,
12099
],
[
12144,
12156
],
[
12325,
12348
],
[
12634,
12641
],
[
12732,
12739
],
[
12804,
12811
],
[
13036,
13052
],
[
13426,
13442
]
],
[
[
1037,
1039
],
[
13025,
13027
]
],
[
[
1103,
1125
],
[
5094,
5116
],
[
6417,
6439
],
[
12555,
12577
]
],
[
[
1140,
1157
],
[
1647,
1664
],
[
1732,
1749
]
],
[
[
1167,
1192
],
[
1751,
1776
]
],
[
[
1208,
1219
],
[
13517,
13528
]
],
[
[
3252,
3280
],
[
2435,
2463
]
],
[
[
4349,
4377
],
[
2507,
2535
]
],
[
[
5528,
5553
],
[
2568,
2593
]
],
[
[
5832,
5858
],
[
2627,
2653
]
],
[
[
6940,
6974
],
[
2697,
2731
]
],
[
[
7539,
7562
],
[
2782,
2805
]
],
[
[
7902,
7918
],
[
2838,
2854
]
],
[
[
8500,
8513
],
[
2887,
2900
]
],
[
[
9186,
9200
],
[
2933,
2947
]
],
[
[
9576,
9606
],
[
2980,
3010
]
],
[
[
9987,
10017
],
[
3043,
3073
]
],
[
[
11835,
11856
],
[
3110,
3131
]
],
[
[
12193,
12217
],
[
3164,
3188
]
],
[
[
12480,
12495
],
[
3329,
3344
],
[
7198,
7213
],
[
7646,
7661
],
[
7748,
7763
],
[
8094,
8109
],
[
8627,
8642
],
[
9321,
9336
],
[
9704,
9719
],
[
10139,
10154
],
[
12255,
12270
]
],
[
[
12948,
12968
],
[
11040,
11060
]
]
] |
"""The Amazon Redshift dialect.
This is based on the postgres dialect, since Redshift was initially based off of Postgres 8.
We should monitor in future and see whether it should be rebased off of ANSI.
"""
from sqlfluff.core.parser import (
OneOf,
AnyNumberOf,
AnySetOf,
Anything,
Ref,
Sequence,
Bracketed,
BaseSegment,
Delimited,
Nothing,
OptionallyBracketed,
Matchable,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.dialects.dialect_redshift_keywords import (
redshift_reserved_keywords,
redshift_unreserved_keywords,
)
postgres_dialect = load_raw_dialect("postgres")
ansi_dialect = load_raw_dialect("ansi")
redshift_dialect = postgres_dialect.copy_as("redshift")
# Set Keywords
redshift_dialect.sets("unreserved_keywords").clear()
redshift_dialect.sets("unreserved_keywords").update(
[n.strip().upper() for n in redshift_unreserved_keywords.split("\n")]
)
redshift_dialect.sets("reserved_keywords").clear()
redshift_dialect.sets("reserved_keywords").update(
[n.strip().upper() for n in redshift_reserved_keywords.split("\n")]
)
redshift_dialect.sets("bare_functions").clear()
redshift_dialect.sets("bare_functions").update(["current_date", "sysdate"])
redshift_dialect.sets("date_part_function_name").update(
["DATEADD", "DATEDIFF", "EXTRACT", "DATE_PART"]
)
# Add datetime units
# https://docs.aws.amazon.com/redshift/latest/dg/r_Dateparts_for_datetime_functions.html
redshift_dialect.sets("datetime_units").update(
[
        # millennium
"MILLENNIUM",
"MILLENNIA",
"MIL",
"MILS",
# century
"CENTURY",
"CENTURIES",
"C",
"CENT",
"CENTS",
# decade
"DECADE",
"DECADES",
"DEC",
"DECS",
# epoch
"EPOCH",
# year
"YEAR",
"YEARS",
"Y",
"YR",
"YRS",
# quarter
"QUARTER",
"QUARTERS",
"QTR",
"QTRS",
# month
"MONTH",
"MONTHS",
"MON",
"MONS",
# week
"WEEK",
"WEEKS",
"W",
# day of week
"DAYOFWEEK",
"DOW",
"DW",
"WEEKDAY",
# day of year
"DAYOFYEAR",
"DOY",
"DY",
"YEARDAY",
# day
"DAY",
"DAYS",
"D",
# hour
"HOUR",
"HOURS",
"H",
"HR",
"HRS",
# minute
"MINUTE",
"MINUTES",
"M",
"MIN",
"MINS",
# second
"SECOND",
"SECONDS",
"S",
"SEC",
"SECS",
# millisec
"MILLISECOND",
"MILLISECONDS",
"MS",
"MSEC",
"MSECS",
"MSECOND",
"MSECONDS",
"MILLISEC",
"MILLISECS",
"MILLISECON",
# microsec
"MICROSECOND",
"MICROSECONDS",
"MICROSEC",
"MICROSECS",
"MICROSECOND",
"USECOND",
"USECONDS",
"US",
"USEC",
"USECS",
# timezone
"TIMEZONE",
"TIMEZONE_HOUR",
"TIMEZONE_MINUTE",
]
)
redshift_dialect.replace(
WellKnownTextGeometrySegment=Nothing(),
JoinLikeClauseGrammar=Sequence(
AnySetOf(
Ref("FromPivotExpressionSegment"),
Ref("FromUnpivotExpressionSegment"),
min_times=1,
),
Ref("AliasExpressionSegment", optional=True),
),
)
ObjectReferenceSegment = redshift_dialect.get_segment("ObjectReferenceSegment")
redshift_dialect.add(
CompressionTypeGrammar=OneOf(
"BZIP2",
"GZIP",
"LZOP",
"ZSTD",
),
ArgModeGrammar=OneOf(
"IN",
"OUT",
"INOUT",
),
ColumnEncodingGrammar=OneOf(
"RAW",
"AZ64",
"BYTEDICT",
"DELTA",
"DELTA32K",
"LZO",
"MOSTLY8",
"MOSTLY16",
"MOSTLY32",
"RUNLENGTH",
"TEXT255",
"TEXT32K",
"ZSTD",
),
)
# need to ignore type due to mypy rules on type variables
# see https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
# for details
@redshift_dialect.segment(replace=True)
class ColumnReferenceSegment(ObjectReferenceSegment): # type: ignore
"""A reference to column, field or alias.
Adjusted to support column references for Redshift's SUPER data type
(https://docs.aws.amazon.com/redshift/latest/dg/super-overview.html), which
uses a subset of the PartiQL language (https://partiql.org/) to reference
columns.
"""
type = "column_reference"
match_grammar: Matchable = Delimited(
Sequence(
Ref("SingleIdentifierGrammar"),
AnyNumberOf(Ref("ArrayAccessorSegment")),
Ref("TimeZoneGrammar", optional=True),
),
delimiter=OneOf(
Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment"))
),
terminator=OneOf(
"ON",
"AS",
"USING",
Ref("CommaSegment"),
Ref("CastOperatorSegment"),
Ref("BinaryOperatorGrammar"),
Ref("ColonSegment"),
Ref("DelimiterSegment"),
Ref("JoinLikeClauseGrammar"),
),
allow_gaps=False,
)
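# Illustrative only (table/column names are hypothetical): the grammar above is meant to
# cover PartiQL-style references into SUPER values, e.g.
#
#     SELECT c.c_orders[0].o_orderkey
#     FROM customer_orders_lineitem c;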
@redshift_dialect.segment()
class FromUnpivotExpressionSegment(BaseSegment):
"""An UNPIVOT expression.
See
https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html
for details.
"""
type = "from_unpivot_expression"
match_grammar = Sequence(
"UNPIVOT",
Sequence(
OneOf("INCLUDE", "EXCLUDE"),
"NULLS",
optional=True,
),
Bracketed(
Sequence(
Ref("ColumnReferenceSegment"),
"FOR",
Ref("ColumnReferenceSegment"),
"IN",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("AliasExpressionSegment", optional=True),
)
),
),
),
),
)
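# Illustrative only (table/column names are hypothetical): the kind of UNPIVOT clause the
# segment above is intended to match, per the AWS examples page linked in the docstring:
#
#     SELECT * FROM count_by_color
#     UNPIVOT INCLUDE NULLS (cnt FOR color IN (red, green, blue));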
@redshift_dialect.segment()
class FromPivotExpressionSegment(BaseSegment):
"""A PIVOT expression.
See
https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html
for details.
"""
type = "from_pivot_expression"
match_grammar = Sequence(
"PIVOT",
Bracketed(
Sequence(
OptionallyBracketed(Ref("FunctionSegment")),
Ref("AliasExpressionSegment", optional=True),
"FOR",
Ref("ColumnReferenceSegment"),
"IN",
Bracketed(
Delimited(
Sequence(
Ref("ExpressionSegment"),
Ref("AliasExpressionSegment", optional=True),
),
),
),
),
),
)
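# Illustrative only (table/column names are hypothetical): the kind of PIVOT clause the
# segment above is intended to match:
#
#     SELECT * FROM (SELECT partname, price FROM part)
#     PIVOT (AVG(price) FOR partname IN ('P1', 'P2', 'P3'));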
@redshift_dialect.segment(replace=True)
class DateTimeTypeIdentifier(BaseSegment):
"""A Date Time type."""
type = "datetime_type_identifier"
match_grammar = OneOf(
"DATE",
"DATETIME",
Sequence(
OneOf("TIME", "TIMESTAMP"),
Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True),
),
OneOf("TIMETZ", "TIMESTAMPTZ"),
# INTERVAL types are not Datetime types under Redshift:
# https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html
)
@redshift_dialect.segment(replace=True)
class DatatypeSegment(BaseSegment):
"""A data type segment.
Indicates a data type.
https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html
"""
type = "data_type"
match_grammar = OneOf(
# numeric types
"SMALLINT",
"INT2",
"INTEGER",
"INT",
"INT4",
"BIGINT",
"INT8",
"REAL",
"FLOAT4",
Sequence("DOUBLE", "PRECISION"),
"FLOAT8",
"FLOAT",
# numeric types [precision ["," scale])]
Sequence(
OneOf("DECIMAL", "NUMERIC"),
Bracketed(
Delimited(Ref("NumericLiteralSegment")),
optional=True,
),
),
# character types
OneOf(
Sequence(
OneOf(
"CHAR",
"CHARACTER",
"NCHAR",
"VARCHAR",
Sequence("CHARACTER", "VARYING"),
"NVARCHAR",
),
Bracketed(
OneOf(
Ref("NumericLiteralSegment"),
"MAX",
),
optional=True,
),
),
"BPCHAR",
"TEXT",
),
Sequence(
Ref("DateTimeTypeIdentifier"),
Ref("TimeZoneGrammar", optional=True),
),
# INTERVAL is a data type *only* for conversion operations
"INTERVAL",
# boolean types
OneOf("BOOLEAN", "BOOL"),
# hllsketch type
"HLLSKETCH",
# super type
"SUPER",
# spatial data
"GEOMETRY",
"GEOGRAPHY",
# binary type
Sequence(
OneOf(
"VARBYTE",
"VARBINARY",
Sequence("BINARY", "VARYING"),
),
Bracketed(
Ref("NumericLiteralSegment"),
optional=True,
),
),
)
@redshift_dialect.segment()
class DataFormatSegment(BaseSegment):
"""DataFormat segment.
Indicates data format available for COPY commands.
https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html
"""
type = "data_format_segment"
match_grammar = Sequence(
Sequence(
"FORMAT",
Ref.keyword("AS", optional=True),
optional=True,
),
OneOf(
Sequence(
"CSV",
Sequence(
"QUOTE",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
),
Sequence(
"SHAPEFILE",
Sequence(
"SIMPLIFY",
Ref.keyword("AUTO", optional=True),
Ref("NumericLiteralSegment", optional=True),
optional=True,
),
),
Sequence(
OneOf("AVRO", "JSON"),
Sequence(
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
),
"PARQUET",
"ORC",
"RCFILE",
"SEQUENCEFILE",
),
)
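# Illustrative only (bucket and table names are hypothetical): the data-format fragment
# this segment covers appears inside COPY statements such as:
#
#     COPY my_table
#     FROM 's3://my-bucket/data/'
#     IAM_ROLE default
#     FORMAT AS CSV QUOTE AS '"';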
@redshift_dialect.segment()
class AuthorizationSegment(BaseSegment):
"""Authorization segment.
Specifies authorization to access data in another AWS resource.
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html
"""
type = "authorization_segment"
match_grammar = AnySetOf(
OneOf(
Sequence(
"IAM_ROLE",
OneOf(
"DEFAULT",
Ref("QuotedLiteralSegment"),
),
),
Sequence(
Ref.keyword("WITH", optional=True),
"CREDENTIALS",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
Sequence(
"ACCESS_KEY_ID",
Ref("QuotedLiteralSegment"),
"SECRET_ACCESS_KEY",
Ref("QuotedLiteralSegment"),
Sequence(
"SESSION_TOKEN",
Ref("QuotedLiteralSegment"),
optional=True,
),
),
optional=False,
),
Sequence(
"KMS_KEY_ID",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"MASTER_SYMMETRIC_KEY",
Ref("QuotedLiteralSegment"),
optional=True,
),
)
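# Illustrative only (the ARN and keys are placeholders): the authorization fragment parsed
# here appears in COPY/UNLOAD/CREATE LIBRARY statements in forms such as:
#
#     IAM_ROLE 'arn:aws:iam::0123456789012:role/MyRedshiftRole'
#     -- or --
#     ACCESS_KEY_ID '<access-key-id>' SECRET_ACCESS_KEY '<secret-access-key>'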
@redshift_dialect.segment()
class ColumnAttributeSegment(BaseSegment):
"""Redshift specific column attributes.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "column_attribute_segment"
match_grammar = AnySetOf(
Sequence("DEFAULT", Ref("ExpressionSegment")),
Sequence(
"IDENTITY",
Bracketed(Delimited(Ref("NumericLiteralSegment"))),
),
Sequence(
"GENERATED",
"BY",
"DEFAULT",
"AS",
"IDENTITY",
Bracketed(Delimited(Ref("NumericLiteralSegment"))),
),
Sequence("ENCODE", Ref("ColumnEncodingGrammar")),
"DISTKEY",
"SORTKEY",
Sequence("COLLATE", OneOf("CASE_SENSITIVE", "CASE_INSENSITIVE")),
)
@redshift_dialect.segment(replace=True)
class ColumnConstraintSegment(BaseSegment):
"""Redshift specific column constraints.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "column_constraint_segment"
match_grammar = AnySetOf(
OneOf(Sequence("NOT", "NULL"), "NULL"),
OneOf("UNIQUE", Sequence("PRIMARY", "KEY")),
Sequence(
"REFERENCES",
Ref("TableReferenceSegment"),
Bracketed(Ref("ColumnReferenceSegment"), optional=True),
),
)
@redshift_dialect.segment()
class TableAttributeSegment(BaseSegment):
"""Redshift specific table attributes.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "table_constraint"
match_grammar = AnySetOf(
Sequence("DISTSTYLE", OneOf("AUTO", "EVEN", "KEY", "ALL"), optional=True),
Sequence("DISTKEY", Bracketed(Ref("ColumnReferenceSegment")), optional=True),
OneOf(
Sequence(
OneOf("COMPOUND", "INTERLEAVED", optional=True),
"SORTKEY",
Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
),
Sequence("SORTKEY", "AUTO"),
optional=True,
),
Sequence("ENCODE", "AUTO", optional=True),
)
@redshift_dialect.segment(replace=True)
class TableConstraintSegment(BaseSegment):
"""Redshift specific table constraints.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "table_constraint"
match_grammar = AnySetOf(
Sequence("UNIQUE", Bracketed(Delimited(Ref("ColumnReferenceSegment")))),
Sequence(
"PRIMARY",
"KEY",
Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
),
Sequence(
"FOREIGN",
"KEY",
Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
"REFERENCES",
Ref("TableReferenceSegment"),
Sequence(Bracketed(Ref("ColumnReferenceSegment"))),
),
)
@redshift_dialect.segment(replace=True)
class LikeOptionSegment(BaseSegment):
"""Like Option Segment.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "like_option_segment"
match_grammar = Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS")
@redshift_dialect.segment(replace=True)
class CreateTableStatementSegment(BaseSegment):
"""A `CREATE TABLE` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "create_table_statement"
match_grammar = Sequence(
"CREATE",
Ref.keyword("LOCAL", optional=True),
Ref("TemporaryGrammar", optional=True),
"TABLE",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
Bracketed(
OneOf(
# Columns and comment syntax:
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("DatatypeSegment"),
AnyNumberOf(Ref("ColumnAttributeSegment"), optional=True),
AnyNumberOf(Ref("ColumnConstraintSegment"), optional=True),
),
Ref("TableConstraintSegment", optional=True),
),
Sequence(
"LIKE",
Ref("TableReferenceSegment"),
AnyNumberOf(Ref("LikeOptionSegment"), optional=True),
),
)
),
Sequence("BACKUP", OneOf("YES", "NO", optional=True), optional=True),
AnyNumberOf(Ref("TableAttributeSegment"), optional=True),
)
@redshift_dialect.segment(replace=True)
class CreateTableAsStatementSegment(BaseSegment):
"""A `CREATE TABLE AS` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_AS.html
"""
type = "create_table_as_statement"
match_grammar = Sequence(
"CREATE",
Sequence(
Ref.keyword("LOCAL", optional=True),
OneOf("TEMPORARY", "TEMP"),
optional=True,
),
"TABLE",
Ref("ObjectReferenceSegment"),
Bracketed(
Delimited(
Ref("ColumnReferenceSegment"),
),
optional=True,
),
Sequence("BACKUP", OneOf("YES", "NO"), optional=True),
Ref("TableAttributeSegment", optional=True),
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
)
@redshift_dialect.segment(replace=True)
class CreateModelStatementSegment(BaseSegment):
"""A `CREATE MODEL` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_MODEL.html
    NB: the order of keywords matters
"""
type = "create_model_statement"
match_grammar = Sequence(
"CREATE",
"MODEL",
Ref("ObjectReferenceSegment"),
Sequence(
"FROM",
OneOf(
Ref("QuotedLiteralSegment"),
Bracketed(Ref("SelectableGrammar")),
Ref("ObjectReferenceSegment"),
),
optional=True,
),
Sequence(
"TARGET",
Ref("ColumnReferenceSegment"),
optional=True,
),
Sequence(
"FUNCTION",
Ref("ObjectReferenceSegment"),
Bracketed(
Delimited(Ref("DatatypeSegment")),
optional=True,
),
),
Sequence(
"RETURNS",
Ref("DatatypeSegment"),
optional=True,
),
Sequence(
"SAGEMAKER",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"IAM_ROLE",
OneOf(
"DEFAULT",
Ref("QuotedLiteralSegment"),
),
),
Sequence(
"AUTO",
OneOf(
"ON",
"OFF",
),
optional=True,
),
Sequence(
"MODEL_TYPE",
OneOf(
"XGBOOST",
"MLP",
"KMEANS",
),
optional=True,
),
Sequence(
"PROBLEM_TYPE",
OneOf(
"REGRESSION",
"BINARY_CLASSIFICATION",
"MULTICLASS_CLASSIFICATION",
),
optional=True,
),
Sequence(
"OBJECTIVE",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"PREPROCESSORS",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"HYPERPARAMETERS",
"DEFAULT",
Sequence(
"EXCEPT",
Bracketed(
Delimited(
Anything(),
),
),
optional=True,
),
optional=True,
),
Sequence(
"SETTINGS",
Bracketed(
Sequence(
"S3_BUCKET",
Ref("QuotedLiteralSegment"),
Sequence(
"KMS_KEY_ID",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"S3_GARBAGE_COLLECT",
OneOf(
"ON",
"OFF",
),
optional=True,
),
Sequence(
"MAX_CELLS",
Ref("NumericLiteralSegment"),
optional=True,
),
Sequence(
"MAX_RUNTIME",
Ref("NumericLiteralSegment"),
optional=True,
),
),
),
optional=True,
),
)
@redshift_dialect.segment()
class ShowModelStatementSegment(BaseSegment):
"""A `SHOW MODEL` statement.
As specified in: https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_MODEL.html
"""
type = "show_model_statement"
match_grammar = Sequence(
"SHOW",
"MODEL",
OneOf(
"ALL",
Ref("ObjectReferenceSegment"),
),
)
@redshift_dialect.segment()
class CreateExternalTableStatementSegment(BaseSegment):
"""A `CREATE EXTERNAL TABLE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "create_external_table_statement"
match_grammar = Sequence(
"CREATE",
"EXTERNAL",
"TABLE",
Ref("TableReferenceSegment"),
Bracketed(
# Columns and comment syntax:
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("DatatypeSegment"),
),
),
),
Ref("PartitionedBySegment", optional=True),
Sequence(
"ROW",
"FORMAT",
OneOf(
Sequence(
"DELIMITED",
Ref("RowFormatDelimitedSegment"),
),
Sequence(
"SERDE",
Ref("QuotedLiteralSegment"),
Sequence(
"WITH",
"SERDEPROPERTIES",
Bracketed(
Delimited(
Sequence(
Ref("QuotedLiteralSegment"),
Ref("EqualsSegment"),
Ref("QuotedLiteralSegment"),
),
),
),
optional=True,
),
),
),
optional=True,
),
"STORED",
"AS",
OneOf(
"PARQUET",
"RCFILE",
"SEQUENCEFILE",
"TEXTFILE",
"ORC",
"AVRO",
Sequence(
"INPUTFORMAT",
Ref("QuotedLiteralSegment"),
"OUTPUTFORMAT",
Ref("QuotedLiteralSegment"),
),
),
"LOCATION",
Ref("QuotedLiteralSegment"),
Sequence(
"TABLE",
"PROPERTIES",
Bracketed(
Delimited(
Sequence(
Ref("QuotedLiteralSegment"),
Ref("EqualsSegment"),
Ref("QuotedLiteralSegment"),
),
),
),
optional=True,
),
)
@redshift_dialect.segment()
class CreateExternalTableAsStatementSegment(BaseSegment):
"""A `CREATE EXTERNAL TABLE AS` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "create_external_table_statement"
match_grammar = Sequence(
"CREATE",
"EXTERNAL",
"TABLE",
Ref("TableReferenceSegment"),
Ref("PartitionedBySegment", optional=True),
Sequence(
"ROW",
"FORMAT",
"DELIMITED",
Ref("RowFormatDelimitedSegment"),
optional=True,
),
"STORED",
"AS",
OneOf(
"PARQUET",
"TEXTFILE",
),
"LOCATION",
Ref("QuotedLiteralSegment"),
Sequence(
"TABLE",
"PROPERTIES",
Bracketed(
Delimited(
Sequence(
Ref("QuotedLiteralSegment"),
Ref("EqualsSegment"),
Ref("QuotedLiteralSegment"),
),
),
),
optional=True,
),
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
)
@redshift_dialect.segment()
class CreateLibraryStatementSegment(BaseSegment):
"""A `CREATE LIBRARY` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_LIBRARY.html
"""
type = "create_library_statement"
match_grammar = Sequence(
"CREATE",
Sequence(
"OR",
"REPLACE",
optional=True,
),
"LIBRARY",
Ref("ObjectReferenceSegment"),
"LANGUAGE",
"PLPYTHONU",
"FROM",
Ref("QuotedLiteralSegment"),
AnySetOf(
Ref("AuthorizationSegment", optional=False),
Sequence(
"REGION",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
),
)
@redshift_dialect.segment()
class UnloadStatementSegment(BaseSegment):
"""A `UNLOAD` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
"""
type = "unload_statement"
match_grammar = Sequence(
"UNLOAD",
Bracketed(Ref("QuotedLiteralSegment")),
"TO",
Ref("QuotedLiteralSegment"),
AnySetOf(
Ref("AuthorizationSegment", optional=False),
Sequence(
"REGION",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Ref("CompressionTypeGrammar", optional=True),
Sequence(
Sequence(
"FORMAT",
Ref.keyword("AS", optional=True),
optional=True,
),
OneOf(
"CSV",
"JSON",
"PARQUET",
),
optional=True,
),
Sequence(
"PARTITION",
"BY",
Ref("BracketedColumnReferenceListGrammar"),
Ref.keyword("INCLUDE", optional=True),
),
Sequence(
"PARALLEL",
OneOf(
"PRESET",
"ON",
"OFF",
"TRUE",
"FALSE",
optional=True,
),
optional=True,
),
OneOf(
Sequence(
"DELIMITER",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
Sequence(
"FIXEDWIDTH",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Sequence(
"MANIFEST",
Ref.keyword("VERBOSE", optional=True),
optional=True,
),
Sequence(
"NULL",
"AS",
Ref("QuotedLiteralSegment"),
optional=True,
),
AnySetOf(
OneOf(
"MAXFILESIZE",
"ROWGROUPSIZE",
),
Ref.keyword("AS", optional=True),
Ref("NumericLiteralSegment"),
OneOf(
"MB",
"GB",
),
optional=True,
),
Sequence(
"ENCRYPTED",
Ref.keyword("AUTO", optional=True),
optional=True,
),
Ref.keyword("ALLOWOVERWRITE", optional=True),
Ref.keyword("CLEANPATH", optional=True),
Ref.keyword("ESCAPE", optional=True),
Ref.keyword("ADDQUOTES", optional=True),
Ref.keyword("HEADER", optional=True),
),
)
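# Illustrative only (bucket and role are placeholders): an UNLOAD statement this segment
# is intended to parse:
#
#     UNLOAD ('SELECT * FROM venue')
#     TO 's3://my-bucket/unload/venue_'
#     IAM_ROLE 'arn:aws:iam::0123456789012:role/MyRedshiftRole'
#     FORMAT AS PARQUET;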
@redshift_dialect.segment(replace=True)
class CopyStatementSegment(
postgres_dialect.get_segment("CopyStatementSegment") # type: ignore
):
"""A `COPY` statement.
    As specified in:
- https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html
- https://docs.aws.amazon.com/redshift/latest/dg/r_COPY-parameters.html
"""
type = "copy_statement"
match_grammar = Sequence(
"COPY",
Ref("TableReferenceSegment"),
Ref("BracketedColumnReferenceListGrammar", optional=True),
"FROM",
Ref("QuotedLiteralSegment"),
AnySetOf(
Ref("AuthorizationSegment", optional=False),
Sequence(
"REGION",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Ref("CompressionTypeGrammar", optional=True),
Ref("DataFormatSegment", optional=True),
OneOf(
Sequence(
"DELIMITER",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
Sequence(
"FIXEDWIDTH",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Sequence(
"ENCRYPTED",
Ref.keyword("AUTO", optional=True),
optional=True,
),
Ref.keyword("MANIFEST", optional=True),
Sequence(
"COMPROWS",
Ref("NumericLiteralSegment"),
optional=True,
),
Sequence(
"MAXERROR",
Ref.keyword("AS", optional=True),
Ref("NumericLiteralSegment"),
optional=True,
),
Sequence(
"COMPUPDATE",
OneOf(
"PRESET",
"ON",
"OFF",
"TRUE",
"FALSE",
optional=True,
),
optional=True,
),
Sequence(
"STATUPDATE",
OneOf(
"ON",
"OFF",
"TRUE",
"FALSE",
optional=True,
),
optional=True,
),
Ref.keyword("NOLOAD", optional=True),
Ref.keyword("ACCEPTANYDATE", optional=True),
Sequence(
"ACCEPTINVCHARS",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Ref.keyword("BLANKSASNULL", optional=True),
Sequence(
"DATEFORMAT",
Ref.keyword("AS", optional=True),
OneOf(
"AUTO",
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Ref.keyword("EMPTYASNULL", optional=True),
Sequence(
"ENCODING",
Ref.keyword("AS", optional=True),
OneOf(
"UTF8",
"UTF16",
"UTF16BE",
"UTF16LE",
),
optional=True,
),
Ref.keyword("ESCAPE", optional=True),
Ref.keyword("EXPLICIT_IDS", optional=True),
Ref.keyword("FILLRECORD", optional=True),
Ref.keyword("IGNOREBLANKLINES", optional=True),
Sequence(
"IGNOREHEADER",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"NULL",
"AS",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"READRATIO",
Ref("NumericLiteralSegment"),
optional=True,
),
Ref.keyword("REMOVEQUOTES", optional=True),
Ref.keyword("ROUNDEC", optional=True),
Sequence(
"TIMEFORMAT",
Ref.keyword("AS", optional=True),
OneOf(
"AUTO",
"EPOCHSECS",
"EPOCHMILLISECS",
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Ref.keyword("TRIMBLANKS", optional=True),
Ref.keyword("TRUNCATECOLUMNS", optional=True),
),
)
@redshift_dialect.segment(replace=True)
class InsertStatementSegment(BaseSegment):
"""An`INSERT` statement.
Redshift has two versions of insert statements:
- https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_30.html
- https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_external_table.html
"""
# TODO: This logic can be streamlined. However, there are some odd parsing issues.
# See https://github.com/sqlfluff/sqlfluff/pull/1896
type = "insert_statement"
match_grammar = Sequence(
"INSERT",
"INTO",
Ref("TableReferenceSegment"),
OneOf(
OptionallyBracketed(Ref("SelectableGrammar")),
Sequence("DEFAULT", "VALUES"),
Sequence(
Ref("BracketedColumnReferenceListGrammar", optional=True),
OneOf(
Ref("ValuesClauseSegment"),
OptionallyBracketed(Ref("SelectableGrammar")),
),
),
),
)
@redshift_dialect.segment(replace=True)
class CreateSchemaStatementSegment(BaseSegment):
"""A `CREATE SCHEMA` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_SCHEMA.html
TODO: support optional SCHEMA_ELEMENT
"""
type = "create_schema_statement"
match_grammar = Sequence(
"CREATE",
"SCHEMA",
OneOf(
Sequence(
Ref("IfNotExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
Sequence(
"AUTHORIZATION",
Ref("ObjectReferenceSegment"),
optional=True,
),
),
Sequence(
"AUTHORIZATION",
Ref("ObjectReferenceSegment"),
),
),
Sequence(
"QUOTA",
OneOf(
Sequence(
Ref("NumericLiteralSegment"),
OneOf(
"MB",
"GB",
"TB",
),
),
"UNLIMITED",
),
optional=True,
),
)
@redshift_dialect.segment()
class ProcedureParameterListSegment(BaseSegment):
"""The parameters for a procedure.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_PROCEDURE.html
"""
type = "procedure_parameter_list"
# Odd syntax, but prevents eager parameters being confused for data types
_param_type = OneOf("REFCURSOR", Ref("DatatypeSegment"))
match_grammar = Bracketed(
Sequence(
AnyNumberOf(
OneOf(
Ref("ParameterNameSegment"),
exclude=OneOf(_param_type, Ref("ArgModeGrammar")),
optional=True,
),
Ref("ArgModeGrammar", optional=True),
max_times_per_element=1,
),
_param_type,
AnyNumberOf(
Sequence(
Ref("CommaSegment"),
AnyNumberOf(
OneOf(
Ref("ParameterNameSegment"),
exclude=OneOf(_param_type, Ref("ArgModeGrammar")),
optional=True,
),
Ref("ArgModeGrammar", optional=True),
max_times_per_element=1,
),
_param_type,
),
),
optional=True,
),
)
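# Illustrative only (procedure and parameter names are hypothetical): the parameter lists
# this segment is meant to match, as used in CREATE PROCEDURE:
#
#     CREATE OR REPLACE PROCEDURE get_result_set(param IN integer, rs_out INOUT refcursor)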
@redshift_dialect.segment(replace=True)
class CreateProcedureStatementSegment(BaseSegment):
"""A `CREATE PROCEDURE` statement.
https://www.postgresql.org/docs/14/sql-createprocedure.html
TODO: Just a basic statement for now, without full syntax.
based on CreateFunctionStatementSegment without a return type.
"""
type = "create_procedure_statement"
match_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
"PROCEDURE",
Ref("FunctionNameSegment"),
Ref("ProcedureParameterListSegment"),
Ref("FunctionDefinitionGrammar"),
)
@redshift_dialect.segment()
class AlterProcedureStatementSegment(BaseSegment):
"""An `ALTER PROCEDURE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_PROCEDURE.html
"""
type = "alter_procedure_statement"
match_grammar = Sequence(
"ALTER",
"PROCEDURE",
Ref("FunctionNameSegment"),
Ref("ProcedureParameterListSegment", optional=True),
OneOf(
Sequence("RENAME", "TO", Ref("FunctionNameSegment")),
Sequence(
"OWNER",
"TO",
OneOf(
OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")),
"CURRENT_USER",
"SESSION_USER",
),
),
),
)
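    # Example statements this grammar is intended to parse (names are placeholders):
    #   ALTER PROCEDURE my_proc(bigint) RENAME TO my_proc_v2
    #   ALTER PROCEDURE my_proc OWNER TO CURRENT_USER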
@redshift_dialect.segment(replace=True)
class DropProcedureStatementSegment(BaseSegment):
"""An `DROP PROCEDURE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_PROCEDURE.html
"""
type = "drop_procedure_statement"
match_grammar = Sequence(
"DROP",
"PROCEDURE",
Ref("IfExistsGrammar", optional=True),
Delimited(
Sequence(
Ref("FunctionNameSegment"),
Ref("ProcedureParameterListSegment", optional=True),
),
),
)
@redshift_dialect.segment()
class DeclareStatementSegment(BaseSegment):
"""A `DECLARE` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/declare.html
"""
type = "declare_statement"
match_grammar = Sequence(
"DECLARE",
Ref("ObjectReferenceSegment"),
"CURSOR",
"FOR",
Ref("SelectableGrammar"),
)
@redshift_dialect.segment()
class FetchStatementSegment(BaseSegment):
"""A `FETCH` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/fetch.html
"""
type = "fetch_statement"
match_grammar = Sequence(
"fetch",
OneOf(
"NEXT",
"ALL",
Sequence(
"FORWARD",
OneOf(
"ALL",
Ref("NumericLiteralSegment"),
),
),
),
"FROM",
Ref("ObjectReferenceSegment"),
)
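    # Example statements this grammar is intended to parse (cursor name is a placeholder):
    #   FETCH NEXT FROM my_cursor
    #   FETCH FORWARD 5 FROM my_cursor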
@redshift_dialect.segment()
class CloseStatementSegment(BaseSegment):
"""A `CLOSE` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/close.html
"""
type = "close_statement"
match_grammar = Sequence(
"CLOSE",
Ref("ObjectReferenceSegment"),
)
@redshift_dialect.segment()
class AltereDatashareStatementSegment(BaseSegment):
"""An `ALTER DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_DATASHARE.html
"""
type = "create_datashare_statement"
match_grammar = Sequence(
"ALTER",
"DATASHARE",
Ref("ObjectReferenceSegment"),
OneOf(
# add or remove objects to the datashare
Sequence(
OneOf(
"ADD",
"REMOVE",
),
OneOf(
Sequence(
"TABLE",
Delimited(Ref("TableReferenceSegment")),
),
Sequence(
"SCHEMA",
Delimited(Ref("SchemaReferenceSegment")),
),
Sequence(
"FUNCTION",
Delimited(Ref("FunctionNameSegment")),
),
Sequence(
"ALL",
OneOf("TABLES", "FUNCTIONS"),
"IN",
"SCHEMA",
Delimited(Ref("SchemaReferenceSegment")),
),
),
),
# configure the properties of the datashare
Sequence(
"SET",
OneOf(
Sequence(
"PUBLICACCESSIBLE",
Ref("EqualsSegment", optional=True),
Ref("BooleanLiteralGrammar"),
),
Sequence(
"INCLUDENEW",
Ref("EqualsSegment", optional=True),
Ref("BooleanLiteralGrammar"),
"FOR",
"SCHEMA",
Ref("SchemaReferenceSegment"),
),
),
),
),
)
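    # Illustrative statements this grammar should match (share/table names are placeholders):
    #   ALTER DATASHARE my_share ADD TABLE public.orders
    #   ALTER DATASHARE my_share SET PUBLICACCESSIBLE = TRUE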
@redshift_dialect.segment()
class CreateDatashareStatementSegment(BaseSegment):
"""A `CREATE DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_DATASHARE.html
"""
type = "create_datashare_statement"
match_grammar = Sequence(
"CREATE",
"DATASHARE",
Ref("ObjectReferenceSegment"),
Sequence(
Ref.keyword("SET", optional=True),
"PUBLICACCESSIBLE",
Ref("EqualsSegment", optional=True),
OneOf(
"TRUE",
"FALSE",
),
optional=True,
),
)
@redshift_dialect.segment()
class DescDatashareStatementSegment(BaseSegment):
"""A `DESC DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_DESC_DATASHARE.html
"""
type = "desc_datashare_statement"
match_grammar = Sequence(
"DESC",
"DATASHARE",
Ref("ObjectReferenceSegment"),
Sequence(
"OF",
Sequence(
"ACCOUNT",
Ref("QuotedLiteralSegment"),
optional=True,
),
"NAMESPACE",
Ref("QuotedLiteralSegment"),
optional=True,
),
)
@redshift_dialect.segment()
class DropDatashareStatementSegment(BaseSegment):
"""A `DROP DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_DATASHARE.html
"""
type = "drop_datashare_statement"
match_grammar = Sequence(
"DROP",
"DATASHARE",
Ref("ObjectReferenceSegment"),
)
@redshift_dialect.segment()
class ShowDatasharesStatementSegment(BaseSegment):
"""A `SHOW DATASHARES` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_DATASHARES.html
"""
type = "show_datashares_statement"
match_grammar = Sequence(
"SHOW",
"DATASHARES",
Sequence(
"LIKE",
Ref("QuotedLiteralSegment"),
optional=True,
),
)
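    # Illustrative statements this grammar should match:
    #   SHOW DATASHARES
    #   SHOW DATASHARES LIKE 'sales%'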
@redshift_dialect.segment()
class AnalyzeCompressionStatementSegment(BaseSegment):
"""An `ANALYZE COMPRESSION` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ANALYZE_COMPRESSION.html
"""
type = "analyze_compression_statement"
match_grammar = Sequence(
OneOf("ANALYZE", "ANALYSE"),
"COMPRESSION",
Sequence(
Ref("TableReferenceSegment"),
Bracketed(
Delimited(
Ref("ColumnReferenceSegment"),
),
optional=True,
),
Sequence(
"COMPROWS",
Ref("NumericLiteralSegment"),
optional=True,
),
optional=True,
),
)
@redshift_dialect.segment()
class VacuumStatementSegment(BaseSegment):
"""A `VACUUM` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_VACUUM_command.html
"""
type = "vacuum_statement"
match_grammar = Sequence(
"VACUUM",
OneOf(
"FULL",
"REINDEX",
"RECLUSTER",
Sequence(
OneOf(
"SORT",
"DELETE",
),
"ONLY",
),
optional=True,
),
Ref("TableReferenceSegment", optional=True),
Sequence(
"TO",
Ref("NumericLiteralSegment"),
"PERCENT",
optional=True,
),
Ref.keyword("BOOST", optional=True),
)
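    # Illustrative statements this grammar should match (table name is a placeholder):
    #   VACUUM FULL my_table
    #   VACUUM SORT ONLY my_table TO 75 PERCENT BOOST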
# Adding Redshift specific statements
@redshift_dialect.segment(replace=True)
class StatementSegment(
postgres_dialect.get_segment("StatementSegment") # type: ignore
):
"""A generic segment, to any of its child subsegments."""
type = "statement"
parse_grammar = redshift_dialect.get_segment("StatementSegment").parse_grammar.copy(
insert=[
Ref("CreateLibraryStatementSegment"),
Ref("CreateUserStatementSegment"),
Ref("CreateGroupStatementSegment"),
Ref("AlterUserStatementSegment"),
Ref("AlterGroupStatementSegment"),
Ref("CreateExternalTableAsStatementSegment"),
Ref("CreateExternalTableStatementSegment"),
Ref("DataFormatSegment"),
Ref("UnloadStatementSegment"),
Ref("CopyStatementSegment"),
Ref("ShowModelStatementSegment"),
Ref("CreateDatashareStatementSegment"),
Ref("DescDatashareStatementSegment"),
Ref("DropDatashareStatementSegment"),
Ref("ShowDatasharesStatementSegment"),
Ref("AltereDatashareStatementSegment"),
Ref("DeclareStatementSegment"),
Ref("FetchStatementSegment"),
Ref("CloseStatementSegment"),
Ref("AnalyzeCompressionStatementSegment"),
Ref("VacuumStatementSegment"),
Ref("AlterProcedureStatementSegment"),
],
)
match_grammar = redshift_dialect.get_segment(
"StatementSegment"
).match_grammar.copy()
@redshift_dialect.segment()
class PartitionedBySegment(BaseSegment):
"""Partitioned By Segment.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "partitioned_by_segment"
match_grammar = Sequence(
Ref.keyword("PARTITIONED"),
"BY",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("DatatypeSegment"),
),
),
),
)
@redshift_dialect.segment()
class RowFormatDelimitedSegment(BaseSegment):
"""Row Format Delimited Segment.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "row_format_deimited_segment"
match_grammar = AnySetOf(
Sequence(
"FIELDS",
"TERMINATED",
"BY",
Ref("QuotedLiteralSegment"),
),
Sequence(
"LINES",
"TERMINATED",
"BY",
Ref("QuotedLiteralSegment"),
),
optional=True,
)
@redshift_dialect.segment()
class CreateUserStatementSegment(BaseSegment):
"""`CREATE USER` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html
"""
type = "create_user"
match_grammar = Sequence(
"CREATE",
"USER",
Ref("ObjectReferenceSegment"),
Ref.keyword("WITH", optional=True),
"PASSWORD",
OneOf(Ref("QuotedLiteralSegment"), "DISABLE"),
AnySetOf(
OneOf(
"CREATEDB",
"NOCREATEDB",
),
OneOf(
"CREATEUSER",
"NOCREATEUSER",
),
Sequence(
"SYSLOG",
"ACCESS",
OneOf(
"RESTRICTED",
"UNRESTRICTED",
),
),
Sequence("IN", "GROUP", Delimited(Ref("ObjectReferenceSegment"))),
Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")),
Sequence(
"CONNECTION",
"LIMIT",
OneOf(
Ref("NumericLiteralSegment"),
"UNLIMITED",
),
),
Sequence(
"SESSION",
"TIMEOUT",
Ref("NumericLiteralSegment"),
),
),
)
@redshift_dialect.segment()
class CreateGroupStatementSegment(BaseSegment):
"""`CREATE GROUP` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_GROUP.html
"""
type = "create_group"
match_grammar = Sequence(
"CREATE",
"GROUP",
Ref("ObjectReferenceSegment"),
Sequence(
Ref.keyword("WITH", optional=True),
"USER",
Delimited(
Ref("ObjectReferenceSegment"),
),
optional=True,
),
)
@redshift_dialect.segment()
class AlterUserStatementSegment(BaseSegment):
"""`ALTER USER` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_USER.html
"""
type = "alter_user"
match_grammar = Sequence(
"ALTER",
"USER",
Ref("ObjectReferenceSegment"),
Ref.keyword("WITH", optional=True),
AnySetOf(
OneOf(
"CREATEDB",
"NOCREATEDB",
),
OneOf(
"CREATEUSER",
"NOCREATEUSER",
),
Sequence(
"SYSLOG",
"ACCESS",
OneOf(
"RESTRICTED",
"UNRESTRICTED",
),
),
Sequence(
"PASSWORD",
OneOf(
Ref("QuotedLiteralSegment"),
"DISABLE",
),
Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment"), optional=True),
),
Sequence(
"RENAME",
"TO",
Ref("ObjectReferenceSegment"),
),
Sequence(
"CONNECTION",
"LIMIT",
OneOf(
Ref("NumericLiteralSegment"),
"UNLIMITED",
),
),
OneOf(
Sequence(
"SESSION",
"TIMEOUT",
Ref("NumericLiteralSegment"),
),
Sequence(
"RESET",
"SESSION",
"TIMEOUT",
),
),
OneOf(
Sequence(
"SET",
Ref("ObjectReferenceSegment"),
OneOf(
"TO",
Ref("EqualsSegment"),
),
OneOf(
"DEFAULT",
Ref("LiteralGrammar"),
),
),
Sequence(
"RESET",
Ref("ObjectReferenceSegment"),
),
),
min_times=1,
),
)
@redshift_dialect.segment()
class AlterGroupStatementSegment(BaseSegment):
"""`ALTER GROUP` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_GROUP.html
"""
type = "alter_group"
match_grammar = Sequence(
"ALTER",
"GROUP",
Ref("ObjectReferenceSegment"),
OneOf(
Sequence(
OneOf("ADD", "DROP"),
"USER",
Delimited(
Ref("ObjectReferenceSegment"),
),
),
Sequence(
"RENAME",
"TO",
Ref("ObjectReferenceSegment"),
),
),
)
@redshift_dialect.segment(replace=True)
class TransactionStatementSegment(BaseSegment):
"""A `BEGIN|START`, `COMMIT|END` or `ROLLBACK|ABORT` transaction statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_BEGIN.html
"""
type = "transaction_statement"
match_grammar = Sequence(
OneOf("BEGIN", "START", "COMMIT", "END", "ROLLBACK", "ABORT"),
OneOf("TRANSACTION", "WORK", optional=True),
Sequence(
"ISOLATION",
"LEVEL",
OneOf(
"SERIALIZABLE",
Sequence("READ", "COMMITTED"),
Sequence("READ", "UNCOMMITTED"),
Sequence("REPEATABLE", "READ"),
),
optional=True,
),
OneOf(
Sequence("READ", "ONLY"),
Sequence("READ", "WRITE"),
optional=True,
),
)
| [
[
[
231,
236
],
[
3653,
3658
],
[
3751,
3756
],
[
3837,
3842
],
[
4931,
4936
],
[
5046,
5051
],
[
5733,
5738
],
[
7389,
7394
],
[
7462,
7467
],
[
7511,
7516
],
[
7588,
7593
],
[
8035,
8040
],
[
8375,
8380
],
[
8575,
8580
],
[
8620,
8625
],
[
8900,
8905
],
[
9379,
9384
],
[
9605,
9610
],
[
10299,
10304
],
[
10915,
10920
],
[
11585,
11590
],
[
11658,
11663
],
[
13446,
13451
],
[
13812,
13817
],
[
13860,
13865
],
[
14388,
14393
],
[
14535,
14540
],
[
14580,
14585
],
[
15938,
15943
],
[
16527,
16532
],
[
17264,
17269
],
[
17787,
17792
],
[
18078,
18083
],
[
18671,
18676
],
[
19504,
19509
],
[
19659,
19664
],
[
19820,
19825
],
[
20014,
20019
],
[
21234,
21239
],
[
22156,
22161
],
[
23004,
23009
],
[
23923,
23928
],
[
25376,
25381
],
[
27661,
27666
],
[
28088,
28093
],
[
28347,
28352
],
[
29270,
29275
],
[
29479,
29484
],
[
30991,
30996
],
[
32010,
32015
],
[
32325,
32330
],
[
33020,
33025
],
[
33340,
33345
],
[
34473,
34478
],
[
35444,
35449
],
[
35666,
35671
],
[
36201,
36206
],
[
36697,
36702
],
[
36800,
36805
],
[
37365,
37370
],
[
37498,
37503
],
[
37582,
37587
],
[
37963,
37968
],
[
38063,
38068
],
[
39466,
39471
],
[
39624,
39629
],
[
39651,
39656
],
[
41060,
41065
],
[
41171,
41176
],
[
42034,
42039
],
[
42132,
42137
],
[
42231,
42236
],
[
42779,
42784
],
[
43113,
43118
],
[
44221,
44226
],
[
46046,
46051
],
[
46781,
46786
],
[
46894,
46899
],
[
50369,
50374
],
[
50446,
50451
],
[
50538,
50543
],
[
50712,
50717
],
[
51064,
51069
],
[
52267,
52272
],
[
52359,
52364
],
[
52533,
52538
],
[
52710,
52715
],
[
53144,
53149
],
[
53280,
53285
],
[
53607,
53612
],
[
53738,
53743
],
[
53864,
53869
],
[
54503,
54508
],
[
54548,
54553
],
[
55172,
55177
],
[
55243,
55248
],
[
55364,
55369
],
[
55608,
55613
]
],
[
[
242,
253
],
[
4809,
4820
],
[
16764,
16775
],
[
16847,
16858
],
[
17139,
17150
],
[
17323,
17334
],
[
37469,
37480
],
[
37826,
37837
],
[
37926,
37937
]
],
[
[
259,
267
],
[
3316,
3324
],
[
11567,
11575
],
[
12939,
12947
],
[
13794,
13802
],
[
14348,
14356
],
[
15161,
15169
],
[
26509,
26517
],
[
27145,
27153
],
[
29244,
29252
],
[
30612,
30620
],
[
49669,
49677
],
[
50424,
50432
],
[
52245,
52253
]
],
[
[
273,
281
],
[
20640,
20648
]
],
[
[
287,
290
],
[
3338,
3341
],
[
3385,
3388
],
[
3466,
3469
],
[
4765,
4768
],
[
4821,
4824
],
[
4863,
4866
],
[
4950,
4953
],
[
4978,
4981
],
[
4997,
5000
],
[
5122,
5125
],
[
5155,
5158
],
[
5195,
5198
],
[
5237,
5240
],
[
5270,
5273
],
[
5307,
5310
],
[
5878,
5881
],
[
5948,
5951
],
[
6121,
6124
],
[
6180,
6183
],
[
6717,
6720
],
[
6758,
6761
],
[
6843,
6846
],
[
7016,
7019
],
[
7070,
7073
],
[
8453,
8456
],
[
8931,
8934
],
[
9167,
9170
],
[
9210,
9213
],
[
9769,
9772
],
[
10219,
10222
],
[
10426,
10429
],
[
10480,
10483
],
[
10707,
10710
],
[
10763,
10766
],
[
10984,
10987
],
[
11038,
11041
],
[
11716,
11719
],
[
11817,
11820
],
[
11900,
11903
],
[
11950,
11953
],
[
12065,
12068
],
[
12147,
12150
],
[
12259,
12262
],
[
12452,
12455
],
[
12585,
12588
],
[
12977,
12980
],
[
13078,
13081
],
[
13279,
13282
],
[
13349,
13352
],
[
13961,
13964
],
[
14013,
14016
],
[
14479,
14482
],
[
14692,
14695
],
[
15218,
15221
],
[
15344,
15347
],
[
15480,
15483
],
[
15551,
15554
],
[
15612,
15615
],
[
16306,
16309
],
[
16351,
16354
],
[
16416,
16419
],
[
16466,
16469
],
[
16661,
16664
],
[
16716,
16719
],
[
16776,
16779
],
[
16859,
16862
],
[
16950,
16953
],
[
17089,
17092
],
[
17151,
17154
],
[
17335,
17338
],
[
17738,
17741
],
[
17878,
17881
],
[
17967,
17970
],
[
18122,
18125
],
[
18209,
18212
],
[
18590,
18593
],
[
18694,
18697
],
[
18749,
18752
],
[
18792,
18795
],
[
18928,
18931
],
[
19051,
19054
],
[
19131,
19134
],
[
19266,
19269
],
[
19383,
19386
],
[
19554,
19557
],
[
20245,
20248
],
[
20371,
20374
],
[
20922,
20925
],
[
21043,
21046
],
[
21490,
21493
],
[
21675,
21678
],
[
22194,
22197
],
[
22601,
22604
],
[
22761,
22764
],
[
22812,
22815
],
[
22889,
22892
],
[
23090,
23093
],
[
23218,
23221
],
[
23504,
23507
],
[
23569,
23572
],
[
23627,
23630
],
[
24135,
24138
],
[
24212,
24215
],
[
24295,
24298
],
[
24493,
24496
],
[
24546,
24549
],
[
24592,
24595
],
[
25086,
25089
],
[
25124,
25127
],
[
25264,
25267
],
[
25469,
25472
],
[
25667,
25670
],
[
25720,
25723
],
[
25766,
25769
],
[
25932,
25935
],
[
26376,
26379
],
[
26472,
26475
],
[
26531,
26534
],
[
26640,
26643
],
[
26690,
26693
],
[
27056,
27059
],
[
27108,
27111
],
[
27167,
27170
],
[
27276,
27279
],
[
27326,
27329
],
[
27413,
27416
],
[
27557,
27560
],
[
27908,
27911
],
[
27968,
27971
],
[
28433,
28436
],
[
28487,
28490
],
[
28615,
28618
],
[
28669,
28672
],
[
28829,
28832
],
[
28998,
29001
],
[
29157,
29160
],
[
29383,
29386
],
[
29433,
29436
],
[
29670,
29673
],
[
29764,
29767
],
[
29822,
29825
],
[
29875,
29878
],
[
29925,
29928
],
[
29978,
29981
],
[
30454,
30457
],
[
30492,
30495
],
[
30575,
30578
],
[
30634,
30637
],
[
30743,
30746
],
[
30793,
30796
],
[
30880,
30883
],
[
30938,
30941
],
[
31077,
31080
],
[
31131,
31134
],
[
31259,
31262
],
[
31313,
31316
],
[
31474,
31477
],
[
31568,
31571
],
[
31674,
31677
],
[
31816,
31819
],
[
31866,
31869
],
[
32554,
32557
],
[
32604,
32607
],
[
32721,
32724
],
[
32771,
32774
],
[
32858,
32861
],
[
32970,
32973
],
[
33075,
33078
],
[
33181,
33184
],
[
33290,
33293
],
[
33543,
33546
],
[
33593,
33596
],
[
33649,
33652
],
[
33703,
33706
],
[
33821,
33824
],
[
33871,
33874
],
[
34030,
34033
],
[
34172,
34175
],
[
34260,
34263
],
[
34316,
34319
],
[
34423,
34426
],
[
34599,
34602
],
[
34705,
34708
],
[
34759,
34762
],
[
35406,
35409
],
[
35483,
35486
],
[
35591,
35594
],
[
35693,
35696
],
[
35761,
35764
],
[
36246,
36249
],
[
36304,
36307
],
[
36418,
36421
],
[
36589,
36592
],
[
36750,
36753
],
[
37384,
37387
],
[
37525,
37528
],
[
37601,
37604
],
[
37695,
37698
],
[
37885,
37888
],
[
37998,
38001
],
[
38082,
38085
],
[
38200,
38203
],
[
38927,
38930
],
[
38963,
38966
],
[
39009,
39012
],
[
39369,
39372
],
[
39405,
39408
],
[
39510,
39513
],
[
39657,
39660
],
[
39686,
39689
],
[
40169,
40172
],
[
40265,
40268
],
[
40309,
40312
],
[
40680,
40683
],
[
40752,
40755
],
[
41225,
41228
],
[
41324,
41327
],
[
41637,
41640
],
[
41995,
41998
],
[
42335,
42338
],
[
42487,
42490
],
[
42642,
42645
],
[
42907,
42910
],
[
43218,
43221
],
[
43279,
43282
],
[
43424,
43427
],
[
43485,
43488
],
[
43604,
43607
],
[
44032,
44035
],
[
44093,
44096
],
[
44172,
44175
],
[
44649,
44652
],
[
44781,
44784
],
[
44893,
44896
],
[
45279,
45282
],
[
45676,
45679
],
[
46128,
46131
],
[
46228,
46231
],
[
46390,
46393
],
[
47063,
47066
],
[
47156,
47159
],
[
47255,
47258
],
[
47679,
47682
],
[
47729,
47732
],
[
47776,
47779
],
[
47824,
47827
],
[
47870,
47873
],
[
47917,
47920
],
[
47975,
47978
],
[
48031,
48034
],
[
48069,
48072
],
[
48112,
48115
],
[
48153,
48156
],
[
48199,
48202
],
[
48251,
48254
],
[
48301,
48304
],
[
48351,
48354
],
[
48402,
48405
],
[
48454,
48457
],
[
48498,
48501
],
[
48540,
48543
],
[
48582,
48585
],
[
48637,
48640
],
[
48680,
48683
],
[
49128,
49131
],
[
49258,
49261
],
[
49309,
49312
],
[
49775,
49778
],
[
49910,
49913
],
[
50266,
50269
],
[
50305,
50308
],
[
50375,
50378
],
[
50869,
50872
],
[
50941,
50944
],
[
51091,
51094
],
[
51280,
51283
],
[
51634,
51637
],
[
51695,
51698
],
[
51790,
51793
],
[
52162,
52165
],
[
52201,
52204
],
[
52737,
52740
],
[
52859,
52862
],
[
53005,
53008
],
[
53171,
53174
],
[
53395,
53398
],
[
53687,
53690
],
[
53799,
53802
],
[
53930,
53933
],
[
54070,
54073
],
[
54464,
54467
],
[
54641,
54644
],
[
54792,
54795
]
],
[
[
296,
304
],
[
3298,
3306
],
[
4743,
4751
],
[
4969,
4977
],
[
5674,
5682
],
[
5711,
5719
],
[
5852,
5860
],
[
6083,
6091
],
[
6613,
6621
],
[
6671,
6679
],
[
6978,
6986
],
[
7440,
7448
],
[
7502,
7510
],
[
8228,
8236
],
[
8353,
8361
],
[
8594,
8602
],
[
8768,
8776
],
[
9145,
9153
],
[
9583,
9591
],
[
9684,
9692
],
[
10157,
10165
],
[
10175,
10183
],
[
10318,
10326
],
[
10367,
10375
],
[
10590,
10598
],
[
10645,
10653
],
[
10889,
10897
],
[
10954,
10962
],
[
11604,
11612
],
[
11791,
11799
],
[
12006,
12014
],
[
12192,
12200
],
[
12404,
12412
],
[
12527,
12535
],
[
12957,
12965
],
[
13012,
13020
],
[
13129,
13137
],
[
13330,
13338
],
[
13426,
13434
],
[
13818,
13826
],
[
13876,
13884
],
[
13913,
13921
],
[
14366,
14374
],
[
14449,
14457
],
[
14554,
14562
],
[
14752,
14760
],
[
14827,
14835
],
[
15179,
15187
],
[
15260,
15268
],
[
15396,
15404
],
[
15593,
15601
],
[
15929,
15937
],
[
16270,
16278
],
[
16627,
16635
],
[
17031,
17039
],
[
17245,
17253
],
[
17680,
17688
],
[
17716,
17724
],
[
18059,
18067
],
[
18537,
18545
],
[
18629,
18637
],
[
18884,
18892
],
[
19005,
19013
],
[
19221,
19229
],
[
19336,
19344
],
[
19458,
19466
],
[
19617,
19625
],
[
19772,
19780
],
[
19964,
19972
],
[
20198,
20206
],
[
20320,
20328
],
[
20446,
20454
],
[
20522,
20530
],
[
20786,
20794
],
[
20859,
20867
],
[
20971,
20979
],
[
21154,
21162
],
[
21419,
21427
],
[
21602,
21610
],
[
22105,
22113
],
[
22528,
22536
],
[
22731,
22739
],
[
22941,
22949
],
[
23027,
23035
],
[
23159,
23167
],
[
23267,
23275
],
[
23458,
23466
],
[
24078,
24086
],
[
24332,
24340
],
[
24459,
24467
],
[
25013,
25021
],
[
25176,
25184
],
[
25506,
25514
],
[
25633,
25641
],
[
26224,
26232
],
[
26260,
26268
],
[
26588,
26596
],
[
27010,
27018
],
[
27224,
27232
],
[
27471,
27479
],
[
27497,
27505
],
[
27831,
27839
],
[
28034,
28042
],
[
28370,
28378
],
[
28551,
28559
],
[
28775,
28783
],
[
28926,
28934
],
[
29085,
29093
],
[
29615,
29623
],
[
30420,
30428
],
[
30691,
30699
],
[
31014,
31022
],
[
31195,
31203
],
[
31419,
31427
],
[
31620,
31628
],
[
31762,
31770
],
[
31954,
31962
],
[
32269,
32277
],
[
32661,
32669
],
[
32914,
32922
],
[
33236,
33244
],
[
33763,
33771
],
[
33958,
33966
],
[
34117,
34125
],
[
34367,
34375
],
[
35354,
35362
],
[
35522,
35530
],
[
35565,
35573
],
[
36147,
36155
],
[
36220,
36228
],
[
36351,
36359
],
[
36530,
36538
],
[
36654,
36662
],
[
36720,
36728
],
[
37447,
37455
],
[
37855,
37863
],
[
38820,
38828
],
[
38856,
38864
],
[
39313,
39321
],
[
39485,
39493
],
[
39551,
39559
],
[
40114,
40122
],
[
40239,
40247
],
[
40643,
40651
],
[
41025,
41033
],
[
41118,
41126
],
[
41602,
41610
],
[
41939,
41947
],
[
42106,
42114
],
[
42258,
42266
],
[
42409,
42417
],
[
42562,
42570
],
[
42714,
42722
],
[
43064,
43072
],
[
43140,
43148
],
[
43352,
43360
],
[
43975,
43983
],
[
44071,
44079
],
[
44594,
44602
],
[
44688,
44696
],
[
44728,
44736
],
[
45224,
45232
],
[
45578,
45586
],
[
45634,
45642
],
[
46028,
46036
],
[
46106,
46114
],
[
46336,
46344
],
[
46745,
46753
],
[
46868,
46876
],
[
47116,
47124
],
[
49110,
49118
],
[
49228,
49236
],
[
49687,
49695
],
[
49823,
49831
],
[
50214,
50222
],
[
50634,
50642
],
[
50835,
50843
],
[
50914,
50922
],
[
50983,
50991
],
[
51200,
51208
],
[
51581,
51589
],
[
51673,
51681
],
[
52111,
52119
],
[
52455,
52463
],
[
52656,
52664
],
[
52832,
52840
],
[
52931,
52939
],
[
53063,
53071
],
[
53303,
53311
],
[
53460,
53468
],
[
53630,
53638
],
[
54011,
54019
],
[
54412,
54420
],
[
54522,
54530
],
[
54718,
54726
],
[
55154,
55162
],
[
55296,
55304
],
[
55419,
55427
],
[
55466,
55474
],
[
55515,
55523
],
[
55627,
55635
],
[
55665,
55673
]
],
[
[
310,
319
],
[
5829,
5838
],
[
6017,
6026
],
[
6648,
6657
],
[
6912,
6921
],
[
8416,
8425
],
[
8869,
8878
],
[
9742,
9751
],
[
13058,
13067
],
[
13259,
13268
],
[
14003,
14012
],
[
14469,
14478
],
[
14672,
14681
],
[
15198,
15207
],
[
15324,
15333
],
[
15460,
15469
],
[
15602,
15611
],
[
16504,
16513
],
[
17917,
17926
],
[
18739,
18748
],
[
19094,
19103
],
[
20574,
20583
],
[
20832,
20841
],
[
22639,
22648
],
[
23376,
23385
],
[
24401,
24410
],
[
25575,
25584
],
[
27046,
27055
],
[
37428,
37437
],
[
46170,
46179
],
[
49178,
49187
]
],
[
[
325,
336
],
[
5445,
5456
],
[
6389,
6400
],
[
7288,
7299
],
[
7833,
7844
],
[
9916,
9927
],
[
11302,
11313
],
[
12717,
12728
],
[
13570,
13581
],
[
14135,
14146
],
[
14947,
14958
],
[
15728,
15739
],
[
16059,
16070
],
[
17465,
17476
],
[
18318,
18329
],
[
21907,
21918
],
[
22314,
22325
],
[
24796,
24807
],
[
26031,
26042
],
[
26841,
26852
],
[
34894,
34905
],
[
35916,
35927
],
[
37093,
37104
],
[
38501,
38512
],
[
39116,
39127
],
[
39920,
39931
],
[
40454,
40465
],
[
40842,
40853
],
[
41419,
41430
],
[
41742,
41753
],
[
43777,
43788
],
[
44402,
44413
],
[
45032,
45043
],
[
45383,
45394
],
[
45820,
45831
],
[
46569,
46580
],
[
48898,
48909
],
[
49446,
49457
],
[
50042,
50053
],
[
51406,
51417
],
[
51942,
51953
],
[
54240,
54251
],
[
54931,
54942
]
],
[
[
342,
351
],
[
4724,
4733
],
[
6048,
6057
],
[
6943,
6952
],
[
8443,
8452
],
[
13068,
13077
],
[
13269,
13278
],
[
14682,
14691
],
[
15208,
15217
],
[
15334,
15343
],
[
15470,
15479
],
[
16596,
16605
],
[
17940,
17949
],
[
19121,
19130
],
[
20605,
20614
],
[
22704,
22713
],
[
23415,
23424
],
[
24428,
24437
],
[
25602,
25611
],
[
40216,
40225
],
[
42325,
42334
],
[
42477,
42486
],
[
42632,
42641
],
[
42897,
42906
],
[
46197,
46206
],
[
49201,
49210
],
[
50859,
50868
],
[
51763,
51772
],
[
54610,
54619
]
],
[
[
357,
364
],
[
3261,
3268
]
],
[
[
370,
389
],
[
6697,
6716
],
[
18189,
18208
],
[
25912,
25931
],
[
35463,
35482
],
[
35741,
35760
]
],
[
[
395,
404
],
[
4712,
4721
]
],
[
[
443,
459
],
[
606,
622
],
[
650,
666
]
],
[
[
522,
548
],
[
1064,
1090
]
],
[
[
554,
582
],
[
885,
913
]
],
[
[
587,
603
],
[
694,
710
],
[
30107,
30123
],
[
47406,
47422
]
],
[
[
635,
647
]
],
[
[
675,
691
],
[
747,
763
],
[
800,
816
],
[
930,
946
],
[
981,
997
],
[
1107,
1123
],
[
1155,
1171
],
[
1232,
1248
],
[
1454,
1470
],
[
3202,
3218
],
[
3547,
3563
],
[
3604,
3620
],
[
4254,
4270
],
[
5383,
5399
],
[
6329,
6345
],
[
7220,
7236
],
[
7772,
7788
],
[
9865,
9881
],
[
11248,
11264
],
[
12661,
12677
],
[
13501,
13517
],
[
14080,
14096
],
[
14879,
14895
],
[
15665,
15681
],
[
15986,
16002
],
[
17390,
17406
],
[
18245,
18261
],
[
21848,
21864
],
[
22245,
22261
],
[
24725,
24741
],
[
25968,
25984
],
[
26785,
26801
],
[
30036,
30052
],
[
34826,
34842
],
[
35842,
35858
],
[
37030,
37046
],
[
38424,
38440
],
[
39052,
39068
],
[
39845,
39861
],
[
40397,
40413
],
[
40787,
40803
],
[
41364,
41380
],
[
41677,
41693
],
[
43712,
43728
],
[
44339,
44355
],
[
44969,
44985
],
[
45319,
45335
],
[
45752,
45768
],
[
46513,
46529
],
[
47339,
47355
],
[
47581,
47597
],
[
48757,
48773
],
[
48844,
48860
],
[
49387,
49403
],
[
49982,
49998
],
[
51345,
51361
],
[
51883,
51899
],
[
54180,
54196
],
[
54858,
54874
]
],
[
[
3522,
3544
],
[
4322,
4344
]
],
[
[
4299,
4321
]
],
[
[
5416,
5444
]
],
[
[
6362,
6388
]
],
[
[
7265,
7287
]
],
[
[
7817,
7832
]
],
[
[
9898,
9915
]
],
[
[
11281,
11301
]
],
[
[
12694,
12716
]
],
[
[
13546,
13569
]
],
[
[
14113,
14134
]
],
[
[
14924,
14946
]
],
[
[
15710,
15727
]
],
[
[
16031,
16058
]
],
[
[
17435,
17464
]
],
[
[
18290,
18317
]
],
[
[
21881,
21906
]
],
[
[
22278,
22313
]
],
[
[
24758,
24795
]
],
[
[
26001,
26030
]
],
[
[
26818,
26840
]
],
[
[
30081,
30101
]
],
[
[
34871,
34893
]
],
[
[
35887,
35915
]
],
[
[
37063,
37092
]
],
[
[
38469,
38500
]
],
[
[
39085,
39115
]
],
[
[
39890,
39919
]
],
[
[
40430,
40453
]
],
[
[
40820,
40841
]
],
[
[
41397,
41418
]
],
[
[
41710,
41741
]
],
[
[
43745,
43776
]
],
[
[
44372,
44401
]
],
[
[
45002,
45031
]
],
[
[
45352,
45382
]
],
[
[
45785,
45819
]
],
[
[
46546,
46568
]
],
[
[
47384,
47400
]
],
[
[
48877,
48897
]
],
[
[
49420,
49445
]
],
[
[
50015,
50041
]
],
[
[
51378,
51405
]
],
[
[
51916,
51941
]
],
[
[
54213,
54239
]
],
[
[
54903,
54930
]
]
] |
import os
import cv2
import imutils
import numpy as np
from imutils import contours
from imutils import perspective
from scipy.spatial import distance as dist
def detect_shape(filepath, min_width=15, debug=False):
image = cv2.imread(filepath, 0)
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
'''
blurred = cv2.GaussianBlur(resized, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
'''
gray = cv2.bilateralFilter(resized, 1, 10, 120 )
edges = cv2.Canny( gray, 10, 250 )
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
closed = cv2.morphologyEx( edges, cv2.MORPH_CLOSE, kernel )
'''
cnts = cv2.findContours( closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
gray = cv2.GaussianBlur(resized, (7, 7), 0)
edged = cv2.Canny(gray, 10, 250)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
'''
cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
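    # Note: cv2.findContours returns (image, contours, hierarchy) on OpenCV 3.x but
    # only (contours, hierarchy) on OpenCV 2.4 and 4.x, so the indexing on the next
    # line is version dependent; imutils.grab_contours(cnts) is a version-agnostic
    # way to pull out the contour list.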
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
shapes = dict()
print(len(cnts))
for idx, c in enumerate(cnts):
try :
perimeter = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.1 * perimeter, True)
if len(approx) == 4:
(x, y, w, h) = cv2.boundingRect(approx)
shapes["rect_{}".format(idx)] = (x, y, w, h)
if(debug == True):
M = cv2.moments(c)
cX = int((M["m10"] / M["m00"]) * ratio)
cY = int((M["m01"] / M["m00"]) * ratio)
c = c.astype("float")
c *= ratio
c = c.astype("int")
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.putText(image, "square", (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 300,300)
cv2.imshow("image", image)
cv2.waitKey(0)
except :
pass
return shapes
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
def min_dif(list1, list2):
    # Return the index in list2 whose value is closest (smallest absolute
    # difference) to some value in list1.
    min_d, ind = 1000000, -1
    for i in range(0, len(list1)):
        for j in range(0, len(list2)):
            if abs(list1[i] - list2[j]) < min_d:
                ind = j
                min_d = abs(list1[i] - list2[j])
    return ind
def object_size(filepath, left_width=15):
image = cv2.imread(filepath, 0)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(image, (7, 7), 0)
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
# NOTE : Contour - Outlines
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None
dimensions = list()
for c in cnts:
if cv2.contourArea(c) < 100:
continue
orig = image.copy()
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
box = perspective.order_points(box)
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# draw lines between the midpoints
cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 2)
cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
if pixelsPerMetric is None:
pixelsPerMetric = dB / left_width
dimA = dA / pixelsPerMetric
dimB = dB / pixelsPerMetric
cv2.putText(orig, "{:.1f}in".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
cv2.putText(orig, "{:.1f}in".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 300,300)
cv2.imshow("image", orig)
cv2.waitKey(0)
dimensions.append((dimA, dimB))
max_dim = [-1, -1]
for dims in dimensions:
if(dims[0] * dims[1] > max_dim[0] * max_dim[1] and left_width not in dims):
max_dim[0] = dims[0]
max_dim[1] = dims[1]
return max_dim
def weight(file1, file2, left_width=21, const_div=6000.0): # left_width = A4 Size
size1 = object_size(file1, left_width)
size2 = object_size(file2, left_width)
rem_ind = min_dif(size1, size2)
weight = (size1[0] * size1[1] * size2[1-rem_ind]) / const_div
return weight
if __name__ == '__main__':
print(detect_shape("img.jpg", debug=True))
| [
[
[
7,
9
]
],
[
[
17,
20
],
[
228,
231
],
[
498,
501
],
[
553,
556
],
[
593,
596
],
[
619,
622
],
[
656,
659
],
[
681,
684
],
[
1001,
1004
],
[
1033,
1036
],
[
1052,
1055
],
[
1248,
1251
],
[
1292,
1295
],
[
1399,
1402
],
[
1544,
1547
],
[
1812,
1815
],
[
1881,
1884
],
[
1920,
1923
],
[
1991,
1994
],
[
2016,
2019
],
[
2055,
2058
],
[
2110,
2113
],
[
2157,
2160
],
[
2629,
2632
],
[
2718,
2721
],
[
2770,
2773
],
[
2807,
2810
],
[
2857,
2860
],
[
2938,
2941
],
[
2969,
2972
],
[
2988,
2991
],
[
3192,
3195
],
[
3281,
3284
],
[
3314,
3317
],
[
3361,
3364
],
[
3674,
3677
],
[
3745,
3748
],
[
3816,
3819
],
[
3887,
3890
],
[
4003,
4006
],
[
4096,
4099
],
[
4467,
4470
],
[
4546,
4549
],
[
4606,
4609
],
[
4680,
4683
],
[
4740,
4743
],
[
4765,
4768
],
[
4792,
4795
],
[
4835,
4838
],
[
4869,
4872
]
],
[
[
28,
35
],
[
267,
274
],
[
1099,
1106
],
[
3035,
3042
],
[
3339,
3346
]
],
[
[
43,
54
],
[
3394,
3396
]
],
[
[
75,
83
],
[
3081,
3089
]
],
[
[
104,
115
],
[
3435,
3446
]
],
[
[
142,
158
],
[
4195,
4199
],
[
4255,
4259
]
],
[
[
165,
177
],
[
5473,
5485
]
],
[
[
2230,
2238
],
[
3522,
3530
],
[
3564,
3572
],
[
3606,
3614
],
[
3648,
3656
]
],
[
[
2317,
2324
],
[
5329,
5336
]
],
[
[
2579,
2590
],
[
5241,
5252
],
[
5284,
5295
]
],
[
[
5151,
5157
]
]
] |
import numpy as np
def euclidean_distance(p1,p2):
"""
returns euclidean distance between matrices
@params:
p1, p2: np.ndarray
matrices to perform operation to.
"""
return np.sqrt(np.sum((p1-p2)**2, axis=1))
def entropy(p):
"""
    Measurement of uncertainty used when constructing the decision tree
@params:
p: float
"""
if p == 0:
return 0
elif p == 1:
return 0
else:
return -(p * np.log2(p) + (1 - p) * np.log2(1 - p))
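# Worked example (illustrative): entropy(0.5) == 1.0 bit, the maximum uncertainty
# for a binary split, while entropy(0.0) and entropy(1.0) are both 0.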
def information_gain(left_child, right_child):
"""
measurement of how much info we gained when splitting a node
using our entropy method.
@def:
takes in a list of classes from left and right child to return
the information gain of our curr split
@params:
        left_child: list
            class labels (0/1) in the left child
        right_child: list
            class labels (0/1) in the right child
"""
parent = left_child + right_child
p_par = parent.count(1) / len(parent) if len(parent) > 0 else 0
p_left = left_child.count(1) / len(left_child) if len(left_child) \
> 0 else 0
p_right = right_child.count(1) / len(right_child) if len(right_child) \
> 0 else 0
    infogain_p = entropy(p_par)
    infogain_l = entropy(p_left)
    infogain_r = entropy(p_right)
return infogain_p - len(left_child) / len(parent) * infogain_l - \
len(right_child) / len(parent) * infogain_r
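# Worked example (illustrative, assuming the children are plain lists of 0/1 class
# labels): information_gain([1, 1, 0], [0, 0]) = entropy(0.4) - (3/5)*entropy(2/3)
# - (2/5)*entropy(0.0) ~= 0.971 - 0.551 - 0.0 ~= 0.42.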
| [
[
[
7,
18
],
[
184,
186
],
[
192,
194
],
[
433,
435
],
[
456,
458
]
],
[
[
25,
43
]
],
[
[
226,
233
]
],
[
[
478,
494
]
]
] |
from django.contrib import admin
from friends.models import FriendRequest
# Register your models here.
admin.site.register(FriendRequest) | [
[
[
27,
32
],
[
104,
109
]
],
[
[
60,
73
],
[
124,
137
]
]
] |
import _plotly_utils.basevalidators
class ConnectgapsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="connectgaps", parent_name="scattermapbox", **kwargs
):
super(ConnectgapsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
| [
[
[
7,
35
],
[
65,
78
]
],
[
[
44,
64
],
[
231,
251
]
]
] |
#!/bin/python3
__author__ = "Adam Karl"
"""Find the sum of all primes less than or equal to N"""
#https://projecteuler.net/problem=10
from math import sqrt
isPrime = []
def sieve(n):
"""fills isPrime array with booleans for whether the number at isPrime[i] is prime or not"""
"""uses a process known as the sieve of eratosthenes"""
global isPrime
isPrime = [True for i in range(n+1)] #for numbers from 0 to n inclusive
isPrime[0] = False
isPrime[1] = False
index = 2
while index <= n:
if isPrime[index]: #found a prime number
multiplier = 2
while index * multiplier <= n:
isPrime[index * multiplier] = False #all multiples of the prime are not prime
multiplier += 1
index += 1
return isPrime
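# Worked example (illustrative): after sieve(10) the flags for 2, 3, 5 and 7 are
# True, so sumPrimes(10) returns 2 + 3 + 5 + 7 = 17.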
def sumPrimes(n):
"""given a list of n booleans on whether an index is prime or not,
return the sum of all primes <= index"""
s = 0
for index in range(1, n+1):
if isPrime[index]:
s += index
return s
def main():
print("Find the sum of all primes below: ", end="")
n = int(input().strip())
    sieve(n)  # populate the global isPrime list used by sumPrimes
print("Sum = %d" % sumPrimes(n))
if __name__ == "__main__":
main()
| [
[
[
16,
26
]
],
[
[
153,
157
]
],
[
[
159,
166
],
[
992,
999
]
],
[
[
177,
182
],
[
1156,
1161
]
],
[
[
809,
818
],
[
1206,
1215
]
],
[
[
1049,
1053
],
[
1252,
1256
]
],
[
[
367,
374
],
[
443,
450
],
[
466,
473
],
[
532,
539
],
[
656,
663
],
[
796,
803
]
]
] |
import torch
from syft.generic import object_storage
def test_clear_objects():
obj_storage = object_storage.ObjectStorage()
x = torch.tensor(1)
obj_storage.set_obj(x)
objs = obj_storage.current_objects()
assert len(objs) == 1
assert objs[x.id] == x
ret_val = obj_storage.clear_objects()
objs = obj_storage.current_objects()
assert len(objs) == 0
assert ret_val == obj_storage
def test_clear_objects_return_None():
obj_storage = object_storage.ObjectStorage()
x = torch.tensor(1)
obj_storage.set_obj(x)
objs = obj_storage.current_objects()
assert len(objs) == 1
assert objs[x.id] == x
ret_val = obj_storage.clear_objects(return_self=False)
objs = obj_storage.current_objects()
assert len(objs) == 0
assert ret_val is None
| [
[
[
7,
12
],
[
140,
145
],
[
522,
527
]
],
[
[
39,
53
],
[
100,
114
],
[
482,
496
]
],
[
[
60,
78
]
],
[
[
430,
460
]
]
] |
from ApiManager.utils.operation import add_project_data, add_module_data, add_case_data, add_config_data, \
add_register_data, bulk_import_data
from ApiManager.models import ModuleInfo
import yaml
'''Convert front-end test info into a dict'''
def key_value_dict(mode=3, **kwargs):
if not kwargs:
return None
sorted_kwargs = sorted(kwargs.items())
kwargs.clear()
if mode == 3:
half_index = len(sorted_kwargs) // 3
for value in range(half_index):
key = sorted_kwargs[value][1]
data_type = sorted_kwargs[value + 2 * half_index][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
try:
if data_type == 'string':
value = str(value)
elif data_type == 'float':
value = float(value)
elif data_type == 'int':
value = int(value)
else:
value = bool(value)
                except ValueError:  # if the type conversion fails, keep the value as a string
pass
if key != '' and value != '':
kwargs.setdefault(key, value)
else:
half_index = len(sorted_kwargs) // 2
for value in range(half_index):
key = sorted_kwargs[value][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
kwargs.setdefault(key, value)
return kwargs
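# Illustrative usage (placeholder field names): with mode=2 the sorted kwargs are
# split into a key half and a value half, e.g.
#   key_value_dict(mode=2, key_1='token', value_1='abc')  ->  {'token': 'abc'}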
'''Convert front-end test info into a list'''
def key_value_list(mode=4, **kwargs):
if not kwargs:
return None
sorted_kwargs = sorted(kwargs.items())
lists = []
if mode == 4:
half_index = len(sorted_kwargs) // 4
for value in range(half_index):
check = sorted_kwargs[value][1]
expected = sorted_kwargs[value + half_index][1]
comparator = sorted_kwargs[value + 2 * half_index][1]
data_type = sorted_kwargs[value + 3 * half_index][1]
if check != '' and expected != '':
try:
if data_type == 'string':
expected = str(expected)
elif data_type == 'float':
expected = float(expected)
elif data_type == 'int':
expected = int(expected)
else:
expected = bool(expected)
                except ValueError:  # if the type conversion fails, keep the value as a string
pass
lists.append({'check': check, 'comparator': comparator, 'expected': expected})
elif mode == 3:
half_index = len(sorted_kwargs) // 3
for value in range(half_index):
key = sorted_kwargs[value][1]
data_type = sorted_kwargs[value + 2 * half_index][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
try:
if data_type == 'string':
value = str(value)
elif data_type == 'float':
value = float(value)
elif data_type == 'int':
value = int(value)
else:
value = bool(value)
                except ValueError:  # if the type conversion fails, keep the value as a string
pass
lists.append({key: value})
else:
half_index = len(sorted_kwargs) // 2
for value in range(half_index):
key = sorted_kwargs[value][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
lists.append({key: value})
if not lists:
return None
return lists
'''Dynamically load modules'''
def load_modules(**kwargs):
belong_project = kwargs.get('name').get('project')
module_info = list(ModuleInfo.objects.get_module_info(belong_project))
string = ''
for value in module_info:
string = string + value + 'replaceFlag'
return string[:len(string) - 11]
'''Module info validation and persistence'''
def module_info_logic(type=True, **kwargs):
    if kwargs.get('module_name') == '':
        return '模块名称不能为空'
    if kwargs.get('belong_project') == '':
        return '请先添加项目'
    if kwargs.get('test_user') == '':
        return '测试人员不能为空'
    if kwargs.get('lifting_time') == '':
        return '提测时间不能为空'
return add_module_data(type, **kwargs)
'''Project info validation and persistence'''
def project_info_logic(type=True, **kwargs):
    if kwargs.get('project_name') == '':
        return '项目名称不能为空'
    if kwargs.get('responsible_name') == '':
        return '负责人不能为空'
    if kwargs.get('test_user') == '':
        return '测试人员不能为空'
    if kwargs.get('dev_user') == '':
        return '开发人员不能为空'
    if kwargs.get('publish_app') == '':
        return '发布应用不能为空'
return add_project_data(type, **kwargs)
'''Test case info validation and persistence'''
def case_info_logic(type=True, **kwargs):
test = kwargs.pop('test')
    '''
    Dynamically display modules
    '''
if 'request' not in test.keys():
return load_modules(**test)
else:
        if test.get('name').get('case_name') == '':
            return '用例名称不可为空'
        if test.get('name').get('project') is None or test.get('name').get('project') == '':
            return '请先添加项目'
        if test.get('name').get('module') is None or test.get('name').get('module') == '':
            return '请先添加模块'
        if test.get('name').get('author') == '':
            return '创建者不能为空'
        if test.get('request').get('url') == '':
            return '接口地址不能为空'
if not test.get('validate'):
return '至少需要一个结果校验!'
name = test.pop('name')
test.setdefault('name', name.pop('case_name'))
test.setdefault('case_info', name)
validate = test.pop('validate')
test.setdefault('validate', key_value_list(**validate))
extract = test.pop('extract')
if extract:
test.setdefault('extract', key_value_list(mode=2, **extract))
request_data = test.get('request').pop('request_data')
date_type = test.get('request').pop('type')
if request_data and date_type:
test.get('request').setdefault(date_type, key_value_dict(**request_data))
headers = test.get('request').pop('headers')
if headers:
test.get('request').setdefault('headers', key_value_dict(mode=2, **headers))
variables = test.pop('variables')
if variables:
test.setdefault('variables', key_value_list(mode=3, **variables))
setup = test.pop('setup')
if setup:
test.setdefault('setup', key_value_list(mode=2, **setup))
teardown = test.pop('teardown')
if teardown:
test.setdefault('teardown', key_value_list(mode=2, **teardown))
kwargs.setdefault('test', test)
return add_case_data(type, **kwargs)
'''Config info validation and persistence'''
def config_info_logic(type=True, **kwargs):
config = kwargs.pop('config')
    '''
    Dynamically display modules
    '''
if 'request' not in config.keys():
return load_modules(**config)
else:
        if config.get('name').get('config_name') == '':
            return '配置名称不可为空'
        if config.get('name').get('project') is None or config.get('name').get('project') == '':
            return '请先添加项目'
        if config.get('name').get('config_module') is None or config.get('name').get('config_module') == '':
            return '请先添加模块'
        if config.get('name').get('config_author') == '':
            return '创建者不能为空'
name = config.pop('name')
config.setdefault('name', name.pop('config_name'))
config.setdefault('config_info', name)
request_data = config.get('request').pop('request_data')
data_type = config.get('request').pop('type')
if request_data and data_type:
config.get('request').setdefault(data_type, key_value_dict(**request_data))
headers = config.get('request').pop('headers')
if headers:
config.get('request').setdefault('headers', key_value_dict(mode=2, **headers))
variables = config.pop('variables')
if variables:
config.setdefault('variables', key_value_list(mode=3, **variables))
kwargs.setdefault('config', config)
return add_config_data(type, **kwargs)
'''Query the filter session'''
def set_filter_session(request):
filter_query = {'filter': '1', 'user': '', 'name': ''}
if request.method == 'POST':
request.session['filter'] = request.POST.get('filter')
request.session['user'] = request.POST.get('user')
request.session['name'] = request.POST.get('name')
try:
filter_query = {'filter': request.session['filter'], 'user': request.session['user'],
'name': request.session['name']}
except KeyError:
pass
return filter_query
'''Ajax async message helper'''
def get_ajax_msg(msg, success):
    if msg == 'ok':
return success
else:
return msg
'''Registration info validation'''
def register_info_logic(**kwargs):
return add_register_data(**kwargs)
'''Convert uploaded yml file content into a list'''
def yml_parser(file_path):
with open(file_path, 'r') as f:
        s = yaml.safe_load(f)  # safe_load avoids executing arbitrary YAML tags
data = {'case_info': s}
bulk_import_data(**data)
return s
| [
[
[
39,
55
],
[
4816,
4832
]
],
[
[
57,
72
],
[
4377,
4392
]
],
[
[
74,
87
],
[
6838,
6851
]
],
[
[
89,
104
],
[
8282,
8297
]
],
[
[
112,
129
],
[
9067,
9084
]
],
[
[
131,
147
],
[
9238,
9254
]
],
[
[
178,
188
],
[
3854,
3864
]
],
[
[
196,
200
],
[
9193,
9197
]
],
[
[
226,
240
],
[
6183,
6197
],
[
6343,
6357
],
[
7876,
7890
],
[
8040,
8054
]
],
[
[
1527,
1541
],
[
5813,
5827
],
[
5939,
5953
],
[
6484,
6498
],
[
6611,
6625
],
[
6746,
6760
],
[
8185,
8199
]
],
[
[
3752,
3764
],
[
5024,
5036
],
[
7051,
7063
]
],
[
[
4062,
4079
]
],
[
[
4433,
4451
]
],
[
[
4873,
4888
]
],
[
[
6892,
6909
]
],
[
[
8338,
8356
]
],
[
[
8902,
8914
]
],
[
[
9025,
9044
]
],
[
[
9122,
9132
]
]
] |
import tensorflow as tf
#from tensorflow.python.ops.rnn_cell import *
#from tensorflow.python.ops.rnn_cell_impl import _Linear
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import *
#from tensorflow import keras
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
#from keras import backend as K
def din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print ("query_size mismatch")
query = tf.concat(values = [
query,
query,
], axis=1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
if mask is not None:
mask = tf.equal(mask, tf.ones_like(mask))
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
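# Shape summary (assuming query is [B, H] and facts is [B, T, H]): the attention
# scores are built as [B, 1, T]; mode='SUM' returns tf.matmul(scores, facts) with
# shape [B, 1, H], otherwise the re-weighted facts of shape [B, T, H] are returned
# (optionally together with the scores when return_alphas is set).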
class VecAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(VecAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
def calc_auc(raw_arr):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)
pos, neg = 0., 0.
for record in arr:
if record[1] == 1.:
pos += 1
else:
neg += 1
fp, tp = 0., 0.
xy_arr = []
for record in arr:
if record[1] == 1.:
tp += 1
else:
fp += 1
xy_arr.append([fp/neg, tp/pos])
auc = 0.
prev_x = 0.
prev_y = 0.
for x, y in xy_arr:
if x != prev_x:
auc += ((x - prev_x) * (y + prev_y) / 2.)
prev_x = x
prev_y = y
return auc
def calc_gauc(raw_arr, nick_index):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
last_index = 0
gauc = 0.
pv_sum = 0
    for idx in range(len(nick_index)):  # range works on both Python 2 and 3
if nick_index[idx] != nick_index[last_index]:
input_arr = raw_arr[last_index:idx]
auc_val=calc_auc(input_arr)
if auc_val >= 0.0:
gauc += auc_val * len(input_arr)
pv_sum += len(input_arr)
else:
pv_sum += len(input_arr)
last_index = idx
return gauc / pv_sum
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
if mask is not None:
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
| [
[
[
7,
23
],
[
710,
712
],
[
784,
786
],
[
919,
921
],
[
1115,
1117
],
[
1134,
1136
],
[
1169,
1171
],
[
1189,
1191
],
[
1220,
1222
],
[
1307,
1309
],
[
1347,
1349
],
[
1404,
1406
],
[
1450,
1452
],
[
1507,
1509
],
[
1600,
1602
],
[
1634,
1636
],
[
1723,
1725
],
[
1738,
1740
],
[
1778,
1780
],
[
1833,
1835
],
[
1888,
1890
],
[
1995,
1997
],
[
2089,
2091
],
[
2219,
2221
],
[
2243,
2245
],
[
2289,
2291
],
[
2333,
2335
],
[
2352,
2354
],
[
5042,
5044
],
[
5121,
5123
],
[
5240,
5242
],
[
5285,
5287
],
[
5316,
5318
],
[
6947,
6949
],
[
7032,
7034
],
[
7085,
7087
],
[
7100,
7102
],
[
7298,
7300
],
[
7310,
7312
],
[
7380,
7382
],
[
7392,
7394
],
[
7460,
7462
],
[
7472,
7474
],
[
7528,
7530
],
[
7540,
7542
],
[
7598,
7600
],
[
7811,
7813
],
[
7858,
7860
],
[
7905,
7907
],
[
7930,
7932
],
[
7965,
7967
],
[
8101,
8103
],
[
8265,
8267
],
[
8322,
8324
],
[
8389,
8391
],
[
8626,
8628
],
[
8666,
8668
],
[
8685,
8687
],
[
9142,
9144
],
[
9224,
9226
],
[
9314,
9316
],
[
9535,
9537
],
[
9644,
9646
],
[
9663,
9665
],
[
9698,
9700
],
[
9718,
9720
],
[
9749,
9751
],
[
9836,
9838
],
[
9876,
9878
],
[
9933,
9935
],
[
9979,
9981
],
[
10036,
10038
],
[
10129,
10131
],
[
10163,
10165
],
[
10347,
10349
],
[
10402,
10404
],
[
10484,
10486
],
[
10669,
10671
],
[
10763,
10765
],
[
10893,
10895
],
[
10917,
10919
],
[
10963,
10965
],
[
11007,
11009
],
[
11026,
11028
],
[
11238,
11240
],
[
11796,
11798
],
[
11817,
11819
],
[
12015,
12017
],
[
12124,
12126
],
[
12326,
12328
],
[
12861,
12863
],
[
12882,
12884
],
[
13080,
13082
],
[
13189,
13191
],
[
13530,
13532
],
[
13615,
13617
],
[
13694,
13696
],
[
13709,
13711
],
[
13882,
13884
],
[
14003,
14005
],
[
14022,
14024
],
[
14057,
14059
],
[
14077,
14079
],
[
14108,
14110
],
[
14195,
14197
],
[
14243,
14245
],
[
14306,
14308
],
[
14360,
14362
],
[
14423,
14425
],
[
14449,
14451
],
[
11311,
11313
],
[
11322,
11324
],
[
11653,
11655
],
[
12399,
12401
],
[
12410,
12412
],
[
12718,
12720
]
],
[
[
188,
189
],
[
2475,
2482
],
[
4153,
4160
],
[
4615,
4622
],
[
6261,
6267
]
],
[
[
254,
262
],
[
3360,
3368
],
[
4352,
4360
]
],
[
[
297,
305
],
[
4000,
4008
]
],
[
[
340,
349
],
[
4416,
4425
]
],
[
[
384,
404
],
[
4066,
4068
],
[
4550,
4552
]
],
[
[
442,
455
]
],
[
[
2461,
2474
],
[
3247,
3260
]
],
[
[
4975,
4980
],
[
9617,
9622
],
[
13976,
13981
]
],
[
[
5341,
5349
],
[
6408,
6416
]
],
[
[
6056,
6065
]
],
[
[
6687,
6696
]
],
[
[
8861,
8878
],
[
11404,
11421
],
[
12492,
12509
]
],
[
[
11118,
11132
]
],
[
[
12202,
12220
]
],
[
[
13267,
13280
]
]
] |
import sys
# Alternatively just load env variables via your env/bin/activate script
if sys.platform.startswith('darwin') or sys.platform.startswith('win'):
import json
path = "Gigger/utilities/env_local.json"
with open(path) as json_file:
global CONFIG
CONFIG = json.load(json_file)
else:
import os
global CONFIG
CONFIG = {
"DEPLOYMENT": os.environ['DEPLOYMENT'],
"DB": {
"HOST": os.environ['DB_HOST'],
"USER": os.environ['DB_USER'],
"PW": os.environ['DB_PW'],
"SCHEMA": os.environ['DB_SCHEMA'],
},
"AWS": True,
"FB_APP_ID": os.environ['FB_APP_ID']
}
| [
[
[
7,
10
],
[
88,
91
],
[
125,
128
]
],
[
[
168,
172
],
[
291,
295
]
],
[
[
177,
181
],
[
232,
236
]
],
[
[
241,
250
],
[
301,
310
]
],
[
[
282,
288
]
],
[
[
329,
331
],
[
387,
389
],
[
449,
451
],
[
492,
494
],
[
533,
535
],
[
576,
578
],
[
654,
656
]
],
[
[
354,
360
]
]
] |
import logging
import six
import ddtrace
from ddtrace.compat import StringIO
from ddtrace.constants import ENV_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.contrib.logging import patch
from ddtrace.contrib.logging import unpatch
from ddtrace.contrib.logging.patch import RECORD_ATTR_SPAN_ID
from ddtrace.contrib.logging.patch import RECORD_ATTR_TRACE_ID
from ddtrace.vendor import wrapt
from tests.utils import TracerTestCase
logger = logging.getLogger()
logger.level = logging.INFO
DEFAULT_FORMAT = (
"%(message)s - dd.service=%(dd.service)s dd.version=%(dd.version)s dd.env=%(dd.env)s"
" dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s"
)
def current_span(tracer=None):
if not tracer:
tracer = ddtrace.tracer
return tracer.current_span()
class AssertFilter(logging.Filter):
def filter(self, record):
trace_id = getattr(record, RECORD_ATTR_TRACE_ID)
assert isinstance(trace_id, six.string_types)
span_id = getattr(record, RECORD_ATTR_SPAN_ID)
assert isinstance(span_id, six.string_types)
return True
def capture_function_log(func, fmt=DEFAULT_FORMAT, logger_override=None):
if logger_override is not None:
logger_to_capture = logger_override
else:
logger_to_capture = logger
# add stream handler to capture output
out = StringIO()
sh = logging.StreamHandler(out)
try:
formatter = logging.Formatter(fmt)
sh.setFormatter(formatter)
logger_to_capture.addHandler(sh)
assert_filter = AssertFilter()
logger_to_capture.addFilter(assert_filter)
result = func()
finally:
logger_to_capture.removeHandler(sh)
logger_to_capture.removeFilter(assert_filter)
return out.getvalue().strip(), result
class LoggingTestCase(TracerTestCase):
def setUp(self):
patch()
super(LoggingTestCase, self).setUp()
def tearDown(self):
unpatch()
super(LoggingTestCase, self).tearDown()
def test_patch(self):
"""
Confirm patching was successful
"""
log = logging.getLogger()
self.assertTrue(isinstance(log.makeRecord, wrapt.BoundFunctionWrapper))
unpatch()
log = logging.getLogger()
self.assertFalse(isinstance(log.makeRecord, wrapt.BoundFunctionWrapper))
def _test_logging(self, create_span, service="", version="", env=""):
def func():
span = create_span()
logger.info("Hello!")
if span:
span.finish()
return span
with self.override_config("logging", dict(tracer=self.tracer)):
# with format string for trace info
output, span = capture_function_log(func)
trace_id = 0
span_id = 0
if span:
trace_id = span.trace_id
span_id = span.span_id
assert output == "Hello! - dd.service={} dd.version={} dd.env={} dd.trace_id={} dd.span_id={}".format(
service, version, env, trace_id, span_id
)
# without format string
output, _ = capture_function_log(func, fmt="%(message)s")
assert output == "Hello!"
def test_log_trace(self):
"""
        Check that logging is patched and the formatter includes trace info
"""
def create_span():
return self.tracer.trace("test.logging")
self._test_logging(create_span=create_span)
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
def test_log_trace_service(self):
def create_span():
return self.tracer.trace("test.logging", service="logging")
self._test_logging(create_span=create_span)
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TAGS="service:ddtagservice,env:ddenv,version:ddversion"))
def test_log_DD_TAGS(self):
def create_span():
return self.tracer.trace("test.logging")
self._test_logging(create_span=create_span, service="ddtagservice", version="ddversion", env="ddenv")
def test_log_trace_version(self):
def create_span():
span = self.tracer.trace("test.logging")
span.set_tag(VERSION_KEY, "manual.version")
return span
self._test_logging(create_span=create_span, version="")
# Setting global config version and overriding with span specific value
# We always want the globals in the logs
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
def test_log_trace_env(self):
"""
        Check that logging is patched and the formatter includes trace info
"""
def create_span():
span = self.tracer.trace("test.logging")
span.set_tag(ENV_KEY, "manual.env")
return span
self._test_logging(create_span=create_span, env="")
# Setting global config env and overriding with span specific value
# We always want the globals in the logs
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
def test_log_no_trace(self):
"""
        Check that logging is patched and the formatter works when no trace is active
"""
def create_span():
return None
self._test_logging(create_span=create_span)
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
| [
[
[
7,
14
],
[
453,
460
],
[
488,
495
],
[
812,
819
],
[
1375,
1382
],
[
1432,
1439
],
[
2118,
2125
],
[
2251,
2258
]
],
[
[
23,
26
],
[
952,
955
],
[
1061,
1064
]
],
[
[
35,
42
],
[
743,
750
]
],
[
[
70,
78
],
[
1355,
1363
]
],
[
[
109,
116
],
[
5235,
5242
]
],
[
[
147,
158
],
[
4563,
4574
]
],
[
[
195,
200
],
[
1869,
1874
]
],
[
[
237,
244
],
[
1955,
1962
],
[
2227,
2234
]
],
[
[
287,
306
],
[
1005,
1024
]
],
[
[
349,
369
],
[
894,
914
]
],
[
[
397,
402
],
[
2189,
2194
],
[
2323,
2328
]
],
[
[
427,
441
],
[
1823,
1837
],
[
4083,
4097
]
],
[
[
444,
450
],
[
473,
479
],
[
1294,
1300
],
[
2492,
2498
]
],
[
[
502,
516
],
[
1137,
1151
]
],
[
[
680,
692
]
],
[
[
799,
811
],
[
1555,
1567
]
],
[
[
1106,
1126
],
[
2737,
2757
],
[
3162,
3182
]
],
[
[
1807,
1822
],
[
1891,
1906
],
[
1979,
1994
]
]
] |
# -*- coding: utf-8 -*-
'''
Helpful decorators for module writing
'''
# Import python libs
from __future__ import absolute_import
import inspect
import logging
import time
from functools import wraps
from collections import defaultdict
# Import salt libs
import salt.utils
import salt.utils.args
from salt.exceptions import CommandNotFoundError, CommandExecutionError, SaltConfigurationError
from salt.version import SaltStackVersion, __saltstack_version__
from salt.log import LOG_LEVELS
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
class Depends(object):
'''
This decorator will check the module when it is loaded and check that the
dependencies passed in are in the globals of the module. If not, it will
cause the function to be unloaded (or replaced)
'''
# kind -> Dependency -> list of things that depend on it
dependency_dict = defaultdict(lambda: defaultdict(dict))
def __init__(self, *dependencies, **kwargs):
'''
The decorator is instantiated with a list of dependencies (string of
global name)
An example use of this would be:
@depends('modulename')
def test():
return 'foo'
OR
@depends('modulename', fallback_function=function)
def test():
return 'foo'
'''
log.trace(
'Depends decorator instantiated with dep list of {0}'.format(
dependencies
)
)
self.dependencies = dependencies
self.fallback_function = kwargs.get('fallback_function')
def __call__(self, function):
'''
The decorator is "__call__"d with the function, we take that function
and determine which module and function name it is to store in the
        class-wide dependency_dict
'''
try:
# This inspect call may fail under certain conditions in the loader. Possibly related to
# a Python bug here:
# http://bugs.python.org/issue17735
frame = inspect.stack()[1][0]
# due to missing *.py files under esky we cannot use inspect.getmodule
# module name is something like salt.loaded.int.modules.test
_, kind, mod_name = frame.f_globals['__name__'].rsplit('.', 2)
fun_name = function.__name__
for dep in self.dependencies:
self.dependency_dict[kind][dep][(mod_name, fun_name)] = \
(frame, self.fallback_function)
except Exception as exc:
log.error('Exception encountered when attempting to inspect frame in '
'dependency decorator: {0}'.format(exc))
return function
@classmethod
def enforce_dependencies(cls, functions, kind):
'''
This is a class global method to enforce the dependencies that you
currently know about.
It will modify the "functions" dict and remove/replace modules that
are missing dependencies.
'''
for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]):
for (mod_name, func_name), (frame, fallback_function) in six.iteritems(dependent_dict):
# check if dependency is loaded
if dependency is True:
log.trace(
'Dependency for {0}.{1} exists, not unloading'.format(
mod_name,
func_name
)
)
continue
# check if you have the dependency
if dependency in frame.f_globals \
or dependency in frame.f_locals:
log.trace(
'Dependency ({0}) already loaded inside {1}, '
'skipping'.format(
dependency,
mod_name
)
)
continue
log.trace(
'Unloading {0}.{1} because dependency ({2}) is not '
'imported'.format(
mod_name,
func_name,
dependency
)
)
# if not, unload the function
if frame:
try:
func_name = frame.f_globals['__func_alias__'][func_name]
except (AttributeError, KeyError):
pass
mod_key = '{0}.{1}'.format(mod_name, func_name)
# if we don't have this module loaded, skip it!
if mod_key not in functions:
continue
try:
if fallback_function is not None:
functions[mod_key] = fallback_function
else:
del functions[mod_key]
except AttributeError:
# we already did???
log.trace('{0} already removed, skipping'.format(mod_key))
continue
depends = Depends
def timing(function):
'''
Decorator wrapper to log execution time, for profiling purposes
'''
@wraps(function)
def wrapped(*args, **kwargs):
start_time = time.time()
ret = function(*args, **salt.utils.clean_kwargs(**kwargs))
end_time = time.time()
if function.__module__.startswith('salt.loaded.int.'):
mod_name = function.__module__[16:]
else:
mod_name = function.__module__
log.profile(
'Function {0}.{1} took {2:.20f} seconds to execute'.format(
mod_name,
function.__name__,
end_time - start_time
)
)
return ret
return wrapped
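# Illustrative usage sketch (not part of the original module): ``_timing_demo``
# is a hypothetical function decorated with ``timing``. When it is called, the
# elapsed time is emitted at the custom ``profile`` log level, so that level
# must be enabled for the message to appear.
@timing
def _timing_demo(n):
    '''Sum the first ``n`` integers while logging how long the call took.'''
    return sum(range(n))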
def which(exe):
'''
Decorator wrapper for salt.utils.which
'''
def wrapper(function):
def wrapped(*args, **kwargs):
if salt.utils.which(exe) is None:
raise CommandNotFoundError(
'The \'{0}\' binary was not found in $PATH.'.format(exe)
)
return function(*args, **kwargs)
return identical_signature_wrapper(function, wrapped)
return wrapper
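# Illustrative usage sketch (kept as a comment because
# ``identical_signature_wrapper`` is defined further down in this module, so
# decorating a real function at this point would fail at import time; the
# function name and command are assumptions, not part of the original module)::
#
#     @which('ls')
#     def _list_cwd():
#         return __salt__['cmd.run']('ls')
#
# If the ``ls`` binary is not found in $PATH, calling ``_list_cwd`` raises
# CommandNotFoundError before the wrapped function runs.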
def which_bin(exes):
'''
Decorator wrapper for salt.utils.which_bin
'''
def wrapper(function):
def wrapped(*args, **kwargs):
if salt.utils.which_bin(exes) is None:
raise CommandNotFoundError(
                    'None of the provided binaries ({0}) were found '
'in $PATH.'.format(
['\'{0}\''.format(exe) for exe in exes]
)
)
return function(*args, **kwargs)
return identical_signature_wrapper(function, wrapped)
return wrapper
def identical_signature_wrapper(original_function, wrapped_function):
'''
Return a function with identical signature as ``original_function``'s which
will call the ``wrapped_function``.
'''
context = {'__wrapped__': wrapped_function}
function_def = compile(
'def {0}({1}):\n'
' return __wrapped__({2})'.format(
# Keep the original function name
original_function.__name__,
# The function signature including defaults, i.e., 'timeout=1'
inspect.formatargspec(
*salt.utils.args.get_function_argspec(original_function)
)[1:-1],
# The function signature without the defaults
inspect.formatargspec(
formatvalue=lambda val: '',
*salt.utils.args.get_function_argspec(original_function)
)[1:-1]
),
'<string>',
'exec'
)
six.exec_(function_def, context)
return wraps(original_function)(context[original_function.__name__])
def memoize(func):
'''
Memoize aka cache the return output of a function
given a specific set of arguments
    .. versionchanged:: 2016.3.4
Added **kwargs support.
'''
cache = {}
@wraps(func)
def _memoize(*args, **kwargs):
str_args = []
for arg in args:
if not isinstance(arg, six.string_types):
str_args.append(str(arg))
else:
str_args.append(arg)
args_ = ','.join(list(str_args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)])
if args_ not in cache:
cache[args_] = func(*args, **kwargs)
return cache[args_]
return _memoize
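# Illustrative usage sketch (not part of the original module): ``_fib_demo`` is
# a hypothetical recursive function; because the module-level name is rebound
# to the memoized wrapper, repeated sub-calls with the same argument are served
# from the cache instead of being recomputed.
@memoize
def _fib_demo(n):
    '''Return the n-th Fibonacci number, caching intermediate results.'''
    return n if n < 2 else _fib_demo(n - 1) + _fib_demo(n - 2)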
class _DeprecationDecorator(object):
'''
Base mix-in class for the deprecation decorator.
    Takes care of common functionality used in its derivatives.
'''
OPT_IN = 1
OPT_OUT = 2
def __init__(self, globals, version):
'''
Constructor.
:param globals: Module globals. Important for finding out replacement functions
:param version: Expiration version
:return:
'''
self._globals = globals
self._exp_version_name = version
self._exp_version = SaltStackVersion.from_name(self._exp_version_name)
self._curr_version = __saltstack_version__.info
self._raise_later = None
self._function = None
self._orig_f_name = None
def _get_args(self, kwargs):
'''
Extract function-specific keywords from all of the kwargs.
:param kwargs:
:return:
'''
_args = list()
_kwargs = dict()
if '__pub_arg' in kwargs: # For modules
for arg_item in kwargs.get('__pub_arg', list()):
if type(arg_item) == dict:
_kwargs.update(arg_item.copy())
else:
_args.append(arg_item)
else:
_kwargs = kwargs.copy() # For states
return _args, _kwargs
def _call_function(self, kwargs):
'''
Call target function that has been decorated.
:return:
'''
if self._raise_later:
raise self._raise_later # pylint: disable=E0702
if self._function:
args, kwargs = self._get_args(kwargs)
try:
return self._function(*args, **kwargs)
except TypeError as error:
                error = str(error).replace(self._function.__name__, self._orig_f_name)  # Hide hidden functions
log.error('Function "{f_name}" was not properly called: {error}'.format(f_name=self._orig_f_name,
error=error))
return self._function.__doc__
except Exception as error:
log.error('Unhandled exception occurred in '
'function "{f_name}: {error}'.format(f_name=self._function.__name__,
error=error))
raise error
else:
raise CommandExecutionError("Function is deprecated, but the successor function was not found.")
def __call__(self, function):
'''
Callable method of the decorator object when
        the decorated function gets called.
:param function:
:return:
'''
self._function = function
self._orig_f_name = self._function.__name__
class _IsDeprecated(_DeprecationDecorator):
'''
This decorator should be used only with the deprecated functions
    to mark them as deprecated and alter their behavior in a corresponding way.
    The usage is only suitable if the deprecation process is renaming
    the function from one name to another. In case the function name or even the function
    signature stays the same, please use the 'with_deprecated' decorator instead.
It has the following functionality:
1. Put a warning level message to the log, informing that
the deprecated function has been in use.
2. Raise an exception, if deprecated function is being called,
but the lifetime of it already expired.
3. Point to the successor of the deprecated function in the
       log messages, as well as when blocking it once it has expired.
    Usage of this decorator is as follows. In this example no successor
    is mentioned, hence the function "foo()" will be logged with the
    warning each time it is called and blocked completely once its EOL
    is reached:
from salt.util.decorators import is_deprecated
@is_deprecated(globals(), "Beryllium")
def foo():
pass
In the following example a successor function is mentioned, hence
    every time the function "bar()" is called, the message will suggest
    using function "baz()" instead. Once the EOL of the function
    "bar()" is reached, an exception will ask to use function "baz()" in order
    to continue:
from salt.util.decorators import is_deprecated
@is_deprecated(globals(), "Beryllium", with_successor="baz")
def bar():
pass
def baz():
pass
'''
def __init__(self, globals, version, with_successor=None):
'''
Constructor of the decorator 'is_deprecated'.
:param globals: Module globals
:param version: Version to be deprecated
:param with_successor: Successor function (optional)
:return:
'''
_DeprecationDecorator.__init__(self, globals, version)
self._successor = with_successor
def __call__(self, function):
'''
Callable method of the decorator object when
        the decorated function gets called.
:param function:
:return:
'''
_DeprecationDecorator.__call__(self, function)
def _decorate(*args, **kwargs):
'''
Decorator function.
:param args:
:param kwargs:
:return:
'''
if self._curr_version < self._exp_version:
msg = ['The function "{f_name}" is deprecated and will '
'expire in version "{version_name}".'.format(f_name=self._function.__name__,
version_name=self._exp_version_name)]
if self._successor:
msg.append('Use successor "{successor}" instead.'.format(successor=self._successor))
log.warning(' '.join(msg))
else:
msg = ['The lifetime of the function "{f_name}" expired.'.format(f_name=self._function.__name__)]
if self._successor:
msg.append('Please use its successor "{successor}" instead.'.format(successor=self._successor))
log.warning(' '.join(msg))
raise CommandExecutionError(' '.join(msg))
return self._call_function(kwargs)
return _decorate
is_deprecated = _IsDeprecated
class _WithDeprecated(_DeprecationDecorator):
'''
This decorator should be used with the successor functions
    to mark them as new and alter their behavior in a corresponding way.
    It is used alone if the function content or function signature
    needs to be replaced, leaving the name of the function the same.
    In case the function needs to be renamed or just dropped, it has
    to be used in pair with the 'is_deprecated' decorator.
It has the following functionality:
1. Put a warning level message to the log, in case a component
is using its deprecated version.
2. Switch between old and new function in case an older version
is configured for the desired use.
    3. Raise an exception if the deprecated version has reached EOL and
       point to the new version.
Usage of this decorator as follows. If 'with_name' is not specified,
then the name of the deprecated function is assumed with the "_" prefix.
In this case, in order to deprecate a function, it is required:
- Add a prefix "_" to an existing function. E.g.: "foo()" to "_foo()".
- Implement a new function with exactly the same name, just without
the prefix "_".
Example:
from salt.util.decorators import with_deprecated
@with_deprecated(globals(), "Beryllium")
def foo():
"This is a new function"
def _foo():
"This is a deprecated function"
In case there is a need to deprecate a function and rename it,
the decorator should be used with the 'with_name' parameter. This
    parameter points to the existing deprecated function. In this
    case the deprecation process is as follows:
- Leave a deprecated function without changes, as is.
- Implement a new function and decorate it with this decorator.
- Set a parameter 'with_name' to the deprecated function.
- If a new function has a different name than a deprecated,
decorate a deprecated function with the 'is_deprecated' decorator
in order to let the function have a deprecated behavior.
Example:
from salt.util.decorators import with_deprecated
@with_deprecated(globals(), "Beryllium", with_name="an_old_function")
def a_new_function():
"This is a new function"
@is_deprecated(globals(), "Beryllium", with_successor="a_new_function")
def an_old_function():
"This is a deprecated function"
'''
MODULE_NAME = '__virtualname__'
CFG_USE_DEPRECATED = 'use_deprecated'
CFG_USE_SUPERSEDED = 'use_superseded'
def __init__(self, globals, version, with_name=None, policy=_DeprecationDecorator.OPT_OUT):
'''
Constructor of the decorator 'with_deprecated'
:param globals:
:param version:
:param with_name:
:param policy:
:return:
'''
_DeprecationDecorator.__init__(self, globals, version)
self._with_name = with_name
self._policy = policy
def _set_function(self, function):
'''
        Based on the configuration, decide whether to execute the old or the new function.
:return:
'''
full_name = "{m_name}.{f_name}".format(
m_name=self._globals.get(self.MODULE_NAME, '') or self._globals['__name__'].split('.')[-1],
f_name=function.__name__)
if full_name.startswith("."):
self._raise_later = CommandExecutionError('Module not found for function "{f_name}"'.format(
f_name=function.__name__))
opts = self._globals.get('__opts__', '{}')
pillar = self._globals.get('__pillar__', '{}')
use_deprecated = (full_name in opts.get(self.CFG_USE_DEPRECATED, list()) or
full_name in pillar.get(self.CFG_USE_DEPRECATED, list()))
use_superseded = (full_name in opts.get(self.CFG_USE_SUPERSEDED, list()) or
full_name in pillar.get(self.CFG_USE_SUPERSEDED, list()))
if use_deprecated and use_superseded:
            raise SaltConfigurationError("Function '{0}' is mentioned both in deprecated "
                                         "and superseded sections. Please remove one of them.".format(full_name))
old_function = self._globals.get(self._with_name or "_{0}".format(function.__name__))
if self._policy == self.OPT_IN:
self._function = function if use_superseded else old_function
else:
self._function = old_function if use_deprecated else function
def _is_used_deprecated(self):
'''
        Returns True if a component configuration explicitly asks
        to use the old version of the deprecated function.
:return:
'''
func_path = "{m_name}.{f_name}".format(
m_name=self._globals.get(self.MODULE_NAME, '') or self._globals['__name__'].split('.')[-1],
f_name=self._orig_f_name)
return func_path in self._globals.get('__opts__').get(
self.CFG_USE_DEPRECATED, list()) or func_path in self._globals.get('__pillar__').get(
self.CFG_USE_DEPRECATED, list()) or (self._policy == self.OPT_IN
and not (func_path in self._globals.get('__opts__', {}).get(
self.CFG_USE_SUPERSEDED, list()))
and not (func_path in self._globals.get('__pillar__', {}).get(
self.CFG_USE_SUPERSEDED, list()))), func_path
def __call__(self, function):
'''
Callable method of the decorator object when
        the decorated function gets called.
:param function:
:return:
'''
_DeprecationDecorator.__call__(self, function)
def _decorate(*args, **kwargs):
'''
Decorator function.
:param args:
:param kwargs:
:return:
'''
self._set_function(function)
is_deprecated, func_path = self._is_used_deprecated()
if is_deprecated:
if self._curr_version < self._exp_version:
msg = list()
if self._with_name:
msg.append('The function "{f_name}" is deprecated and will '
'expire in version "{version_name}".'.format(
f_name=self._with_name.startswith("_") and self._orig_f_name or self._with_name,
version_name=self._exp_version_name))
msg.append('Use its successor "{successor}" instead.'.format(successor=self._orig_f_name))
else:
msg.append('The function "{f_name}" is using its deprecated version and will '
'expire in version "{version_name}".'.format(f_name=func_path,
version_name=self._exp_version_name))
log.warning(' '.join(msg))
else:
msg_patt = 'The lifetime of the function "{f_name}" expired.'
if '_' + self._orig_f_name == self._function.__name__:
msg = [msg_patt.format(f_name=self._orig_f_name),
'Please turn off its deprecated version in the configuration']
else:
msg = ['Although function "{f_name}" is called, an alias "{f_alias}" '
'is configured as its deprecated version.'.format(
f_name=self._orig_f_name, f_alias=self._with_name or self._orig_f_name),
msg_patt.format(f_name=self._with_name or self._orig_f_name),
'Please use its successor "{successor}" instead.'.format(successor=self._orig_f_name)]
log.error(' '.join(msg))
raise CommandExecutionError(' '.join(msg))
return self._call_function(kwargs)
_decorate.__doc__ = self._function.__doc__
return _decorate
with_deprecated = _WithDeprecated
def ignores_kwargs(*kwarg_names):
'''
Decorator to filter out unexpected keyword arguments from the call
kwarg_names:
List of argument names to ignore
'''
def _ignores_kwargs(fn):
def __ignores_kwargs(*args, **kwargs):
kwargs_filtered = kwargs.copy()
for name in kwarg_names:
if name in kwargs_filtered:
del kwargs_filtered[name]
return fn(*args, **kwargs_filtered)
return __ignores_kwargs
return _ignores_kwargs
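# Illustrative usage sketch (not part of the original module): ``_add_demo`` is
# a hypothetical function; the listed keyword names mimic the ``__pub_*``
# arguments Salt injects into calls, which are dropped before the wrapped
# function is invoked.
@ignores_kwargs('__pub_fun', '__pub_jid')
def _add_demo(a, b):
    '''Add two numbers, ignoring injected publisher kwargs.'''
    return a + b
# _add_demo(1, 2, __pub_fun='test.add') returns 3: the extra kwarg is filtered out.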
| [
[
[
115,
130
]
],
[
[
138,
145
],
[
2099,
2106
],
[
7571,
7578
],
[
7758,
7765
]
],
[
[
153,
160
],
[
550,
557
]
],
[
[
168,
172
],
[
5473,
5477
],
[
5571,
5575
]
],
[
[
195,
200
],
[
5402,
5407
],
[
8018,
8023
],
[
8292,
8297
]
],
[
[
225,
236
],
[
909,
920
],
[
929,
940
]
],
[
[
264,
274
]
],
[
[
282,
297
],
[
7611,
7615
],
[
7842,
7846
],
[
5517,
5521
],
[
6162,
6166
],
[
6624,
6628
]
],
[
[
326,
346
],
[
6215,
6235
],
[
6682,
6702
]
],
[
[
348,
369
],
[
11208,
11229
],
[
18555,
18576
],
[
14991,
15012
],
[
23234,
23255
]
],
[
[
371,
393
],
[
19181,
19203
]
],
[
[
419,
435
],
[
9313,
9329
]
],
[
[
437,
458
],
[
9393,
9414
]
],
[
[
480,
490
]
],
[
[
523,
542
],
[
3119,
3122
],
[
3230,
3233
],
[
7974,
7977
],
[
8421,
8424
]
],
[
[
544,
547
],
[
1393,
1396
],
[
2610,
2613
],
[
3368,
3371
],
[
3790,
3793
],
[
4085,
4088
],
[
5177,
5180
],
[
10630,
10633
],
[
10931,
10934
],
[
5759,
5762
],
[
14615,
14618
],
[
14942,
14945
],
[
22267,
22270
],
[
23183,
23186
]
],
[
[
586,
593
],
[
5281,
5288
]
],
[
[
5271,
5278
]
],
[
[
5295,
5301
]
],
[
[
6011,
6016
]
],
[
[
6464,
6473
]
],
[
[
7048,
7075
],
[
6392,
6419
],
[
6976,
7003
]
],
[
[
8086,
8093
]
],
[
[
8775,
8796
],
[
11609,
11630
],
[
15156,
15177
],
[
17782,
17803
],
[
13592,
13613
],
[
13898,
13919
],
[
18016,
18037
],
[
20929,
20950
]
],
[
[
11595,
11608
],
[
15118,
15131
]
],
[
[
15102,
15115
]
],
[
[
15140,
15155
],
[
23415,
23430
]
],
[
[
23397,
23412
]
],
[
[
23437,
23451
]
]
] |
#!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
import matplotlib.pyplot as plt
CAMPAIGN_NAME = "Conduction."
def refine_sampling_plan(campaign, analysis, number_of_refinements):
"""
Refine the sampling plan.
    Parameters
    ----------
    campaign
        The EasyVVUQ campaign whose active sampler is refined.
    analysis
        The analysis object holding the accepted indices (l_norm).
    number_of_refinements (int)
        The number of refinement iterations that must be performed.
Returns
-------
None. The new accepted indices are stored in analysis.l_norm and the admissible indices
in sampler.admissible_idx.
"""
sampler = campaign.get_active_sampler()
for _ in range(number_of_refinements):
# compute the admissible indices
sampler.look_ahead(analysis.l_norm)
print(f"Code will be evaluated {sampler.n_new_points[-1]} times")
# run the ensemble
campaign.execute().collate(progress_bar=True)
# accept one of the multi indices of the new admissible set
data_frame = campaign.get_collation_result()
analysis.adapt_dimension("T", data_frame)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
def plot_grid_2D(campaign, analysis, i, filename="out.pdf"):
fig = plt.figure(figsize=[12, 4])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
accepted_grid = campaign.get_active_sampler().generate_grid(analysis.l_norm)
ax1.plot(accepted_grid[:, 0], accepted_grid[:, 1], "o")
ax2.plot(accepted_grid[:, 2], accepted_grid[:, 3], "o")
ax1.set_title(f"iteration {i}")
fig.tight_layout()
fig.savefig(filename)
def custom_moments_plot(results, filename, i):
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.set_title("iteration " + str(i))
ax.legend()
fig.savefig(filename)
def first_time_setup():
encoder = boutvecma.BOUTEncoder(
template_input="../../models/conduction/data/BOUT.inp"
)
# decoder = boutvecma.LogDataBOUTDecoder(variables=["T"])
decoder = boutvecma.SimpleBOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
actions = uq.actions.local_execute(
encoder,
os.path.abspath(
"../../build/models/conduction/conduction -q -q -q -q -d . |& tee run.log"
),
decoder,
root=".",
)
campaign = uq.Campaign(name=CAMPAIGN_NAME, actions=actions, params=params)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.5, 1.5),
"T:gauss_centre": chaospy.Uniform(0.5 * np.pi, 1.5 * np.pi),
}
sampler = uq.sampling.SCSampler(
vary=vary,
polynomial_order=1,
quadrature_rule="C",
sparse=True,
growth=True,
midpoint_level1=True,
dimension_adaptive=True,
)
campaign.set_sampler(sampler)
print(f"Output will be in {campaign.campaign_dir}")
sampler = campaign.get_active_sampler()
print(f"Computing {sampler.n_samples} samples")
time_start = time.time()
campaign.execute().collate(progress_bar=True)
# Create an analysis class and run the analysis.
analysis = create_analysis(campaign)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
plot_grid_2D(campaign, analysis, 0, f"{campaign.campaign_dir}/grid0.png")
for i in np.arange(1, 10):
refine_once(campaign, analysis, i)
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
return campaign
def create_analysis(campaign):
return uq.analysis.SCAnalysis(sampler=campaign.get_active_sampler(), qoi_cols=["T"])
def refine_once(campaign, analysis, iteration):
refine_sampling_plan(campaign, analysis, 1)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
results = campaign.last_analysis
plot_grid_2D(
campaign,
analysis,
iteration,
f"{campaign.campaign_dir}/grid{iteration:02}.png",
)
moment_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"moments{iteration:02}.png"
)
sobols_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"sobols_first{iteration:02}.png"
)
results.plot_sobols_first(
"T",
ylabel=f"iteration{iteration}",
xlabel=r"$\rho$",
filename=sobols_plot_filename,
)
plt.ylim(0, 1)
plt.savefig(f"{campaign.campaign_dir}/sobols{iteration:02}.png")
custom_moments_plot(results, moment_plot_filename, iteration)
with open(f"{campaign.campaign_dir}/last_iteration", "w") as f:
f.write(f"{iteration}")
def plot_results(campaign, moment_plot_filename, sobols_plot_filename):
results = campaign.get_last_analysis()
results.plot_sobols_first("T", xlabel=r"$\rho$", filename=sobols_plot_filename)
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.legend()
fig.savefig(moment_plot_filename)
print(f"Results are in:\n\t{moment_plot_filename}\n\t{sobols_plot_filename}")
def reload_campaign(directory):
"""Reload a campaign from a directory
Returns the campaign, analysis, and last iteration number
"""
campaign = uq.Campaign(
name=CAMPAIGN_NAME,
db_location=f"sqlite:///{os.path.abspath(directory)}/campaign.db",
)
analysis = create_analysis(campaign)
analysis.load_state(f"{campaign.campaign_dir}/analysis.state")
with open(f"{campaign.campaign_dir}/last_iteration", "r") as f:
iteration = int(f.read())
return campaign, analysis, iteration
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"conduction_sc",
description="Adaptive dimension refinement for 1D conduction model",
)
parser.add_argument(
"--restart", type=str, help="Restart previous campaign", default=None
)
parser.add_argument(
"-n", "--refinement-num", type=int, default=1, help="Number of refinements"
)
args = parser.parse_args()
if args.restart is None:
first_time_setup()
else:
campaign, analysis, last_iteration = reload_campaign(args.restart)
for iteration in range(
last_iteration + 1, last_iteration + args.refinement_num + 1
):
refine_once(campaign, analysis, iteration)
| [
[
[
31,
39
],
[
7317,
7325
]
],
[
[
47,
56
],
[
2460,
2469
],
[
2628,
2637
]
],
[
[
64,
78
],
[
3107,
3109
],
[
3330,
3332
],
[
3649,
3651
],
[
4627,
4629
],
[
6899,
6901
]
],
[
[
86,
93
],
[
3434,
3441
],
[
3480,
3487
],
[
3532,
3539
],
[
3585,
3592
]
],
[
[
101,
103
],
[
3158,
3160
],
[
5111,
5113
],
[
5223,
5225
],
[
6973,
6975
]
],
[
[
111,
122
],
[
1709,
1711
],
[
3039,
3041
],
[
3069,
3071
],
[
3607,
3609
],
[
3620,
3622
],
[
4420,
4422
],
[
5970,
5972
]
],
[
[
130,
134
],
[
4066,
4070
],
[
4496,
4500
]
],
[
[
142,
166
],
[
1240,
1243
],
[
1680,
1683
],
[
5472,
5475
],
[
5491,
5494
],
[
5941,
5944
]
],
[
[
169,
182
],
[
3347,
3360
],
[
6925,
6938
]
],
[
[
205,
225
],
[
4759,
4779
]
],
[
[
1173,
1185
],
[
4332,
4344
],
[
4950,
4962
]
],
[
[
1623,
1642
],
[
5561,
5580
]
],
[
[
2426,
2442
],
[
7744,
7760
]
],
[
[
4589,
4604
],
[
4197,
4212
],
[
7036,
7051
]
],
[
[
4711,
4722
],
[
7976,
7987
],
[
4446,
4457
]
],
[
[
5730,
5742
]
],
[
[
6742,
6757
],
[
7818,
7833
]
],
[
[
7308,
7314
],
[
7454,
7460
],
[
7563,
7569
],
[
7686,
7692
]
],
[
[
7679,
7683
],
[
7714,
7718
],
[
7834,
7838
],
[
7929,
7933
]
],
[
[
7781,
7789
],
[
7988,
7996
]
],
[
[
7791,
7799
],
[
7998,
8006
]
],
[
[
7801,
7815
],
[
7892,
7906
],
[
7912,
7926
]
],
[
[
7860,
7869
],
[
8008,
8017
]
]
] |
import machine, time
from machine import Pin
__version__ = '0.2.0'
__author__ = 'Roberto Sánchez'
__license__ = "Apache License 2.0. https://www.apache.org/licenses/LICENSE-2.0"
class HCSR04:
"""
    Driver for the HC-SR04 ultrasonic sensor.
    The sensor range is between 2cm and 4m.
    Timeouts received while listening to the echo pin are converted to OSError('Out of range').
"""
    # echo_timeout_us is based on the chip's range limit (400cm)
def __init__(self, trigger_pin, echo_pin, echo_timeout_us=500*2*30):
"""
trigger_pin: Output pin to send pulses
        echo_pin: Read-only pin used to measure the distance. The pin should be protected with a 1k resistor
        echo_timeout_us: Timeout in microseconds to listen to the echo pin.
        By default it is based on the sensor's range limit (4m)
"""
self.echo_timeout_us = echo_timeout_us
# Init trigger pin (out)
self.trigger = Pin(trigger_pin, mode=Pin.OUT, pull=None)
self.trigger.value(0)
# Init echo pin (in)
self.echo = Pin(echo_pin, mode=Pin.IN, pull=None)
def _send_pulse_and_wait(self):
"""
Send the pulse to trigger and listen on echo pin.
We use the method `machine.time_pulse_us()` to get the microseconds until the echo is received.
"""
self.trigger.value(0) # Stabilize the sensor
time.sleep_us(5)
self.trigger.value(1)
# Send a 10us pulse.
time.sleep_us(10)
self.trigger.value(0)
try:
pulse_time = machine.time_pulse_us(self.echo, 1, self.echo_timeout_us)
return pulse_time
except OSError as ex:
if ex.args[0] == 110: # 110 = ETIMEDOUT
raise OSError('Out of range')
raise ex
def distance_mm(self):
"""
        Get the distance in millimeters without floating point operations.
"""
pulse_time = self._send_pulse_and_wait()
        # To calculate the distance we take the pulse_time and divide it by 2
        # (the pulse travels the distance twice) and by 2.91 because
        # the speed of sound in air (343.2 m/s) is equivalent to
        # 0.34320 mm/us, i.e. 1mm every 2.91us
        # pulse_time // 2 // 2.91 -> pulse_time // 5.82 -> pulse_time * 100 // 582
mm = pulse_time * 100 // 582
return mm
def distance_cm(self):
"""
Get the distance in centimeters with floating point operations.
It returns a float
"""
pulse_time = self._send_pulse_and_wait()
        # To calculate the distance we take the pulse_time and divide it by 2
        # (the pulse travels the distance twice) and by 29.1 because
        # the speed of sound in air (343.2 m/s) is equivalent to
        # 0.034320 cm/us, i.e. 1cm every 29.1us
cms = (pulse_time / 2) / 29.1
return cms
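# Usage sketch (illustrative only): the pin numbers below are assumptions and
# must match the actual wiring of the board.
if __name__ == '__main__':
    sensor = HCSR04(trigger_pin=5, echo_pin=4)
    print('Distance:', sensor.distance_cm(), 'cm')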
| [
[
[
7,
14
],
[
1581,
1588
]
],
[
[
16,
20
],
[
1405,
1409
],
[
1492,
1496
]
],
[
[
42,
45
],
[
949,
952
],
[
971,
974
],
[
1075,
1078
],
[
1094,
1097
]
],
[
[
49,
60
]
],
[
[
72,
82
]
],
[
[
104,
115
]
],
[
[
193,
199
]
]
] |
#!/usr/bin/env python
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import multiprocessing
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
from distutils import sysconfig
import setuptools
# Flush output on newlines
sys.stdout.reconfigure(line_buffering=True)
os_name = platform.system()
# Work around breaking change in setuptools 60
setup_py_flags = []
if int(setuptools.__version__.split(".")[0]) >= 60:
setup_py_flags = ["--single-version-externally-managed", "--root=/"]
class BooleanFlag(argparse.Action):
def __init__(
self,
option_strings,
dest,
default,
required=False,
help="",
metavar=None,
):
assert all(not opt.startswith("--no") for opt in option_strings)
def flatten(list):
return [item for sublist in list for item in sublist]
option_strings = flatten(
[
[opt, "--no-" + opt[2:], "--no" + opt[2:]]
if opt.startswith("--")
else [opt]
for opt in option_strings
]
)
super().__init__(
option_strings,
dest,
nargs=0,
const=None,
default=default,
type=bool,
choices=None,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string):
setattr(namespace, self.dest, not option_string.startswith("--no"))
required_thrust_version = "cuda-11.2"
# Global variable for verbose installation
verbose_global = False
def verbose_check_call(*args, **kwargs):
if verbose_global:
print('Executing: "', " ".join(*args), '" with ', kwargs)
subprocess.check_call(*args, **kwargs)
def verbose_check_output(*args, **kwargs):
if verbose_global:
print('Executing: "', " ".join(*args), '" with ', kwargs)
return subprocess.check_output(*args, **kwargs)
def find_active_python_version_and_path():
# Launching a sub-process to do this in a general way seems hard
version = (
str(sys.version_info.major)
+ "."
+ str(sys.version_info.minor)
+ "."
+ str(sys.version_info.micro)
)
cv = sysconfig.get_config_vars()
paths = [os.path.join(cv[p], cv["LDLIBRARY"]) for p in ("LIBDIR", "LIBPL")]
# ensure that static libraries are replaced with the dynamic version
paths = [
os.path.splitext(p)[0] + (".dylib" if os_name == "Darwin" else ".so")
for p in paths
]
paths = [p for p in paths if os.path.isfile(p)]
e = "Error: could not auto-locate python library."
assert paths, e
return version, paths[0]
def git_clone(repo_dir, url, branch=None, tag=None, commit=None):
assert branch is not None or tag is not None or commit is not None
if branch is not None:
verbose_check_call(
["git", "clone", "--recursive", "-b", branch, url, repo_dir]
)
elif commit is not None:
verbose_check_call(["git", "clone", "--recursive", url, repo_dir])
verbose_check_call(["git", "checkout", commit], cwd=repo_dir)
verbose_check_call(
["git", "submodule", "update", "--init"], cwd=repo_dir
)
git_reset(repo_dir, commit)
else:
verbose_check_call(
[
"git",
"clone",
"--recursive",
"--single-branch",
"-b",
tag,
url,
repo_dir,
]
)
verbose_check_call(["git", "checkout", "-b", "master"], cwd=repo_dir)
def git_reset(repo_dir, refspec):
verbose_check_call(["git", "reset", "--hard", refspec], cwd=repo_dir)
def git_update(repo_dir, branch=None, tag=None, commit=None):
if branch is not None:
verbose_check_call(["git", "fetch"], cwd=repo_dir)
verbose_check_call(["git", "checkout", branch], cwd=repo_dir)
verbose_check_call(["git", "pull", "--ff-only"], cwd=repo_dir)
else:
verbose_check_call(["git", "fetch"], cwd=repo_dir)
verbose_check_call(["git", "checkout", commit or tag], cwd=repo_dir)
def load_json_config(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
return None
def dump_json_config(filename, value):
with open(filename, "w") as f:
return json.dump(value, f)
def symlink(from_path, to_path):
if not os.path.lexists(to_path):
os.symlink(from_path, to_path)
def install_gasnet(gasnet_dir, conduit, thread_count):
print("Legate is installing GASNet into a local directory...")
temp_dir = tempfile.mkdtemp()
git_clone(
temp_dir,
url="https://github.com/StanfordLegion/gasnet.git",
branch="master",
)
# Update the configuration file with the prefix for our output
# Then we can invoke make
verbose_check_call(
[
"make",
"-j",
str(thread_count),
"CONDUIT=" + str(conduit),
"GASNET_INSTALL_DIR=" + str(gasnet_dir),
],
cwd=temp_dir,
)
shutil.rmtree(temp_dir)
def install_legion(legion_src_dir, branch, commit=None):
print("Legate is installing Legion into a local directory...")
# For now all we have to do is clone legion since we build it with Legate
git_clone(
legion_src_dir,
url="https://gitlab.com/StanfordLegion/legion.git",
branch=branch,
commit=commit,
)
def install_thrust(thrust_dir):
print("Legate is installing Thrust into a local directory...")
git_clone(
thrust_dir,
url="https://github.com/thrust/thrust.git",
tag=required_thrust_version,
)
def update_legion(legion_src_dir, branch, commit=None):
# Make sure we are on the right branch for single/multi-node
git_update(legion_src_dir, branch=branch, commit=commit)
def build_legion(
legion_src_dir,
install_dir,
cmake,
cmake_exe,
cuda_dir,
debug,
debug_release,
check_bounds,
cuda,
arch,
openmp,
march,
llvm,
hdf,
spy,
gasnet,
gasnet_dir,
conduit,
pyversion,
pylib_name,
maxdim,
maxfields,
clean_first,
extra_flags,
thread_count,
verbose,
):
no_hijack = True
if cuda and os.environ.get("USE_CUDART_HIJACK", "0") == "1":
print(
"""
#####################################################################
Warning: Realm's CUDA runtime hijack is incompatible with NCCL.
Please note that your code will crash catastrophically as soon as it
calls into NCCL either directly or through some other Legate library.
#####################################################################
"""
)
time.sleep(10)
no_hijack = False
if cmake:
build_dir = os.path.join(legion_src_dir, "build")
try:
shutil.rmtree(build_dir)
except FileNotFoundError:
pass
if not os.path.exists(build_dir):
os.mkdir(build_dir)
flags = (
[
"-DCMAKE_BUILD_TYPE=%s"
% (
"Debug"
if debug
else "RelWithDebInfo"
if debug_release
else "Release"
),
"-DLegion_MAX_DIM=%s" % (str(maxdim)),
"-DLegion_MAX_FIELDS=%s" % (str(maxfields)),
"-DLegion_USE_CUDA=%s" % ("ON" if cuda else "OFF"),
"-DLegion_GPU_ARCH=%s" % arch,
"-DLegion_USE_OpenMP=%s" % ("ON" if openmp else "OFF"),
"-DBUILD_MARCH=%s" % march,
"-DLegion_USE_LLVM=%s" % ("ON" if llvm else "OFF"),
"-DLegion_USE_GASNet=%s" % ("ON" if gasnet else "OFF"),
"-DLegion_USE_HDF5=%s" % ("ON" if hdf else "OFF"),
"-DCMAKE_INSTALL_PREFIX=%s" % (os.path.realpath(install_dir)),
"-DLegion_USE_Python=On",
"-DLegion_Python_Version=%s" % pyversion,
"-DLegion_REDOP_COMPLEX=On",
"-DLegion_REDOP_HALF=On",
"-DBUILD_SHARED_LIBS=ON",
"-DLegion_BUILD_BINDINGS=On",
]
+ extra_flags
+ (["-DLegion_BOUNDS_CHECKS=On"] if check_bounds else [])
+ (["-DLegion_HIJACK_CUDART=Off"] if no_hijack else [])
+ (
["-DGASNet_ROOT_DIR=%s" % gasnet_dir]
if gasnet_dir is not None
else []
)
+ (
["-DGASNet_CONDUIT=%s" % conduit]
if conduit is not None
else []
)
+ (
["-DCUDA_TOOLKIT_ROOT_DIR=%s" % cuda_dir]
if cuda_dir is not None
else []
)
+ (
["-DCMAKE_CXX_COMPILER=%s" % os.environ["CXX"]]
if "CXX" in os.environ
else []
)
+ (
["-DCMAKE_CXX_FLAGS=%s" % os.environ["CC_FLAGS"]]
if "CC_FLAGS" in os.environ
else []
)
)
make_flags = ["VERBOSE=1"] if verbose else []
make_flags += ["-C", os.path.realpath(build_dir)]
if spy:
raise NotImplementedError("Need support for Legion Spy with cmake")
try:
subprocess.check_output([cmake_exe, "--version"])
except OSError:
print(
"Error: CMake is not installed or otherwise not executable. "
"Please check"
)
print(
"your CMake installation and try again. You can use the "
"--with-cmake flag"
)
print("to specify the CMake executable if it is not on PATH.")
print()
print("Attempted to execute: %s" % cmake_exe)
sys.exit(1)
verbose_check_call(
[cmake_exe] + flags + [legion_src_dir], cwd=build_dir
)
verbose_check_call(
["make"] + make_flags + ["-j", str(thread_count), "install"],
cwd=build_dir,
)
# TODO: install legion spy and legion prof
else:
version = pyversion.split(".")
flags = (
[
"LG_RT_DIR=%s" % (os.path.join(legion_src_dir, "runtime")),
"DEBUG=%s" % (1 if debug else 0),
"DEBUG_RELEASE=%s" % (1 if debug_release else 0),
"MAX_DIM=%s" % (str(maxdim)),
"MAX_FIELDS=%s" % (str(maxfields)),
"USE_CUDA=%s" % (1 if cuda else 0),
"GPU_ARCH=%s" % arch,
"USE_OPENMP=%s" % (1 if openmp else 0),
"MARCH=%s" % march,
"USE_LLVM=%s" % (1 if llvm else 0),
"USE_GASNET=%s" % (1 if gasnet else 0),
"USE_HDF=%s" % (1 if hdf else 0),
"PREFIX=%s" % (os.path.realpath(install_dir)),
"PYTHON_VERSION_MAJOR=%s" % version[0],
"PYTHON_VERSION_MINOR=%s" % version[1],
"PYTHON_LIB=%s" % pylib_name,
"FORCE_PYTHON=1",
"USE_COMPLEX=1",
"USE_HALF=1",
"USE_SPY=%s" % (1 if spy else 0),
"REALM_USE_CUDART_HIJACK=%s" % (1 if not no_hijack else 0),
]
+ extra_flags
+ (["BOUNDS_CHECKS=1"] if check_bounds else [])
+ (["GASNET=%s" % gasnet_dir] if gasnet_dir is not None else [])
+ (["CONDUIT=%s" % conduit] if conduit is not None else [])
+ (["CUDA=%s" % cuda_dir] if cuda_dir is not None else [])
)
legion_python_dir = os.path.join(legion_src_dir, "bindings", "python")
if clean_first:
verbose_check_call(
["make"] + flags + ["clean"], cwd=legion_python_dir
)
# Explicitly ask for C++17, otherwise the Legion build will use C++11.
env = dict(os.environ.items())
env["CXXFLAGS"] = "-std=c++17 " + env.get("CXXFLAGS", "")
verbose_check_call(
["make"] + flags + ["-j", str(thread_count), "install"],
cwd=legion_python_dir,
env=env,
)
verbose_check_call(
[
sys.executable,
"setup.py",
"install",
"--prefix",
str(os.path.realpath(install_dir)),
]
+ setup_py_flags,
cwd=legion_python_dir,
)
verbose_check_call(
[
"cp",
"legion_spy.py",
os.path.join(install_dir, "share", "legate", "legion_spy.py"),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"legion_prof.py",
os.path.join(install_dir, "share", "legate", "legion_prof.py"),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"legion_serializer.py",
os.path.join(
install_dir, "share", "legate", "legion_serializer.py"
),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"legion_prof_copy.html.template",
os.path.join(
install_dir,
"share",
"legate",
"legion_prof_copy.html.template",
),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"-r",
"legion_prof_files",
os.path.join(install_dir, "share", "legate", "legion_prof_files"),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
def build_legate_core(
install_dir,
legate_core_dir,
cmake,
cmake_exe,
cuda_dir,
nccl_dir,
debug,
debug_release,
cuda,
arch,
openmp,
march,
spy,
gasnet,
clean_first,
thread_count,
verbose,
unknown,
):
src_dir = os.path.join(legate_core_dir, "src")
if cmake:
print("Warning: CMake is currently not supported for Legate build.")
print("Using GNU Make for now.")
make_flags = [
"LEGATE_DIR=%s" % install_dir,
"DEBUG=%s" % (1 if debug else 0),
"DEBUG_RELEASE=%s" % (1 if debug_release else 0),
"USE_CUDA=%s" % (1 if cuda else 0),
"USE_OPENMP=%s" % (1 if openmp else 0),
"MARCH=%s" % march,
"GPU_ARCH=%s" % arch,
"PREFIX=%s" % str(install_dir),
"USE_GASNET=%s" % (1 if gasnet else 0),
"NCCL_DIR=%s" % nccl_dir,
] + (["CUDA=%s" % cuda_dir] if cuda_dir is not None else [])
if clean_first:
verbose_check_call(["make"] + make_flags + ["clean"], cwd=src_dir)
verbose_check_call(
["make"] + make_flags + ["-j", str(thread_count), "install"],
cwd=src_dir,
)
# Fill in config.mk.in and copy it to the target destination
with open(os.path.join(src_dir, "config.mk.in")) as f:
content = f.read()
content = content.format(
debug=repr(1 if debug else 0),
debug_release=repr(1 if debug_release else 0),
cuda=repr(1 if cuda else 0),
arch=(arch if arch is not None else ""),
cuda_dir=(cuda_dir if cuda_dir is not None else ""),
openmp=repr(1 if openmp else 0),
march=march,
gasnet=repr(1 if gasnet else 0),
)
with open(os.path.join(src_dir, "config.mk"), "wb") as f:
f.write(content.encode("utf-8"))
cmd = ["cp", "config.mk", os.path.join(install_dir, "share", "legate")]
verbose_check_call(cmd, cwd=src_dir)
# Then run setup.py
cmd = [
sys.executable,
"setup.py",
"install",
"--recurse",
] + setup_py_flags
if unknown is not None:
try:
prefix_loc = unknown.index("--prefix")
cmd.extend(unknown[prefix_loc : prefix_loc + 2])
except ValueError:
cmd += ["--prefix", str(install_dir)]
else:
cmd += ["--prefix", str(install_dir)]
verbose_check_call(cmd, cwd=legate_core_dir)
def install(
gasnet,
cuda,
arch,
openmp,
march,
hdf,
llvm,
spy,
conduit,
nccl_dir,
cmake,
cmake_exe,
install_dir,
gasnet_dir,
pylib_name,
cuda_dir,
maxdim,
maxfields,
debug,
debug_release,
check_bounds,
clean_first,
extra_flags,
thread_count,
verbose,
thrust_dir,
legion_branch,
unknown,
):
global verbose_global
verbose_global = verbose
legate_core_dir = os.path.dirname(os.path.realpath(__file__))
cmake_config = os.path.join(legate_core_dir, ".cmake.json")
dump_json_config(cmake_config, cmake)
if pylib_name is None:
pyversion, pylib_name = find_active_python_version_and_path()
else:
f_name = os.path.split(pylib_name)[-1]
match = re.match(r"^libpython(\d\d?\.\d\d?)", f_name)
e = "Unable to get version from library name {}".format(pylib_name)
assert match, e
pyversion = match.group(1)
print("Using python lib and version: {}, {}".format(pylib_name, pyversion))
install_dir_config = os.path.join(legate_core_dir, ".install-dir.json")
if install_dir is None:
install_dir = load_json_config(install_dir_config)
if install_dir is None:
install_dir = os.path.join(legate_core_dir, "install")
install_dir = os.path.realpath(install_dir)
dump_json_config(install_dir_config, install_dir)
os.makedirs(os.path.join(install_dir, "share", "legate"), exist_ok=True)
if thread_count is None:
thread_count = multiprocessing.cpu_count()
# Save the maxdim config
maxdim_config = os.path.join(legate_core_dir, ".maxdim.json")
# Check the max dimensions
if maxdim < 1 or maxdim > 9:
raise Exception(
"The maximum number of Legate dimensions must be between 1 and 9 "
"inclusive"
)
dump_json_config(maxdim_config, str(maxdim))
# Save the maxfields config
maxfields_config = os.path.join(legate_core_dir, ".maxfields.json")
# Check that max fields is between 32 and 4096 and is a power of 2
if maxfields not in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
raise Exception(
"The maximum number of Legate fields must be a power of 2 between "
"32 and 4096 inclusive"
)
dump_json_config(maxfields_config, str(maxfields))
# If the user asked for a conduit and we don't have gasnet then install it
if gasnet:
conduit_config = os.path.join(legate_core_dir, ".conduit.json")
if conduit is None:
conduit = load_json_config(conduit_config)
if conduit is None:
raise Exception(
"The first time you use GASNet you need to tell us "
'which conduit to use with the "--conduit" flag'
)
dump_json_config(conduit_config, conduit)
gasnet_config = os.path.join(
legate_core_dir, ".gasnet" + str(conduit) + ".json"
)
if gasnet_dir is None:
gasnet_dir = load_json_config(gasnet_config)
if gasnet_dir is None:
gasnet_dir = os.path.join(install_dir, "gasnet")
if not os.path.exists(gasnet_dir):
install_gasnet(gasnet_dir, conduit, thread_count)
dump_json_config(gasnet_config, gasnet_dir)
# If the user asked for CUDA, make sure we know where the install
# directory is
if cuda:
cuda_config = os.path.join(legate_core_dir, ".cuda.json")
if cuda_dir is None:
cuda_dir = load_json_config(cuda_config)
if cuda_dir is None:
raise Exception(
"The first time you use CUDA you need to tell Legate "
'where CUDA is installed with the "--with-cuda" flag.'
)
dump_json_config(cuda_config, cuda_dir)
arch_config = os.path.join(legate_core_dir, ".arch.json")
if arch is None:
arch = load_json_config(arch_config)
if arch is None:
try:
import pynvml
pynvml.nvmlInit()
major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(
pynvml.nvmlDeviceGetHandleByIndex(0)
)
arch = f"{major}{minor}"
pynvml.nvmlShutdown()
except Exception as exc:
raise Exception(
"Could not auto-detect CUDA GPU architecture, please "
"specify the target architecture using --arch"
) from exc
dump_json_config(arch_config, arch)
nccl_config = os.path.join(legate_core_dir, ".nccl.json")
if nccl_dir is None:
nccl_dir = load_json_config(nccl_config)
if nccl_dir is None:
raise Exception(
"The first time you use CUDA you need to tell Legate "
'where NCCL is installed with the "--with-nccl" flag.'
)
dump_json_config(nccl_config, nccl_dir)
# install a stable version of Thrust
thrust_config = os.path.join(legate_core_dir, ".thrust.json")
if thrust_dir is None:
thrust_dir = load_json_config(thrust_config)
if thrust_dir is None:
thrust_dir = os.path.join(install_dir, "thrust")
thrust_dir = os.path.realpath(thrust_dir)
if not os.path.exists(thrust_dir):
install_thrust(thrust_dir)
# Simply put Thrust into the environment.
os.environ["CXXFLAGS"] = (
"-I" + thrust_dir + " " + os.environ.get("CXXFLAGS", "")
)
dump_json_config(thrust_config, thrust_dir)
# Build Legion from scratch.
legion_src_dir = os.path.join(legate_core_dir, "legion")
if not os.path.exists(legion_src_dir):
install_legion(legion_src_dir, branch=legion_branch)
elif clean_first:
update_legion(legion_src_dir, branch=legion_branch)
build_legion(
legion_src_dir,
install_dir,
cmake,
cmake_exe,
cuda_dir,
debug,
debug_release,
check_bounds,
cuda,
arch,
openmp,
march,
llvm,
hdf,
spy,
gasnet,
gasnet_dir,
conduit,
pyversion,
pylib_name,
maxdim,
maxfields,
clean_first,
extra_flags,
thread_count,
verbose,
)
build_legate_core(
install_dir,
legate_core_dir,
cmake,
cmake_exe,
cuda_dir,
nccl_dir,
debug,
debug_release,
cuda,
arch,
openmp,
march,
spy,
gasnet,
clean_first,
thread_count,
verbose,
unknown,
)
# Copy any executables that we need for legate functionality
verbose_check_call(
["cp", "legate.py", os.path.join(install_dir, "bin", "legate")],
cwd=legate_core_dir,
)
verbose_check_call(
[
"cp",
"scripts/lgpatch.py",
os.path.join(install_dir, "bin", "lgpatch"),
],
cwd=legate_core_dir,
)
verbose_check_call(
["cp", "bind.sh", os.path.join(install_dir, "bin", "bind.sh")],
cwd=legate_core_dir,
)
if cuda:
# Copy CUDA configuration that the launcher needs to find CUDA path
verbose_check_call(
[
"cp",
".cuda.json",
os.path.join(install_dir, "share", "legate", ".cuda.json"),
],
cwd=legate_core_dir,
)
# Record the path to NCCL that was used in this build
libs_path = os.path.join(install_dir, "share", ".legate-libs.json")
try:
with open(libs_path, "r") as f:
libs_config = json.load(f)
except (FileNotFoundError, IOError, json.JSONDecodeError):
libs_config = {}
libs_config["nccl"] = nccl_dir
with open(libs_path, "w") as f:
json.dump(libs_config, f)
# Copy thrust configuration
verbose_check_call(
[
"cp",
thrust_config,
os.path.join(install_dir, "share", "legate"),
],
cwd=legate_core_dir,
)
def driver():
parser = argparse.ArgumentParser(description="Install Legate front end.")
parser.add_argument(
"--install-dir",
dest="install_dir",
metavar="DIR",
required=False,
help="Path to install all Legate-related software",
)
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
required=False,
default=os.environ.get("DEBUG", "0") == "1",
help="Build Legate and Legion with no optimizations, and full "
"debugging checks.",
)
parser.add_argument(
"--debug-release",
dest="debug_release",
action="store_true",
required=False,
default=os.environ.get("DEBUG_RELEASE", "0") == "1",
help="Build Legate and Legion with optimizations enabled, but include "
"debugging symbols.",
)
parser.add_argument(
"--check-bounds",
dest="check_bounds",
action="store_true",
required=False,
default=os.environ.get("CHECK_BOUNDS", "0") == "1",
help="Build Legion with bounds checking enabled (warning: expensive).",
)
parser.add_argument(
"--max-dim",
dest="maxdim",
type=int,
default=int(os.environ.get("LEGION_MAX_DIM", 4)),
help="Maximum number of dimensions that Legate will support",
)
parser.add_argument(
"--max-fields",
dest="maxfields",
type=int,
default=int(os.environ.get("LEGION_MAX_FIELDS", 256)),
help="Maximum number of fields that Legate will support",
)
parser.add_argument(
"--gasnet",
dest="gasnet",
action="store_true",
required=False,
default=os.environ.get("USE_GASNET", "0") == "1",
help="Build Legate with GASNet.",
)
parser.add_argument(
"--with-gasnet",
dest="gasnet_dir",
metavar="DIR",
required=False,
default=os.environ.get("GASNET"),
help="Path to GASNet installation directory.",
)
parser.add_argument(
"--cuda",
action=BooleanFlag,
default=os.environ.get("USE_CUDA", "0") == "1",
help="Build Legate with CUDA support.",
)
parser.add_argument(
"--with-cuda",
dest="cuda_dir",
metavar="DIR",
required=False,
default=os.environ.get("CUDA"),
help="Path to CUDA installation directory.",
)
parser.add_argument(
"--arch",
dest="arch",
action="store",
required=False,
default=None,
help="Specify the target GPU architecture.",
)
parser.add_argument(
"--openmp",
action=BooleanFlag,
default=os.environ.get("USE_OPENMP", "0") == "1",
help="Build Legate with OpenMP support.",
)
parser.add_argument(
"--march",
dest="march",
required=False,
default="native",
help="Specify the target CPU architecture.",
)
parser.add_argument(
"--llvm",
dest="llvm",
action="store_true",
required=False,
default=os.environ.get("USE_LLVM", "0") == "1",
help="Build Legate with LLVM support.",
)
parser.add_argument(
"--hdf5",
"--hdf",
dest="hdf",
action="store_true",
required=False,
default=os.environ.get("USE_HDF", "0") == "1",
help="Build Legate with HDF support.",
)
parser.add_argument(
"--spy",
dest="spy",
action="store_true",
required=False,
default=os.environ.get("USE_SPY", "0") == "1",
help="Build Legate with detailed Legion Spy enabled.",
)
parser.add_argument(
"--conduit",
dest="conduit",
action="store",
required=False,
choices=["ibv", "ucx", "aries", "mpi", "udp"],
default=os.environ.get("CONDUIT"),
help="Build Legate with specified GASNet conduit.",
)
parser.add_argument(
"--with-nccl",
dest="nccl_dir",
metavar="DIR",
required=False,
default=os.environ.get("NCCL_PATH"),
help="Path to NCCL installation directory.",
)
parser.add_argument(
"--python-lib",
dest="pylib_name",
action="store",
required=False,
default=None,
help=(
"Build Legate against the specified Python shared library. "
"Default is to use the Python library currently executing this "
"install script."
),
)
parser.add_argument(
"--cmake",
action=BooleanFlag,
default=os.environ.get("USE_CMAKE", "0") == "1",
help="Build Legate with CMake instead of GNU Make.",
)
parser.add_argument(
"--with-cmake",
dest="cmake_exe",
metavar="EXE",
required=False,
default="cmake",
help="Path to CMake executable (if not on PATH).",
)
parser.add_argument(
"--clean",
dest="clean_first",
action=BooleanFlag,
default=True,
help="Clean before build, and pull latest Legion.",
)
parser.add_argument(
"--extra",
dest="extra_flags",
action="append",
required=False,
default=[],
help="Extra flags for make command.",
)
parser.add_argument(
"-j",
dest="thread_count",
nargs="?",
type=int,
help="Number of threads used to compile.",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
required=False,
help="Enable verbose build output.",
)
parser.add_argument(
"--with-thrust",
dest="thrust_dir",
metavar="DIR",
required=False,
default=os.environ.get("THRUST_PATH"),
help="Path to Thrust installation directory. The required version of "
"Thrust is " + required_thrust_version + " or compatible. If not "
"provided, Thrust will be installed automatically.",
)
parser.add_argument(
"--legion-branch",
dest="legion_branch",
required=False,
default="control_replication",
help="Legion branch to build Legate with.",
)
args, unknown = parser.parse_known_args()
install(unknown=unknown, **vars(args))
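# Hedged usage sketch (not part of the original script): the command line below
# mirrors the flags defined in driver(); the CUDA/NCCL paths, GPU architecture,
# and conduit are illustrative assumptions, not shipped defaults.
#
#   ./install.py --cuda --with-cuda /usr/local/cuda --arch 70 \
#       --with-nccl /opt/nccl --gasnet --conduit ibv -j 16
#
# driver() reads sys.argv, so it can also be exercised programmatically:
def _example_invoke_driver():
    import sys
    sys.argv = ["install.py", "--openmp", "--march", "native", "-j", "4"]
    driver()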
if __name__ == "__main__":
driver()
| [
[
[
620,
628
],
[
1136,
1144
],
[
25462,
25470
]
],
[
[
636,
640
],
[
4959,
4963
],
[
5103,
5107
],
[
24991,
24995
],
[
25048,
25052
],
[
25191,
25195
]
],
[
[
648,
663
],
[
18632,
18647
]
],
[
[
671,
673
],
[
2944,
2946
],
[
3106,
3108
],
[
3238,
3240
],
[
5169,
5171
],
[
5203,
5205
],
[
7066,
7068
],
[
7599,
7601
],
[
7753,
7755
],
[
7792,
7794
],
[
8695,
8697
],
[
9733,
9735
],
[
9686,
9688
],
[
9897,
9899
],
[
9840,
9842
],
[
10039,
10041
],
[
11134,
11136
],
[
11761,
11763
],
[
12533,
12535
],
[
12820,
12822
],
[
13246,
13248
],
[
13460,
13462
],
[
13546,
13548
],
[
13685,
13687
],
[
13772,
13774
],
[
13917,
13919
],
[
14040,
14042
],
[
14195,
14197
],
[
14377,
14379
],
[
14537,
14539
],
[
14627,
14629
],
[
14961,
14963
],
[
15921,
15923
],
[
16387,
16389
],
[
16506,
16508
],
[
17554,
17556
],
[
17570,
17572
],
[
17618,
17620
],
[
17830,
17832
],
[
18163,
18165
],
[
18359,
18361
],
[
18418,
18420
],
[
18506,
18508
],
[
18518,
18520
],
[
18710,
18712
],
[
19063,
19065
],
[
19576,
19578
],
[
20005,
20007
],
[
20245,
20247
],
[
20296,
20298
],
[
20563,
20565
],
[
20994,
20996
],
[
21809,
21811
],
[
22279,
22281
],
[
22461,
22463
],
[
22514,
22516
],
[
22554,
22556
],
[
22728,
22730
],
[
22667,
22669
],
[
22868,
22870
],
[
22919,
22921
],
[
24048,
24050
],
[
24226,
24228
],
[
24367,
24369
],
[
24647,
24649
],
[
24848,
24850
],
[
25341,
25343
],
[
25853,
25855
],
[
26148,
26150
],
[
26458,
26460
],
[
26695,
26697
],
[
26922,
26924
],
[
27174,
27176
],
[
27404,
27406
],
[
27578,
27580
],
[
27808,
27810
],
[
28173,
28175
],
[
28579,
28581
],
[
28822,
28824
],
[
29045,
29047
],
[
29342,
29344
],
[
29571,
29573
],
[
30105,
30107
],
[
31296,
31298
]
],
[
[
681,
689
],
[
905,
913
]
],
[
[
697,
699
],
[
17876,
17878
]
],
[
[
707,
713
],
[
5851,
5857
],
[
7662,
7668
]
],
[
[
721,
731
],
[
2393,
2403
],
[
2577,
2587
],
[
10189,
10199
]
],
[
[
739,
742
],
[
850,
853
],
[
2760,
2763
],
[
2812,
2815
],
[
2864,
2867
],
[
10713,
10716
],
[
13127,
13130
],
[
16637,
16640
]
],
[
[
750,
758
],
[
5373,
5381
]
],
[
[
766,
770
],
[
7523,
7527
]
],
[
[
793,
802
],
[
2903,
2912
]
],
[
[
811,
821
],
[
998,
1008
]
],
[
[
895,
902
],
[
3144,
3151
]
],
[
[
971,
985
],
[
13306,
13320
],
[
16721,
16735
]
],
[
[
1047,
1061
],
[
13306,
13320
],
[
16721,
16735
]
],
[
[
1124,
1135
],
[
27549,
27560
],
[
28144,
28155
],
[
30076,
30087
],
[
30512,
30523
]
],
[
[
2152,
2175
],
[
6430,
6453
],
[
31429,
31452
]
],
[
[
2234,
2248
],
[
2307,
2321
],
[
2484,
2498
]
],
[
[
2263,
2281
],
[
3535,
3553
],
[
3675,
3693
],
[
3750,
3768
],
[
3820,
3838
],
[
3971,
3989
],
[
4241,
4259
],
[
4351,
4369
],
[
4520,
4538
],
[
4579,
4597
],
[
4649,
4667
],
[
4730,
4748
],
[
4789,
4807
],
[
5617,
5635
],
[
10733,
10751
],
[
10837,
10855
],
[
12620,
12638
],
[
12914,
12932
],
[
13077,
13095
],
[
13371,
13389
],
[
13595,
13613
],
[
13821,
13839
],
[
14089,
14107
],
[
14426,
14444
],
[
15654,
15672
],
[
15725,
15743
],
[
16556,
16574
],
[
17026,
17044
],
[
24000,
24018
],
[
24132,
24150
],
[
24321,
24339
],
[
24545,
24563
],
[
25254,
25272
]
],
[
[
2438,
2458
]
],
[
[
2624,
2659
],
[
17765,
17800
]
],
[
[
3367,
3376
],
[
5396,
5405
],
[
6083,
6092
],
[
6335,
6344
]
],
[
[
4317,
4326
],
[
3925,
3934
]
],
[
[
4427,
4437
],
[
6588,
6598
]
],
[
[
4864,
4880
],
[
18264,
18280
],
[
19673,
19689
],
[
20149,
20165
],
[
20659,
20675
],
[
21082,
21098
],
[
21905,
21921
],
[
22373,
22389
]
],
[
[
5018,
5034
],
[
17667,
17683
],
[
18452,
18468
],
[
18962,
18978
],
[
19405,
19421
],
[
19939,
19955
],
[
20394,
20410
],
[
20931,
20947
],
[
21750,
21766
],
[
22177,
22193
],
[
22769,
22785
]
],
[
[
5129,
5136
]
],
[
[
5240,
5254
],
[
20336,
20350
]
],
[
[
5881,
5895
],
[
22959,
22973
]
],
[
[
6236,
6250
],
[
22590,
22604
]
],
[
[
6467,
6480
],
[
23042,
23055
]
],
[
[
6651,
6663
],
[
23098,
23110
]
],
[
[
14678,
14695
],
[
23587,
23604
]
],
[
[
17077,
17084
],
[
31802,
31809
]
],
[
[
25439,
25445
],
[
31874,
31880
]
],
[
[
17506,
17520
]
]
] |
import pytest
import os
import RaveEngine.projectManager as projectManager
import RaveEngine.botManager as botManager
import RaveEngine.configManager as configManager
import Utils.commandManager as commandManager
from flaky import flaky
import Utils.sad as sad
import Utils.utils as utils
@pytest.fixture(autouse=True)
def setup():
projectManager.createInitProject(createBasicModules=True)
yield
commandManager.runRmDirCommand(sad._CONFIG_DIR_NAME_)
commandManager.runRmDirCommand(sad._LOG_DIR_NAME_)
commandManager.runRmDirCommand(sad._MODULES_DIR_)
commandManager.runRmDirCommand(sad._OUTPUT_BOT_DIR_)
def data_generateHeaders():
return [sad._HEADER_TOKEN_FLAG]
def data_generateBot():
data = [(False, sad._HOSTING_HEROKU_OPTION_), (True, sad._HOSTING_HEROKU_OPTION_)]
data += [(False, sad._HOSTING_GAE_OPTION_), (True, sad._HOSTING_GAE_OPTION_)]
return data
@flaky(3,1)
@pytest.mark.parametrize('testFlag, hosting', data_generateBot())
def test_generateBot(testFlag, hosting):
projectManager.createInitProject(createBasicModules=True, hostingOption=hosting)
if not testFlag:
with pytest.raises(SystemExit) as pytest_wrapped_e:
botManager.generateBot(testFlag=testFlag)
assert pytest_wrapped_e.type == SystemExit
config = configManager.getConfig()
configManager.set(config, sad._CONFIG_RAVEGEN_SECTION_, sad._CONFIG_DEPLOY_URL_OPTION, "www.test.com")
botManager.generateBot(testFlag=testFlag)
assert os.path.exists(sad._OUTPUT_BOT_DIR_)
assert os.path.exists(sad.OUTPUT_BOT_PATH)
headers = utils._getHeaders()
if testFlag:
assert headers[sad._HEADER_TOKEN_FLAG] == sad._STR_TRUE_
else:
assert headers[sad._HEADER_TOKEN_FLAG] == sad._STR_FALSE_
@flaky(3,1)
@pytest.mark.parametrize('header', data_generateHeaders())
def test_generateHeaders(header):
botManager.generateBot()
headers = utils._getHeaders()
assert header in headers
| [
[
[
7,
13
],
[
291,
297
],
[
918,
924
],
[
1796,
1802
],
[
1143,
1149
]
],
[
[
21,
23
],
[
1506,
1508
],
[
1554,
1556
]
],
[
[
31,
74
],
[
337,
351
],
[
1028,
1042
]
],
[
[
82,
117
],
[
1202,
1212
],
[
1453,
1463
],
[
1892,
1902
]
],
[
[
125,
166
],
[
1312,
1325
],
[
1346,
1359
]
],
[
[
174,
212
],
[
409,
423
],
[
467,
481
],
[
522,
536
],
[
576,
590
]
],
[
[
231,
236
],
[
906,
911
],
[
1784,
1789
]
],
[
[
244,
260
],
[
440,
443
],
[
498,
501
],
[
553,
556
],
[
607,
610
],
[
670,
673
],
[
739,
742
],
[
776,
779
],
[
827,
830
],
[
861,
864
],
[
1372,
1375
],
[
1402,
1405
],
[
1521,
1524
],
[
1569,
1572
],
[
1664,
1667
],
[
1691,
1694
],
[
1739,
1742
],
[
1766,
1769
]
],
[
[
268,
288
],
[
1604,
1609
],
[
1931,
1936
]
],
[
[
324,
329
]
],
[
[
634,
654
],
[
1830,
1850
]
],
[
[
699,
715
],
[
963,
979
]
],
[
[
987,
1003
]
],
[
[
1858,
1878
]
]
] |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from conftest import Mock
import responses
class TestIP(object):
@responses.activate
def test_get_ip(self, manager):
data = Mock.mock_get('ip_address/10.1.0.101')
ip_addr = manager.get_ip('10.1.0.101')
assert type(ip_addr).__name__ == 'IPAddress'
assert ip_addr.address == '10.1.0.101'
assert ip_addr.ptr_record == 'a.ptr.record'
@responses.activate
def test_get_ips(self, manager):
data = Mock.mock_get('ip_address')
ip_addrs = manager.get_ips()
for ip_addr in ip_addrs:
assert type(ip_addr).__name__ == 'IPAddress'
@responses.activate
def test_modify_ip_oop(self, manager):
# get ip
data = Mock.mock_get('ip_address/10.1.0.101')
ip_addr = manager.get_ip('10.1.0.101')
# put ip
data = Mock.mock_put('ip_address/10.1.0.101')
ip_addr.ptr_record = 'my.ptr.record'
ip_addr.save()
assert ip_addr.ptr_record == 'my.ptr.record'
@responses.activate
def test_modify_ip(self, manager):
data = Mock.mock_put('ip_address/10.1.0.101')
ip_addr = manager.modify_ip('10.1.0.101', ptr_record='my.ptr.record')
assert ip_addr.ptr_record == 'my.ptr.record'
@responses.activate
def test_ip_delete(self, manager):
Mock.mock_delete('ip_address/10.1.0.101')
res = manager.release_ip('10.1.0.101')
assert res == {}
| [
[
[
23,
39
]
],
[
[
63,
77
]
],
[
[
101,
109
]
],
[
[
133,
148
]
],
[
[
171,
175
],
[
291,
295
],
[
607,
611
],
[
863,
867
],
[
982,
986
],
[
1222,
1226
],
[
1472,
1476
],
[
1714,
1718
]
],
[
[
183,
192
],
[
221,
230
],
[
536,
545
],
[
769,
778
],
[
1149,
1158
],
[
1399,
1408
],
[
1648,
1657
]
],
[
[
200,
206
]
]
] |
import tvm
import tvm._ffi
import numpy as np
from functools import reduce
from tvm.tensor_graph.core.utils import to_int, to_tuple, flatten_tir_graph, op_feature
def make_tir_graph(fwd_graph, loss=None, optimizer=None, inference=True, need_output=True, need_grad=True):
if inference:
finputs, foutputs, fweights = fwd_graph()
inputs = [x.tvm_tensor for x in finputs]
weights = [x.tvm_tensor for x in fweights]
outputs = [x.tvm_tensor for x in foutputs]
labels = []
loss = None
gradients = []
lr = None
updates = []
tir_graph = tvm.tg.make_tir_graph_inference(inputs, outputs, weights)
else:
assert loss is not None and optimizer is not None
bwd_graph = fwd_graph.make_backward(loss, optimizer)
inputs = [x.tvm_tensor for x in bwd_graph.inputs]
weights = [x.tvm_tensor for x in bwd_graph.weights]
outputs = [x.tvm_tensor for x in bwd_graph.outputs] if need_output else []
labels = [x.tvm_tensor for x in bwd_graph.labels]
loss = bwd_graph.loss.tvm_tensor
gradients = [x.tvm_tensor for x in bwd_graph.gradients] if need_grad else []
lr = optimizer.lr_tensor
updates = [x.tvm_tensor for x in bwd_graph.updates]
tir_graph = tvm.tg.make_tir_graph_training(inputs, labels, outputs, weights, loss, gradients, lr, updates)
return tir_graph
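# Hedged usage sketch: `fwd_graph`, `my_loss`, and `my_optimizer` are assumed
# stand-ins for a tensor_graph forward graph, a loss node, and an optimizer;
# they are not defined in this module.
def _example_make_tir_graph(fwd_graph, my_loss, my_optimizer):
    # Inference graph: labels, loss, gradients, lr and updates stay empty/None.
    infer_graph = make_tir_graph(fwd_graph, inference=True)
    # Training graph: both a loss and an optimizer are required.
    train_graph = make_tir_graph(
        fwd_graph, loss=my_loss, optimizer=my_optimizer, inference=False)
    return infer_graph, train_graph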
@tvm._ffi.register_func("tg.graph.partition_policy")
def partition_policy(graph, pre, post, number):
pre_stat = graph.operation_stat_dict[pre]
post_stat = graph.operation_stat_dict[post]
# root op must be separated
if pre_stat.must_compute_root:
return True
if pre_stat.num_consumers > 1:
# do not fuse multi-output
return True
# if pre_stat.injective:
# return False
# if number > 10:
# return True
if pre_stat.reductive and post_stat.reductive:
# do not fuse reductive nodes
return True
if pre_stat.injective and post_stat.injective:
return False
if pre_stat.injective and post_stat.reductive:
return False
if pre_stat.reductive and post_stat.injective:
return True
# if pre_stat.injective and post_stat.injective:
# return ((not pre_stat.merge_backward) and post_stat.merge_backward)
# if pre_stat.injective and post_stat.reductive:
# return not pre_stat.merge_backward
# if pre_stat.reductive and post_stat.injective:
# return post_stat.merge_backward
return True
def set_partition_policy(policy):
tvm._ffi.register_func("tg.graph.partition_policy", policy, True)
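# Hedged usage sketch: a replacement policy has the same (graph, pre, post,
# number) signature as partition_policy above and returns True to cut the edge
# between `pre` and `post`, i.e. to place them in different subgraphs.
def _example_register_conservative_policy():
    def never_fuse(graph, pre, post, number):
        # Cutting every edge puts each operation in its own subgraph.
        return True
    set_partition_policy(never_fuse)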
"""
Below are deprecated Python implementations
They'll be removed in the future
"""
def is_injective(op):
is_compute = isinstance(op, tvm.te.tensor.ComputeOp)
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
return is_compute and (not has_reduce)
def is_reductive(op):
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
return has_reduce
def remain_shape(op):
is_compute = isinstance(op, tvm.te.tensor.ComputeOp)
if not is_compute:
return False
ret = True
output_shape = to_tuple(op.output(0).shape)
for t in op.input_tensors:
if to_tuple(t.shape) != output_shape:
ret = False
break
return ret
def able_inline(op, down_graph):
is_compute = isinstance(op, tvm.te.tensor.ComputeOp)
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
is_output = False
for i in range(op.num_outputs):
if op.output(i) not in down_graph:
is_output = True
break
return is_compute and (not has_reduce) and (not is_output)
class PyOpState(object):
def __init__(self):
self.injective = False
self.elementwise = False
self.reductive = False
self.num_inputs = 0
self.num_consumers = 0
self.head = True
# self.tail = False
self.reductions = []
self.output_shape = []
self.num_add = 0
self.num_mul = 0
self.num_div = 0
self.num_branch = 0
self.num_logic = 0
self.num_special = 0
self.gflop = 0
self.input_occur_count = []
# is output
self.must_root = False
def set_states(self, op, down_graph, root_ops):
assert isinstance(op, tvm.te.tensor.ComputeOp)
self.injective = is_injective(op)
# the output shapes of multi-output op are the same
self.output_shape = list(to_tuple(op.output(0).shape))
self.reductive = is_reductive(op)
self.elementwise = self.injective and remain_shape(op)
self.num_inputs = len(op.input_tensors)
for i in range(op.num_outputs):
if op.output(i) in down_graph:
self.num_consumers += len(down_graph[op.output(i)])
if self.reductive:
for iv in op.reduce_axis:
self.reductions.append(to_int(iv.dom.extent))
operation_count = tvm.tg.count_operation(op)
for (k, v) in operation_count.items():
setattr(self, k.value, v.value)
input_occur = tvm.tg.count_input_occur(op.input_tensors, op)
self.input_occur_count = [x.value for x in input_occur]
if op in root_ops:
self.must_root = True
self.gflop = reduce(lambda x, y: x * y, self.reductions, 1) * \
reduce(lambda x, y: x * y, self.output_shape, 1) * \
(self.num_add + self.num_mul + self.num_div) / 1e9
class PyTIRSubGraph(object):
def __init__(self):
self.inputs = {}
self.outputs = {}
self.labels = {}
self.weights = {}
self.loss = {}
self.gradients = {}
self.lr = {}
self.updates = {}
self.index = {}
self.connected_sets = {}
self.op_stat_dict = {}
self.op_list = []
self.ops = []
self.tensors = []
self.down_graph = {}
self.c_list = []
def __repr__(self):
ret = "PyTIRSubGraph\n"
ret += "inputs=" + str(self.inputs) + "\n"
ret += "outputs=" + str(self.outputs) + "\n"
ret += "labels=" + str(self.labels) + "\n"
ret += "weights=" + str(self.weights) + "\n"
ret += "loss=" + str(self.loss) + "\n"
ret += "gradients=" + str(self.gradients) + "\n"
ret += "lr=" + str(self.lr) + "\n"
ret += "updates=" + str(self.updates) + "\n"
return ret
def __str__(self):
return self.__repr__()
class PyTIRGraph(object):
"""PyTIRGraph
inputs : (list of) tvm Tensor
graph inputs
outputs : (list of) tvm Tensor
graph outputs
wire :
"""
def __init__(self, inputs, labels, outputs, weights, loss, gradients, lr, updates, wire=None):
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not isinstance(labels, (list, tuple)):
labels = [labels]
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
if not isinstance(weights, (list, tuple)):
weights = [weights]
if not isinstance(gradients, (list, tuple)):
gradients = [gradients]
if not isinstance(updates, (list, tuple)):
updates = [updates]
self.inputs = inputs
self.labels = labels
self.outputs = outputs
self.weights = weights
self.loss = loss
self.gradients = gradients
self.lr = lr
self.updates = updates
if self.loss is None:
self.root_ops = [x.op for x in outputs + gradients + updates]
else:
self.root_ops = [x.op for x in outputs + [loss] + gradients + updates]
if len(updates) > 0:
assert len(weights) == len(updates)
op_list, down_graph = flatten_tir_graph(self.root_ops)
# a list of compute op after topological sorting
self.op_list = op_list
self.num_ops = len(op_list)
self.op_feature_dict = {}
# down_graph maps each tensor to the list of ops that consume it
self.down_graph = down_graph
# these are runtime properties
self.ctx = None
self.tvm_array_dict = {}
# these are properties that can be modified by user
self.np_array_dict = {}
# these are properties that can be modified by scheduler
self.op_stat_dict = {}
self.subgraphs = {}
self.subgraph_features = {}
self.op_map = {}
self.call_order = []
self.schedules = {}
self.scheduled_subgraphs = set()
self.bufs = {}
self.functions = {}
self.shared_functions = {}
# initialize some of them
for op in op_list:
self.op_stat_dict[op] = PyOpState()
# get the states of each op
self._analyze()
def _analyze(self):
look_up = set(self.root_ops)
def func(op):
self.op_stat_dict[op].set_states(op, self.down_graph, look_up)
feature = op_feature(op)
self.op_feature_dict[op] = feature
return None
_ = list(map(func, self.op_list))
def partition_graph(self):
partition = PyTIRSubGraphPartition()
(subgraphs, op_map), order = partition.partion_graph(self)
self.subgraphs = subgraphs
self.op_map = op_map
self.call_order = order
def func(kv):
mark, subgraph = kv
tensors = list(set(list(subgraph.outputs.keys()) + list(subgraph.loss.keys())
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())))
subgraph.tensors = tensors
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = self.op_map[op]
if v in self.op_stat_dict:
op_stat_dict[op] = self.op_stat_dict[v]
subgraph.op_stat_dict = op_stat_dict
subgraph.ops = ops
subgraph.op_list = op_list
subgraph.down_graph = down_graph
self.subgraph_features[mark] = ";".join(map(lambda x: self.op_feature_dict[self.op_map[x]], op_list))
return None
_ = list(map(func, subgraphs.items()))
def set_inputs(self, inputs):
for tvm_tensor, np_array in inputs.items():
self.np_array_dict[tvm_tensor] = np_array
def set_lr(self, lr):
if self.lr is None:
raise RuntimeError("TIR Graph has no learning rate.")
self.np_array_dict[self.lr] = lr
def set_labels(self, labels):
for tvm_tensor, np_array in labels.items():
self.np_array_dict[tvm_tensor] = np_array
def set_weights(self, weights):
for tvm_tensor, np_array in weights.items():
self.np_array_dict[tvm_tensor] = np_array
def get_tvm_array(self, tvm_tensor):
return self.tvm_array_dict[tvm_tensor]
def get_outputs(self):
return [self.tvm_array_dict[x] for x in self.outputs]
def get_loss(self, tvm_tensor):
assert self.loss is not None
return self.tvm_array_dict[self.loss]
def get_gradients(self):
return [self.tvm_array_dict[x] for x in self.gradients]
def get_updates(self):
return [self.tvm_array_dict[x] for x in self.updates]
def clear_schedule(self):
self.op_stat_dict = {}
self.subgraphs = {}
self.subgraph_features = {}
self.op_map = {}
self.call_order = []
self.schedules = {}
self.scheduled_subgraphs = set()
self.bufs = {}
self.functions = {}
self.shared_functions = {}
# initialize some of them
for op in self.op_list:
self.op_stat_dict[op] = PyOpState()
# get the states of each op
self._analyze()
def clear_runtime(self):
self.ctx = None
self.tvm_array_dict = {}
def create_schedule_for(self, mark=0, force=False):
subgraphs = self.subgraphs
feature = self.subgraph_features[mark]
if force:
self.scheduled_subgraphs.remove(feature)
elif feature in self.scheduled_subgraphs:
return False
subgraph = subgraphs[mark]
inputs = list(subgraph.inputs.keys())
outputs = list(subgraph.outputs.keys())
weights = list(subgraph.weights.keys())
labels = list(subgraph.labels.keys())
loss = list(subgraph.loss.keys())
gradients = list(subgraph.gradients.keys())
lr = list(subgraph.lr.keys())
updates = list(subgraph.updates.keys())
sub_bufs = list(set(inputs + labels + outputs + weights + loss + gradients + lr + updates))
self.bufs[mark] = sub_bufs
ops = [x.op for x in outputs + loss + gradients + updates]
s = tvm.te.create_schedule(ops)
self.schedules[mark] = s
self.scheduled_subgraphs.add(feature)
return True
def create_schedule(self, force=False):
subgraphs = self.subgraphs
if force:
self.scheduled_subgraphs = set()
for mark, subgraph in subgraphs.items():
feature = self.subgraph_features[mark]
if feature in self.scheduled_subgraphs:
continue
inputs = list(subgraph.inputs.keys())
outputs = list(subgraph.outputs.keys())
weights = list(subgraph.weights.keys())
labels = list(subgraph.labels.keys())
loss = list(subgraph.loss.keys())
gradients = list(subgraph.gradients.keys())
lr = list(subgraph.lr.keys())
updates = list(subgraph.updates.keys())
sub_bufs = list(set(inputs + labels + outputs + weights + loss + gradients + lr + updates))
self.bufs[mark] = sub_bufs
ops = [x.op for x in outputs + loss + gradients + updates]
s = tvm.te.create_schedule(ops)
self.schedules[mark] = s
self.scheduled_subgraphs.add(feature)
def build_for(self, target, mark=0, force=False):
feature = self.subgraph_features[mark]
if force:
self.shared_functions.pop(feature)
elif feature in self.shared_functions:
self.functions[mark] = self.shared_functions[feature]
return True
bufs = self.bufs[mark]
sch = self.schedules[mark]
try:
func = tvm.build(sch, bufs, target=target)
self.functions[mark] = func
self.shared_functions[feature] = func
# print("build success for subgraph", mark)
return True
except Exception as e:
print("build error in subgraph", mark)
print(e)
# print(bufs)
# print(tvm.lower(sch, bufs, simple_mode=True))
return False
def build(self, target, force=False):
fail = 0
if force:
self.shared_functions = {}
for mark, sch in self.schedules.items():
feature = self.subgraph_features[mark]
if feature in self.shared_functions:
self.functions[mark] = self.shared_functions[feature]
continue
bufs = self.bufs[mark]
try:
func = tvm.build(sch, bufs, target=target)
self.functions[mark] = func
self.shared_functions[feature] = func
# print("build success for subgraph", mark)
except Exception as e:
fail += 1
print("build error in subgraph", mark)
print(e)
print(bufs)
print(tvm.lower(sch, bufs, simple_mode=True))
return fail == 0
def allocate_buffer(self, target, dev, force=False):
if not force and self.ctx is not None:
return
self.ctx = tvm.context(target, dev)
# inputs
for inp in self.inputs:
if inp in self.np_array_dict:
np_array = self.np_array_dict[inp].astype(inp.dtype)
else:
raise RuntimeError("Should provide input tensor for %s" % (str(inp)))
self.tvm_array_dict[inp] = tvm.nd.array(np_array, self.ctx)
# outputs
for out in self.outputs:
self.tvm_array_dict[out] = tvm.nd.empty(to_tuple(out.shape), out.dtype, ctx=self.ctx)
# labels
for label in self.labels:
if label in self.np_array_dict:
np_array = self.np_array_dict[label].astype(label.dtype)
else:
raise RuntimeError("Should provide input tensor for %s" % (str(label)))
self.tvm_array_dict[label] = tvm.nd.array(np_array, self.ctx)
# loss
if self.loss is not None:
self.tvm_array_dict[self.loss] = tvm.nd.empty(to_tuple(self.loss.shape), self.loss.dtype, ctx=self.ctx)
# weights
for weight in self.weights:
if weight in self.np_array_dict:
np_array = self.np_array_dict[weight].astype(weight.dtype)
else:
# TODO: add initializer
np_array = np.random.uniform(-1, 1, to_tuple(weight.shape)).astype(weight.dtype)
self.tvm_array_dict[weight] = tvm.nd.array(np_array, self.ctx)
# gradients
for grad in self.gradients:
self.tvm_array_dict[grad] = tvm.nd.empty(to_tuple(grad.shape), grad.dtype, ctx=self.ctx)
# lr
if self.lr is not None:
if self.lr in self.np_array_dict:
np_array = self.np_array_dict[self.lr].astype(self.lr.dtype)
else:
raise RuntimeError("Should provide learning rate.")
self.tvm_array_dict[self.lr] = tvm.nd.array(np_array, self.ctx)
# updates
for i, update in enumerate(self.updates):
self.tvm_array_dict[update] = self.tvm_array_dict[self.weights[i]]
# intermediate buffer
for subgraph in self.subgraphs.values():
for out, old_tensor in subgraph.outputs.items():
if old_tensor not in self.outputs:
# it's new output
self.tvm_array_dict[old_tensor] = tvm.nd.empty(to_tuple(old_tensor.shape), old_tensor.dtype, ctx=self.ctx)
def run(self, scheduler, target, dev):
"""
This is not enabled
"""
raise NotImplementedError()
# generate specific space
# scheduler has a cache, so multiple calls has the same effect
scheduler.add_task(self, target)
config = scheduler.propose(self, target)
scheduler.apply_config(self, target, config)
# apply config
# 1. modify op stat list -> head, tail
# 2. make subgraphs
# 3. create schedule
# 4. modify schedule
self.build(target)
# allocate buffer
# only the first call has effect
self.allocate_buffer(target, dev)
for mark in self.call_order:
func = self.functions[mark]
bufs = self.bufs[mark]
real_bufs = [self.tvm_array_dict[self.subgraphs[mark].index[x]] for x in bufs]
func(*real_bufs)
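# Hedged workflow sketch for the deprecated Python path, mirroring the steps
# listed in PyTIRGraph.run above. `tgraph` is an already-constructed PyTIRGraph
# and `feed` maps its input tensors to numpy arrays; both are assumptions here.
def _example_pytir_graph_flow(tgraph, feed, target="llvm", dev=0):
    tgraph.partition_graph()             # split the op list into subgraphs
    tgraph.create_schedule()             # one schedule per unscheduled subgraph
    built_ok = tgraph.build(target)      # compile every scheduled subgraph
    tgraph.set_inputs(feed)              # register numpy inputs
    tgraph.allocate_buffer(target, dev)  # materialize tvm.nd buffers
    return built_ok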
class PyTIRSubGraphPartition(object):
def __init__(self):
pass
def __call__(self, graph):
"""
graph: PyTIRGraph
"""
pass
def is_boundary(self, pre, post, graph):
pre_stat = graph.op_stat_dict[pre]
post_stat = graph.op_stat_dict[post]
# root op must be separated
if pre_stat.must_root:
return True
if pre_stat.num_consumers > 1:
# do not fuse multi-output
return True
if pre_stat.reductive and post_stat.reductive:
# do not fuse reductive nodes
return True
if pre_stat.injective and post_stat.injective:
return ((not pre_stat.head) and post_stat.head)
if pre_stat.injective and post_stat.reductive:
return not pre_stat.head
if pre_stat.reductive and post_stat.injective:
return post_stat.head
return True
def partion_graph(self, graph):
"""
graph: PyTIRGraph
returns:
list of list of tvm ComputeOp
dict from tvm ComputeOp to list of DataPort
"""
# -1 for not visited
graph_mark = {x: -1 for x in graph.op_list}
# setup initial nodes, all compute ops are included
# this guarantees no node is left
visit_stack = list(reversed(graph.op_list))
visited = set()
global_mark = -1
while len(visit_stack) > 0:
cur = visit_stack.pop()
if cur in visited:
continue
if graph_mark[cur] < 0:
# not marked
# new subgraph
global_mark += 1
graph_mark[cur] = global_mark
graph_mark[cur] = global_mark
# all the outputs
for i in range(cur.num_outputs):
t = cur.output(i)
if t in graph.down_graph:
for op in graph.down_graph[t]:
if not self.is_boundary(cur, op, graph):
if graph_mark[op] < 0:
# mark it as the same subgraph
graph_mark[op] = global_mark
# only add node within the same subgraph
visit_stack.append(op)
# all the inputs
for t in cur.input_tensors:
if isinstance(t.op, tvm.te.tensor.ComputeOp):
if not self.is_boundary(t.op, cur, graph):
if graph_mark[t.op] < 0:
# mark it as the same subgraph
graph_mark[t.op] = global_mark
# only add node within the same subgraph
visit_stack.append(t.op)
# add visit
visited.add(cur)
order = self.validate_partition(graph_mark)
return self.subgraph_rewrite(graph_mark, graph), order
def subgraph_rewrite(self, graph_mark, tgraph):
ret = tvm.tg.subgraph_partition(graph_mark, tgraph.root_ops)
op_map = {}
inputs_set = set(tgraph.inputs)
outputs_set = set(tgraph.outputs)
labels_set = set(tgraph.labels)
weights_set = set(tgraph.weights)
gradients_set = set(tgraph.gradients)
updates_set = set(tgraph.updates)
subgraphs = {}
for (old_op, mark) in graph_mark.items():
new_op = ret[old_op]
op_map[new_op] = old_op
if mark not in subgraphs:
subgraphs[mark] = PyTIRSubGraph()
for i, t in enumerate(old_op.input_tensors):
if t in inputs_set:
# new -> old
subgraphs[mark].inputs[new_op.input_tensors[i]] = t
if t in labels_set:
subgraphs[mark].labels[new_op.input_tensors[i]] = t
if t == tgraph.lr:
subgraphs[mark].lr[new_op.input_tensors[i]] = t
if t in weights_set:
subgraphs[mark].weights[new_op.input_tensors[i]] = t
# this is special
# ret contains the new placeholder op because
# this indicates an intermediate input
if new_op.input_tensors[i].op in ret:
subgraphs[mark].inputs[new_op.input_tensors[i]] = \
ret[new_op.input_tensors[i].op].output(t.value_index)
another_mark = graph_mark[ret[new_op.input_tensors[i].op]]
if another_mark not in subgraphs:
subgraphs[another_mark] = PyTIRSubGraph()
subgraphs[another_mark].outputs[ret[ret[new_op.input_tensors[i].op]].output(t.value_index)] = \
ret[new_op.input_tensors[i].op].output(t.value_index)
for i in range(old_op.num_outputs):
t = old_op.output(i)
if t in outputs_set:
subgraphs[mark].outputs[new_op.output(i)] = t
if t in gradients_set:
subgraphs[mark].gradients[new_op.output(i)] = t
if t in updates_set:
subgraphs[mark].updates[new_op.output(i)] = t
if t == tgraph.loss:
subgraphs[mark].loss[new_op.output(i)] = t
for mark, subgraph in subgraphs.items():
subgraph.index = {
**subgraph.inputs, **subgraph.outputs, **subgraph.labels, **subgraph.loss, \
**subgraph.weights, **subgraph.gradients, **subgraph.lr, **subgraph.updates}
return subgraphs, op_map
def validate_partition(self, graph_mark):
# dst -> src
order = []
ref = {}
max_mark = 0
for (op, mark) in graph_mark.items():
max_mark = max(mark, max_mark)
for inp in op.input_tensors:
if inp.op in graph_mark:
src_mark = graph_mark[inp.op]
if src_mark != mark:
if mark not in ref:
ref[mark] = set()
ref[mark].add(src_mark)
visited = set()
visiting = set()
def func(val):
if val in visited:
return
if val in visiting:
raise RuntimeError(
"The subgraph relation has a circular reference.")
visiting.add(val)
if val not in ref:
order.append(val)
visiting.remove(val)
visited.add(val)
return
for inp in ref[val]:
func(inp)
order.append(val)
visiting.remove(val)
visited.add(val)
return
for mark in range(max_mark+1):
func(mark)
return order
| [
[
[
7,
10
]
],
[
[
18,
26
],
[
1423,
1426
],
[
616,
619
],
[
1303,
1306
],
[
2595,
2598
],
[
2803,
2806
],
[
3099,
3102
],
[
3433,
3436
],
[
4401,
4404
],
[
5048,
5051
],
[
5188,
5191
],
[
13006,
13009
],
[
14084,
14087
],
[
14601,
14604
],
[
15457,
15460
],
[
15842,
15845
],
[
16050,
16053
],
[
16378,
16381
],
[
16501,
16504
],
[
16875,
16878
],
[
17002,
17005
],
[
17444,
17447
],
[
17573,
17576
],
[
17931,
17934
],
[
18394,
18397
],
[
21816,
21819
],
[
22432,
22435
]
],
[
[
34,
45
],
[
17332,
17334
]
],
[
[
68,
74
],
[
5381,
5387
],
[
5453,
5459
]
],
[
[
115,
121
],
[
4999,
5005
]
],
[
[
123,
131
],
[
3202,
3210
],
[
3273,
3281
],
[
4561,
4569
],
[
16514,
16522
],
[
17015,
17023
],
[
17357,
17365
],
[
17586,
17594
],
[
18407,
18415
]
],
[
[
133,
150
],
[
7930,
7947
],
[
9841,
9858
]
],
[
[
152,
162
],
[
9112,
9122
]
],
[
[
169,
183
]
],
[
[
1479,
1495
]
],
[
[
2561,
2581
]
],
[
[
2753,
2765
],
[
4451,
4463
]
],
[
[
2940,
2952
],
[
4616,
4628
]
],
[
[
3049,
3061
],
[
4679,
4691
]
],
[
[
3372,
3383
]
],
[
[
3739,
3748
],
[
8858,
8867
],
[
11944,
11953
]
],
[
[
5585,
5598
],
[
22974,
22987
],
[
24047,
24060
]
],
[
[
6599,
6609
]
],
[
[
19378,
19400
],
[
9293,
9315
]
]
] |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import ast
import functools
import math
import operator
import re
import time
import uuid
from Crypto.Random import random
import eventlet
from eventlet import tpool
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
def _usage_from_volume(context, volume_ref, **kw):
now = timeutils.utcnow()
launched_at = volume_ref['launched_at'] or now
created_at = volume_ref['created_at'] or now
volume_status = volume_ref['status']
if volume_status == 'error_managing_deleting':
volume_status = 'deleting'
usage_info = dict(
tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
user_id=volume_ref['user_id'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=launched_at.isoformat(),
created_at=created_at.isoformat(),
status=volume_status,
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],
replication_extended_status=volume_ref['replication_extended_status'],
replication_driver_data=volume_ref['replication_driver_data'],
metadata=volume_ref.get('volume_metadata'),)
usage_info.update(kw)
try:
attachments = db.volume_attachment_get_all_by_volume_id(
context, volume_ref['id'])
usage_info['volume_attachment'] = attachments
glance_meta = db.volume_glance_metadata_get(context, volume_ref['id'])
if glance_meta:
usage_info['glance_metadata'] = glance_meta
except exception.GlanceMetadataNotFound:
pass
except exception.VolumeNotFound:
LOG.debug("Can not find volume %s at notify usage", volume_ref['id'])
return usage_info
def _usage_from_backup(backup, **kw):
num_dependent_backups = backup.num_dependent_backups
usage_info = dict(tenant_id=backup.project_id,
user_id=backup.user_id,
availability_zone=backup.availability_zone,
backup_id=backup.id,
host=backup.host,
display_name=backup.display_name,
created_at=str(backup.created_at),
status=backup.status,
volume_id=backup.volume_id,
size=backup.size,
service_metadata=backup.service_metadata,
service=backup.service,
fail_reason=backup.fail_reason,
parent_id=backup.parent_id,
num_dependent_backups=num_dependent_backups,
snapshot_id=backup.snapshot_id,
)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_backup_usage(context, backup, event_suffix,
extra_usage_info=None,
host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_backup(backup, **extra_usage_info)
rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(snapshot, context, **extra_usage_info):
# (niedbalski) a snapshot might be related to a deleted volume;
# if that's the case, the volume information is still required
# to fill the usage_info, so we force reading the volume data
# even if the volume has been deleted.
context.read_deleted = "yes"
volume = db.volume_get(context, snapshot.volume_id)
usage_info = {
'tenant_id': snapshot.project_id,
'user_id': snapshot.user_id,
'availability_zone': volume['availability_zone'],
'volume_id': snapshot.volume_id,
'volume_size': snapshot.volume_size,
'snapshot_id': snapshot.id,
'display_name': snapshot.display_name,
'created_at': str(snapshot.created_at),
'status': snapshot.status,
'deleted': null_safe_str(snapshot.deleted),
'metadata': null_safe_str(snapshot.metadata),
}
usage_info.update(extra_usage_info)
return usage_info
@utils.if_notifications_enabled
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def _usage_from_capacity(capacity, **extra_usage_info):
capacity_info = {
'name_to_id': capacity['name_to_id'],
'total': capacity['total'],
'free': capacity['free'],
'allocated': capacity['allocated'],
'provisioned': capacity['provisioned'],
'virtual_free': capacity['virtual_free'],
'reported_at': capacity['reported_at']
}
capacity_info.update(extra_usage_info)
return capacity_info
@utils.if_notifications_enabled
def notify_about_capacity_usage(context, capacity, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_capacity(capacity, **extra_usage_info)
rpc.get_notifier('capacity', host).info(context,
'capacity.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_usage(context, volume, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_usage_info)
rpc.get_notifier('replication', host).info(context,
'replication.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_error(context, volume, suffix,
extra_error_info=None, host=None):
if not host:
host = CONF.host
if not extra_error_info:
extra_error_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_error_info)
rpc.get_notifier('replication', host).error(context,
'replication.%s' % suffix,
usage_info)
def _usage_from_consistencygroup(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
consistencygroup_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_consistencygroup_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_consistencygroup(group,
**extra_usage_info)
rpc.get_notifier("consistencygroup", host).info(
context,
'consistencygroup.%s' % event_suffix,
usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
group_type=group_ref.group_type_id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_group_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group(group,
**extra_usage_info)
rpc.get_notifier("group", host).info(
context,
'group.%s' % event_suffix,
usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,
user_id=cgsnapshot.user_id,
cgsnapshot_id=cgsnapshot.id,
name=cgsnapshot.name,
consistencygroup_id=cgsnapshot.consistencygroup_id,
created_at=cgsnapshot.created_at.isoformat(),
status=cgsnapshot.status)
usage_info.update(kw)
return usage_info
def _usage_from_group_snapshot(group_snapshot, **kw):
usage_info = dict(
tenant_id=group_snapshot.project_id,
user_id=group_snapshot.user_id,
group_snapshot_id=group_snapshot.id,
name=group_snapshot.name,
group_id=group_snapshot.group_id,
group_type=group_snapshot.group_type_id,
created_at=group_snapshot.created_at.isoformat(),
status=group_snapshot.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_cgsnapshot(cgsnapshot,
**extra_usage_info)
rpc.get_notifier("cgsnapshot", host).info(
context,
'cgsnapshot.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group_snapshot(group_snapshot,
**extra_usage_info)
rpc.get_notifier("group_snapshot", host).info(
context,
'group_snapshot.%s' % event_suffix,
usage_info)
def _check_blocksize(blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default."),
{'blocksize': blocksize})
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
return blocksize
def check_for_odirect_support(src, dest, flag='oflag=direct'):
# Check whether O_DIRECT is supported
try:
# iflag=direct and if=/dev/zero combination does not work
# error: dd: failed to open '/dev/zero': Invalid argument
if (src == '/dev/zero' and flag == 'iflag=direct'):
return False
else:
utils.execute('dd', 'count=0', 'if=%s' % src,
'of=%s' % dest,
flag, run_as_root=True)
return True
except processutils.ProcessExecutionError:
return False
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize,
sync=False, execute=utils.execute, ionice=None,
sparse=False):
cmd = prefix[:]
if ionice:
cmd.extend(('ionice', ionice))
blocksize = _check_blocksize(blocksize)
size_in_bytes = size_in_m * units.Mi
cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % size_in_bytes, 'bs=%s' % blocksize))
# Use O_DIRECT to avoid thrashing the system buffer cache
odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct')
cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes')
if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
cmd.append('oflag=direct')
odirect = True
# If the volume is being unprovisioned then
# request the data is persisted before returning,
# so that it's not discarded from the cache.
conv = []
if sync and not odirect:
conv.append('fdatasync')
if sparse:
conv.append('sparse')
if conv:
conv_options = 'conv=' + ",".join(conv)
cmd.append(conv_options)
# Perform the copy
start_time = timeutils.utcnow()
execute(*cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
"size %(sz).2f MB, duration %(duration).2f sec",
{"src": srcstr,
"dest": deststr,
"sz": size_in_m,
"duration": duration})
LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
{'size_in_m': size_in_m, 'mbps': mbps})
def _open_volume_with_path(path, mode):
try:
with utils.temporary_chown(path):
handle = open(path, mode)
return handle
except Exception:
LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path})
def _transfer_data(src, dest, length, chunk_size):
"""Transfer data between files (Python IO objects)."""
chunks = int(math.ceil(length / chunk_size))
remaining_length = length
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
{'chunks': chunks, 'bytes': chunk_size})
for chunk in range(0, chunks):
before = time.time()
data = tpool.execute(src.read, min(chunk_size, remaining_length))
# If we have reached end of source, discard any extraneous bytes from
# destination volume if trim is enabled and stop writing.
if data == b'':
break
tpool.execute(dest.write, data)
remaining_length -= len(data)
delta = (time.time() - before)
rate = (chunk_size / delta) / units.Ki
LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
{'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})
# yield to any other pending operations
eventlet.sleep(0)
tpool.execute(dest.flush)
def _copy_volume_with_file(src, dest, size_in_m):
src_handle = src
if isinstance(src, six.string_types):
src_handle = _open_volume_with_path(src, 'rb')
dest_handle = dest
if isinstance(dest, six.string_types):
dest_handle = _open_volume_with_path(dest, 'wb')
if not src_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, source device unavailable."))
if not dest_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, destination device unavailable."))
start_time = timeutils.utcnow()
_transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4)
duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow()))
if isinstance(src, six.string_types):
src_handle.close()
if isinstance(dest, six.string_types):
dest_handle.close()
mbps = (size_in_m / duration)
LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
"%(mbps).2f MB/s)."),
{'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(src, dest, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None, throttle=None,
sparse=False):
"""Copy data from the source volume to the destination volume.
The parameters 'src' and 'dest' are both typically of type str, which
represents the path to each volume on the filesystem. Connectors can
optionally return a volume handle of type RawIOBase for volumes that are
not available on the local filesystem for open/close operations.
If either 'src' or 'dest' is not of type str, it is assumed to be of type
RawIOBase or any derivative that supports file operations such as read and
write. In this case, the handles are treated as file handles instead of
file paths and, at present, throttling is unavailable.
"""
if (isinstance(src, six.string_types) and
isinstance(dest, six.string_types)):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(src, dest) as throttle_cmd:
_copy_volume_with_path(throttle_cmd['prefix'], src, dest,
size_in_m, blocksize, sync=sync,
execute=execute, ionice=ionice,
sparse=sparse)
else:
_copy_volume_with_file(src, dest, size_in_m)
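# Hedged usage sketch: the device paths are illustrative. copy_volume accepts
# either filesystem paths (throttled, dd-based copy) or open file-like handles
# (chunked Python copy via _copy_volume_with_file).
def _example_copy_volume():
    copy_volume('/dev/mapper/src-vol', '/dev/mapper/dst-vol',
                size_in_m=1024, blocksize='1M', sync=True)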
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None,
throttle=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
# We pass sparse=False explicitly here so that zero blocks are not
# skipped in order to clear the volume.
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice,
throttle=throttle, sparse=False)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [random.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
random.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([random.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
random.shuffle(password)
return ''.join(password)
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specifies what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected information, string or None
:raises: exception.InvalidVolume
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if host is None:
msg = _("volume is not assigned to a host")
raise exception.InvalidVolume(reason=msg)
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
def matching_backend_name(src_volume_type, volume_type):
if src_volume_type.get('volume_backend_name') and \
volume_type.get('volume_backend_name'):
return src_volume_type.get('volume_backend_name') == \
volume_type.get('volume_backend_name')
else:
return False
def hosts_are_equivalent(host_1, host_2):
# In case host_1 or host_2 are None
if not (host_1 and host_2):
return host_1 == host_2
return extract_host(host_1) == extract_host(host_2)
def read_proc_mounts():
"""Read the /proc/mounts file.
It's a dummy function, but it eases the writing of unit tests, as mocking
the builtin open() for a specific file only is not trivial.
"""
with open('/proc/mounts') as mounts:
return mounts.readlines()
def extract_id_from_volume_name(vol_name):
regex = re.compile(
CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(vol_name)
return match.group('uuid') if match else None
def check_already_managed_volume(vol_id):
"""Check cinder db for already managed volume.
:param vol_id: volume id parameter
:returns: bool -- True if a db entry with the specified
volume id exists, otherwise False
"""
try:
return (vol_id and isinstance(vol_id, six.string_types) and
uuid.UUID(vol_id, version=4) and
objects.Volume.exists(context.get_admin_context(), vol_id))
except ValueError:
return False
def extract_id_from_snapshot_name(snap_name):
"""Return a snapshot's ID from its name on the backend."""
regex = re.compile(
CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(snap_name)
return match.group('uuid') if match else None
def paginate_entries_list(entries, marker, limit, offset, sort_keys,
sort_dirs):
"""Paginate a list of entries.
:param entries: list of dictionaries
:param marker: The last element previously returned
:param limit: The maximum number of items to return
:param offset: The number of items to skip from the marker or from the first
element.
:param sort_keys: A list of keys in the dictionaries to sort by
:param sort_dirs: A list of sort directions, where each is either 'asc' or 'desc'
"""
comparers = [(operator.itemgetter(key.strip()), multiplier)
for (key, multiplier) in zip(sort_keys, sort_dirs)]
def comparer(left, right):
for fn, d in comparers:
left_val = fn(left)
right_val = fn(right)
if isinstance(left_val, dict):
left_val = sorted(left_val.values())[0]
if isinstance(right_val, dict):
right_val = sorted(right_val.values())[0]
if left_val == right_val:
continue
if d == 'asc':
return -1 if left_val < right_val else 1
else:
return -1 if left_val > right_val else 1
else:
return 0
sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))
start_index = 0
if offset is None:
offset = 0
if marker:
start_index = -1
for i, entry in enumerate(sorted_entries):
if entry['reference'] == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker not found: %s') % marker
raise exception.InvalidInput(reason=msg)
range_end = start_index + limit
return sorted_entries[start_index + offset:range_end + offset]
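# Illustrative usage (added note, not part of the original module): paginating three
# hypothetical manageable-volume entries by size; the 'reference'/'size' keys below
# are made up for the example.
#
#   entries = [{'reference': 'vol-a', 'size': 1},
#              {'reference': 'vol-b', 'size': 3},
#              {'reference': 'vol-c', 'size': 2}]
#   paginate_entries_list(entries, marker='vol-a', limit=2, offset=0,
#                         sort_keys=['size'], sort_dirs=['asc'])
#   # -> [{'reference': 'vol-c', 'size': 2}, {'reference': 'vol-b', 'size': 3}]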
def convert_config_string_to_dict(config_string):
"""Convert config file replication string to a dict.
The only supported form is as follows:
"{'key-1'='val-1' 'key-2'='val-2'...}"
:param config_string: Properly formatted string to convert to dict.
:returns: dict of string values
"""
resultant_dict = {}
try:
st = config_string.replace("=", ":")
st = st.replace(" ", ", ")
resultant_dict = ast.literal_eval(st)
except Exception:
LOG.warning(_LW("Error encountered translating config_string: "
"%(config_string)s to dict"),
{'config_string': config_string})
return resultant_dict
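# Illustrative usage (added note, not part of the original module): the replacements
# turn the "{'k'='v' ...}" form into a valid dict literal before ast.literal_eval.
#
#   convert_config_string_to_dict("{'san_ip'='10.0.0.1' 'san_login'='admin'}")
#   # -> {'san_ip': '10.0.0.1', 'san_login': 'admin'}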
def create_encryption_key(context, key_manager, volume_type_id):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
volume_type_encryption = (
volume_types.get_volume_type_encryption(context,
volume_type_id))
cipher = volume_type_encryption.cipher
length = volume_type_encryption.key_size
algorithm = cipher.split('-')[0] if cipher else None
encryption_key_id = key_manager.create_key(
context,
algorithm=algorithm,
length=length)
return encryption_key_id
def is_replicated_str(str):
spec = (str or '').split()
return (len(spec) == 2 and
spec[0] == '<is>' and strutils.bool_from_string(spec[1]))
def is_replicated_spec(extra_specs):
return (extra_specs and
is_replicated_str(extra_specs.get('replication_enabled')))
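# Illustrative usage (added note, not part of the original module): the extra spec
# value must be exactly the two tokens '<is> True' to count as replicated.
#
#   is_replicated_spec({'replication_enabled': '<is> True'})   # -> True
#   is_replicated_spec({'replication_enabled': 'True'})        # -> False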
def group_get_by_id(group_id):
ctxt = context.get_admin_context()
group = db.group_get(ctxt, group_id)
return group
def is_group_a_cg_snapshot_type(group_or_snap):
LOG.debug("Checking if %s is a consistent snapshot group",
group_or_snap)
if group_or_snap["group_type_id"] is not None:
spec = group_types.get_group_type_specs(
group_or_snap["group_type_id"],
key="consistent_group_snapshot_enabled"
)
return spec == "<is> True"
return False
| [
[
[
670,
673
],
[
29338,
29341
]
],
[
[
681,
690
],
[
28358,
28367
]
],
[
[
698,
702
],
[
16450,
16454
]
],
[
[
710,
718
],
[
27614,
27622
]
],
[
[
726,
728
],
[
26102,
26104
],
[
26900,
26902
]
],
[
[
736,
740
],
[
16694,
16698
],
[
17063,
17067
]
],
[
[
748,
752
],
[
26624,
26628
]
],
[
[
780,
786
],
[
22362,
22368
],
[
22590,
22596
],
[
22796,
22802
],
[
22939,
22945
]
],
[
[
794,
802
],
[
17339,
17347
]
],
[
[
824,
829
],
[
16721,
16726
],
[
16976,
16981
],
[
17362,
17367
]
],
[
[
859,
871
],
[
14056,
14068
]
],
[
[
896,
899
],
[
1439,
1442
]
],
[
[
921,
935
],
[
1455,
1462
]
],
[
[
959,
967
],
[
13036,
13044
],
[
30353,
30361
]
],
[
[
991,
1000
],
[
1601,
1610
],
[
15343,
15352
],
[
15413,
15422
],
[
15449,
15458
],
[
17976,
17985
],
[
18099,
18108
],
[
18135,
18144
]
],
[
[
1024,
1029
],
[
14458,
14463
],
[
17123,
17128
],
[
18052,
18057
],
[
18062,
18067
]
],
[
[
1037,
1040
],
[
17484,
17487
],
[
17606,
17609
],
[
18180,
18183
],
[
18250,
18253
],
[
19355,
19358
],
[
19406,
19409
],
[
26586,
26589
]
],
[
[
1063,
1068
],
[
16659,
16664
],
[
22829,
22834
]
],
[
[
1105,
1121
],
[
21141,
21150
],
[
21273,
21282
],
[
21417,
21426
]
],
[
[
1141,
1148
],
[
26695,
26702
],
[
30571,
30578
]
],
[
[
1168,
1170
],
[
2700,
2702
],
[
2859,
2861
],
[
5514,
5516
],
[
30611,
30613
]
],
[
[
1190,
1199
],
[
3007,
3016
],
[
3065,
3074
],
[
17720,
17729
],
[
17856,
17865
],
[
20990,
20999
],
[
24614,
24623
],
[
28746,
28755
]
],
[
[
1224,
1225
],
[
17761,
17762
],
[
17897,
17898
],
[
24562,
24563
],
[
28693,
28694
]
],
[
[
1227,
1230
],
[
15951,
15954
],
[
18345,
18348
],
[
20458,
20461
]
],
[
[
1232,
1235
],
[
13123,
13126
],
[
29401,
29404
]
],
[
[
1237,
1240
],
[
16259,
16262
]
],
[
[
1260,
1267
],
[
26673,
26680
]
],
[
[
1287,
1290
],
[
4531,
4534
],
[
5022,
5025
],
[
6491,
6494
],
[
7467,
7470
],
[
8021,
8024
],
[
8587,
8590
],
[
9668,
9671
],
[
10685,
10688
],
[
12078,
12081
],
[
12614,
12617
]
],
[
[
1310,
1315
],
[
4193,
4198
],
[
4663,
4668
],
[
6143,
6148
],
[
7134,
7139
],
[
7643,
7648
],
[
8209,
8214
],
[
9265,
9270
],
[
10326,
10331
],
[
11689,
11694
],
[
12201,
12206
],
[
14236,
14241
],
[
18576,
18581
],
[
13883,
13888
],
[
16126,
16131
],
[
20836,
20841
],
[
21191,
21196
],
[
21321,
21326
],
[
21462,
21467
]
],
[
[
1342,
1353
],
[
30865,
30876
]
],
[
[
1380,
1390
],
[
19474,
19484
]
],
[
[
1417,
1429
],
[
29691,
29703
],
[
29790,
29802
]
],
[
[
1432,
1436
],
[
4382,
4386
],
[
4882,
4886
],
[
6338,
6342
],
[
7323,
7327
],
[
7836,
7840
],
[
8402,
8406
],
[
9473,
9477
],
[
10512,
10516
],
[
11890,
11894
],
[
12414,
12418
],
[
13413,
13417
],
[
13476,
13480
],
[
20176,
20180
],
[
20257,
20261
],
[
20419,
20423
],
[
20764,
20768
],
[
26122,
26126
],
[
26920,
26924
]
],
[
[
1449,
1452
],
[
3099,
3102
],
[
13111,
13114
],
[
15681,
15684
],
[
15942,
15945
],
[
16249,
16252
],
[
16517,
16520
],
[
17140,
17143
],
[
18336,
18339
],
[
20449,
20452
],
[
29389,
29392
],
[
30711,
30714
]
],
[
[
1489,
1502
],
[
5984,
5997
],
[
6037,
6050
]
],
[
[
1544,
1562
],
[
4470,
4488
],
[
7924,
7942
],
[
8490,
8508
]
],
[
[
3198,
3216
],
[
4970,
4988
]
],
[
[
4228,
4253
]
],
[
[
4698,
4723
]
],
[
[
5157,
5177
],
[
6426,
6446
]
],
[
[
6178,
6205
]
],
[
[
6676,
6696
],
[
7411,
7431
]
],
[
[
7169,
7196
]
],
[
[
7678,
7708
]
],
[
[
8244,
8274
]
],
[
[
8781,
8809
],
[
9561,
9589
]
],
[
[
9300,
9335
]
],
[
[
9806,
9823
],
[
10600,
10617
]
],
[
[
10361,
10385
]
],
[
[
10801,
10823
],
[
11978,
12000
]
],
[
[
11213,
11239
],
[
12502,
12528
]
],
[
[
11724,
11753
]
],
[
[
12236,
12269
]
],
[
[
12748,
12764
],
[
14398,
14414
]
],
[
[
13529,
13554
],
[
14670,
14695
],
[
14817,
14842
]
],
[
[
14119,
14141
],
[
19581,
19603
]
],
[
[
16068,
16090
],
[
17524,
17546
],
[
17647,
17669
]
],
[
[
16326,
16340
],
[
18000,
18014
]
],
[
[
17394,
17416
],
[
19842,
19864
]
],
[
[
18495,
18506
],
[
20680,
20691
]
],
[
[
19893,
19905
]
],
[
[
21100,
21126
]
],
[
[
21222,
21246
]
],
[
[
21369,
21390
]
],
[
[
21604,
21628
],
[
21853,
21877
],
[
23042,
23066
]
],
[
[
21811,
21828
],
[
23142,
23159
]
],
[
[
23000,
23017
]
],
[
[
23184,
23201
],
[
25012,
25029
]
],
[
[
23219,
23231
],
[
25716,
25728
],
[
25740,
25752
]
],
[
[
25074,
25085
]
],
[
[
25251,
25272
]
],
[
[
25563,
25583
]
],
[
[
25767,
25783
]
],
[
[
26051,
26078
]
],
[
[
26269,
26297
]
],
[
[
26783,
26812
]
],
[
[
27070,
27091
]
],
[
[
28890,
28919
]
],
[
[
29594,
29615
]
],
[
[
30233,
30250
],
[
30468,
30485
]
],
[
[
30395,
30413
]
],
[
[
30533,
30548
]
],
[
[
30663,
30690
]
]
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Persistent identifier fetchers.
A proper fetcher is defined as a function that returns a
:data:`invenio_pidstore.fetchers.FetchedPID` instance.
E.g.
.. code-block:: python
def my_fetcher(record_uuid, data):
return FetchedPID(
provider=MyRecordIdProvider,
pid_type=MyRecordIdProvider.pid_type,
pid_value=extract_pid_value(data),
)
To see more about providers see :mod:`invenio_pidstore.providers`.
"""
from __future__ import absolute_import, print_function
from invenio_pidstore.fetchers import FetchedPID
from oarepo_communities.converters import CommunityPIDValue
from oarepo_communities.proxies import current_oarepo_communities
from .providers import NRNresultsIdProvider
def nr_nresults_id_fetcher(record_uuid, data):
"""Fetch a record's identifiers.
:param record_uuid: The record UUID.
:param data: The record metadata.
:returns: A :data:`invenio_pidstore.fetchers.FetchedPID` instance.
"""
id_field = "control_number"
return FetchedPID(  # FetchedPID is a plain namedtuple
provider=NRNresultsIdProvider,
pid_type=NRNresultsIdProvider.pid_type,
pid_value=CommunityPIDValue(
str(data[id_field]),
current_oarepo_communities.get_primary_community_field(data))
)
| [
[
[
723,
738
]
],
[
[
740,
754
]
],
[
[
794,
804
],
[
1264,
1274
]
],
[
[
847,
864
],
[
1418,
1435
]
],
[
[
904,
930
],
[
1482,
1508
]
],
[
[
955,
975
],
[
1330,
1350
],
[
1369,
1389
]
],
[
[
982,
1004
]
]
] |
import logging,os
from rest import Restclient
LOCAL_DATA_FOLDER = '/DATA'
GENOTYPE_FOLDER = '/GENOTYPE'
REST_HOST = os.environ['REST_HOST']
REST_USERNAME = os.environ['REST_USERNAME']
REST_PASSWORD = os.environ['REST_PASSWORD']
restclient = Restclient(REST_HOST,REST_USERNAME,REST_PASSWORD)
class CeleryProgressLogHandler(logging.StreamHandler):
def __init__(self,task):
logging.StreamHandler.__init__(self)
self.task = task
def emit(self,record):
if 'progress' in record.__dict__:
progress = record.__dict__['progress']
msg = self.format(record)
if 'task' in record.__dict__:
msg = record.__dict__['task']
body = {'progress':progress,'task':msg}
self.task.update_state(state='PROGRESS',meta=body)
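# Illustrative usage (added sketch, assumptions only): the handler is meant to be
# attached to a logger inside a bound Celery task; records carrying a 'progress'
# attribute (passed via `extra`) are forwarded to the task state. The task and
# logger names below are hypothetical.
#
#   @celery.task(bind=True)
#   def impute_genotype(self):
#       log = logging.getLogger('imputation')
#       log.addHandler(CeleryProgressLogHandler(self))
#       log.info('imputing chromosome 1', extra={'progress': 10, 'task': 'impute'})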
| [
[
[
7,
14
],
[
326,
333
],
[
388,
395
]
],
[
[
15,
17
],
[
118,
120
],
[
158,
160
],
[
202,
204
]
],
[
[
35,
45
],
[
244,
254
]
],
[
[
47,
64
]
],
[
[
75,
90
]
],
[
[
106,
115
],
[
255,
264
]
],
[
[
142,
155
],
[
265,
278
]
],
[
[
186,
199
],
[
279,
292
]
],
[
[
231,
241
]
],
[
[
301,
325
]
]
] |
import os
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas
_ramachandran_densities = pandas.read_csv(
'data/rama500-general.data',
skiprows=6,
delimiter=' ',
names=['phi', 'psi', 'value']
)
"""
DSSP output:
H = α-helix
B = residue in isolated β-bridge
E = extended strand, participates in β ladder
G = 3-helix (310 helix)
I = 5 helix (π-helix)
T = hydrogen bonded turn
S = bend
Colors extracted from rcsb.org.
"""
DSSP_to_color = {
'H': '#ED6161',
'B': '#CCA200',
'E': '#FFFB00',
'G': '#FFC2C2',
'I': '#900000',
'T': '#990099',
'S': '#0000FF',
'-': 'black',
}
def ramachandran_surface():
"""
Plot density surface for generic ramachandran
"""
fontsize = 18
ticks = [-180, -90, 0, 90, 180]
plt.contourf(
list(OrderedDict.fromkeys(_ramachandran_densities['phi'])),
list(OrderedDict.fromkeys(_ramachandran_densities['psi'])),
_ramachandran_densities['value'].values.reshape(180, 180).T,
levels=[0, 0.0005, 0.02, 1],
colors=['#FFFFFF', '#B3E8FF', '#7FD9FF']
)
plt.xlabel('$\phi$', fontsize=fontsize)
plt.ylabel('$\psi$', fontsize=fontsize)
plt.xticks(ticks)
plt.yticks(ticks)
plt.tick_params(direction="out")
plt.margins(0.05)
ax = plt.axes()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def ramachandran(torsion_angles, fragment, target_pdb=None,
output_writer=None, output_dir=None):
"""
Plot ramachandran of a set of torsion angles for a given fragment
:param torsion_angles: Dictionary with torsion angles phi and psi
:param fragment: Fragment identifier, used for displaying purposes
"""
plt.figure()
ramachandran_surface()
plt.title('Ramachandran plot for ' + fragment)
plt.scatter(
x=torsion_angles['phi'],
y=torsion_angles['psi'],
s=[1.05 ** x for x in torsion_angles['identity']],
c=[DSSP_to_color[ss] for ss in torsion_angles['central_ss']],
marker='o',
alpha=0.5,
)
if target_pdb and (target_pdb in list(torsion_angles['pdb'])):
i = list(torsion_angles['pdb']).index(target_pdb)
plt.scatter(
x=torsion_angles['phi'][i],
y=torsion_angles['psi'][i],
marker='D',
c='red',
s=50
)
if output_writer:
output_writer.savefig(dpi=150)
if output_dir:
plt.savefig(
os.path.join(output_dir, 'ramachandran', fragment + '.svg'),
format='svg', dpi=300
)
plt.close()
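# Illustrative usage (added sketch, not part of the original module): torsion_angles
# is expected to map 'phi', 'psi', 'identity', 'central_ss' (DSSP code) and 'pdb'
# to parallel sequences. The values below are made up; output_dir must already
# contain a 'ramachandran' subfolder when it is used.
#
#   angles = {'phi': [-60.0, -135.0], 'psi': [-45.0, 135.0],
#             'identity': [40, 55], 'central_ss': ['H', 'E'], 'pdb': ['1abc', '2xyz']}
#   ramachandran(angles, fragment='frag_001', output_dir='plots')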
| [
[
[
7,
9
],
[
2735,
2737
]
],
[
[
34,
45
],
[
856,
867
],
[
924,
935
]
],
[
[
54,
78
],
[
829,
832
],
[
1144,
1147
],
[
1188,
1191
],
[
1232,
1235
],
[
1254,
1257
],
[
1276,
1279
],
[
1313,
1316
],
[
1340,
1343
],
[
1976,
1979
],
[
2020,
2023
],
[
2071,
2074
],
[
2457,
2460
],
[
2710,
2713
],
[
2844,
2847
]
],
[
[
86,
92
],
[
121,
127
]
],
[
[
95,
118
],
[
877,
900
],
[
945,
968
],
[
987,
1010
]
],
[
[
497,
510
],
[
2220,
2233
]
],
[
[
681,
701
],
[
1993,
2013
]
],
[
[
1611,
1623
]
]
] |
""" This is a dummy file used only to avoid errors in ReadTheDocs. The real BF.py is created during the setup once swig is run. """
def CP():
pass
def LeP():
pass
def LaP():
pass
def HoPpro():
pass
def HoPphy():
pass
def FS():
pass
def ELMReLU():
pass
def ELMSigmoid():
pass
def ELMTanh():
pass
def ELMSin():
pass
def ELMSwish():
pass
def nCP():
pass
def nLeP():
pass
def nFS():
pass
def nELMReLU():
pass
def nELMSigmoid():
pass
def nELMTanh():
pass
def nELMSin():
pass
def nELMSwish():
pass
| [
[
[
138,
140
]
],
[
[
159,
162
]
],
[
[
181,
184
]
],
[
[
203,
209
]
],
[
[
228,
234
]
],
[
[
253,
255
]
],
[
[
274,
281
]
],
[
[
300,
310
]
],
[
[
329,
336
]
],
[
[
355,
361
]
],
[
[
380,
388
]
],
[
[
407,
410
]
],
[
[
429,
433
]
],
[
[
452,
455
]
],
[
[
474,
482
]
],
[
[
501,
512
]
],
[
[
531,
539
]
],
[
[
558,
565
]
],
[
[
584,
593
]
]
] |
seq = 'CTTCTCACGTACAACAAAATC'
symbol2number = {"A":0,"C":1,"G":2,"T":3}
def PatternToNumber(Pattern):
if not Pattern:
return 0
symbol = Pattern[-1]
prefix = Pattern[:-1]
return ((4*PatternToNumber(prefix))+symbol2number[symbol])
def NumberToPattern(index, k):
bases = ['A', 'C', 'G', 'T']
pattern = ''
for i in range(k):
pattern += bases[index % 4]
index = index // 4
return pattern[::-1]
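# Worked example (added for clarity): the encoding is base 4 with A,C,G,T -> 0,1,2,3,
# so PatternToNumber('ACGT') = ((0*4 + 1)*4 + 2)*4 + 3 = 27, and
# NumberToPattern(27, 4) inverts it back to 'ACGT'.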
def ComputingFrequencies(text,k):
FrequencyArray =[]
for i in range(0,((4**k))):
FrequencyArray.append(0)
for i in range(0, len(text) - k + 1):
pattern = text[i:(i+k)]
j = PatternToNumber(pattern)
FrequencyArray[j] = FrequencyArray[j]+1
return FrequencyArray
def FasterFrequentWords(text,k):
FrequentPatterns = []
FrequencyArray = ComputingFrequencies(text,k)
maxCount = max(FrequencyArray)
for i in range(0,(4**k)):
if FrequencyArray[i] == maxCount:
pattern = NumberToPattern(i,k)
FrequentPatterns.append(pattern)
return FrequentPatterns
print(FasterFrequentWords("ACGCGGCTCTGAAA",2)) | [
[
[
0,
3
]
],
[
[
33,
46
],
[
247,
260
]
],
[
[
88,
103
],
[
222,
237
],
[
659,
674
]
],
[
[
277,
292
],
[
1007,
1022
]
],
[
[
454,
474
],
[
845,
865
]
],
[
[
767,
786
],
[
1112,
1131
]
]
] |
import os
from dotenv import load_dotenv, find_dotenv
# this will load all the env vars from a .env file located in the project root (api)
load_dotenv(find_dotenv())
CONFIGURATION = {
"development": "config.DevConfig",
"testing": "config.TestConfig",
"production": "config.Config",
"default": "config.Config"
}
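# Illustrative usage (added sketch, assumptions only): the mapping above is typically
# consumed by an app factory, e.g.
#   app.config.from_object(CONFIGURATION[os.getenv('FLASK_CONFIG', 'default')])
# The 'FLASK_CONFIG' variable name is a guess; the real factory may differ.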
class Config(object):
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'a secret'
SQLALCHEMY_TRACK_MODIFICATIONS = False
NRO_SERVICE_ACCOUNT = os.getenv('NRO_SERVICE_ACCOUNT', 'nro_service_account')
SOLR_BASE_URL = os.getenv('SOLR_BASE_URL', None)
SOLR_SYNONYMS_API_URL = os.getenv('SOLR_SYNONYMS_API_URL', None)
NRO_EXTRACTOR_URI = os.getenv('NRO_EXTRACTOR_URI', None)
ALEMBIC_INI='migrations/alembic.ini'
# POSTGRESQL
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD','')
DB_NAME = os.getenv('DATABASE_NAME','')
DB_HOST = os.getenv('DATABASE_HOST','')
DB_PORT = os.getenv('DATABASE_PORT','5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME,
)
## ORACLE - LEGACY NRO NAMESDB
NRO_USER = os.getenv('NRO_USER', '')
NRO_SCHEMA = os.getenv('NRO_SCHEMA', None)
NRO_PASSWORD = os.getenv('NRO_PASSWORD', '')
NRO_DB_NAME = os.getenv('NRO_DB_NAME', '')
NRO_HOST = os.getenv('NRO_HOST', '')
NRO_PORT = int(os.getenv('NRO_PORT', '1521'))
# JWT_OIDC Settings
JWT_OIDC_WELL_KNOWN_CONFIG = os.getenv('JWT_OIDC_WELL_KNOWN_CONFIG')
JWT_OIDC_ALGORITHMS = os.getenv('JWT_OIDC_ALGORITHMS')
JWT_OIDC_JWKS_URI = os.getenv('JWT_OIDC_JWKS_URI')
JWT_OIDC_ISSUER = os.getenv('JWT_OIDC_ISSUER')
JWT_OIDC_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE')
JWT_OIDC_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET')
JWT_OIDC_CACHING_ENABLED = os.getenv('JWT_OIDC_CACHING_ENABLED')
try:
JWT_OIDC_JWKS_CACHE_TIMEOUT = int(os.getenv('JWT_OIDC_JWKS_CACHE_TIMEOUT'))
except:
JWT_OIDC_JWKS_CACHE_TIMEOUT = 300
TESTING = False
DEBUG = False
class DevConfig(Config):
TESTING = False
DEBUG = True
class TestConfig(Config):
DEBUG = True
TESTING = True
# POSTGRESQL
DB_USER = os.getenv('DATABASE_TEST_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_TEST_PASSWORD','')
DB_NAME = os.getenv('DATABASE_TEST_NAME','')
DB_HOST = os.getenv('DATABASE_TEST_HOST','')
DB_PORT = os.getenv('DATABASE_TEST_PORT','5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME,
)
# JWT OIDC settings
## JWT_OIDC_TEST_MODE will set jwt_manager to use
JWT_OIDC_TEST_MODE = True
JWT_OIDC_TEST_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE')
JWT_OIDC_TEST_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET')
JWT_OIDC_TEST_ISSUER = 'https://sso-dev.pathfinder.gov.bc.ca/auth/realms/sbc'
JWT_OIDC_TEST_KEYS = {
"keys": [
{
"kid": "flask-jwt-oidc-test-client",
"kty": "RSA",
"alg": "RS256",
"use": "sig",
"n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR",
"e": "AQAB"
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_JWKS = {
"keys": [
{
"kid": "flask-jwt-oidc-test-client",
"kty": "RSA",
"alg": "RS256",
"use": "sig",
"n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR",
"e": "AQAB",
"d": "C0G3QGI6OQ6tvbCNYGCqq043YI_8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhskURaDwk4-8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh_xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0",
"p": "APXcusFMQNHjh6KVD_hOUIw87lvK13WkDEeeuqAydai9Ig9JKEAAfV94W6Aftka7tGgE7ulg1vo3eJoLWJ1zvKM",
"q": "AOjX3OnPJnk0ZFUQBwhduCweRi37I6DAdLTnhDvcPTrrNWuKPg9uGwHjzFCJgKd8KBaDQ0X1rZTZLTqi3peT43s",
"dp": "AN9kBoA5o6_Rl9zeqdsIdWFmv4DB5lEqlEnC7HlAP-3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhc",
"dq": "ANtbSY6njfpPploQsF9sU26U0s7MsuLljM1E8uml8bVJE1mNsiu9MgpUvg39jEu9BtM2tDD7Y51AAIEmIQex1nM",
"qi": "XLE5O360x-MhsdFXx8Vwz4304-MJg-oGSJXCK_ZWYOB_FGXFRTfebxCsSYi0YwJo-oNu96bvZCuMplzRI1liZw"
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_PEM = """
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDfn1nKQshOSj8xw44oC2klFWSNLmK3BnHONCJ1bZfq0EQ5gIfg
tlvB+Px8Ya+VS3OnK7Cdi4iU1fxO9ktN6c6TjmmmFevk8wIwqLthmCSF3r+3+h4e
ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB
AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs
kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/
xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei
lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia
C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b
AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB
5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb
W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT
NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg
4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn
-----END RSA PRIVATE KEY-----"""
| [
[
[
7,
9
],
[
369,
371
],
[
385,
387
],
[
512,
514
],
[
589,
591
],
[
650,
652
],
[
715,
717
],
[
826,
828
],
[
879,
881
],
[
927,
929
],
[
971,
973
],
[
1015,
1017
],
[
1325,
1327
],
[
1368,
1370
],
[
1417,
1419
],
[
1465,
1467
],
[
1509,
1511
],
[
1554,
1556
],
[
1643,
1645
],
[
1709,
1711
],
[
1766,
1768
],
[
1819,
1821
],
[
1872,
1874
],
[
1932,
1934
],
[
1999,
2001
],
[
2088,
2090
],
[
2385,
2387
],
[
2443,
2445
],
[
2496,
2498
],
[
2545,
2547
],
[
2594,
2596
],
[
2997,
2999
],
[
3062,
3064
]
],
[
[
29,
40
],
[
137,
148
]
],
[
[
42,
53
],
[
149,
160
]
],
[
[
165,
178
]
],
[
[
334,
340
],
[
2243,
2249
],
[
2309,
2315
]
],
[
[
2233,
2242
]
],
[
[
2298,
2308
]
]
] |
# Execution time : 0.003847 seconds
# Solution Explanation
# A simple brute-force approach is enough
import time
width = 40
from functools import reduce
def solution():
v = list()
v.append([0]*23)
v.append([0]*23)
v.append([0]*23)
for line in open('input_p011.in','r'):
v.append(list(map(int,line.split())))
v[-1].extend([0,0,0])
v.append([0]*23)
v.append([0]*23)
v.append([0]*23)
ans = 0
for it1 in range(3,23):
for it2 in range(20):
ans = max(ans,reduce(lambda a,b: a*b,[v[it1][it2+k] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1+k][it2] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1-k][it2+k] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1+k][it2+k] for k in range(4)]))
return ans
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| [
[
[
112,
116
],
[
902,
906
],
[
1024,
1028
]
],
[
[
117,
122
],
[
948,
953
],
[
1009,
1014
]
],
[
[
150,
156
],
[
530,
536
],
[
615,
621
],
[
700,
706
],
[
787,
793
]
],
[
[
162,
170
],
[
963,
971
]
],
[
[
893,
899
],
[
1038,
1044
]
]
] |
import sys
config = {
"Database": {
"Address": "localhost",
"Username": "root",
"Password": "",
"Name": "Houdini",
"Driver": "PyMySQL" if sys.platform == "win32" else "MySQLdb"
},
"Redis": {
"Address": "127.0.0.1",
"Port": 6379
},
"Servers": {
"Login": {
"Address": "127.0.0.1",
"Port": 6112,
"World": False,
"Plugins": [
"Example"
],
"Logging": {
"General": "logs/login.log",
"Errors": "logs/login-errors.log",
"Level": "INFO"
},
"LoginFailureLimit": 5,
"LoginFailureTimer": 3600
},
"Wind": {
"Id": "100",
"Address": "127.0.0.1",
"Port": 9875,
"World": True,
"Capacity": 200,
"CacheExpiry": 3600,
"Plugins": [
"Commands",
"Bot",
"Rank"
],
"Logging": {
"General": "logs/wind.log",
"Errors": "logs/wind-errors.log",
"Level": "DEBUG"
}
}
},
"Tables": {
"Four": [
{ "RoomId": 220, "Tables": [205, 206, 207] },
{ "RoomId": 221, "Tables": [200, 201, 202, 203, 204] }
],
"Mancala": [
{ "RoomId": 111, "Tables": [100, 101, 102, 103, 104] }
],
"Treasure": [
{ "RoomId": 422, "Tables": [300, 301, 302, 303, 304, 305, 306, 307] }
]
},
"Waddles": {
"Sled": [
{ "RoomId": 230, "Waddles": [
{ "Id": 100, "Seats": 4 },
{ "Id": 101, "Seats": 3 },
{ "Id": 102, "Seats": 2 },
{ "Id": 103, "Seats": 2 }
]}
],
"Card": [
{ "RoomId": 320, "Waddles": [
{ "Id": 200, "Seats": 2 },
{ "Id": 201, "Seats": 2 },
{ "Id": 202, "Seats": 2 },
{ "Id": 203, "Seats": 2 }
]}
],
"CardFire": [
{ "RoomId": 812, "Waddles": [
{ "Id": 300, "Seats": 2 },
{ "Id": 301, "Seats": 2 },
{ "Id": 302, "Seats": 3 },
{ "Id": 303, "Seats": 4 }
]}
]
},
"Treasure": {
"Food": [115, 114, 109, 112, 110, 105, 113, 106, 108, 107, 111, 128],
"Furniture": [305, 313, 504, 506, 500, 503, 501, 507, 505, 502, 616, 542, 340, 150, 149, 369, 370, 300],
"Clothing": [3028, 232, 412, 112, 184, 1056, 6012, 118, 774, 366, 103, 498, 469, 1082,
5196, 790, 4039, 326, 105, 122, 5080, 111],
"Gold": {
"Clothing": [2139, 2137, 5385, 3185, 5384, 5386, 6209, 2138, 1735, 3186, 1734, 2136, 4994, 4993, 3187],
"Furniture": [2132, 2131, 2130, 2129]
},
"BorderTabby": {
"Clothing": [24073, 24075, 24078, 24074, 24080, 24076, 24081,
24071, 24072, 24077, 24079, 24070, 4414, 122],
"Furniture": [2180, 2182, 2183]
},
"Dinosaur": {
"Clothing": [24031, 24030, 24033, 24029],
"Furniture": [2180, 2182, 2183]
}
}
} | [
[
[
7,
10
],
[
149,
152
]
],
[
[
11,
17
]
]
] |
# GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name='haveibeenpwned-rapid7-plugin',
version='4.0.2',
description='Determine if a user, domain, or password has been leaked via data available in the Have I Been Pwned database',
author='rapid7',
author_email='',
url='',
packages=find_packages(),
install_requires=['komand'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/icon_haveibeenpwned']
)
| [
[
[
63,
68
],
[
86,
91
]
],
[
[
70,
83
],
[
358,
371
]
]
] |
#-----------------------------------------------------------------------------
# Copyright (c) 2015, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.compat import is_win, is_darwin
import os
import sys
# The osgeo libraries require auxiliary data and may have hidden dependencies.
# There are several possible configurations on how these libraries can be
# deployed.
# This hook evaluates the cases when:
# - the `data` folder is present "in-source" (sharing the same namespace folder
# as the code libraries)
# - the `data` folder is present "out-source" (for instance, on Anaconda for
# Windows, in PYTHONHOME/Library/data)
# In this latter case, the hook also checks for the presence of `proj` library
# (e.g., on Windows in PYTHONHOME) for being added to the bundle.
#
# This hook has been tested with gdal (v.1.11.2 and 1.11.3) on:
# - Win7 64bit
# - Ubuntu 15.04 64bit
# - Mac OS X Yosemite 10.10
#
# TODO: Fix for gdal>=2.0: 'NameError: global name 'help' is not defined'
# flag used to identify an Anaconda environment
is_conda = False
# Auxiliary data:
#
# - general case (data in 'osgeo/data/gdal'):
datas = collect_data_files('osgeo', subdir=os.path.join('data', 'gdal'))
# check if the data has been effectively found in 'osgeo/data/gdal'
if len(datas) == 0:
if hasattr(sys, 'real_prefix'): # check if in a virtual environment
root_path = sys.real_prefix
else:
root_path = sys.prefix
# - conda-specific
if is_win:
tgt_gdal_data = os.path.join('Library', 'data')
src_gdal_data = os.path.join(root_path, 'Library', 'data')
else: # both linux and darwin
tgt_gdal_data = os.path.join('share', 'gdal')
src_gdal_data = os.path.join(root_path, 'share', 'gdal')
if os.path.exists(src_gdal_data):
is_conda = True
datas.append((src_gdal_data, tgt_gdal_data))
# a runtime hook takes care of defining the path for `GDAL_DATA`
# Hidden dependencies
if is_conda:
# if `proj.4` is present, it provides additional functionalities
if is_win:
proj4_lib = os.path.join(root_path, 'proj.dll')
elif is_darwin:
proj4_lib = os.path.join(root_path, 'lib', 'libproj.dylib')
else: # assumed linux-like settings
proj4_lib = os.path.join(root_path, 'lib', 'libproj.so')
if os.path.exists(proj4_lib):
binaries = [(proj4_lib, ""), ]
| [
[
[
441,
459
],
[
1454,
1472
]
],
[
[
491,
497
],
[
1790,
1796
],
[
2376,
2382
]
],
[
[
499,
508
],
[
2449,
2458
]
],
[
[
517,
519
],
[
1489,
1491
],
[
1822,
1824
],
[
1878,
1880
],
[
1980,
1982
],
[
2034,
2036
],
[
2083,
2085
],
[
2404,
2406
],
[
2480,
2482
],
[
2589,
2591
],
[
2642,
2644
]
],
[
[
527,
530
],
[
1624,
1627
],
[
1702,
1705
],
[
1748,
1751
]
],
[
[
1362,
1370
],
[
2290,
2298
]
],
[
[
1446,
1451
],
[
1595,
1600
],
[
2146,
2151
]
],
[
[
1690,
1699
],
[
1891,
1900
],
[
2047,
2056
],
[
2417,
2426
],
[
2493,
2502
],
[
2602,
2611
]
],
[
[
1736,
1745
],
[
1891,
1900
],
[
2047,
2056
],
[
2417,
2426
],
[
2493,
2502
],
[
2602,
2611
]
],
[
[
1806,
1819
],
[
2175,
2188
]
],
[
[
1862,
1875
],
[
2098,
2111
],
[
2160,
2173
]
],
[
[
1964,
1977
],
[
2175,
2188
]
],
[
[
2018,
2031
],
[
2098,
2111
],
[
2160,
2173
]
],
[
[
2122,
2130
],
[
2290,
2298
]
],
[
[
2392,
2401
],
[
2657,
2666
],
[
2690,
2699
]
],
[
[
2468,
2477
],
[
2657,
2666
],
[
2690,
2699
]
],
[
[
2577,
2586
],
[
2657,
2666
],
[
2690,
2699
]
],
[
[
2677,
2685
]
]
] |
import matplotlib.pyplot as plt
import pandas as pd
from numpy import arange, array
import os
import logging
logging.basicConfig()
logger = logging.getLogger('PlotTimeCost')
logger.setLevel('INFO')
class PlotTimeCostBar:
def __init__(self, data, path, show=False):
self.data = data
self.path = path
self.show_flag = show
(filepath, tempfilename) = os.path.split(path)
if not os.path.exists(filepath):
os.makedirs(filepath)
(filename, extension) = os.path.splitext(tempfilename)
self.format = extension[1:]
def plot(self):
data = array([0, 0, 0])
data[1:] = self.data['Time Cost'].values
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
width = 0.5
xticks = self.data.index
n = data.shape[0]
ind = arange(n)
data = data / 3600
colors = ['black', 'tab:blue', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown']
plt.bar(x=ind, height=data, width=width, color=colors)
ax.set_xticks(ind[1:])
ax.set_xticklabels(xticks)
# ax.set_xlabel('Multi-fidelity control strategy', fontsize=16)
ax.tick_params(labelsize=12)
ax.set_ylabel('Time Cost (h)', fontsize=16)
if self.show_flag:
plt.show()
fig.savefig(self.path, format=self.format, dpi=80, bbox_inches='tight')
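# Illustrative usage (added sketch, not part of the original module): the class expects
# a two-row DataFrame with a 'Time Cost' column in seconds (the first bar is a fixed
# zero-height placeholder). Labels and output path below are made up.
#
#   df = pd.DataFrame({'Time Cost': [7200, 3600]}, index=['coarse', 'fine'])
#   PlotTimeCostBar(df, 'plots/time_cost.png').plot()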
| [
[
[
7,
31
],
[
699,
702
],
[
989,
992
],
[
1313,
1316
]
],
[
[
39,
51
]
],
[
[
70,
76
],
[
854,
860
]
],
[
[
78,
83
],
[
618,
623
]
],
[
[
91,
93
],
[
388,
390
],
[
423,
425
],
[
461,
463
],
[
515,
517
]
],
[
[
101,
108
],
[
110,
117
],
[
141,
148
]
],
[
[
132,
138
],
[
175,
181
]
],
[
[
207,
222
]
]
] |
"""
Copyright 2017 Arm Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
ARCH_SPECIFIC_LIBS = ['mkl', 'otherarch']
"""Libraries that are not available on aarch64."""
| [
[
[
591,
609
]
]
] |
apiKey = 'yours'
apiSecret = 'yours'
callbackUrl = 'http://fledna.duapp.com/query'
openid = 'yours'
accessToken = 'yours'
| [
[
[
0,
6
]
],
[
[
18,
27
]
],
[
[
39,
50
]
],
[
[
86,
92
]
],
[
[
104,
115
]
]
] |
# https://stockmarketmba.com/globalstockexchanges.php
exchanges = {
'USA': None,
'Germany': 'XETR',
'Hong Kong': 'XHKG',
'Japan': 'XTKS',
'France': 'XPAR',
'Canada': 'XTSE',
'United Kingdom': 'XLON',
'Switzerland': 'XSWX',
'Australia': 'XASX',
'South Korea': 'XKRX',
'The Netherlands': 'XAMS',
'Spain': 'XMAD',
'Russia': 'MISX',
'Italy': 'XMIL',
'Belgium': 'XBRU',
'Mexiko': 'XMEX',
'Sweden': 'XSTO',
'Norway': 'XOSL',
'Finland': 'XHEL',
'Denmark': 'XCSE',
'Austria': 'XWBO'
}
exchanges_untested = {
'Argentina': 'XBUE',
'Australia_XNEC': 'XNEC',
'Australia': 'XASX',
'Austria': 'XWBO',
'Bahrain': 'XBAH',
'Bangladesh': 'XDHA',
'Belgium': 'XBRU',
'Brazil': 'BVMF',
'Canada_XCNQ': 'XCNQ',
'Canada': 'XTSE',
'Canada_XTSX': 'XTSX',
'Canada_NEOE': 'NEOE',
'Chile': 'XSGO',
'China_SHG': 'XSHG',
'China': 'XSHE',
'Colombia': 'XBOG',
'Croatia': 'XZAG',
'Cyprus': 'XCYS',
'Czech Republic': 'XPRA',
'Denmark': 'XCSE',
'Egypt': 'XCAI',
'Finland': 'XHEL',
'France': 'XPAR',
'Germany_XEQT': 'XEQT',
'Germany_XBER': 'XBER',
'Germany_XDUS': 'XDUS',
'Germany_XFRA': 'XFRA',
'Germany_XMUN': 'XMUN',
'Germany_XSTU': 'XSTU',
'Germany': 'XETR',
'Germany_XQTX': 'XQTX',
'Greece': 'XATH',
'Hong Kong': 'XHKG',
'Hungary': 'XBUD',
'Iceland': 'XICE',
'India_XBOM': 'XBOM',
'India': 'XNSE',
'Indonesia': 'XIDX',
'Ireland': 'XDUB',
'Israel': 'XTAE',
'Italy': 'MTAA',
'Japan': 'XTKS',
'Jordan': 'XAMM',
'Kenya': 'XNAI',
'Kuwait': 'XKUW',
'Luxembourg': 'XLUX',
'Malaysia': 'XKLS',
'Mexico': 'XMEX',
'Morocco': 'XCAS',
'New Zealand': 'XNZE',
'Nigeria': 'XNSA',
'Norway': 'XOSL',
'Norway_NOTC': 'NOTC',
'Oman': 'XMUS',
'Pakistan': 'XKAR',
'Peru': 'XLIM',
'Philippines': 'XPHS',
'Poland': 'XWAR',
'Portugal': 'XLIS',
'Qatar': 'DSMD',
'Romania': 'XBSE',
'Russia': 'MISX',
'Saudi Arabia': 'XSAU',
'Senegal': 'XBRV',
'Singapore': 'XSES',
'Slovenia': 'XLJU',
'South Africa': 'XJSE',
'South Korea': 'XKRX',
'South Korea_XKOS': 'XKOS',
'Spain': 'XMAD',
'Sri Lanka': 'XCOL',
'Sweden_XNGM': 'XNGM',
'Sweden': 'XSTO',
'Switzerland': 'XSWX',
'Switzerland_XVTX': 'XVTX',
'Syria': 'XDSE',
'Taiwan': 'XTAI',
'Thailand': 'XBKK',
'The Netherlands_XTOMX': 'TOMX',
'The Netherlands': 'XAMS',
'Turkey': 'XIST',
'United Arab Emirates_XDFM': 'XDFM',
'United Arab Emirates_DIFX': 'DIFX',
'United Arab Emirates': 'XADS',
'United Kingdom_BATE': 'BATE',
'United Kingdom_CHIX': 'CHIX',
'United Kingdom': 'XLON',
'United Kingdom_XPOS': 'XPOS',
'United Kingdom_TRQX': 'TRQX',
'United Kingdom_BOAT': 'BOAT',
'USA_XASE': 'XASE',
'USA_BATS': 'BATS',
'USA_XNYS': 'XNYS',
'USA_ARCX': 'ARCX',
'USA_XNMS': 'XNMS',
'USA_XNCM': 'XNCM',
'USA_OOTC': 'OOTC',
'USA_XNGS': 'XNGS',
'USA': None,
'Vietnam': 'XSTC',
'Vietnam_HSTC': 'HSTC'
}
currencies = [
'ALL',
'AFN',
'ARS',
'AWG',
'AUD',
'AZN',
'BSD',
'BBD',
'BYN',
'BZD',
'BMD',
'BOB',
'BAM',
'BWP',
'BGN',
'BRL',
'BND',
'KHR',
'CAD',
'KYD',
'CLP',
'CNY',
'COP',
'CRC',
'HRK',
'CUP',
'CZK',
'DKK',
'DOP',
'XCD',
'EGP',
'SVC',
'EUR',
'FKP',
'FJD',
'GHS',
'GIP',
'GTQ',
'GGP',
'GYD',
'HNL',
'HKD',
'HUF',
'ISK',
'INR',
'IDR',
'IRR',
'IMP',
'ILS',
'JMD',
'JPY',
'JEP',
'KZT',
'KPW',
'KRW',
'KGS',
'LAK',
'LBP',
'LRD',
'MKD',
'MYR',
'MUR',
'MXN',
'MNT',
'MZN',
'NAD',
'NPR',
'ANG',
'NZD',
'NIO',
'NGN',
'NOK',
'OMR',
'PKR',
'PAB',
'PYG',
'PEN',
'PHP',
'PLN',
'QAR',
'RON',
'RUB',
'SHP',
'SAR',
'RSD',
'SCR',
'SGD',
'SBD',
'SOS',
'ZAR',
'LKR',
'SEK',
'CHF',
'SRD',
'SYP',
'TWD',
'THB',
'TTD',
'TRY',
'TVD',
'UAH',
'GBP',
'USD',
'UYU',
'UZS',
'VEF',
'VND',
'YER',
'ZWD'
]
| [
[
[
55,
64
]
],
[
[
563,
581
]
],
[
[
3157,
3167
]
]
] |
"""About and help services.
(help browser anyone?)
"""
import importlib
import importlib_metadata
from gi.repository import Gtk
from gaphor.abc import ActionProvider, Service
from gaphor.core import action
class HelpService(Service, ActionProvider):
def __init__(self, session):
self.session = session
def shutdown(self):
pass
@property
def window(self):
return self.session.get_service("main_window").window
@action(name="app.about")
def about(self):
builder = Gtk.Builder()
with importlib.resources.path(
"gaphor.services.helpservice", "about.ui"
) as glade_file:
builder.add_objects_from_file(str(glade_file), ("about",))
about = builder.get_object("about")
about.set_version(importlib_metadata.version("gaphor"))
about.set_modal(True)
about.set_transient_for(self.window)
about.show()
@action(name="app.shortcuts")
def shortcuts(self):
builder = Gtk.Builder()
with importlib.resources.path(
"gaphor.services.helpservice", "shortcuts.ui"
) as glade_file:
builder.add_objects_from_file(str(glade_file), ("shortcuts-gaphor",))
shortcuts = builder.get_object("shortcuts-gaphor")
shortcuts.set_modal(True)
shortcuts.set_transient_for(self.window)
shortcuts.show()
return shortcuts
| [
[
[
65,
74
],
[
555,
564
],
[
1043,
1052
]
],
[
[
83,
101
],
[
803,
821
]
],
[
[
128,
131
],
[
528,
531
],
[
1016,
1019
]
],
[
[
156,
170
],
[
240,
254
]
],
[
[
172,
179
],
[
231,
238
]
],
[
[
204,
210
],
[
464,
470
],
[
944,
950
]
],
[
[
219,
230
]
]
] |
import os
import cv2
import time
import argparse
import numpy as np
from mtcnn import detect_face
import tensorflow as tf
from PIL import Image, ImageDraw
## MTCNN face localizer
def mtcnn_localize_faces(image, pnet, rnet, onet, minsize=20, threshold=[0.7, 0.8, 0.85], factor=0.75):
"""
Localize faces and their landmarks in an image using MTCNN
Params
:image
:minsize - min. face size
:threshold - a list/array with 3 values. The thresholds for pnet, rnet & onet, respectively
:factor - scaling factor for image octave
Return
:bbs - list of bounding boxes
:lds - list of face landmarks
"""
image = image[:, :, 0:3]
bounding_boxes, landmarks = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
bbs = list()
lds = list()
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
bb = np.zeros((nrof_faces,4), dtype=np.int32)
lands = np.zeros((nrof_faces,10), dtype=np.int32)
landmarks = np.reshape(landmarks, (nrof_faces, 10))
for i in range(nrof_faces):
## Convert to int32
lands[i] = np.ravel(landmarks[i])
bb[i] = np.ravel(det[i])
# inner exception
if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][2] >= len(image[0]) or bb[i][3] >= len(image):
print('face is inner of range!')
continue
else:
## get as top, right, bottom, left
bbs.append((bb[i][1], bb[i][2], bb[i][3], bb[i][0]))
lds.append(lands[i])
return bbs, lds
def load_images(images_path):
"""
Read images from directory
Params
:images_path - path to images
Return
:images_l - list of images as arrays
:images_name - list of images' file names
"""
# list of images, as arrays
images_l = []
# get images
images_name = os.listdir(images_path)
# read images
for i in images_name:
image = cv2.imread(os.path.join(images_path, i))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# if image.endswith(".png"):
# images_l.append(image)
images_l.append(image)
return images_l, images_name
def main(args):
st = time.time()
#check if input directory exists
if not os.path.exists(args.input_directory):
print("Error! No input directory", args.input_directory)
return -1
# read images
images_l, images_paths = load_images(args.input_directory)
#create tensorflow session
# init. tensorflow session
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, './mtcnn')
#localize and blur faces, iterate over images
for image, image_path in zip(images_l, images_paths):
print("Processing", image_path + "...")
bbs, lds = mtcnn_localize_faces(image, pnet, rnet, onet, minsize=20, threshold=[0.7, 0.8, 0.85], factor=0.75)
# jumpt iteration if there's no face
if len(bbs) == 0:
print("Couldn't find faces!")
continue
#get faces
for bb, ld in zip(bbs, lds):
#get bounding box
#top, righ, bottom, left
top = bb[0]
right = bb[1]
bottom = bb[2]
left = bb[3]
# build landmarks' x, y pairs
points = []
for x, y in zip(ld[:5], ld[5:]):
points.append(x)
points.append(y)
#get face thumbnail
face_image = image[top:bottom, left:right]
#blur face thumbnail
if args.blur > 0:
face_image = cv2.GaussianBlur(face_image, (105, 105), args.blur)
#black
else:
face_image = np.zeros(face_image.shape)
#write blured face to image
image[top:bottom, left:right] = face_image
#PIL image
# pil_image = Image.fromarray(image)
# pil_image_face = Image.fromarray(face_image)
#eyes' landmarks: first two pairs
# get larger rectangle
# points[0] = points[0] * 0.9
# points[1] = points[1] * 0.9
# points[2] = points[2] * 1.1
# points[3] = points[3] * 1.1
# draw = ImageDraw.Draw(pil_image)
#cover eyes with rectangle
# draw.rectangle(points[:4], fill="black")
#create output directory if it doesn't exist
if not os.path.exists(args.output_directory):
os.makedirs(args.output_directory)
#save image
pil_image = Image.fromarray(image)
pil_image.save(os.path.join(args.output_directory, image_path))
print("Total running time:", time.time() - st, "sec.")
return 0
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-id', '--input_directory', type=str, nargs='?', default="./images")
parser.add_argument('-od', '--output_directory', type=str, nargs='?', default="./blurs")
parser.add_argument('-b', '--blur', type=int, nargs='?', default=46)
args = parser.parse_args()
main(args) | [
[
[
7,
9
],
[
1983,
1985
],
[
2078,
2080
],
[
2392,
2394
],
[
5181,
5183
],
[
5240,
5242
],
[
5386,
5388
]
],
[
[
17,
20
],
[
2067,
2070
],
[
2124,
2127
],
[
2144,
2147
],
[
4175,
4178
]
],
[
[
28,
32
],
[
2332,
2336
],
[
5469,
5473
]
],
[
[
40,
48
],
[
5552,
5560
]
],
[
[
56,
67
],
[
946,
948
],
[
977,
979
],
[
1003,
1005
],
[
1035,
1037
],
[
1065,
1067
],
[
1196,
1198
],
[
1239,
1241
],
[
4317,
4319
]
],
[
[
86,
97
],
[
703,
714
],
[
2932,
2943
]
],
[
[
105,
121
],
[
2668,
2670
],
[
2715,
2717
],
[
2782,
2784
],
[
2800,
2802
]
],
[
[
138,
143
],
[
5332,
5337
]
],
[
[
145,
154
]
],
[
[
184,
204
],
[
3182,
3202
]
],
[
[
1680,
1691
],
[
2562,
2573
]
],
[
[
2311,
2315
],
[
5873,
5877
]
],
[
[
5543,
5549
],
[
5582,
5588
],
[
5675,
5681
],
[
5768,
5774
],
[
5848,
5854
]
],
[
[
5841,
5845
],
[
5878,
5882
]
]
] |
"""Encoder
Description:
This module encodes Planning Problem to Propositional Formulas in CNF
(Conjunctive Normal Form)
License:
Copyright 2021 Debby Nirwan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .pddl_adapter import PlanningProblem
from enum import Enum
from itertools import combinations
class Operator(Enum):
AND = 0,
OR = 1,
IMPLIES = 2
class Clause(object):
def __init__(self, fluent=None):
if fluent:
self._clause = [fluent]
self._single = True
else:
self._clause = []
self._single = False
def __repr__(self):
return f"Clause object. {self._clause}"
def __len__(self):
return len(self._clause)
def __getitem__(self, item):
return self._clause[item]
def __contains__(self, item):
return True if item in self._clause else False
def __eq__(self, other):
if self._single == other.is_single and self._clause == other.clause:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def add(self, fluent, operator: Operator):
if len(self._clause) == 0:
self._single = True
else:
self._single = False
self._clause.append(operator)
self._clause.append(fluent)
return self
@property
def clause(self):
return self._clause
@property
def is_single(self):
return self._single
@property
def empty(self):
return self._clause == []
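# Illustrative usage (added note, not part of the original module): a clause is grown
# by chaining add() calls; the tuples below are made-up fluent/action literals.
#
#   c = Clause(('not', 'move-a-b', '0'))      # single-literal clause
#   c.add(('at', 'a', '0'), Operator.OR)      # now: not(move-a-b)@0  OR  at(a)@0
#   c.is_single                               # -> False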
class PlanningProblemEncoder(object):
def __init__(self, dom_file: str, problem_file: str, length=1):
self._problem = PlanningProblem(dom_file, problem_file)
self._length = length
self._propositional_formulas = self._encode()
def _encode(self):
actions = self._problem.actions
fluents = self._problem.fluents
# 1. encode initial state
init_state = list(self._problem.initial_state)
init_state_clauses = []
for fluent in list(fluents):
if fluent not in init_state:
fluent = ('not',) + fluent
fluent = fluent + ('0',)
init_state_clauses.append(Clause(fluent))
# 2. encode goal state
goal_state = list(self._problem.goal_state)
goal_state_clauses = []
for goal in goal_state:
goal_state_clauses.append(Clause(goal + (str(self._length),)))
enc_actions_clauses = []
explanatory_frame_axioms = []
complete_exclusion_axiom = []
for step in range(self._length):
# 3. encode actions
for act in actions:
if act.effect_pos.issubset(act.precondition_pos):
continue
action_tuple = ('not', act, str(step))
# preconditions
for p in act.precondition_pos:
if 'adjacent' in p:
continue
action_clause = Clause(action_tuple)
p = p + (str(step),)
action_clause.add(p, Operator.OR)
enc_actions_clauses.append(action_clause)
# positive effects
for e in act.effect_pos:
e = e + (str(step + 1),)
action_clause = Clause(action_tuple)
action_clause.add(e, Operator.OR)
enc_actions_clauses.append(action_clause)
# negative effects
for e in act.effect_neg:
e = ('not',) + e + (str(step + 1),)
action_clause = Clause(action_tuple)
action_clause.add(e, Operator.OR)
enc_actions_clauses.append(action_clause)
# 4. explanatory frame axioms
for fluent in fluents:
act_with_pos_effect = []
act_with_neg_effect = []
for act in actions:
if act.effect_pos.issubset(act.precondition_pos):
continue
if fluent in act.effect_pos:
act_with_pos_effect.append(act)
elif fluent in act.effect_neg:
act_with_neg_effect.append(act)
if act_with_pos_effect:
a_pos = fluent + (str(step),)
b_pos = ('not',) + fluent + (str(step + 1),)
clause_pos = Clause(a_pos)
clause_pos.add(b_pos, Operator.OR)
for act in act_with_pos_effect:
c_pos = (act, str(step))
clause_pos.add(c_pos, Operator.OR)
explanatory_frame_axioms.append(clause_pos)
if act_with_neg_effect:
a_neg = ('not',) + fluent + (str(step),)
b_neg = fluent + (str(step + 1),)
clause_neg = Clause(a_neg)
clause_neg.add(b_neg, Operator.OR)
for act in act_with_neg_effect:
c_neg = (act, str(step))
clause_neg.add(c_neg, Operator.OR)
explanatory_frame_axioms.append(clause_neg)
# 5. complete exclusion axiom
for action_pair in combinations(actions, 2):
if action_pair[0].effect_pos.issubset(
action_pair[0].precondition_pos):
continue
if action_pair[1].effect_pos.issubset(
action_pair[1].precondition_pos):
continue
action0_tuple = ('not', action_pair[0], str(step))
action1_tuple = ('not', action_pair[1], str(step))
action_pair_clause = Clause(action0_tuple)
action_pair_clause.add(action1_tuple, Operator.OR)
complete_exclusion_axiom.append(action_pair_clause)
proposition_formulas = init_state_clauses + goal_state_clauses + \
enc_actions_clauses + explanatory_frame_axioms + \
complete_exclusion_axiom
return proposition_formulas
@property
def propositional_formulas(self):
return self._propositional_formulas
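# Illustrative usage (added sketch, assumptions only): encoding a PDDL domain/problem
# pair into CNF clauses for a plan of length 2. The file names are hypothetical.
#
#   ppe = PlanningProblemEncoder('domain.pddl', 'problem.pddl', length=2)
#   clauses = ppe.propositional_formulas   # list of Clause objects (CNF)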
| [
[
[
761,
776
],
[
2248,
2263
]
],
[
[
794,
798
],
[
851,
855
]
],
[
[
821,
833
],
[
5911,
5923
]
],
[
[
842,
850
],
[
1688,
1696
],
[
3690,
3698
],
[
3984,
3992
],
[
4289,
4297
],
[
5119,
5127
],
[
5279,
5287
],
[
5600,
5608
],
[
5760,
5768
],
[
6468,
6476
]
],
[
[
907,
913
],
[
2794,
2800
],
[
2996,
3002
],
[
3587,
3593
],
[
3922,
3928
],
[
4227,
4233
],
[
5063,
5069
],
[
5544,
5550
],
[
6392,
6398
]
],
[
[
2123,
2145
]
]
] |
from src.main.config import config
import requests
import json
def validate_email(email):
try:
api_response = requests.post(
config.EMAIL_VERIFICATION_URL.format(config.NEVERBOUNCE_API_KEY, email)
).content
api_response = json.loads(api_response)
if api_response['result'] == 'invalid':
raise Exception('Invalid email')
except Exception:
raise Exception('Error(s) happened when validating email')
| [
[
[
28,
34
],
[
151,
157
],
[
188,
194
]
],
[
[
42,
50
],
[
124,
132
]
],
[
[
58,
62
],
[
264,
268
]
],
[
[
69,
83
]
]
] |
#!/usr/bin/env python
"""
This is the unittest for gridcellarea module.
python -m unittest -v tests/test_gridcellarea.py
python -m pytest --cov=pyjams --cov-report term-missing -v tests/test_gridcellarea.py
"""
import unittest
def _flatten(itr):
import numpy as np
fitr = np.array(itr).flatten()
if len(fitr) == 0:
return list(fitr)
else:
if isinstance(fitr[0], str):
return [ i for i in fitr ]
else:
return [ i if np.isfinite(i) else np.finfo(float).max
for i in fitr ]
class TestGridcellarea(unittest.TestCase):
"""
Tests for gridcellarea.py
"""
def test_gridcellarea(self):
import numpy as np
from pyjams import gridcellarea
lat = [0., 2.5, 5.0]
lon = [0., 3.75, 7.5]
rearth = 6371009.
fsoll = [[1.15906555e+11, 1.15906555e+11, 1.15906555e+11],
[1.15796237e+11, 1.15796237e+11, 1.15796237e+11],
[1.15465495e+11, 1.15465495e+11, 1.15465495e+11]]
rearth1 = 6371000.
fsoll1 = [[1.15906227e+11, 1.15906227e+11, 1.15906227e+11],
[1.15795910e+11, 1.15795910e+11, 1.15795910e+11],
[1.15465169e+11, 1.15465169e+11, 1.15465169e+11]]
# descending latitudes
dlat = [0., -2.5, -5.0]
# meridian within longitudes
lon360 = [360., 3.75, 7.5]
# dateline within longitudes
lon180 = [180., -180.+3.75, -180.+7.5]
# list
fout = gridcellarea(lat, lon)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# tuple, list
fout = gridcellarea(tuple(lat), lon)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# 2 tuple
fout = gridcellarea(tuple(lat), tuple(lon))
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# array, list
fout = gridcellarea(np.array(lat), lon)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# 2 array
fout = gridcellarea(np.array(lat), np.array(lon))
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# rearth
fout = gridcellarea(lat, lon, rearth=rearth)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# rearth classic
fout = gridcellarea(lat, lon, rearth=rearth1)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll1))
# globe
fout = gridcellarea(lat, lon, globe=True)
fsoll2 = [[3.79774834e+12, 3.79774834e+12, 3.79774834e+12],
[1.15796240e+11, 1.15796240e+11, 1.15796240e+11],
[3.61823239e+12, 3.61823239e+12, 3.61823239e+12]]
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -4)), _flatten(fsoll2))
# descending lats
fout = gridcellarea(dlat, lon, globe=True)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -4)), _flatten(fsoll2))
# meridian in lon
fout = gridcellarea(lat, lon360)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# date line in lon
fout = gridcellarea(lat, lon180)
assert isinstance(fout, np.ndarray)
self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))
# errors
# lat > 90
lat1 = [0., 2.5, 95.0]
self.assertRaises(AssertionError, gridcellarea, lat1, lon)
if __name__ == "__main__":
unittest.main()
| [
[
[
220,
228
],
[
585,
593
],
[
3940,
3948
]
],
[
[
235,
243
],
[
1617,
1625
],
[
1648,
1656
],
[
1803,
1811
],
[
1834,
1842
],
[
1992,
2000
],
[
2023,
2031
],
[
2181,
2189
],
[
2212,
2220
],
[
2376,
2384
],
[
2407,
2415
],
[
2565,
2573
],
[
2596,
2604
],
[
2763,
2771
],
[
2794,
2802
],
[
3153,
3161
],
[
3184,
3192
],
[
3350,
3358
],
[
3381,
3389
],
[
3537,
3545
],
[
3568,
3576
],
[
3724,
3732
],
[
3755,
3763
]
],
[
[
568,
584
]
]
] |
from .agent import A2CAgent
| [
[
[
19,
27
]
]
] |
# This file is to get a rough estimation of how much you need to pay or how many months you need to pay for a loan
import pandas as pd
import numpy as np
from IPython.display import display
def group(number):
"""show money in lakhs and crores (Indian way of presenting money)"""
s = '%d' % number
groups = []
groups.append(s[-3:])
s = s[:-3]
while s and s[-1].isdigit():
groups.append(s[-2:])
s = s[:-2]
return s + ','.join(reversed(groups))
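# Worked example (added for clarity): group(12345678) returns '1,23,45,678' -- the
# last three digits form one group and the remaining digits are grouped in pairs,
# as in the Indian numbering system.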
class loan:
def __init__(self, R=8.1, principal=30, years=5):
"""R is yearly interest
principal is principal amount in lakhs
years = number of years
"""
self.R = R * 0.01
self.r = R * 0.01 * (1 / 12)
self.principal = principal * 100000
self.years = years
self.num_months = self.years * 12
self.months = {"Jan": 31, "Feb": 28, "Mar": 31, "Apr": 30, "May": 31, "June": 30, "Jul": 31, "Aug": 31,
"Sep": 30, "Oct": 31, "Nov": 30, "Dec": 31}
def find_monthly_emi_flat(self, print_=True):
""" find how much EMI needs to be paid given some principal, interest, and number of months when the interest scheme is flat"""
total = self.principal * (1 + self.R * (self.num_months / 12))
if print_:
print("------------- flat interest -------------------")
print("total amount you are paying over full period:", total)
print("monthly installment/emi : {}".format(total / self.num_months))
return total, total / self.num_months
def num_months_emi_diminishing(self, emi, principal=0, interest=0, print_=True):
"""find the number of months you need to pay for, if you are paying emi every month"""
"""emi is in rupees, principal is in lakhs, interest is yearly interest"""
"""n = np.log((E/r)/(E/r -P))/np.log(1+r) """
if not principal:
principal = self.principal
if not interest:
interest = self.r
num_months = np.log((emi / interest) / (emi / interest - principal)) / np.log(1 + interest)
if print_:
print("------------- diminishing interest -------------------")
print("you need to pay {} monthly, for {} months".format(emi, num_months))
return num_months
def find_monthly_emi_diminishing(self, num_months=0, principal=0, print_=True):
""" find how much EMI needs to be paid given some principal, interest, and number of months when the interest scheme is diminishing (reducing balance)"""
"""P*r*(1 + 1/(np.power(1+r,60)-1))"""
if not num_months:
num_months = self.num_months
if not principal:
principal = self.principal
else:
principal *= 100000
monthly_emi = principal * self.r * (1 + 1 / (np.power(1 + self.r, num_months) - 1))
if print_:
print("------------- diminishing interest -------------------")
print(" you need to pay {} monthly, for {} months".format(monthly_emi, num_months))
print("total amount you will pay over full period is roughly {}".format(monthly_emi * num_months))
return monthly_emi
def confirm_diminishing(self, emi, print_=False):
""" function to confirm if the interest scheme is diminishing"""
principal = self.principal
i = 1
while principal > 0:
principal += ((self.r) * principal - emi)
if print_:
print(i, principal)
i += 1
if abs(principal / self.principal) < 0.001:
print("final net amount is {} after {} months".format(principal, i - 1))
return principal, i
## Usage
R = 10.5 #10.5 % monthly interest rate
principal = 30 # principal is 30 lakhs
years = 4.5 # loan term period is 4.5 years
loan1 = loan(R,principal,years) # initialize a loan instance
loan1.find_monthly_emi_flat()
loan1.num_months_emi_diminishing(35000)
loan1.find_monthly_emi_diminishing()
#-----------output-----------------------
# ------------- flat interest -------------------
# total amount you are paying over full period: 4417500.0
# monthly installment/emi : 81805.55555555556
# ------------- diminishing interest -------------------
# you need to pay 35000 monthly, for 159.1257820098328 months
# ------------- diminishing interest -------------------
# you need to pay 69948.58010333449 monthly, for 54.0 months
# total amount you will pay over full period is roughly 3777223.3255800623
def get_df():
# make a table to find how much emi to be paid for different principals over different tenure/periods
loan1 = loan(10.5,principal = 30, years =5)
# print(loan1.find_monthly_emi_diminishing())
years = [2,3,4,5]
amounts = [15,20,25]
yearss = [str(x)+'y' for x in years]
df = pd.DataFrame(columns=yearss)
total = pd.DataFrame(columns = yearss)
for amount in amounts:
arr=[]
arr1 = []
for year in years:
temp = loan1.find_monthly_emi_diminishing(num_months=year*12, principal=amount,print_ = False)
arr.append(group(round(int(temp),-2))) # rounding to closest hundred
arr1.append(group(round(int(temp*year*12),-2)))
df.loc[str(amount)+'Lks']=arr
total.loc[str(amount)+'Lks']=arr1
print("--------------------- emi ------------------")
display(df)
print("---------------------- total ---------------------")
display(total)
# get_df() | [
[
[
123,
135
],
[
4827,
4829
],
[
4868,
4870
]
],
[
[
143,
154
],
[
2041,
2043
],
[
2099,
2101
],
[
2828,
2830
]
],
[
[
183,
190
],
[
5377,
5384
],
[
5458,
5465
]
],
[
[
196,
201
],
[
5116,
5121
],
[
5198,
5203
]
],
[
[
495,
499
],
[
3838,
3842
],
[
4643,
4647
]
],
[
[
3708,
3709
],
[
3843,
3844
]
],
[
[
3747,
3756
],
[
3845,
3854
]
],
[
[
3786,
3791
],
[
3855,
3860
]
],
[
[
3830,
3835
],
[
3892,
3897
],
[
3922,
3927
],
[
3962,
3967
]
],
[
[
4514,
4520
]
]
] |
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
import seaborn as sns
import random
# ========================== CFG =======================
class CFG:
HIT = 1
STOP = 0
actions = [STOP, HIT]
WIN = 1
DRAW = 0
LOSE = -1
# ======================== function ======================
def random_card():
card = np.random.randint(13) + 1
card = min(card, 10)
return card
def value_card(card):
if (card == 1):
return 11
else:
return card
def random_play(policy_player, policy_dealer, init_state = None, debug = False):
player_ace = 0
player_ace_1 = 0
dealer_ace = 0
dealer_ace_1 = 0
player_sum = 0
dealer_sum = 0
dealer_show = 0
his = []
if (init_state):
(player_ace, dealer_show, player_sum, action) = init_state
if (debug):
print(f'player init {player_sum} dealer show {dealer_show} action {action}')
if (dealer_show == 1):
dealer_ace += 1
dealer_sum += value_card(dealer_show)
card = random_card()
if (card == 1):
dealer_ace += 1
dealer_sum += value_card(card)
if (dealer_sum > 21):
dealer_sum -= 10
dealer_ace_1 += 1
his.append((player_ace > player_ace_1, player_sum, dealer_show, action))
if (action == CFG.HIT):
card = random_card()
if (debug):
print(f'player {player_sum} {card}')
if (card == 1):
player_ace += 1
player_sum += value_card(card)
if (player_sum > 21 and player_ace > player_ace_1):
player_sum -= 10
player_ace_1 += 1
else:
while(player_sum <12):
card = random_card()
if (card == 1):
player_ace += 1
player_sum += value_card(card)
if (player_sum > 21):
player_sum -= 10
player_ace_1 += 1
if (True):
card = random_card()
dealer_show = card
if (card == 1):
dealer_ace += 1
dealer_sum += value_card(card)
card = random_card()
if (card == 1):
dealer_ace += 1
dealer_sum += value_card(card)
if (dealer_sum > 21):
dealer_sum -= 10
dealer_ace_1 += 1
while(True):
if (player_sum > 21):
if (debug):
                print(f'bust {player_sum}')
return his, -1
action = policy_player[int(player_ace > player_ace_1), player_sum, dealer_show]
his.append((player_ace > player_ace_1, player_sum, dealer_show, action))
if (action == CFG.STOP):
break
card = random_card()
if (debug):
print(f'player {player_sum} {card}')
if (card == 1):
player_ace += 1
player_sum += value_card(card)
if (player_sum > 21 and player_ace > player_ace_1):
player_sum -= 10
player_ace_1 += 1
while(True):
if (dealer_sum == 21):
if(debug):
print(f'player {player_sum} dealer {dealer_sum}')
if (player_sum == 21):
return his, 0
else:
return his, -1
if (dealer_sum > 21):
return his, 1
action = policy_dealer[dealer_sum]
if (action == CFG.STOP):
break
card = random_card()
if(debug):
print(f'dealer {dealer_sum} {card}')
if (card == 1):
dealer_ace += 1
dealer_sum += value_card(card)
if(dealer_sum > 21 and dealer_ace > dealer_ace_1):
dealer_sum -= 10
dealer_ace_1 += 1
if(debug):
print(f'player sum {player_sum} dealer sum {dealer_sum}')
if (player_sum < dealer_sum):
return his, -1
if (player_sum == dealer_sum):
return his, 0
if (player_sum > dealer_sum):
return his, 1
def MonteCarloPrediction(Num_iter, debug = False):
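    # Monte Carlo control with exploring starts: episodes begin from a random
    # (usable ace, player sum, dealer card, action) pair, returns are averaged per
    # state-action pair (first visit within each episode), and the player policy is
    # improved greedily. Returns the learned policies (no usable / usable ace) and
    # the action-value estimates.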
# ========================== init =======================
policy_dealer = np.zeros((22))
policy_dealer[:17] = CFG.HIT
policy_dealer[17:] = CFG.STOP
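    # The dealer hits below 17 and stands on 17 or above; the player policy over
    # (usable ace, player sum, dealer card) is initialised at random.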
policy_player = np.zeros((2, 22, 11), dtype = int)
for i in range(2):
for j in range(22):
for k in range(11):
policy_player[i,j,k] = random.choice(CFG.actions)
value_action = np.zeros((2, 10, 10, 2))
cnt = np.ones((2, 10, 10, 2))
for iter in trange(Num_iter):
if (debug):
print(f'---------------- {iter} -------------------------')
check = set()
init_usable = random.choice(range(2))
init_show = random_card()
init_player_sum = random.choice(range(12,22))
init_action = random.choice(CFG.actions)
his, reward = random_play(policy_player, policy_dealer,
(init_usable, init_show, init_player_sum, init_action), debug)
if (debug):
print(his, reward)
for (usable, player_sum, dealer_show, action) in his:
if ((usable, player_sum, dealer_show, action) in check):
continue
check.add((usable, player_sum, dealer_show, action))
value_action[int(usable), player_sum - 12, dealer_show - 1, action] += reward
cnt[int(usable), player_sum - 12, dealer_show - 1, action] += 1
Q = np.zeros((2))
Q[0] = value_action[int(usable), player_sum - 12, dealer_show - 1, 0]/cnt[int(usable), player_sum - 12, dealer_show - 1, 0]
Q[1] = value_action[int(usable), player_sum - 12, dealer_show - 1, 1]/cnt[int(usable), player_sum - 12, dealer_show - 1, 1]
policy_player[int(usable), player_sum, dealer_show] = np.argmax(Q)
arr = value_action/cnt
return policy_player[0, 12:,1:], policy_player[1, 12:,1:], arr
# ======================== main ==========================
NoUsable500k, Usable500k, arr = MonteCarloPrediction(10000000)
value = np.zeros((2,10,10))
for i in range(2):
for j in range(10):
for k in range(10):
value[i,j,k] = np.max(arr[i,j,k,:])
ax = sns.heatmap(value[0,...], cmap="YlGnBu", xticklabels=range(1, 11)
,yticklabels=list(range(12, 22)))
plt.savefig('figure_5_5_value_NoUsable.png')
plt.close()
ax = sns.heatmap(value[1,...], cmap="YlGnBu", xticklabels=range(1, 11)
,yticklabels=list(range(12, 22)))
plt.savefig('figure_5_5_value_Usable.png')
plt.close()
ax = sns.heatmap(NoUsable500k, cmap="YlGnBu", xticklabels=range(1, 11)
,yticklabels=list(range(12, 22)))
plt.savefig('figure_5_5_policy_NoUsable.png')
plt.close()
ax = sns.heatmap(Usable500k, cmap="YlGnBu", xticklabels=range(1, 11)
,yticklabels=list(range(12, 22)))
plt.savefig('figure_5_5_policy_Usable.png')
plt.close()
 | [
[
[
7,
18
],
[
6118,
6120
],
[
6237,
6239
],
[
364,
366
],
[
4231,
4233
],
[
4334,
4336
],
[
4544,
4546
],
[
4579,
4581
],
[
5526,
5528
],
[
5878,
5880
]
],
[
[
27,
51
],
[
6369,
6372
],
[
6414,
6417
],
[
6536,
6539
],
[
6579,
6582
],
[
6701,
6704
],
[
6747,
6750
],
[
6868,
6871
],
[
6912,
6915
]
],
[
[
69,
75
],
[
4620,
4626
]
],
[
[
83,
97
],
[
6265,
6268
],
[
6432,
6435
],
[
6597,
6600
],
[
6766,
6769
]
],
[
[
105,
111
],
[
4491,
4497
],
[
4774,
4780
],
[
4858,
4864
],
[
4908,
4914
]
],
[
[
177,
180
],
[
1372,
1375
],
[
2781,
2784
],
[
3495,
3498
],
[
4271,
4274
],
[
4304,
4307
],
[
4505,
4508
],
[
4922,
4925
]
],
[
[
338,
349
],
[
1074,
1085
],
[
1401,
1412
],
[
1796,
1807
],
[
2061,
2072
],
[
2229,
2240
],
[
2825,
2836
],
[
3539,
3550
],
[
4818,
4829
]
],
[
[
436,
446
],
[
1034,
1044
],
[
1162,
1172
],
[
1578,
1588
],
[
1896,
1906
],
[
2192,
2202
],
[
2329,
2339
],
[
2982,
2992
],
[
3695,
3705
]
],
[
[
527,
538
],
[
4958,
4969
]
],
[
[
4100,
4120
],
[
6078,
6098
]
],
[
[
6046,
6058
],
[
6609,
6621
]
],
[
[
6060,
6070
],
[
6778,
6788
]
],
[
[
6072,
6075
],
[
6244,
6247
]
],
[
[
6110,
6115
],
[
6222,
6227
],
[
6277,
6282
],
[
6444,
6449
]
],
[
[
6143,
6144
],
[
6248,
6249
],
[
6228,
6229
]
],
[
[
6166,
6167
],
[
6250,
6251
],
[
6230,
6231
]
],
[
[
6194,
6195
],
[
6252,
6253
],
[
6232,
6233
]
],
[
[
6260,
6262
]
],
[
[
6427,
6429
]
],
[
[
6592,
6594
]
],
[
[
6761,
6763
]
]
] |
# Copyright (c) 2021 ICHIRO ITS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# flake8: noqa
import yakusha.data_types
# flake8: noqa
from yakusha.json_to_msg import dict_to_msg, json_to_msg
# flake8: noqa
from yakusha.msg_to_json import msg_to_dict, msg_to_json
| [
[
[
1112,
1130
]
],
[
[
1179,
1190
]
],
[
[
1192,
1203
]
],
[
[
1252,
1263
]
],
[
[
1265,
1276
]
]
] |
# Import the necessary modules
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
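# Note: count_train, count_test, y_train and y_test are assumed to come from an
# earlier vectorization / train-test split step that is not shown in this snippet.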
# Instantiate a Multinomial Naive Bayes classifier: nb_classifier
nb_classifier = MultinomialNB()
# Fit the classifier to the training data
nb_classifier.fit(count_train, y_train)
# Create the predicted tags: pred
pred = nb_classifier.predict(count_test)
# Calculate the accuracy score: score
score = metrics.accuracy_score(y_test, pred)
print(score)
# Calculate the confusion matrix: cm
cm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])
print(cm)
 | [
[
[
63,
76
],
[
188,
201
]
],
[
[
97,
104
],
[
410,
417
],
[
503,
510
]
],
[
[
172,
185
],
[
247,
260
],
[
329,
342
]
],
[
[
322,
326
],
[
441,
445
],
[
536,
540
]
],
[
[
402,
407
],
[
453,
458
]
],
[
[
498,
500
],
[
573,
575
]
]
] |
import torch
from torch.nn.modules.pooling import MaxPool2d
from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid
from .batchnorm import BatchNorm2d, BatchNorm3d
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
class Quantize(torch.nn.Module):
r"""Quantizes an incoming tensor
Args:
`scale`: scale of the output Quantized Tensor
`zero_point`: zero_point of output Quantized Tensor
`dtype`: data type of output Quantized Tensor
Attributes:
`scale`, `zero_point`, `dtype`
Examples::
>>> t = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> qt = qm(t)
>>> print(qt)
tensor([[ 1., -1.],
[ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)
"""
scale: torch.Tensor
zero_point: torch.Tensor
def __init__(self, scale, zero_point, dtype):
super(Quantize, self).__init__()
self.register_buffer('scale', torch.tensor([scale]))
self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.long))
self.dtype = dtype
def forward(self, X):
return torch.quantize_per_tensor(X, float(self.scale),
int(self.zero_point), self.dtype)
@staticmethod
def from_float(mod):
assert hasattr(mod, 'activation_post_process')
scale, zero_point = mod.activation_post_process.calculate_qparams()
return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)
def extra_repr(self):
return 'scale={}, zero_point={}, dtype={}'.format(self.scale, self.zero_point, self.dtype)
class DeQuantize(torch.nn.Module):
r"""Dequantizes an incoming tensor
Examples::
>>> input = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> quantized_input = qm(input)
>>> dqm = DeQuantize()
>>> dequantized = dqm(quantized_input)
>>> print(dequantized)
tensor([[ 1., -1.],
[ 1., -1.]], dtype=torch.float32)
"""
def __init__(self):
super(DeQuantize, self).__init__()
def forward(self, Xq):
return Xq.dequantize()
@staticmethod
def from_float(mod):
return DeQuantize()
__all__ = [
'BatchNorm2d',
'BatchNorm3d',
'_ConvNd',
'Conv1d',
'Conv2d',
'Conv3d',
'ConvTranspose1d',
'ConvTranspose2d',
'ConvTranspose3d',
'DeQuantize',
'ELU',
'Embedding',
'EmbeddingBag',
'GroupNorm',
'Hardswish',
'InstanceNorm1d',
'InstanceNorm2d',
'InstanceNorm3d',
'LayerNorm',
'LeakyReLU',
'Linear',
'MaxPool2d',
'Quantize',
'ReLU6',
'Sigmoid',
# Wrapper modules
'FloatFunctional',
'FXFloatFunctional',
'QFunctional',
]
| [
[
[
7,
12
],
[
571,
576
],
[
1209,
1214
],
[
1238,
1243
],
[
2112,
2117
],
[
1381,
1386
],
[
1447,
1452
],
[
1480,
1485
],
[
1562,
1567
]
],
[
[
50,
59
]
],
[
[
85,
90
]
],
[
[
92,
101
]
],
[
[
103,
106
]
],
[
[
108,
117
]
],
[
[
119,
126
]
],
[
[
150,
161
]
],
[
[
163,
174
]
],
[
[
202,
211
]
],
[
[
213,
222
]
],
[
[
224,
238
]
],
[
[
246,
260
]
],
[
[
262,
276
]
],
[
[
295,
302
]
],
[
[
304,
310
]
],
[
[
312,
318
]
],
[
[
320,
326
]
],
[
[
345,
360
]
],
[
[
362,
377
]
],
[
[
379,
394
]
],
[
[
415,
421
]
],
[
[
449,
458
]
],
[
[
460,
472
]
],
[
[
506,
521
]
],
[
[
523,
540
]
],
[
[
542,
553
]
],
[
[
562,
570
],
[
1316,
1324
],
[
1875,
1883
]
],
[
[
2101,
2111
],
[
2627,
2637
],
[
2774,
2784
]
],
[
[
2788,
2795
]
]
] |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Instance."""
import unittest
from tests.common.gcp_type.test_data import fake_instance
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.gcp_type import instance
class InstanceTest(ForsetiTestCase):
"""Test Instance class."""
def test_network_interface_creation(self):
"""Test that network_interface creation is correct."""
network_interfaces = (instance.Instance(
'name-0', **fake_instance.FAKE_INSTANCE_RESPONSE_1)
.create_network_interfaces())
self.assertEqual(len(network_interfaces), 1)
network_interface = network_interfaces[0]
self.assertEqual('compute#networkInterface', network_interface.kind)
self.assertEqual('nic0', network_interface.name)
self.assertEqual('https://www.googleapis.com/compute/v1/projects/'
'project-1/global/networks/network-1',
network_interface.network)
self.assertEqual('000.000.000.000', network_interface.network_ip)
self.assertEqual('https://www.googleapis.com/compute/v1/projects'
'/project-1/regions/datacenter'
'/subnetworks/subnetwork-1',
network_interface.subnetwork)
self.assertEqual([{u'kind': u'compute#accessConfig',
u'type': u'ONE_TO_ONE_NAT', u'name': u'External NAT',
u'natIP': u'000.000.000.001'}],
network_interface.access_configs)
def test_recognize_two_network_interfaces(self):
"""Test that it recognizes two network_interfaces."""
network_interfaces = (instance.Instance(
'name-1', **fake_instance.FAKE_INSTANCE_RESPONSE_2)
.create_network_interfaces())
self.assertEqual(len(network_interfaces), 2)
def test_legacy_networks(self):
""" Test legacy networks without a subnet works."""
network_interfaces = (instance.Instance(
'name-0', **fake_instance.FAKE_INSTANCE_RESPONSE_LEGACY)
.create_network_interfaces())
self.assertEqual(len(network_interfaces), 1)
network_interface = network_interfaces[0]
self.assertEqual('compute#networkInterface', network_interface.kind)
self.assertEqual('nic0', network_interface.name)
self.assertEqual('https://www.googleapis.com/compute/v1/projects/'
'project-1/global/networks/network-1',
network_interface.network)
self.assertEqual('000.000.000.000', network_interface.network_ip)
self.assertEqual([{u'kind': u'compute#accessConfig',
u'type': u'ONE_TO_ONE_NAT', u'name': u'External NAT',
u'natIP': u'000.000.000.001'}],
network_interface.access_configs)
if __name__ == '__main__':
unittest.main()
| [
[
[
684,
697
],
[
1058,
1071
],
[
2349,
2362
],
[
2672,
2685
]
],
[
[
731,
746
],
[
826,
841
]
],
[
[
796,
804
],
[
1016,
1024
],
[
2307,
2315
],
[
2629,
2637
]
],
[
[
813,
825
]
]
] |
'''Tests for bdpy.preprocessor'''
from unittest import TestCase, TestLoader, TextTestRunner
import numpy as np
from scipy.signal import detrend
from bdpy import preproc
class TestPreprocessor(TestCase):
'''Tests of 'preprocessor' module'''
@classmethod
def test_average_sample(cls):
'''Test for average_sample'''
x = np.random.rand(10, 100)
group = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
exp_output_x = np.vstack((np.average(x[0:5, :], axis=0),
np.average(x[5:10, :], axis=0)))
exp_output_ind = np.array([0, 5])
test_output_x, test_output_ind = preproc.average_sample(x, group,
verbose=True)
np.testing.assert_array_equal(test_output_x, exp_output_x)
np.testing.assert_array_equal(test_output_ind, exp_output_ind)
@classmethod
def test_detrend_sample_default(cls):
'''Test for detrend_sample (default)'''
x = np.random.rand(20, 10)
group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear')
+ np.mean(x[0:10, :], axis=0),
detrend(x[10:20, :], axis=0, type='linear')
+ np.mean(x[10:20, :], axis=0)))
test_output = preproc.detrend_sample(x, group, verbose=True)
np.testing.assert_array_equal(test_output, exp_output)
@classmethod
def test_detrend_sample_nokeepmean(cls):
'''Test for detrend_sample (keep_mean=False)'''
x = np.random.rand(20, 10)
group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear'),
detrend(x[10:20, :], axis=0, type='linear')))
test_output = preproc.detrend_sample(x, group, keep_mean=False,
verbose=True)
np.testing.assert_array_equal(test_output, exp_output)
@classmethod
def test_normalize_sample(cls):
'''Test for normalize_sample (default)'''
x = np.random.rand(20, 10)
group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
mean_a = np.mean(x[0:10, :], axis=0)
mean_b = np.mean(x[10:20, :], axis=0)
exp_output = np.vstack((100 * (x[0:10, :] - mean_a) / mean_a,
100 * (x[10:20, :] - mean_b) / mean_b))
test_output = preproc.normalize_sample(x, group, verbose=True)
np.testing.assert_array_equal(test_output, exp_output)
@classmethod
def test_shift_sample_singlegroup(cls):
'''Test for shift_sample (single group, shift_size=1)'''
x = np.array([[1, 2, 3],
[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43]])
grp = np.array([1, 1, 1, 1, 1])
exp_output_data = np.array([[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43]])
exp_output_ind = [0, 1, 2, 3]
# Default shift_size = 1
test_output_data, test_output_ind = preproc.shift_sample(x, grp,
verbose=True)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_ind, exp_output_ind)
@classmethod
def test_shift_sample_twogroup(cls):
'''Test for shift_sample (two groups, shift_size=1)'''
x = np.array([[1, 2, 3],
[11, 12, 13],
[21, 22, 23],
[31, 32, 33],
[41, 42, 43],
[51, 52, 53]])
grp = np.array([1, 1, 1, 2, 2, 2])
exp_output_data = np.array([[11, 12, 13],
[21, 22, 23],
[41, 42, 43],
[51, 52, 53]])
exp_output_ind = [0, 1, 3, 4]
# Default shift_size=1
test_output_data, test_output_ind = preproc.shift_sample(x, grp,
verbose=True)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_ind, exp_output_ind)
@classmethod
def test_select_top_default(cls):
'''Test for select_top (default, axis=0)'''
test_data = np.array([[1, 2, 3, 4, 5],
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45]])
test_value = np.array([15, 3, 6, 20, 0])
test_num = 3
exp_output_data = np.array([[1, 2, 3, 4, 5],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35]])
exp_output_index = np.array([0, 2, 3])
test_output_data, test_output_index = preproc.select_top(test_data,
test_value,
test_num)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_index, exp_output_index)
@classmethod
def test_select_top_axisone(cls):
'''Test for select_top (axis=1)'''
test_data = np.array([[1, 2, 3, 4, 5],
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45]])
test_value = np.array([15, 3, 6, 20, 0])
test_num = 3
exp_output_data = np.array([[1, 3, 4],
[11, 13, 14],
[21, 23, 24],
[31, 33, 34],
[41, 43, 44]])
exp_output_index = np.array([0, 2, 3])
test_output_data, test_output_index = preproc.select_top(test_data,
test_value,
test_num,
axis=1)
np.testing.assert_array_equal(test_output_data, exp_output_data)
np.testing.assert_array_equal(test_output_index, exp_output_index)
if __name__ == '__main__':
test_suite = TestLoader().loadTestsFromTestCase(TestPreprocessor)
TextTestRunner(verbosity=2).run(test_suite)
| [
[
[
57,
65
],
[
198,
206
]
],
[
[
67,
77
],
[
6949,
6959
]
],
[
[
79,
93
],
[
7006,
7020
]
],
[
[
102,
113
],
[
353,
355
],
[
393,
395
],
[
458,
460
],
[
469,
471
],
[
534,
536
],
[
592,
594
],
[
771,
773
],
[
838,
840
],
[
1022,
1024
],
[
1061,
1063
],
[
1180,
1182
],
[
1268,
1270
],
[
1407,
1409
],
[
1517,
1519
],
[
1704,
1706
],
[
1743,
1745
],
[
1862,
1864
],
[
2136,
2138
],
[
2308,
2310
],
[
2347,
2349
],
[
2462,
2464
],
[
2507,
2509
],
[
2558,
2560
],
[
2760,
2762
],
[
2955,
2957
],
[
3137,
3139
],
[
3190,
3192
],
[
3598,
3600
],
[
3671,
3673
],
[
3869,
3871
],
[
4087,
4089
],
[
4143,
4145
],
[
4549,
4551
],
[
4622,
4624
],
[
4814,
4816
],
[
5075,
5077
],
[
5151,
5153
],
[
5326,
5328
],
[
5584,
5586
],
[
5657,
5659
],
[
5844,
5846
],
[
6105,
6107
],
[
6181,
6183
],
[
6432,
6434
],
[
6763,
6765
],
[
6836,
6838
]
],
[
[
139,
146
],
[
1191,
1198
],
[
1329,
1336
],
[
1873,
1880
],
[
1949,
1956
]
],
[
[
165,
172
],
[
651,
658
],
[
1461,
1468
],
[
2018,
2025
],
[
2702,
2709
],
[
3481,
3488
],
[
4432,
4439
],
[
5393,
5400
],
[
6499,
6506
]
],
[
[
181,
197
],
[
6984,
7000
]
],
[
[
6936,
6946
],
[
7038,
7048
]
]
] |
"""
This file offers the methods to automatically retrieve the graph Streptomyces flavidovirens.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:51:05.041684
The undirected graph Streptomyces flavidovirens has 6208 nodes and 745893
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03871 and has 31 connected components, where the component
with most nodes has 6140 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 201, the mean node degree is 240.30,
and the node degree mode is 2. The top 5 most central nodes are 1123319.AUBE01000020_gene3023
(degree 2822), 1123319.AUBE01000003_gene803 (degree 1858), 1123319.AUBE01000022_gene2882
(degree 1842), 1123319.AUBE01000016_gene5937 (degree 1794) and 1123319.AUBE01000016_gene5980
(degree 1776).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesFlavidovirens
# Then load the graph
graph = StreptomycesFlavidovirens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def StreptomycesFlavidovirens(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Streptomyces flavidovirens graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Streptomyces flavidovirens graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:51:05.041684
The undirected graph Streptomyces flavidovirens has 6208 nodes and 745893
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03871 and has 31 connected components, where the component
with most nodes has 6140 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 201, the mean node degree is 240.30,
and the node degree mode is 2. The top 5 most central nodes are 1123319.AUBE01000020_gene3023
(degree 2822), 1123319.AUBE01000003_gene803 (degree 1858), 1123319.AUBE01000022_gene2882
(degree 1842), 1123319.AUBE01000016_gene5937 (degree 1794) and 1123319.AUBE01000016_gene5980
(degree 1776).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesFlavidovirens
# Then load the graph
graph = StreptomycesFlavidovirens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesFlavidovirens",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| [
[
[
2872,
2876
],
[
3177,
3181
]
],
[
[
2918,
2945
],
[
6618,
6645
]
],
[
[
2977,
2991
],
[
3187,
3201
]
],
[
[
3030,
3055
]
]
] |
# -*- coding: utf-8 -*-
# --------------------------
# Copyright © 2014 - Qentinel Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------
import os
def is_root():
try:
# Windows doesn't have getuid. We just assume that user is not root. We
# most likely won't need proper Windows support here anyway.
uid = os.getuid() # pylint: disable=no-member
except AttributeError:
return False
# User id 0 is reserved for superuser aka root
if uid == 0:
return True
return False
def is_docker():
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv')
or os.path.isfile(path) and any('docker' in line for line in open(path)) # noqa: W503
)
| [
[
[
685,
687
],
[
877,
879
],
[
1142,
1144
],
[
1183,
1185
]
],
[
[
694,
701
]
],
[
[
1077,
1086
]
]
] |
# Generated by Django 2.2.10 on 2020-02-24 11:38
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('finance', '0002_auto_20200224_1125'),
]
operations = [
migrations.AlterUniqueTogether(
name='category',
unique_together={('name', 'user')},
),
]
| [
[
[
74,
82
],
[
219,
227
]
],
[
[
105,
115
],
[
134,
144
],
[
187,
197
],
[
328,
338
]
],
[
[
124,
133
]
]
] |
"""empty message
Revision ID: 156b555e16b7
Revises: fc1cedce5988
Create Date: 2020-05-04 10:39:56.803842
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '156b555e16b7'
down_revision = 'fc1cedce5988'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('posts', 'author',
existing_type=sa.VARCHAR(length=128),
nullable=False)
op.alter_column('posts', 'description',
existing_type=sa.VARCHAR(length=256),
nullable=False)
op.alter_column('posts', 'title',
existing_type=sa.VARCHAR(length=128),
nullable=False)
op.drop_index('ix_posts_timestamp', table_name='posts')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_posts_timestamp', 'posts', ['timestamp'], unique=False)
op.alter_column('posts', 'title',
existing_type=sa.VARCHAR(length=128),
nullable=True)
op.alter_column('posts', 'description',
existing_type=sa.VARCHAR(length=256),
nullable=True)
op.alter_column('posts', 'author',
existing_type=sa.VARCHAR(length=128),
nullable=True)
# ### end Alembic commands ###
| [
[
[
131,
133
],
[
384,
386
],
[
507,
509
],
[
635,
637
],
[
757,
759
],
[
937,
939
],
[
1017,
1019
],
[
1138,
1140
],
[
1265,
1267
]
],
[
[
141,
157
],
[
448,
450
],
[
576,
578
],
[
698,
700
],
[
1080,
1082
],
[
1207,
1209
],
[
1329,
1331
]
],
[
[
201,
209
]
],
[
[
227,
240
]
],
[
[
258,
271
]
],
[
[
279,
289
]
],
[
[
303,
310
]
],
[
[
854,
863
]
]
] |
import sys
def error():
quit(f'Error on Line {line_num}:\n{line}')
__author__ = 'Aarav Dave'
if len(sys.argv) > 1:
__file__ = sys.argv[1]
else:
__file__ = 'code.qps'
vars = {}
nest = []
with open(__file__) as file:
for line_num, line in enumerate(file.readlines()):
line = line.rstrip()
if (not line) or line.startswith('//'):
continue
line = line.lstrip()
current = ['']
in_string = 0
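        # Crude tokenizer: split the line on '(', ')', '.' and spaces outside of
        # single-quoted strings; a ';' cuts off the rest of the line.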
for char in line:
if char == '\'':
in_string = 1 - in_string
if char in '(). ' and not in_string:
current.append('')
continue
if char == ';':
break
current[-1] += char
while '' in current:
current.remove('')
main, *rest = current
if main == 'log':
if rest:
if len(rest) > 1:
if rest[0] in vars:
rest[0] = vars[rest[0]]
print(rest[0].strip('\''))
else:
error()
else:
print()
if main == 'var':
name, _, *rest = rest
else:
print(current)
| [
[
[
7,
10
],
[
108,
111
],
[
138,
141
]
],
[
[
17,
22
],
[
1101,
1106
]
],
[
[
75,
85
]
],
[
[
127,
135
],
[
214,
222
]
],
[
[
160,
168
],
[
214,
222
]
],
[
[
183,
187
],
[
958,
962
],
[
998,
1002
]
],
[
[
193,
197
]
],
[
[
227,
231
],
[
269,
273
]
],
[
[
241,
249
],
[
52,
60
]
],
[
[
251,
255
],
[
303,
307
]
],
[
[
296,
300
],
[
333,
337
],
[
342,
346
],
[
402,
406
],
[
65,
69
]
],
[
[
395,
399
],
[
481,
485
],
[
65,
69
]
],
[
[
424,
431
],
[
623,
630
],
[
730,
737
],
[
771,
778
],
[
792,
799
],
[
834,
841
],
[
1243,
1250
]
],
[
[
447,
456
],
[
548,
557
],
[
596,
605
]
],
[
[
473,
477
],
[
502,
506
],
[
573,
577
],
[
682,
686
],
[
745,
749
]
],
[
[
532,
541
],
[
596,
605
],
[
548,
557
]
],
[
[
820,
824
],
[
854,
858
],
[
1162,
1166
]
],
[
[
1189,
1193
]
],
[
[
1195,
1196
]
]
] |
from stanza.pipeline.core import Pipeline
from stanza.models.common.doc import Document
from stanza.utils.resources import download
from stanza._version import __version__, __resources_version__
import logging.config
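# Route every logger to a single console handler that prints timestamped messages.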
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s %(levelname)s: %(message)s",
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "standard",
}
},
"loggers": {
"": {"handlers": ["console"]}
},
}
)
| [
[
[
33,
41
]
],
[
[
79,
87
]
],
[
[
123,
131
]
],
[
[
160,
171
]
],
[
[
173,
194
]
],
[
[
203,
217
],
[
218,
225
]
]
] |