hexsha (stringlengths 40..40) | size (int64 4..1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4..209) | max_stars_repo_name (stringlengths 5..121) | max_stars_repo_head_hexsha (stringlengths 40..40) | max_stars_repo_licenses (sequencelengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 4..209) | max_issues_repo_name (stringlengths 5..121) | max_issues_repo_head_hexsha (stringlengths 40..40) | max_issues_repo_licenses (sequencelengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 4..209) | max_forks_repo_name (stringlengths 5..121) | max_forks_repo_head_hexsha (stringlengths 40..40) | max_forks_repo_licenses (sequencelengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 4..1.02M) | avg_line_length (float64 1.07..66.1k) | max_line_length (int64 4..266k) | alphanum_fraction (float64 0.01..1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56684573a35ded67e1d51bead88cbf8a70a5d65c | 5,863 | py | Python | django/views/generic/list.py | vzuuum/django | b282fb06ca5d483f0166e690cbce8073940323d5 | ["BSD-3-Clause"] | 27 | 2015-02-11T16:31:43.000Z | 2021-12-18T04:24:19.000Z | django/views/generic/list.py | vzuuum/django | b282fb06ca5d483f0166e690cbce8073940323d5 | ["BSD-3-Clause"] | 1 | 2022-02-11T15:34:08.000Z | 2022-02-11T15:34:08.000Z | django/views/generic/list.py | jamespacileo/django | 9d3f86c72f5d22113b8cb5cd006abb9297f2fd4e | ["BSD-3-Clause"] | 14 | 2015-12-27T20:19:14.000Z | 2020-12-14T01:41:22.000Z |
import re
from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, View
class MultipleObjectMixin(object):
allow_empty = True
queryset = None
model = None
paginate_by = None
context_object_name = None
paginator_class = Paginator
def get_queryset(self):
"""
Get the list of items for this view. This must be an iterable, and may
be a queryset (in which qs-specific behavior will be enabled).
"""
if self.queryset is not None:
queryset = self.queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise ImproperlyConfigured(u"'%s' must define 'queryset' or 'model'"
% self.__class__.__name__)
return queryset
def paginate_queryset(self, queryset, page_size):
"""
Paginate the queryset, if needed.
"""
paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
page = self.kwargs.get('page') or self.request.GET.get('page') or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_(u"Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage:
raise Http404(_(u'Invalid page (%(page_number)s)') % {
'page_number': page_number
})
def get_paginate_by(self, queryset):
"""
Get the number of items to paginate by, or ``None`` for no pagination.
"""
return self.paginate_by
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True):
"""
Return an instance of the paginator for this view.
"""
return self.paginator_class(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page)
def get_allow_empty(self):
"""
Returns ``True`` if the view should display empty lists, and ``False``
if a 404 should be raised instead.
"""
return self.allow_empty
def get_context_object_name(self, object_list):
"""
Get the name of the item to be used in the context.
"""
if self.context_object_name:
return self.context_object_name
elif hasattr(object_list, 'model'):
return smart_str('%s_list' % object_list.model._meta.object_name.lower())
else:
return None
def get_context_data(self, **kwargs):
"""
Get the context for this view.
"""
queryset = kwargs.pop('object_list')
page_size = self.get_paginate_by(queryset)
if page_size:
paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
context = {
'paginator': paginator,
'page_obj': page,
'is_paginated': is_paginated,
'object_list': queryset
}
else:
context = {
'paginator': None,
'page_obj': None,
'is_paginated': False,
'object_list': queryset
}
context.update(kwargs)
context_object_name = self.get_context_object_name(queryset)
if context_object_name is not None:
context[context_object_name] = queryset
return context
class BaseListView(MultipleObjectMixin, View):
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty and len(self.object_list) == 0:
raise Http404(_(u"Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data(object_list=self.object_list)
return self.render_to_response(context)
class MultipleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_suffix = '_list'
def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if get_template is overridden.
"""
try:
names = super(MultipleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If the list is a queryset, we'll invent a template name based on the
# app and model name. This name gets put at the end of the template
# name list so that user-supplied names override the automatically-
# generated ones.
if hasattr(self.object_list, 'model'):
opts = self.object_list.model._meta
names.append("%s/%s%s.html" % (opts.app_label, opts.object_name.lower(), self.template_name_suffix))
return names
class ListView(MultipleObjectTemplateResponseMixin, BaseListView):
"""
Render some list of objects, set by `self.model` or `self.queryset`.
`self.queryset` can actually be any iterable of items, not just a queryset.
"""
| 37.583333 | 119 | 0.618796 |
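A quick illustration of the generic view defined in the file above: a minimal sketch of how `ListView` is typically subclassed. The `Book` model, app name and URL hint are hypothetical examples, not part of this dataset row.

```python
# views.py -- hypothetical app; `Book` is an assumed example model
from django.views.generic import ListView
from myapp.models import Book

class BookListView(ListView):
    model = Book        # get_queryset() falls back to Book._default_manager.all()
    paginate_by = 25    # enables paginate_queryset(); adds paginator/page_obj/is_paginated to the context

# get_context_object_name() exposes the queryset as "book_list" in the template context,
# and get_template_names() appends "myapp/book_list.html" as a fallback template name.
```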
81439892e63ea2bed4312e5751486a4f8918f1b5 | 16,482 | py | Python | backend/server/data_anndata/anndata_adaptor.py | ihnorton/cellxgene | 28b526b3fc75a04c03dea29093e5de29b5bc6e01 | ["MIT"] | null | null | null | backend/server/data_anndata/anndata_adaptor.py | ihnorton/cellxgene | 28b526b3fc75a04c03dea29093e5de29b5bc6e01 | ["MIT"] | 7 | 2021-01-07T19:20:57.000Z | 2021-06-15T18:17:55.000Z | backend/server/data_anndata/anndata_adaptor.py | ihnorton/cellxgene | 28b526b3fc75a04c03dea29093e5de29b5bc6e01 | ["MIT"] | null | null | null |
import warnings
from datetime import datetime
import anndata
import numpy as np
from packaging import version
from pandas.core.dtypes.dtypes import CategoricalDtype
from scipy import sparse
from server_timing import Timing as ServerTiming
import backend.common.compute.diffexp_generic as diffexp_generic
from backend.common.colors import convert_anndata_category_colors_to_cxg_category_colors
from backend.common.constants import Axis, MAX_LAYOUTS
from backend.server.common.corpora import corpora_get_props_from_anndata
from backend.common.errors import PrepareError, DatasetAccessError, FilterError
from backend.common.utils.type_conversion_utils import get_schema_type_hint_of_array
from backend.server.compute.scanpy import scanpy_umap
from backend.server.data_common.data_adaptor import DataAdaptor
from backend.common.fbs.matrix import encode_matrix_fbs
anndata_version = version.parse(str(anndata.__version__)).release
def anndata_version_is_pre_070():
major = anndata_version[0]
minor = anndata_version[1] if len(anndata_version) > 1 else 0
return major == 0 and minor < 7
class AnndataAdaptor(DataAdaptor):
def __init__(self, data_locator, app_config=None, dataset_config=None):
super().__init__(data_locator, app_config, dataset_config)
self.data = None
self._load_data(data_locator)
self._validate_and_initialize()
def cleanup(self):
pass
@staticmethod
def pre_load_validation(data_locator):
if data_locator.islocal():
# if data locator is local, apply file system conventions and other "cheap"
# validation checks. If a URI, defer until we actually fetch the data and
# try to read it. Many of these tests don't make sense for URIs (eg, extension-
# based typing).
if not data_locator.exists():
raise DatasetAccessError("does not exist")
if not data_locator.isfile():
raise DatasetAccessError("is not a file")
@staticmethod
def file_size(data_locator):
return data_locator.size() if data_locator.islocal() else 0
@staticmethod
def open(data_locator, app_config, dataset_config=None):
return AnndataAdaptor(data_locator, app_config, dataset_config)
def get_corpora_props(self):
return corpora_get_props_from_anndata(self.data)
def get_name(self):
return "cellxgene anndata adaptor version"
def get_library_versions(self):
return dict(anndata=str(anndata.__version__))
@staticmethod
def _create_unique_column_name(df, col_name_prefix):
"""given the columns of a dataframe, and a name prefix, return a column name which
does not exist in the dataframe, AND which is prefixed by `prefix`
The approach is to append a numeric suffix, starting at zero and increasing by
one, until an unused name is found (eg, prefix_0, prefix_1, ...).
"""
suffix = 0
while f"{col_name_prefix}{suffix}" in df:
suffix += 1
return f"{col_name_prefix}{suffix}"
def _alias_annotation_names(self):
"""
The front-end relies on the existence of a unique, human-readable
index for obs & var (eg, var is typically gene name, obs the cell name).
The user can specify these via the --obs-names and --var-names config.
If they are not specified, use the existing index to create them, giving
the resulting column a unique name (eg, "name").
In both cases, enforce that the result is unique, and communicate the
index column name to the front-end via the obs_names and var_names config
(which is incorporated into the schema).
"""
self.original_obs_index = self.data.obs.index
for (ax_name, var_name) in ((Axis.OBS, "obs"), (Axis.VAR, "var")):
config_name = f"single_dataset__{var_name}_names"
parameter_name = f"{var_name}_names"
name = getattr(self.server_config, config_name)
df_axis = getattr(self.data, str(ax_name))
if name is None:
# Default: create unique names from index
if not df_axis.index.is_unique:
raise KeyError(
f"Values in {ax_name}.index must be unique. "
"Please prepare data to contain unique index values, or specify an "
"alternative with --{ax_name}-name."
)
name = self._create_unique_column_name(df_axis.columns, "name_")
self.parameters[parameter_name] = name
# reset index to simple range; alias name to point at the
# previously specified index.
df_axis.rename_axis(name, inplace=True)
df_axis.reset_index(inplace=True)
elif name in df_axis.columns:
# User has specified alternative column for unique names, and it exists
if not df_axis[name].is_unique:
raise KeyError(
f"Values in {ax_name}.{name} must be unique. " "Please prepare data to contain unique values."
)
df_axis.reset_index(drop=True, inplace=True)
self.parameters[parameter_name] = name
else:
# user specified a non-existent column name
raise KeyError(f"Annotation name {name}, specified in --{ax_name}-name does not exist.")
def _create_schema(self):
self.schema = {
"dataframe": {"nObs": self.cell_count, "nVar": self.gene_count, "type": str(self.data.X.dtype)},
"annotations": {
"obs": {"index": self.parameters.get("obs_names"), "columns": []},
"var": {"index": self.parameters.get("var_names"), "columns": []},
},
"layout": {"obs": []},
}
for ax in Axis:
curr_axis = getattr(self.data, str(ax))
for ann in curr_axis:
ann_schema = {"name": ann, "writable": False}
ann_schema.update(get_schema_type_hint_of_array(curr_axis[ann]))
self.schema["annotations"][ax]["columns"].append(ann_schema)
for layout in self.get_embedding_names():
layout_schema = {"name": layout, "type": "float32", "dims": [f"{layout}_0", f"{layout}_1"]}
self.schema["layout"]["obs"].append(layout_schema)
def get_schema(self):
return self.schema
def _load_data(self, data_locator):
# as of AnnData 0.6.19, backed mode performs initial load fast, but at the
# cost of significantly slower access to X data.
try:
# there is no guarantee data_locator indicates a local file. The AnnData
# API will only consume local file objects. If we get a non-local object,
# make a copy in tmp, and delete it after we load into memory.
with data_locator.local_handle() as lh:
# as of AnnData 0.6.19, backed mode performs initial load fast, but at the
# cost of significantly slower access to X data.
backed = "r" if self.server_config.adaptor__anndata_adaptor__backed else None
self.data = anndata.read_h5ad(lh, backed=backed)
except ValueError:
raise DatasetAccessError(
"File must be in the .h5ad format. Please read "
"https://github.com/theislab/scanpy_usage/blob/master/170505_seurat/info_h5ad.md to "
"learn more about this format. You may be able to convert your file into this format "
"using `cellxgene prepare`, please run `cellxgene prepare --help` for more "
"information."
)
except MemoryError:
raise DatasetAccessError("Out of memory - file is too large for available memory.")
except Exception:
raise DatasetAccessError(
"File not found or is inaccessible. File must be an .h5ad object. "
"Please check your input and try again."
)
def _validate_and_initialize(self):
if anndata_version_is_pre_070():
warnings.warn(
"Use of anndata versions older than 0.7 will have serious issues. Please update to at "
"least anndata 0.7 or later."
)
# var and obs column names must be unique
if not self.data.obs.columns.is_unique or not self.data.var.columns.is_unique:
raise KeyError("All annotation column names must be unique.")
self._alias_annotation_names()
self._validate_data_types()
self.cell_count = self.data.shape[0]
self.gene_count = self.data.shape[1]
self._create_schema()
# heuristic
n_values = self.data.shape[0] * self.data.shape[1]
if (n_values > 1e8 and self.server_config.adaptor__anndata_adaptor__backed is True) or (n_values > 5e8):
self.parameters.update({"diffexp_may_be_slow": True})
def _is_valid_layout(self, arr):
"""return True if this layout data is a valid array for front-end presentation:
* ndarray, dtype float/int/uint
* with shape (n_obs, >= 2)
* with all values finite or NaN (no +Inf or -Inf)
"""
is_valid = type(arr) == np.ndarray and arr.dtype.kind in "fiu"
is_valid = is_valid and arr.shape[0] == self.data.n_obs and arr.shape[1] >= 2
is_valid = is_valid and not np.any(np.isinf(arr)) and not np.all(np.isnan(arr))
return is_valid
def _validate_data_types(self):
# The backed API does not support interrogation of the underlying sparsity or sparse matrix type
# Fake it by asking for a small subarray and testing it. NOTE: if the user has ignored our
# anndata <= 0.7 warning, opted for the --backed option, and specified a large, sparse dataset,
# this "small" indexing request will load the entire X array. This is due to a bug in anndata<=0.7
which will load the entire X matrix to fulfill any slicing request if X is sparse. See
# user warning in _load_data().
X0 = self.data.X[0, 0:1]
if sparse.isspmatrix(X0) and not sparse.isspmatrix_csc(X0):
warnings.warn(
"Anndata data matrix is sparse, but not a CSC (columnar) matrix. "
"Performance may be improved by using CSC."
)
if self.data.X.dtype != "float32":
warnings.warn(
f"Anndata data matrix is in {self.data.X.dtype} format not float32. " f"Precision may be truncated."
)
for ax in Axis:
curr_axis = getattr(self.data, str(ax))
for ann in curr_axis:
datatype = curr_axis[ann].dtype
downcast_map = {
"int64": "int32",
"uint32": "int32",
"uint64": "int32",
"float64": "float32",
}
if datatype in downcast_map:
warnings.warn(
f"Anndata annotation {ax}:{ann} is in unsupported format: {datatype}. "
f"Data will be downcast to {downcast_map[datatype]}."
)
if isinstance(datatype, CategoricalDtype):
category_num = len(curr_axis[ann].dtype.categories)
if category_num > 500 and category_num > self.dataset_config.presentation__max_categories:
warnings.warn(
f"{str(ax).title()} annotation '{ann}' has {category_num} categories, this may be "
f"cumbersome or slow to display. We recommend setting the "
f"--max-category-items option to 500, this will hide categorical "
f"annotations with more than 500 categories in the UI"
)
def annotation_to_fbs_matrix(self, axis, fields=None, labels=None):
if axis == Axis.OBS:
if labels is not None and not labels.empty:
df = self.data.obs.join(labels, self.parameters.get("obs_names"))
else:
df = self.data.obs
else:
df = self.data.var
if fields is not None and len(fields) > 0:
df = df[fields]
return encode_matrix_fbs(df, col_idx=df.columns)
def get_embedding_names(self):
"""
Return pre-computed embeddings.
function:
a) generate list of default layouts
b) validate layouts are legal. remove/warn on any that are not
c) cap total list of layouts at global const MAX_LAYOUTS
"""
# load default layouts from the data.
layouts = self.dataset_config.embeddings__names
if layouts is None or len(layouts) == 0:
layouts = [key[2:] for key in self.data.obsm_keys() if type(key) == str and key.startswith("X_")]
# remove invalid layouts
valid_layouts = []
obsm_keys = self.data.obsm_keys()
for layout in layouts:
layout_name = f"X_{layout}"
if layout_name not in obsm_keys:
warnings.warn(f"Ignoring unknown layout name: {layout}.")
elif not self._is_valid_layout(self.data.obsm[layout_name]):
warnings.warn(f"Ignoring layout due to malformed shape or data type: {layout}")
else:
valid_layouts.append(layout)
if len(valid_layouts) == 0:
raise PrepareError("No valid layout data.")
# cap layouts to MAX_LAYOUTS
return valid_layouts[0:MAX_LAYOUTS]
def get_embedding_array(self, ename, dims=2):
full_embedding = self.data.obsm[f"X_{ename}"]
return full_embedding[:, 0:dims]
def compute_embedding(self, method, obsFilter):
if Axis.VAR in obsFilter:
raise FilterError("Observation filters may not contain variable conditions")
if method != "umap":
raise NotImplementedError(f"re-embedding method {method} is not available.")
try:
shape = self.get_shape()
obs_mask = self._axis_filter_to_mask(Axis.OBS, obsFilter["obs"], shape[0])
except (KeyError, IndexError):
raise FilterError("Error parsing filter")
with ServerTiming.time("layout.compute"):
X_umap = scanpy_umap(self.data, obs_mask)
# Server picks the re-embedding name, which must not collide with any other
# embedding name generated by this backend.
name = f"reembed:{method}_{datetime.now().isoformat(timespec='milliseconds')}"
dims = [f"{name}_0", f"{name}_1"]
layout_schema = {"name": name, "type": "float32", "dims": dims}
self.schema["layout"]["obs"].append(layout_schema)
self.data.obsm[f"X_{name}"] = X_umap
return layout_schema
def compute_diffexp_ttest(self, maskA, maskB, top_n=None, lfc_cutoff=None):
if top_n is None:
top_n = self.dataset_config.diffexp__top_n
if lfc_cutoff is None:
lfc_cutoff = self.dataset_config.diffexp__lfc_cutoff
return diffexp_generic.diffexp_ttest(self, maskA, maskB, top_n, lfc_cutoff)
def get_colors(self):
return convert_anndata_category_colors_to_cxg_category_colors(self.data)
def get_X_array(self, obs_mask=None, var_mask=None):
if obs_mask is None:
obs_mask = slice(None)
if var_mask is None:
var_mask = slice(None)
X = self.data.X[obs_mask, var_mask]
return X
def get_shape(self):
return self.data.shape
def query_var_array(self, term_name):
return getattr(self.data.var, term_name)
def query_obs_array(self, term_name):
return getattr(self.data.obs, term_name)
def get_obs_index(self):
name = self.server_config.single_dataset__obs_names
if name is None:
return self.original_obs_index
else:
return self.data.obs[name]
def get_obs_columns(self):
return self.data.obs.columns
def get_obs_keys(self):
# return list of keys
return self.data.obs.keys().to_list()
def get_var_keys(self):
# return list of keys
return self.data.var.keys().to_list()
| 44.545946 | 118 | 0.620798 |
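The `_create_unique_column_name` helper in the file above implements a small naming scheme: append an increasing numeric suffix until the name is unused. A standalone sketch of the same idea (pandas only; the column names are made up for illustration):

```python
import pandas as pd

def unique_column_name(df: pd.DataFrame, prefix: str) -> str:
    """Return `prefix<N>` for the smallest N that is not already a column name."""
    suffix = 0
    while f"{prefix}{suffix}" in df.columns:
        suffix += 1
    return f"{prefix}{suffix}"

df = pd.DataFrame({"name_0": [1, 2], "name_1": [3, 4]})
print(unique_column_name(df, "name_"))  # -> "name_2"
```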
4586055c55a866052b5271011ce8fc326fe0c676 | 2,086 | py | Python | nets/DDI_edge_classification/gin_net.py | xyc1207/benchmarking-gnns | 9ba25a2825e8c155a93730d6e8f8752090292942 | ["MIT"] | 1,809 | 2020-02-28T11:13:52.000Z | 2022-03-31T11:44:58.000Z | nets/DDI_edge_classification/gin_net.py | xyc1207/benchmarking-gnns | 9ba25a2825e8c155a93730d6e8f8752090292942 | ["MIT"] | 61 | 2020-03-08T21:24:25.000Z | 2022-03-25T21:48:17.000Z | nets/DDI_edge_classification/gin_net.py | xyc1207/benchmarking-gnns | 9ba25a2825e8c155a93730d6e8f8752090292942 | ["MIT"] | 348 | 2020-03-03T17:00:21.000Z | 2022-03-31T03:09:56.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
from layers.mlp_readout_layer import MLPReadout
class GINNet(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
dropout = net_params['dropout']
self.n_layers = net_params['L']
n_mlp_layers = net_params['n_mlp_GIN'] # GIN
learn_eps = net_params['learn_eps_GIN'] # GIN
neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.device = net_params['device']
self.embedding_h = nn.Linear(in_dim, hidden_dim)
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
for layer in range(self.n_layers):
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
dropout, batch_norm, residual, 0, learn_eps))
self.MLP_layer = MLPReadout(2*hidden_dim, 1)
def forward(self, g, h, e):
h = self.embedding_h(h.float())
for conv in self.ginlayers:
h = conv(g, h)
g.ndata['h'] = h
return h
def edge_predictor(self, h_i, h_j):
x = torch.cat([h_i, h_j], dim=1)
x = self.MLP_layer(x)
return torch.sigmoid(x)
def loss(self, pos_out, neg_out):
pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
loss = pos_loss + neg_loss
return loss
| 33.111111 | 113 | 0.598274 |
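The `edge_predictor`/`loss` pair above scores an edge from the concatenated embeddings of its two endpoints and trains with a binary log-loss over positive and negative edge samples. A minimal PyTorch-only sketch of that loss on dummy scores (no DGL or the repository's GIN layers required):

```python
import torch

def edge_loss(pos_out: torch.Tensor, neg_out: torch.Tensor) -> torch.Tensor:
    # pos_out / neg_out hold sigmoid scores in (0, 1) for positive / negative edges
    pos_loss = -torch.log(pos_out + 1e-15).mean()      # push positive-edge scores toward 1
    neg_loss = -torch.log(1 - neg_out + 1e-15).mean()  # push negative-edge scores toward 0
    return pos_loss + neg_loss

pos = torch.sigmoid(torch.randn(8, 1))  # dummy positive-edge scores
neg = torch.sigmoid(torch.randn(8, 1))  # dummy negative-edge scores
print(edge_loss(pos, neg))
```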
89e00af270a0209d8d8802137d3ec9dfd3d43db8 | 16,459 | py | Python | pandas/tests/window/test_numba.py | rendner/pandas | 47494a48edf25d5a49b0fb5b896b454c15c83595 | ["BSD-3-Clause"] | 1 | 2019-11-01T08:44:40.000Z | 2019-11-01T08:44:40.000Z | pandas/tests/window/test_numba.py | sdrees/pandas | bef454f0893efe2fa5e49317635f89c03467d16e | ["BSD-3-Clause"] | null | null | null | pandas/tests/window/test_numba.py | sdrees/pandas | bef454f0893efe2fa5e49317635f89c03467d16e | ["BSD-3-Clause"] | null | null | null |
import numpy as np
import pytest
from pandas.compat import (
is_ci_environment,
is_platform_mac,
is_platform_windows,
)
from pandas.errors import NumbaUtilError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
option_context,
to_datetime,
)
import pandas._testing as tm
# TODO(GH#44584): Mark these as pytest.mark.single_cpu
pytestmark = pytest.mark.skipif(
is_ci_environment() and (is_platform_windows() or is_platform_mac()),
reason="On GHA CI, Windows can fail with "
"'Windows fatal exception: stack overflow' "
"and MacOS can timeout",
)
@pytest.fixture(params=["single", "table"])
def method(request):
"""method keyword in rolling/expanding/ewm constructor"""
return request.param
@pytest.fixture(
params=[
["sum", {}],
["mean", {}],
["median", {}],
["max", {}],
["min", {}],
["var", {}],
["var", {"ddof": 0}],
["std", {}],
["std", {"ddof": 0}],
]
)
def arithmetic_numba_supported_operators(request):
return request.param
@td.skip_if_no("numba")
@pytest.mark.filterwarnings("ignore")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestEngine:
@pytest.mark.parametrize("jit", [True, False])
def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center, step):
def f(x, *args):
arg_sum = 0
for arg in args:
arg_sum += arg
return np.mean(x) + arg_sum
if jit:
import numba
f = numba.jit(f)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
args = (2,)
s = Series(range(10))
result = s.rolling(2, center=center, step=step).apply(
f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = s.rolling(2, center=center, step=step).apply(
f, engine="cython", args=args, raw=True
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
DataFrame(np.eye(5)),
DataFrame(
[
[5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 3],
[5, 7, 7, 7, np.nan, np.inf, 7, 3, 3, 3],
[np.nan, np.nan, 5, 6, 7, 5, 5, 5, 5, 5],
]
).T,
Series(range(5), name="foo"),
Series([20, 10, 10, np.inf, 1, 1, 2, 3]),
Series([20, 10, 10, np.nan, 10, 1, 2, 3]),
],
)
def test_numba_vs_cython_rolling_methods(
self,
data,
nogil,
parallel,
nopython,
arithmetic_numba_supported_operators,
step,
):
method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = data.rolling(3, step=step)
result = getattr(roll, method)(
engine="numba", engine_kwargs=engine_kwargs, **kwargs
)
expected = getattr(roll, method)(engine="cython", **kwargs)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"data", [DataFrame(np.eye(5)), Series(range(5), name="foo")]
)
def test_numba_vs_cython_expanding_methods(
self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
data = DataFrame(np.eye(5))
expand = data.expanding()
result = getattr(expand, method)(
engine="numba", engine_kwargs=engine_kwargs, **kwargs
)
expected = getattr(expand, method)(engine="cython", **kwargs)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("jit", [True, False])
def test_cache_apply(self, jit, nogil, parallel, nopython, step):
# Test that the functions are cached correctly if we switch functions
def func_1(x):
return np.mean(x) + 4
def func_2(x):
return np.std(x) * 5
if jit:
import numba
func_1 = numba.jit(func_1)
func_2 = numba.jit(func_2)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = Series(range(10)).rolling(2, step=step)
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
result = roll.apply(
func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_2, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
# This run should use the cached func_1
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"window,window_kwargs",
[
["rolling", {"window": 3, "min_periods": 0}],
["expanding", {}],
],
)
def test_dont_cache_args(
self, window, window_kwargs, nogil, parallel, nopython, method
):
# GH 42287
def add(values, x):
return np.sum(values) + x
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
df = DataFrame({"value": [0, 0, 0]})
result = getattr(df, window)(method=method, **window_kwargs).apply(
add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(1,)
)
expected = DataFrame({"value": [1.0, 1.0, 1.0]})
tm.assert_frame_equal(result, expected)
result = getattr(df, window)(method=method, **window_kwargs).apply(
add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(2,)
)
expected = DataFrame({"value": [2.0, 2.0, 2.0]})
tm.assert_frame_equal(result, expected)
def test_dont_cache_engine_kwargs(self):
# If the user passes a different set of engine_kwargs don't return the same
# jitted function
nogil = False
parallel = True
nopython = True
def func(x):
return nogil + parallel + nopython
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
df = DataFrame({"value": [0, 0, 0]})
result = df.rolling(1).apply(
func, raw=True, engine="numba", engine_kwargs=engine_kwargs
)
expected = DataFrame({"value": [2.0, 2.0, 2.0]})
tm.assert_frame_equal(result, expected)
parallel = False
engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
result = df.rolling(1).apply(
func, raw=True, engine="numba", engine_kwargs=engine_kwargs
)
expected = DataFrame({"value": [1.0, 1.0, 1.0]})
tm.assert_frame_equal(result, expected)
@td.skip_if_no("numba")
class TestEWM:
@pytest.mark.parametrize(
"grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]
)
@pytest.mark.parametrize("method", ["mean", "sum"])
def test_invalid_engine(self, grouper, method):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
with pytest.raises(ValueError, match="engine must be either"):
getattr(grouper(df).ewm(com=1.0), method)(engine="foo")
@pytest.mark.parametrize(
"grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]
)
@pytest.mark.parametrize("method", ["mean", "sum"])
def test_invalid_engine_kwargs(self, grouper, method):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
with pytest.raises(ValueError, match="cython engine does not"):
getattr(grouper(df).ewm(com=1.0), method)(
engine="cython", engine_kwargs={"nopython": True}
)
@pytest.mark.parametrize("grouper", ["None", "groupby"])
@pytest.mark.parametrize("method", ["mean", "sum"])
def test_cython_vs_numba(
self, grouper, method, nogil, parallel, nopython, ignore_na, adjust
):
if grouper == "None":
grouper = lambda x: x
warn = FutureWarning
else:
grouper = lambda x: x.groupby("A")
warn = None
if method == "sum":
adjust = True
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
ewm = grouper(df).ewm(com=1.0, adjust=adjust, ignore_na=ignore_na)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
with tm.assert_produces_warning(warn, match="nuisance"):
# GH#42738
result = getattr(ewm, method)(engine="numba", engine_kwargs=engine_kwargs)
expected = getattr(ewm, method)(engine="cython")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("grouper", ["None", "groupby"])
def test_cython_vs_numba_times(self, grouper, nogil, parallel, nopython, ignore_na):
# GH 40951
if grouper == "None":
grouper = lambda x: x
warn = FutureWarning
else:
grouper = lambda x: x.groupby("A")
warn = None
halflife = "23 days"
times = to_datetime(
[
"2020-01-01",
"2020-01-01",
"2020-01-02",
"2020-01-10",
"2020-02-23",
"2020-01-03",
]
)
df = DataFrame({"A": ["a", "b", "a", "b", "b", "a"], "B": [0, 0, 1, 1, 2, 2]})
ewm = grouper(df).ewm(
halflife=halflife, adjust=True, ignore_na=ignore_na, times=times
)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
with tm.assert_produces_warning(warn, match="nuisance"):
# GH#42738
result = ewm.mean(engine="numba", engine_kwargs=engine_kwargs)
expected = ewm.mean(engine="cython")
tm.assert_frame_equal(result, expected)
@td.skip_if_no("numba")
def test_use_global_config():
def f(x):
return np.mean(x) + 2
s = Series(range(10))
with option_context("compute.use_numba", True):
result = s.rolling(2).apply(f, engine=None, raw=True)
expected = s.rolling(2).apply(f, engine="numba", raw=True)
tm.assert_series_equal(expected, result)
@td.skip_if_no("numba")
def test_invalid_kwargs_nopython():
with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"):
Series(range(1)).rolling(1).apply(
lambda x: x, kwargs={"a": 1}, engine="numba", raw=True
)
@td.skip_if_no("numba")
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestTableMethod:
def test_table_series_valueerror(self):
def f(x):
return np.sum(x, axis=0) + 1
with pytest.raises(
ValueError, match="method='table' not applicable for Series objects."
):
Series(range(1)).rolling(1, method="table").apply(
f, engine="numba", raw=True
)
def test_table_method_rolling_methods(
self,
axis,
nogil,
parallel,
nopython,
arithmetic_numba_supported_operators,
step,
):
method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(3))
roll_table = df.rolling(2, method="table", axis=axis, min_periods=0, step=step)
if method in ("var", "std"):
with pytest.raises(NotImplementedError, match=f"{method} not supported"):
getattr(roll_table, method)(
engine_kwargs=engine_kwargs, engine="numba", **kwargs
)
else:
roll_single = df.rolling(
2, method="single", axis=axis, min_periods=0, step=step
)
result = getattr(roll_table, method)(
engine_kwargs=engine_kwargs, engine="numba", **kwargs
)
expected = getattr(roll_single, method)(
engine_kwargs=engine_kwargs, engine="numba", **kwargs
)
tm.assert_frame_equal(result, expected)
def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython, step):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
def f(x):
return np.sum(x, axis=0) + 1
df = DataFrame(np.eye(3))
result = df.rolling(
2, method="table", axis=axis, min_periods=0, step=step
).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba")
expected = df.rolling(
2, method="single", axis=axis, min_periods=0, step=step
).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba")
tm.assert_frame_equal(result, expected)
def test_table_method_rolling_weighted_mean(self, step):
def weighted_mean(x):
arr = np.ones((1, x.shape[1]))
arr[:, :2] = (x[:, :2] * x[:, 2]).sum(axis=0) / x[:, 2].sum()
return arr
df = DataFrame([[1, 2, 0.6], [2, 3, 0.4], [3, 4, 0.2], [4, 5, 0.7]])
result = df.rolling(2, method="table", min_periods=0, step=step).apply(
weighted_mean, raw=True, engine="numba"
)
expected = DataFrame(
[
[1.0, 2.0, 1.0],
[1.8, 2.0, 1.0],
[3.333333, 2.333333, 1.0],
[1.555556, 7, 1.0],
]
)[::step]
tm.assert_frame_equal(result, expected)
def test_table_method_expanding_apply(self, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
def f(x):
return np.sum(x, axis=0) + 1
df = DataFrame(np.eye(3))
result = df.expanding(method="table", axis=axis).apply(
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
expected = df.expanding(method="single", axis=axis).apply(
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
def test_table_method_expanding_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method, kwargs = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(3))
expand_table = df.expanding(method="table", axis=axis)
if method in ("var", "std"):
with pytest.raises(NotImplementedError, match=f"{method} not supported"):
getattr(expand_table, method)(
engine_kwargs=engine_kwargs, engine="numba", **kwargs
)
else:
expand_single = df.expanding(method="single", axis=axis)
result = getattr(expand_table, method)(
engine_kwargs=engine_kwargs, engine="numba", **kwargs
)
expected = getattr(expand_single, method)(
engine_kwargs=engine_kwargs, engine="numba", **kwargs
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data", [np.eye(3), np.ones((2, 3)), np.ones((3, 2))])
@pytest.mark.parametrize("method", ["mean", "sum"])
def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(data)
result = getattr(df.ewm(com=1, method="table", axis=axis), method)(
engine_kwargs=engine_kwargs, engine="numba"
)
expected = getattr(df.ewm(com=1, method="single", axis=axis), method)(
engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
| 35.168803 | 88 | 0.57701 |
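The tests above compare pandas' Cython and Numba execution engines for rolling/expanding/ewm operations. A minimal sketch of the pattern they exercise, assuming a pandas version with the numba engine available and numba installed:

```python
import numpy as np
import pandas as pd

def f(x):
    return np.mean(x) + 2  # simple reduction applied to each rolling window

s = pd.Series(range(10))
engine_kwargs = {"nopython": True, "nogil": False, "parallel": False}

numba_result = s.rolling(2).apply(f, raw=True, engine="numba", engine_kwargs=engine_kwargs)
cython_result = s.rolling(2).apply(f, raw=True, engine="cython")
pd.testing.assert_series_equal(numba_result, cython_result)  # both engines should agree
```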
588c5577200619a2249fe28af33d4faa87e5b8d7 | 32 | py | Python | code/abc159_c_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | ["MIT"] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc159_c_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | ["MIT"] | null | null | null | code/abc159_c_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | ["MIT"] | null | null | null |
L = int(input())
print((L/3)**3) | 16 | 16 | 0.53125 |
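For context on the one-liner above (assuming the task is the usual statement of this problem: maximize the volume of a rectangular box whose three dimensions sum to L), the AM–GM inequality gives the closed form it prints:

\[ \max_{x+y+z=L,\; x,y,z \ge 0} x\,y\,z \;=\; \Big(\tfrac{L}{3}\Big)^{3}, \quad \text{attained at } x=y=z=\tfrac{L}{3}. \]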
2a7491aa50de5b897a54c64458e17087567a855c | 765 | py | Python | app/core/admin.py | kfahmi/recipe-app-api | fa678005a3615bff92b94d5e852526fcb00793ed | ["MIT"] | null | null | null | app/core/admin.py | kfahmi/recipe-app-api | fa678005a3615bff92b94d5e852526fcb00793ed | ["MIT"] | null | null | null | app/core/admin.py | kfahmi/recipe-app-api | fa678005a3615bff92b94d5e852526fcb00793ed | ["MIT"] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from core import models
from django.utils.translation import gettext as _
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name',)}),
(
_('Permissions'),
{'fields': ('is_active', 'is_staff', 'is_superuser')}
),
(_('Important dates'), {'fields': ('last_login',)})
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin)
| 27.321429 | 65 | 0.562092 |
50482da70ec5161c1cd8359d91c9ddd3e89b4f59 | 14,528 | py | Python | tools/testrunner/standard_runner.py | dongAxis/to_be_a_v8_master | a63f7201063bf950483e373345b169f349dc9c4f | ["BSD-3-Clause"] | 192 | 2020-07-15T23:59:19.000Z | 2021-03-07T10:45:16.000Z | tools/testrunner/standard_runner.py | maldiohead/v8 | 03ff1b6dd6214d9c2f48c059184f26c4166a12ed | ["BSD-3-Clause"] | 333 | 2020-07-15T17:06:05.000Z | 2021-03-15T12:13:09.000Z | tools/testrunner/standard_runner.py | maldiohead/v8 | 03ff1b6dd6214d9c2f48c059184f26c4166a12ed | ["BSD-3-Clause"] | 25 | 2020-07-16T06:12:02.000Z | 2021-01-07T13:33:51.000Z |
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import absolute_import
from __future__ import print_function
from functools import reduce
import datetime
import json
import os
import sys
import tempfile
# Adds testrunner to the path, hence it has to be imported at the beginning.
from . import base_runner
from testrunner.local import utils
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.seed import SeedProc
from testrunner.testproc.sequence import SequenceProc
from testrunner.testproc.variant import VariantProc
VARIANTS = ['default']
MORE_VARIANTS = [
'jitless',
'stress',
'stress_js_bg_compile_wasm_code_gc',
'stress_incremental_marking',
]
VARIANT_ALIASES = {
# The default for developer workstations.
'dev': VARIANTS,
# Additional variants, run on all bots.
'more': MORE_VARIANTS,
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop',
'instruction_scheduling', 'always_sparkplug'],
}
# Extra flags passed to all tests using the standard test runner.
EXTRA_DEFAULT_FLAGS = ['--testing-d8-test-runner']
GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
'--concurrent-recompilation-queue-length=64',
'--concurrent-recompilation-delay=500',
'--concurrent-recompilation',
'--stress-flush-code', '--flush-bytecode',
'--wasm-code-gc', '--stress-wasm-code-gc']
RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
'--stress-compaction-random']
PREDICTABLE_WRAPPER = os.path.join(
base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self, *args, **kwargs):
super(StandardTestRunner, self).__init__(*args, **kwargs)
self.sancov_dir = None
self._variants = None
@property
def framework_name(self):
return 'standard_runner'
def _get_default_suite_names(self):
return ['default']
def _add_parser_options(self, parser):
parser.add_option('--novfp3',
help='Indicates that V8 was compiled without VFP3'
' support',
default=False, action='store_true')
# Variants
parser.add_option('--no-variants', '--novariants',
help='Deprecated. '
'Equivalent to passing --variants=default',
default=False, dest='no_variants', action='store_true')
parser.add_option('--variants',
help='Comma-separated list of testing variants;'
' default: "%s"' % ','.join(VARIANTS))
parser.add_option('--exhaustive-variants',
default=False, action='store_true',
help='Deprecated. '
'Equivalent to passing --variants=exhaustive')
# Filters
parser.add_option('--slow-tests', default='dontcare',
help='Regard slow tests (run|skip|dontcare)')
parser.add_option('--pass-fail-tests', default='dontcare',
help='Regard pass|fail tests (run|skip|dontcare)')
parser.add_option('--quickcheck', default=False, action='store_true',
help=('Quick check mode (skip slow tests)'))
# Stress modes
parser.add_option('--gc-stress',
help='Switch on GC stress mode',
default=False, action='store_true')
parser.add_option('--random-gc-stress',
help='Switch on random GC stress mode',
default=False, action='store_true')
parser.add_option('--random-seed-stress-count', default=1, type='int',
dest='random_seed_stress_count',
help='Number of runs with different random seeds. Only '
'with test processors: 0 means infinite '
'generation.')
# Extra features.
parser.add_option('--max-heavy-tests', default=1, type='int',
help='Maximum number of heavy tests run in parallel')
parser.add_option('--time', help='Print timing information after running',
default=False, action='store_true')
# Noop
parser.add_option('--cfi-vptr',
help='Run tests with UBSAN cfi_vptr option.',
default=False, action='store_true')
parser.add_option('--no-sorting', '--nosorting',
help='Don\'t sort tests according to duration of last'
' run.',
default=False, dest='no_sorting', action='store_true')
parser.add_option('--no-presubmit', '--nopresubmit',
help='Skip presubmit checks (deprecated)',
default=False, dest='no_presubmit', action='store_true')
# Unimplemented for test processors
parser.add_option('--sancov-dir',
help='Directory where to collect coverage data')
parser.add_option('--cat', help='Print the source of the tests',
default=False, action='store_true')
parser.add_option('--flakiness-results',
help='Path to a file for storing flakiness json.')
parser.add_option('--warn-unused', help='Report unused rules',
default=False, action='store_true')
parser.add_option('--report', default=False, action='store_true',
help='Print a summary of the tests to be run')
def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
if not os.path.exists(self.sancov_dir):
print('sancov-dir %s doesn\'t exist' % self.sancov_dir)
raise base_runner.TestRunnerError()
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
if options.random_gc_stress:
options.extra_flags += RANDOM_GC_STRESS_FLAGS
if self.build_config.asan:
options.extra_flags.append('--invoke-weak-callbacks')
if options.novfp3:
options.extra_flags.append('--noenable-vfp3')
if options.no_variants: # pragma: no cover
print ('Option --no-variants is deprecated. '
'Pass --variants=default instead.')
assert not options.variants
options.variants = 'default'
if options.exhaustive_variants: # pragma: no cover
# TODO(machenbach): Switch infra to --variants=exhaustive after M65.
print ('Option --exhaustive-variants is deprecated. '
'Pass --variants=exhaustive instead.')
# This is used on many bots. It includes a larger set of default
# variants.
# Other options for manipulating variants still apply afterwards.
assert not options.variants
options.variants = 'exhaustive'
if options.quickcheck:
assert not options.variants
options.variants = 'stress,default'
options.slow_tests = 'skip'
options.pass_fail_tests = 'skip'
if self.build_config.predictable:
options.variants = 'default'
options.extra_flags.append('--predictable')
options.extra_flags.append('--verify-predictable')
options.extra_flags.append('--no-inline-new')
# Add predictable wrapper to command prefix.
options.command_prefix = (
[sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
if self.build_config.msan:
options.variants = 'default'
if options.variants == 'infra_staging':
options.variants = 'exhaustive'
self._variants = self._parse_variants(options.variants)
def CheckTestMode(name, option): # pragma: no cover
if option not in ['run', 'skip', 'dontcare']:
print('Unknown %s mode %s' % (name, option))
raise base_runner.TestRunnerError()
CheckTestMode('slow test', options.slow_tests)
CheckTestMode('pass|fail test', options.pass_fail_tests)
if self.build_config.no_i18n:
base_runner.TEST_MAP['bot_default'].remove('intl')
base_runner.TEST_MAP['default'].remove('intl')
# TODO(machenbach): uncomment after infra side lands.
# base_runner.TEST_MAP['d8_default'].remove('intl')
if options.time and not options.json_test_results:
# We retrieve the slowest tests from the JSON output file, so create
# a temporary output file (which will automatically get deleted on exit)
# if the user didn't specify one.
self._temporary_json_output_file = tempfile.NamedTemporaryFile(
prefix="v8-test-runner-")
options.json_test_results = self._temporary_json_output_file.name
def _runner_flags(self):
return EXTRA_DEFAULT_FLAGS
def _parse_variants(self, aliases_str):
# Use developer defaults if no variant was specified.
aliases_str = aliases_str or 'dev'
aliases = aliases_str.split(',')
user_variants = set(reduce(
list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
result = [v for v in ALL_VARIANTS if v in user_variants]
if len(result) == len(user_variants):
return result
for v in user_variants:
if v not in ALL_VARIANTS:
print('Unknown variant: %s' % v)
print(' Available variants: %s' % ALL_VARIANTS)
print(' Available variant aliases: %s' % VARIANT_ALIASES.keys());
raise base_runner.TestRunnerError()
assert False, 'Unreachable'
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
symbolizer_option = self._get_external_symbolizer_option()
if self.sancov_dir:
os.environ['ASAN_OPTIONS'] = ':'.join([
'coverage=1',
'coverage_dir=%s' % self.sancov_dir,
symbolizer_option,
'allow_user_segv_handler=1',
])
def _get_statusfile_variables(self, options):
variables = (
super(StandardTestRunner, self)._get_statusfile_variables(options))
variables.update({
'gc_stress': options.gc_stress or options.random_gc_stress,
'gc_fuzzer': options.random_gc_stress,
'novfp3': options.novfp3,
})
return variables
def _create_sequence_proc(self, options):
"""Create processor for sequencing heavy tests on swarming."""
return SequenceProc(options.max_heavy_tests) if options.swarming else None
def _do_execute(self, tests, args, options):
jobs = options.j
print('>>> Running with test processors')
loader = LoadProc(tests)
results = self._create_result_tracker(options)
indicators = self._create_progress_indicators(
tests.test_count_estimate, options)
outproc_factory = None
if self.build_config.predictable:
outproc_factory = predictable.get_outproc
execproc = ExecutionProc(jobs, outproc_factory)
sigproc = self._create_signal_proc()
procs = [
loader,
NameFilterProc(args) if args else None,
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_predictable_filter(),
self._create_shard_proc(options),
self._create_seed_proc(options),
self._create_sequence_proc(options),
sigproc,
] + indicators + [
results,
self._create_timeout_proc(options),
self._create_rerun_proc(options),
execproc,
]
self._prepare_procs(procs)
loader.load_initial_tests(initial_batch_size=options.j * 2)
# This starts up worker processes and blocks until all tests are
# processed.
execproc.run()
for indicator in indicators:
indicator.finished()
if tests.test_count_estimate:
percentage = float(results.total) / tests.test_count_estimate * 100
else:
percentage = 0
print (('>>> %d base tests produced %d (%d%s)'
' non-filtered tests') % (
tests.test_count_estimate, results.total, percentage, '%'))
print('>>> %d tests ran' % (results.total - results.remaining))
exit_code = utils.EXIT_CODE_PASS
if results.failed:
exit_code = utils.EXIT_CODE_FAILURES
if not results.total:
exit_code = utils.EXIT_CODE_NO_TESTS
if options.time:
self._print_durations(options)
# Indicate if a SIGINT or SIGTERM happened.
return max(exit_code, sigproc.exit_code)
def _print_durations(self, options):
def format_duration(duration_in_seconds):
duration = datetime.timedelta(seconds=duration_in_seconds)
time = (datetime.datetime.min + duration).time()
return time.strftime('%M:%S:') + '%03i' % int(time.microsecond / 1000)
def _duration_results_text(test):
return [
'Test: %s' % test['name'],
'Flags: %s' % ' '.join(test['flags']),
'Command: %s' % test['command'],
'Duration: %s' % format_duration(test['duration']),
]
assert os.path.exists(options.json_test_results)
with open(options.json_test_results, "r") as f:
output = json.load(f)
lines = []
for test in output['slowest_tests']:
suffix = ''
if test.get('marked_slow') is False:
suffix = ' *'
lines.append(
'%s %s%s' % (format_duration(test['duration']),
test['name'], suffix))
# Slowest tests duration details.
lines.extend(['', 'Details:', ''])
for test in output['slowest_tests']:
lines.extend(_duration_results_text(test))
print("\n".join(lines))
def _create_predictable_filter(self):
if not self.build_config.predictable:
return None
return predictable.PredictableFilterProc()
def _create_seed_proc(self, options):
if options.random_seed_stress_count == 1:
return None
return SeedProc(options.random_seed_stress_count, options.random_seed,
options.j * 4)
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
| 36.411028 | 79 | 0.655218 |
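`_parse_variants` in the file above expands comma-separated variant aliases into concrete variant lists and validates them against `ALL_VARIANTS`. A self-contained sketch of that expansion step, with toy alias tables standing in for the real ones:

```python
from functools import reduce

ALL_VARIANTS = ["default", "stress", "jitless"]  # toy stand-in for testrunner.local.variants
VARIANT_ALIASES = {
    "dev": ["default"],
    "more": ["stress", "jitless"],
    "exhaustive": ["stress", "jitless", "default"],
}

def parse_variants(aliases_str: str) -> list:
    aliases = (aliases_str or "dev").split(",")
    requested = set(reduce(list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
    unknown = requested - set(ALL_VARIANTS)
    if unknown:
        raise ValueError(f"Unknown variants: {sorted(unknown)}")
    return [v for v in ALL_VARIANTS if v in requested]  # preserve canonical ordering

print(parse_variants("dev,stress"))  # -> ['default', 'stress']
```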
70d579e1c62e71a119a2a28d9d6e0073c1cf4b6c | 6,370 | py | Python | tests/integration/test_crossbar_integration.py | simonsobs/ocs | 24c6a617ea3038fccdb40bfd602ffd541415a476 | ["BSD-2-Clause"] | 9 | 2019-09-02T14:17:06.000Z | 2022-03-11T21:26:34.000Z | tests/integration/test_crossbar_integration.py | simonsobs/ocs | 24c6a617ea3038fccdb40bfd602ffd541415a476 | ["BSD-2-Clause"] | 158 | 2019-05-17T17:54:37.000Z | 2022-03-14T19:29:59.000Z | tests/integration/test_crossbar_integration.py | simonsobs/ocs | 24c6a617ea3038fccdb40bfd602ffd541415a476 | ["BSD-2-Clause"] | 1 | 2021-07-16T13:21:45.000Z | 2021-07-16T13:21:45.000Z |
import os
import time
import pytest
import docker
from ocs.ocs_client import OCSClient
from integration.util import create_crossbar_fixture, restart_crossbar
try:
from so3g import hk
except ModuleNotFoundError as e:
print(f"Unable to import so3g: {e}")
pytest_plugins = ("docker_compose",)
wait_for_crossbar = create_crossbar_fixture()
CROSSBAR_SLEEP = 5 # time to wait before trying to make first connection
@pytest.mark.spt3g
@pytest.mark.dependency(name="so3g")
def test_so3g_spt3g_import():
"""Test that we can import so3g. Used to skip tests dependent on
this import.
"""
import so3g
# Just to prevent flake8 from complaining
print(so3g.__file__)
# @pytest.mark.integtest
# def test_testing(wait_for_crossbar):
# "Just testing if the docker-compose/crossbar wait fixture is working."
# assert True
@pytest.mark.integtest
def test_fake_data_after_crossbar_restart(wait_for_crossbar):
"""Restart the crossbar server, then test whether we can issue a command to
run a task, then check the session.data on the acq process to see if it's
updating with new data.
The task call wouldn't work if we didn't reconnect to the crossbar server,
and the acq process should still be running.
"""
# give a few seconds for things to make first connection
time.sleep(CROSSBAR_SLEEP)
restart_crossbar()
now = time.time()
# Set OCS_CONFIG_DIR environment variable
os.environ['OCS_CONFIG_DIR'] = os.getcwd()
# Check fake data Agent is accessible and producing new data.
therm_client = OCSClient('fake-data1', args=[])
# Make sure we can call a task.
therm_client.delay_task()
therm_client.delay_task.wait()
response = therm_client.acq.status()
assert response.session.get('data').get('timestamp') > now
# @pytest.mark.integtest
# def test_influxdb_publisher_after_crossbar_restart(wait_for_crossbar):
# """Test that the InfluxDB publisher reconnects after a crossbar restart and
# continues to publish data to the InfluxDB.
#
# """
# pass
@pytest.mark.dependency(depends=["so3g"])
@pytest.mark.integtest
def test_aggregator_after_crossbar_restart(wait_for_crossbar):
"""Test that the aggregator reconnects after a crossbar restart and that
data from after the reconnection makes it into the latest .g3 file.
"""
# Set OCS_CONFIG_DIR environment variable
os.environ['OCS_CONFIG_DIR'] = os.getcwd()
# record first file being written by aggregator
# give a few seconds for things to make first connection
time.sleep(CROSSBAR_SLEEP)
agg_client = OCSClient('aggregator', args=[])
status = agg_client.record.status()
file00 = status.session.get('data').get('current_file')
assert file00 is not None
# restart crossbar
restart_crossbar()
# record current time
now = time.time()
# wait for file rotation by checking session.data's "current_file" value
status = agg_client.record.status()
file01 = status.session.get('data').get('current_file')
iterations = 0
while file01 == file00:
time.sleep(1)
status = agg_client.record.status()
file01 = status.session.get('data').get('current_file')
iterations += 1
# setting in default.yaml is 30 second files, though 40 seconds happens
if iterations > 45:
raise RuntimeError(f'Aggregator file not rotating. {file00} == {file01}')
# open rotated file and see if any data after recorded time exists
# scanner = hk.HKArchiveScanner()
# scanner.process_file("." + file00)
# arc = scanner.finalize()
# data = arc.simple(['channel_00'])
# assert np.any(data[0][0] > now)
# wait for another rotation and check that file?
status = agg_client.record.status()
file02 = status.session.get('data').get('current_file')
iterations = 0
while file01 == file02:
time.sleep(1)
status = agg_client.record.status()
file02 = status.session.get('data').get('current_file')
iterations += 1
# setting in default.yaml is 30 second files, though 40 seconds happens
if iterations > 45:
raise RuntimeError(f'Aggregator file not rotating. {file01} == {file02}')
# check "file01" is not empty
# scanner = hk.HKArchiveScanner()
# scanner.process_file("." + file01)
# arc = scanner.finalize()
# data = arc.simple(['channel_00'])
# assert data[0][0].size
# Perhaps the best test of whether we've lost data is to see if there are
# gaps between datapoints
# Open all created files and make sure no gaps
scanner = hk.HKArchiveScanner()
files = [file00, file01, file02]
for f in files:
scanner.process_file("." + f)
arc = scanner.finalize()
# Get all fields in the file
all_fields = []
for k, v in arc.get_fields()[0].items():
all_fields.append(k)
data = arc.simple(all_fields)
# Check for gaps in all timestreams
# This is an unreliable assertion
#for i, dataset in enumerate(data):
# assert np.all(np.diff(dataset[0]) < 0.25), f"{all_fields[i]} contains gap in data larger than 0.25 seconds"
@pytest.mark.integtest
def test_proper_agent_shutdown_on_lost_transport(wait_for_crossbar):
"""If the crossbar server goes down, i.e. TransportLost, after the timeout
period an Agent should shut down after the reactor.stop() call. This will mean
the container running the Agent is gone.
Start up everything. Shut down the crossbar server. Check for the fake data agent
container. It's gotta be gone for a pass.
"""
client = docker.from_env()
# give a few seconds for things to make first connection
time.sleep(CROSSBAR_SLEEP)
# shutdown crossbar
crossbar_container = client.containers.get('ocs-tests-crossbar')
crossbar_container.stop()
# 15 seconds should be enough with default 10 second timeout
timeout = 15
while timeout > 0:
time.sleep(1) # give time for the fake-data-agent to timeout, then shutdown
fake_data_container = client.containers.get('ocs-tests-fake-data-agent')
if fake_data_container.status == "exited":
break
timeout -= 1
fake_data_container = client.containers.get('ocs-tests-fake-data-agent')
assert fake_data_container.status == "exited"
| 32.5 | 116 | 0.693093 |
f80b894767bc980bcd80d52d0eaf67f37907b56b | 1,200 | py | Python | google/ads/googleads/v7/enums/types/operating_system_version_operator_type.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 285 | 2018-10-05T16:47:58.000Z | 2022-03-31T00:58:39.000Z | google/ads/googleads/v7/enums/types/operating_system_version_operator_type.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 425 | 2018-09-10T13:32:41.000Z | 2022-03-31T14:50:05.000Z | google/ads/googleads/v7/enums/types/operating_system_version_operator_type.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 369 | 2018-11-28T07:01:00.000Z | 2022-03-28T09:53:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.enums",
marshal="google.ads.googleads.v7",
manifest={"OperatingSystemVersionOperatorTypeEnum",},
)
class OperatingSystemVersionOperatorTypeEnum(proto.Message):
r"""Container for enum describing the type of OS operators. """
class OperatingSystemVersionOperatorType(proto.Enum):
r"""The type of operating system version."""
UNSPECIFIED = 0
UNKNOWN = 1
EQUALS_TO = 2
GREATER_THAN_EQUALS_TO = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| 31.578947 | 74 | 0.725 |
ab1ee40a80cf389321530bc7d45de4e91765dc35 | 642 | py | Python | SublimeText3_3176/Data/Packages/SideBarEnhancements-st3/StatusBarFileSize.py | xiexie1993/Tool_Sublime_Text3_for_Windows | 51b11ac2d7df36242d68b3b5f85af5f2a8c550e2 | [
"RSA-MD"
] | null | null | null | SublimeText3_3176/Data/Packages/SideBarEnhancements-st3/StatusBarFileSize.py | xiexie1993/Tool_Sublime_Text3_for_Windows | 51b11ac2d7df36242d68b3b5f85af5f2a8c550e2 | [
"RSA-MD"
] | null | null | null | SublimeText3_3176/Data/Packages/SideBarEnhancements-st3/StatusBarFileSize.py | xiexie1993/Tool_Sublime_Text3_for_Windows | 51b11ac2d7df36242d68b3b5f85af5f2a8c550e2 | [
"RSA-MD"
] | null | null | null | import sublime, sublime_plugin
from .hurry.filesize import size
from os.path import getsize
s = {}
def plugin_loaded():
global s
s = sublime.load_settings('Side Bar.sublime-settings')
class StatusBarFileSize(sublime_plugin.EventListener):
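    """Show the active file's size in the status bar on activate and save, if enabled in settings."""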
def on_activated_async(self, v):
if s.get('statusbar_file_size') and v.file_name():
try:
self.show(v, size(getsize(v.file_name())))
except:
pass
def on_post_save_async(self, v):
if s.get('statusbar_file_size') and v.file_name():
try:
self.show(v, size(getsize(v.file_name())))
except:
pass
def show(self, v, size):
v.set_status('statusbar_file_size', size);
| 22.137931 | 55 | 0.711838 |
08583e4abfae956ddbea280770246e6894b6bfb6 | 1,159 | py | Python | python-stdlib/textwrap/setup.py | mkomon/micropython-lib | 25ebe4a261e7b1c7c8471bceef2fd0e12837cdd2 | [
"PSF-2.0"
] | 1,556 | 2015-01-18T01:10:21.000Z | 2022-03-31T23:27:33.000Z | python-stdlib/textwrap/setup.py | Li-Lian1069/micropython-lib | 1dfca5ad343b2841965df6c4e59f92d6d94a24bd | [
"PSF-2.0"
] | 414 | 2015-01-01T09:01:22.000Z | 2022-03-31T15:08:24.000Z | python-stdlib/textwrap/setup.py | Li-Lian1069/micropython-lib | 1dfca5ad343b2841965df6c4e59f92d6d94a24bd | [
"PSF-2.0"
] | 859 | 2015-02-05T13:23:00.000Z | 2022-03-28T02:28:16.000Z | import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(
name="micropython-textwrap",
version="3.4.2-1",
description="CPython textwrap module ported to MicroPython",
long_description="This is a module ported from CPython standard library to be compatible with\nMicroPython interpreter. Usually, this means applying small patches for\nfeatures not supported (yet, or at all) in MicroPython. Sometimes, heavier\nchanges are required. Note that CPython modules are written with availability\nof vast resources in mind, and may not work for MicroPython ports with\nlimited heap. If you are affected by such a case, please help reimplement\nthe module from scratch.",
url="https://github.com/micropython/micropython-lib",
author="CPython Developers",
author_email="python-dev@python.org",
maintainer="micropython-lib Developers",
maintainer_email="micro-python@googlegroups.com",
license="Python",
cmdclass={"sdist": sdist_upip.sdist},
py_modules=["textwrap"],
)
| 46.36 | 500 | 0.761001 |
083aacf4e285a49645e47d28bca7b62b1f3db199 | 1,704 | py | Python | logreg/models.py | ben741863140/cfsystem | 227e269f16533719251962f4d8caee8b51091d2f | [
"Apache-2.0"
] | 4 | 2018-02-22T01:59:07.000Z | 2020-07-09T06:28:46.000Z | logreg/models.py | ben741863140/cfsystem | 227e269f16533719251962f4d8caee8b51091d2f | [
"Apache-2.0"
] | null | null | null | logreg/models.py | ben741863140/cfsystem | 227e269f16533719251962f4d8caee8b51091d2f | [
"Apache-2.0"
] | null | null | null | # _*_ utf-8 _*_
from django.db import models
from django.contrib.auth.models import AbstractUser
class Pair(object):
def __init__(self, num, str):
self.val = num
self.tag = str
class User(AbstractUser):
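    # Grade choices 1-98, labelled with zero-padded two-digit strings ("01".."98").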
Choices = []
for i in range(1,99):
if i < 10:
Choices.append(Pair(i, '0' + str(i)))
else:
Choices.append(Pair(i, str(i)))
grade_choices = [(choice.val, choice.tag) for choice in Choices]
nickname = models.CharField(max_length=20, blank=True, verbose_name='昵称', help_text='<ul><li>可不填</li></ul>')
realname = models.CharField(max_length=10, blank=False, verbose_name='真名', help_text='<ul><li>必填</li></ul>',
error_messages={'blank': '不能为空'})
handle = models.CharField(max_length=20, blank=False, verbose_name='CF账号', help_text='<ul><li>必填</li></ul>',
error_messages={'unique': '账号已存在'}, unique=True)
oj_password = models.CharField(max_length=50, blank=True, default='')
grade = models.IntegerField(verbose_name='年级', help_text='<ul><li>必填</li></ul>',
error_messages={'blank': '不能为空'}, choices=grade_choices, default=16, blank=False)
    # Meta simply inherits AbstractUser.Meta without changes.
class Meta(AbstractUser.Meta):
pass
# def set_password(self, raw_password):
# self.password = make_password(raw_password)
class Captcha(models.Model):
username = models.CharField(max_length=150, blank=False)
handle = models.CharField(max_length=20, blank=False, unique=True)
update_time = models.DateTimeField()
captcha = models.CharField(max_length=200, blank=False)
status = models.IntegerField(default=0)
| 40.571429 | 113 | 0.63615 |
d5d25bcee1f295948e3bfdd7b5b4877706a23488 | 812 | py | Python | sms/sms.py | mrprompt/bicsons | 664eeadab77f1fd1792d3e4b93df2305fb89e182 | [
"MIT"
] | 5 | 2017-11-16T11:49:24.000Z | 2020-01-15T15:59:49.000Z | sms/sms.py | mrprompt/bicsons | 664eeadab77f1fd1792d3e4b93df2305fb89e182 | [
"MIT"
] | null | null | null | sms/sms.py | mrprompt/bicsons | 664eeadab77f1fd1792d3e4b93df2305fb89e182 | [
"MIT"
] | null | null | null | """
SMS sending
"""
from random import randint
from datetime import datetime
from .driver import nexmo, totalvoice
def sms(numeros, exchange, mensagem):
"""
    Send the SMS using a randomly chosen driver
"""
for numero in numeros:
now = datetime.now().strftime("%H:%M")
if now in numero['horarios'] and exchange in numero['exchanges']:
driver = randint(1, 2)
newmessage = exchange + ": " + mensagem
if driver == 1:
result = totalvoice.send(numero['telefone'], newmessage)
elif driver == 2:
result = nexmo.send(numero['telefone'], newmessage)
else:
result = {'sucesso' : False}
print("Envio para " + numero['telefone'] + " : " + str(result['sucesso']))
| 30.074074 | 86 | 0.568966 |
1c593320ca76f8fa4205484ddde26f8259dff40c | 16,201 | py | Python | tests/base_tests.py | Ofeknielsen/incubator-superset | 8a58afb8f53692d772efca9f3783b393a94d85d8 | [
"Apache-2.0"
] | 1 | 2021-04-11T04:25:57.000Z | 2021-04-11T04:25:57.000Z | tests/base_tests.py | Ofeknielsen/incubator-superset | 8a58afb8f53692d772efca9f3783b393a94d85d8 | [
"Apache-2.0"
] | 2 | 2020-06-25T17:16:07.000Z | 2020-08-06T23:16:26.000Z | tests/base_tests.py | Ofeknielsen/incubator-superset | 8a58afb8f53692d772efca9f3783b393a94d85d8 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import imp
import json
from typing import Any, Dict, Union, List, Optional
from unittest.mock import Mock, patch
import pandas as pd
from flask import Response
from flask_appbuilder.security.sqla import models as ab_models
from flask_testing import TestCase
from sqlalchemy.orm import Session
from tests.test_app import app
from superset.sql_parse import CtasMethod
from superset import db, security_manager
from superset.connectors.base.models import BaseDatasource
from superset.connectors.druid.models import DruidCluster, DruidDatasource
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from superset.models.slice import Slice
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.utils.core import get_example_database
from superset.views.base_api import BaseSupersetModelRestApi
FAKE_DB_NAME = "fake_db_100"
def login(client: Any, username: str = "admin", password: str = "general"):
resp = get_resp(client, "/login/", data=dict(username=username, password=password))
assert "User confirmation needed" not in resp
def get_resp(
client: Any,
url: str,
data: Any = None,
follow_redirects: bool = True,
raise_on_error: bool = True,
json_: Optional[str] = None,
):
"""Shortcut to get the parsed results while following redirects"""
if data:
resp = client.post(url, data=data, follow_redirects=follow_redirects)
elif json_:
resp = client.post(url, json=json_, follow_redirects=follow_redirects)
else:
resp = client.get(url, follow_redirects=follow_redirects)
if raise_on_error and resp.status_code > 400:
raise Exception("http request failed with code {}".format(resp.status_code))
return resp.data.decode("utf-8")
class SupersetTestCase(TestCase):
default_schema_backend_map = {
"sqlite": "main",
"mysql": "superset",
"postgresql": "public",
"presto": "default",
"hive": "default",
}
maxDiff = -1
def create_app(self):
return app
@staticmethod
def create_user_with_roles(username: str, roles: List[str]):
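        """Get the named user, creating it if necessary, and overwrite its roles."""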
user_to_create = security_manager.find_user(username)
if not user_to_create:
security_manager.add_user(
username,
username,
username,
f"{username}@superset.com",
security_manager.find_role("Gamma"), # it needs a role
password="general",
)
db.session.commit()
user_to_create = security_manager.find_user(username)
assert user_to_create
user_to_create.roles = [security_manager.find_role(r) for r in roles]
db.session.commit()
return user_to_create
@staticmethod
def create_user(
username: str,
password: str,
role_name: str,
first_name: str = "admin",
last_name: str = "user",
email: str = "admin@fab.org",
) -> Union[ab_models.User, bool]:
role_admin = security_manager.find_role(role_name)
return security_manager.add_user(
username, first_name, last_name, email, role_admin, password
)
@staticmethod
def get_user(username: str) -> ab_models.User:
user = (
db.session.query(security_manager.user_model)
.filter_by(username=username)
.one_or_none()
)
return user
@classmethod
def create_druid_test_objects(cls):
# create druid cluster and druid datasources
with app.app_context():
session = db.session
cluster = (
session.query(DruidCluster).filter_by(cluster_name="druid_test").first()
)
if not cluster:
cluster = DruidCluster(cluster_name="druid_test")
session.add(cluster)
session.commit()
druid_datasource1 = DruidDatasource(
datasource_name="druid_ds_1", cluster=cluster
)
session.add(druid_datasource1)
druid_datasource2 = DruidDatasource(
datasource_name="druid_ds_2", cluster=cluster
)
session.add(druid_datasource2)
session.commit()
@staticmethod
def get_table_by_id(table_id: int) -> SqlaTable:
return db.session.query(SqlaTable).filter_by(id=table_id).one()
@staticmethod
def is_module_installed(module_name):
try:
imp.find_module(module_name)
return True
except ImportError:
return False
def get_or_create(self, cls, criteria, session, **kwargs):
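        """Return the first cls row matching criteria, creating it from kwargs if it does not exist."""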
obj = session.query(cls).filter_by(**criteria).first()
if not obj:
obj = cls(**criteria)
obj.__dict__.update(**kwargs)
session.add(obj)
session.commit()
return obj
def login(self, username="admin", password="general"):
return login(self.client, username, password)
def get_slice(
self, slice_name: str, session: Session, expunge_from_session: bool = True
) -> Slice:
slc = session.query(Slice).filter_by(slice_name=slice_name).one()
if expunge_from_session:
session.expunge_all()
return slc
@staticmethod
def get_table_by_name(name: str) -> SqlaTable:
return db.session.query(SqlaTable).filter_by(table_name=name).one()
@staticmethod
def get_database_by_id(db_id: int) -> Database:
return db.session.query(Database).filter_by(id=db_id).one()
@staticmethod
def get_druid_ds_by_name(name: str) -> DruidDatasource:
return db.session.query(DruidDatasource).filter_by(datasource_name=name).first()
@staticmethod
def get_datasource_mock() -> BaseDatasource:
datasource = Mock()
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = None
results.df = pd.DataFrame()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_col = Mock(return_value=mock_dttm_col)
datasource.query = Mock(return_value=results)
datasource.database = Mock()
datasource.database.db_engine_spec = Mock()
datasource.database.db_engine_spec.mutate_expression_label = lambda x: x
return datasource
def get_resp(
self, url, data=None, follow_redirects=True, raise_on_error=True, json_=None
):
return get_resp(self.client, url, data, follow_redirects, raise_on_error, json_)
def get_json_resp(
self, url, data=None, follow_redirects=True, raise_on_error=True, json_=None
):
"""Shortcut to get the parsed results while following redirects"""
resp = self.get_resp(url, data, follow_redirects, raise_on_error, json_)
return json.loads(resp)
def get_access_requests(self, username, ds_type, ds_id):
DAR = DatasourceAccessRequest
return (
db.session.query(DAR)
.filter(
DAR.created_by == security_manager.find_user(username=username),
DAR.datasource_type == ds_type,
DAR.datasource_id == ds_id,
)
.first()
)
def logout(self):
self.client.get("/logout/", follow_redirects=True)
def grant_public_access_to_table(self, table):
public_role = security_manager.find_role("Public")
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (
perm.permission.name == "datasource_access"
and perm.view_menu
and table.perm in perm.view_menu.name
):
security_manager.add_permission_role(public_role, perm)
def revoke_public_access_to_table(self, table):
public_role = security_manager.find_role("Public")
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if (
perm.permission.name == "datasource_access"
and perm.view_menu
and table.perm in perm.view_menu.name
):
security_manager.del_permission_role(public_role, perm)
def _get_database_by_name(self, database_name="main"):
if database_name == "examples":
return get_example_database()
else:
raise ValueError("Database doesn't exist")
def run_sql(
self,
sql,
client_id=None,
user_name=None,
raise_on_error=False,
query_limit=None,
database_name="examples",
sql_editor_id=None,
select_as_cta=False,
tmp_table_name=None,
schema=None,
ctas_method=CtasMethod.TABLE,
):
if user_name:
self.logout()
self.login(username=(user_name or "admin"))
dbid = self._get_database_by_name(database_name).id
json_payload = {
"database_id": dbid,
"sql": sql,
"client_id": client_id,
"queryLimit": query_limit,
"sql_editor_id": sql_editor_id,
"ctas_method": ctas_method,
}
if tmp_table_name:
json_payload["tmp_table_name"] = tmp_table_name
if select_as_cta:
json_payload["select_as_cta"] = select_as_cta
if schema:
json_payload["schema"] = schema
resp = self.get_json_resp(
"/superset/sql_json/", raise_on_error=False, json_=json_payload
)
if raise_on_error and "error" in resp:
raise Exception("run_sql failed")
return resp
def create_fake_db(self):
self.login(username="admin")
database_name = FAKE_DB_NAME
db_id = 100
extra = """{
"schemas_allowed_for_csv_upload":
["this_schema_is_allowed", "this_schema_is_allowed_too"]
}"""
return self.get_or_create(
cls=models.Database,
criteria={"database_name": database_name},
session=db.session,
sqlalchemy_uri="sqlite:///:memory:",
id=db_id,
extra=extra,
)
def delete_fake_db(self):
database = (
db.session.query(Database)
.filter(Database.database_name == FAKE_DB_NAME)
.scalar()
)
if database:
db.session.delete(database)
def create_fake_db_for_macros(self):
self.login(username="admin")
database_name = "db_for_macros_testing"
db_id = 200
return self.get_or_create(
cls=models.Database,
criteria={"database_name": database_name},
session=db.session,
sqlalchemy_uri="db_for_macros_testing://user@host:8080/hive",
id=db_id,
)
def delete_fake_db_for_macros(self):
database = (
db.session.query(Database)
.filter(Database.database_name == "db_for_macros_testing")
.scalar()
)
if database:
db.session.delete(database)
db.session.commit()
def validate_sql(
self,
sql,
client_id=None,
user_name=None,
raise_on_error=False,
database_name="examples",
):
if user_name:
self.logout()
self.login(username=(user_name if user_name else "admin"))
dbid = self._get_database_by_name(database_name).id
resp = self.get_json_resp(
"/superset/validate_sql_json/",
raise_on_error=False,
data=dict(database_id=dbid, sql=sql, client_id=client_id),
)
if raise_on_error and "error" in resp:
raise Exception("validate_sql failed")
return resp
def get_dash_by_slug(self, dash_slug):
sesh = db.session()
return sesh.query(Dashboard).filter_by(slug=dash_slug).first()
def get_assert_metric(self, uri: str, func_name: str) -> Response:
"""
Simple client get with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP GET
:param func_name: The function name that the HTTP GET triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.get(uri)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
def delete_assert_metric(self, uri: str, func_name: str) -> Response:
"""
Simple client delete with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP DELETE
:param func_name: The function name that the HTTP DELETE triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.delete(uri)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
def post_assert_metric(
self, uri: str, data: Dict[str, Any], func_name: str
) -> Response:
"""
Simple client post with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP POST
:param data: The JSON data payload to be posted
:param func_name: The function name that the HTTP POST triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.post(uri, json=data)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
def put_assert_metric(
self, uri: str, data: Dict[str, Any], func_name: str
) -> Response:
"""
Simple client put with an extra assertion for statsd metrics
:param uri: The URI to use for the HTTP PUT
:param data: The JSON data payload to be posted
:param func_name: The function name that the HTTP PUT triggers
for the statsd metric assertion
:return: HTTP Response
"""
with patch.object(
BaseSupersetModelRestApi, "incr_stats", return_value=None
) as mock_method:
rv = self.client.put(uri, json=data)
if 200 <= rv.status_code < 400:
mock_method.assert_called_once_with("success", func_name)
else:
mock_method.assert_called_once_with("error", func_name)
return rv
| 34.84086 | 88 | 0.628974 |
b149d6d12a73073e91f6da11bcdc4a6c6aed77bf | 901 | py | Python | rubicon_ml/ui/views/header.py | jeklein/rubicon-ml | a4a3c4d3504cb251597890dbfa8302b0bde06f30 | [
"Apache-2.0"
] | 42 | 2021-02-23T23:30:49.000Z | 2021-05-01T02:54:03.000Z | rubicon_ml/ui/views/header.py | jeklein/rubicon-ml | a4a3c4d3504cb251597890dbfa8302b0bde06f30 | [
"Apache-2.0"
] | 56 | 2021-05-13T13:47:50.000Z | 2022-03-24T13:46:49.000Z | rubicon_ml/ui/views/header.py | jeklein/rubicon-ml | a4a3c4d3504cb251597890dbfa8302b0bde06f30 | [
"Apache-2.0"
] | 9 | 2021-02-23T23:30:51.000Z | 2021-04-24T16:42:28.000Z | import dash_html_components as html
def make_header_layout():
"""The html layout for the dashboard's header view."""
return html.Div(
id="header",
className="header",
children=[
html.Div(id="title", className="header--project", children="rubicon-ml"),
html.Div(
id="links",
className="header--links",
children=[
html.A(
className="header--link",
href="https://capitalone.github.io/rubicon-ml",
children="Docs",
),
html.A(
className="header--link",
href="https://github.com/capitalone/rubicon-ml",
children="Github",
),
],
),
],
)
| 30.033333 | 85 | 0.419534 |
b3bd4f9a9e00e0b53614f2e01d2961576c1815dd | 3,175 | py | Python | modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | 2 | 2018-01-16T13:29:45.000Z | 2018-08-10T09:15:23.000Z | modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | null | null | null | modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores_unittest.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the eval_scores module.
"""
import os
import shutil
import tempfile
import unittest
import pydub
from . import data_access
from . import eval_scores
from . import eval_scores_factory
from . import signal_processing
class TestEvalScores(unittest.TestCase):
"""Unit tests for the eval_scores module.
"""
def setUp(self):
"""Create temporary output folder and two audio track files."""
self._output_path = tempfile.mkdtemp()
# Create fake reference and tested (i.e., APM output) audio track files.
silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
fake_reference_signal = (
signal_processing.SignalProcessingUtils.GenerateWhiteNoise(silence))
fake_tested_signal = (
signal_processing.SignalProcessingUtils.GenerateWhiteNoise(silence))
# Save fake audio tracks.
self._fake_reference_signal_filepath = os.path.join(
self._output_path, 'fake_ref.wav')
signal_processing.SignalProcessingUtils.SaveWav(
self._fake_reference_signal_filepath, fake_reference_signal)
self._fake_tested_signal_filepath = os.path.join(
self._output_path, 'fake_test.wav')
signal_processing.SignalProcessingUtils.SaveWav(
self._fake_tested_signal_filepath, fake_tested_signal)
def tearDown(self):
"""Recursively delete temporary folder."""
shutil.rmtree(self._output_path)
def testRegisteredClasses(self):
# Preliminary check.
self.assertTrue(os.path.exists(self._output_path))
# Check that there is at least one registered evaluation score worker.
registered_classes = eval_scores.EvaluationScore.REGISTERED_CLASSES
self.assertIsInstance(registered_classes, dict)
self.assertGreater(len(registered_classes), 0)
# Instance evaluation score workers factory with fake dependencies.
eval_score_workers_factory = (
eval_scores_factory.EvaluationScoreWorkerFactory(
score_filename_prefix='scores-',
polqa_tool_bin_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fake_polqa')))
# Try each registered evaluation score worker.
for eval_score_name in registered_classes:
# Instance evaluation score worker.
eval_score_worker = eval_score_workers_factory.GetInstance(
registered_classes[eval_score_name])
# Set reference and test, then run.
eval_score_worker.SetReferenceSignalFilepath(
self._fake_reference_signal_filepath)
eval_score_worker.SetTestedSignalFilepath(
self._fake_tested_signal_filepath)
eval_score_worker.Run(self._output_path)
# Check output.
score = data_access.ScoreFile.Load(eval_score_worker.output_filepath)
self.assertTrue(isinstance(score, float))
| 36.918605 | 76 | 0.750866 |
b82e7b57bb96d7c75d7ad4753a65bde3af6fbc42 | 11,689 | py | Python | vcpp-data-analysis/vcpp-full-data.py | fcmeyer/meyer-psychcode | 5024d790b7af5b6fb21ea3f70decbe488e5e3161 | [
"MIT"
] | null | null | null | vcpp-data-analysis/vcpp-full-data.py | fcmeyer/meyer-psychcode | 5024d790b7af5b6fb21ea3f70decbe488e5e3161 | [
"MIT"
] | null | null | null | vcpp-data-analysis/vcpp-full-data.py | fcmeyer/meyer-psychcode | 5024d790b7af5b6fb21ea3f70decbe488e5e3161 | [
"MIT"
] | null | null | null | #####################################################################
## CPP Trial Data Parser #
## #
## Written by Francisco Meyer (fcmeyer@uchicago.edu) #
## Last edited: Friday, August 28 2015 #
#####################################################################
# Typical dependencies (leaving some unnecessary stuff here... sorry)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
import pylab
import sys
from os import listdir, rename, makedirs
from os.path import isfile, join, curdir, exists
from operator import itemgetter
import csv, datetime, time, sys, itertools
import random
from types import *
# Print headers?
HEADERS_ON = True
GRAPHS_ENABLED = True
# Output file names
LOC_OUT = 'CPP-LOC-OUT.csv'
RWD_OUT = 'CPP-RWD-OUT.csv'
BLN_OUT = 'CPP-BLN-OUT.csv'
# Axis Code Globals
X = 0
Y = 1
Z = 2
# Room Type Code Globals
NA = -1
L_ROOM = 0
R_ROOM = 1
NEUTRAL = 2
# Filename Format Globals
ID = 0
FILETYPE = 1
TASKNAME = 2
TRIAL_NO = 3
# Globals for making grpahs
XLOC = 0
YLOC = 1
ZLOC = 2
FIGWIDTH = 12
FIGHEIGHT = 8
BLUE = '#0000ff'
PURPLE = '#9933ff'
RED = '#ff0000'
####################
# Script Functions #
####################
def sort_files_in_target(path_to_files):
'''
Reads all of the compatible data files in for a given path.
Classifies files based on their role and delivers them to the
appropriate data structure.
'''
# Generate list of .txt files generated by GMod in the given path
files_in_dir = [f for f in listdir(path_to_files) if
((isfile(join(path_to_files, f))) and
(f[-3:] == "txt") and
(join(path_to_files, f)[-9:-7] == "__"))]
# Stop if there are no usable data files
if len(files_in_dir) < 1:
print 'There are no usable data files in the specified directory.'
sys.exit(2)
# Sort them into three batches (ignore aim files)
loc_files = []
rwd_files = []
bln_files = []
for f_name in files_in_dir:
file_type = f_name.split(".")[0][-3:]
if file_type == 'loc':
loc_files.append(f_name)
elif file_type == 'rwd':
rwd_files.append(f_name)
elif file_type == 'bln':
bln_files.append(f_name)
else:
pass
return loc_files, rwd_files, bln_files
def parse_loc_files(loc_files, path_to_files):
'''
Parse all of the location files for all available trials. Output
data table with time spent & first room entered information.
'''
graph_path = join(curdir, 'graphs')
if not exists(graph_path): makedirs(graph_path)
rv_table = []
rv_header = ['SubjectID', 'TaskID', 'TrialNo', 'TimeRedRoom',
'TimeBlueRoom', 'TimeNeutral', 'FirstRm']
if HEADERS_ON:
rv_table.append(rv_header)
for f_name in loc_files:
# Read file, generate string with all vectors
v_string = file_to_string(join(path_to_files, f_name))
# Parse into vector list
vector_list = read_vector_string(v_string)
# Calcualte time spent and first room
time_spent = roam_time_spent(vector_list)
first_room = roam_first_room(vector_list)
# Interpret filename to get trial no, subject id, etc.
log_info = interpret_filename(f_name)
# Write this up
row = [log_info[ID], log_info[TASKNAME], log_info[TRIAL_NO],
time_spent[L_ROOM], time_spent[R_ROOM], time_spent[NEUTRAL],
first_room]
rv_table.append(row)
# Make graphs (if enabled)
if GRAPHS_ENABLED:
f_name_grph = f_name[:-3] + 'png'
path_to_new_file = join(graph_path, f_name_grph)
x = vector_list[X]
y = vector_list[Y]
ta = str(time_spent[L_ROOM])
tb = str(time_spent[R_ROOM])
tn = str(time_spent[NEUTRAL])
make_loc_graph(x, y, ta, tb, tn, path_to_new_file)
return rv_table
def parse_rwd_files(rwd_files, path_to_files):
'''
__rwd files contain the variable interval reward schedule for a particular
trial.
- The first item is of the format MAPNAME-N_REWARDS-AVG_INTERVAL
- The following items are float numbers indicating the times at which
rewards were given.
Example:
cpp_cd_a-30-15, 1.2329909057364, 2.3492390710167, 2.727413955858,
'''
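    # e.g. metadata "cpp_cd_a-30-15" -> map "cpp_cd_a", 30 planned rewards, 15 s
    # average interval; the remaining comma-separated items are reward times.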
rv_table = []
rv_header = ['SubjectID', 'TaskID', 'TrialNo', 'Room', 'AvgInterval',
'NumRwdsDetermined', 'NumRwdsActual', 'Schedule']
if HEADERS_ON:
rv_table.append(rv_header)
for f_name in rwd_files:
# Read file, generate string with all vectors
rwd_string = file_to_string(join(path_to_files, f_name))
# Parse out each part
rwd_data = rwd_string.split(", ")
# Strip out the metadata.
metadata = rwd_data[0]
schedule = rwd_data[1:]
# Parse metadata.
meta = metadata.split("-")
avg_interval = meta[2]
n_rwds_determined = meta[1]
n_rwds_actual = len(schedule) - 1
if meta[0][-1] == 'a':
rm = 'red'
elif meta[0][-1] == 'b':
rm = 'blu'
else:
rm = 'NA'
# Interpret filename to get trial no, subject id, etc.
log_info = interpret_filename(f_name)
# Write this up
row = [log_info[ID], log_info[TASKNAME], log_info[TRIAL_NO],
rm, avg_interval, n_rwds_determined, n_rwds_actual] + schedule
rv_table.append(row)
return rv_table
def parse_bln_files(bln_files, path_to_files):
'''
__bln files indicate the SYSTIMES at which a balloon is popped, and its
coordinates.
- The first item is the initial systime
- The following items contain the systime and coordinates, separated by |
Example:
1429077925, 1429077934|166.000000 52.000000 153.000000,
'''
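    # Pop times are reported relative to the first SYSTIME (t_0), e.g.
    # 1429077934 - 1429077925 = 9 seconds into the trial.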
rv_table = []
rv_header = ['SubjectID', 'TaskID', 'TrialNo', 'NumPops', 'PopTimes']
if HEADERS_ON:
rv_table.append(rv_header)
for f_name in bln_files:
bln_string = file_to_string(join(path_to_files, f_name))
bln_data = bln_string.split(", ")
elements = bln_data[0].split("|")
t_0 = int(elements[0])
# Processed balloon data.
times = []
for pop_data in bln_data[1:]:
elements = pop_data.split("|")
if elements[0] != '':
times.append(int(elements[0]) - t_0)
num_pops = len(times)
# Interpret filename to get trial no, subject id, etc.
log_info = interpret_filename(f_name)
# Write this up
row = [log_info[ID], log_info[TASKNAME], log_info[TRIAL_NO],
num_pops] + times
rv_table.append(row)
return rv_table
def main(path_to_files):
loc_files, rwd_files, bln_files = sort_files_in_target(path_to_files)
loc_table = parse_loc_files(loc_files, path_to_files)
rwd_table = parse_rwd_files(rwd_files, path_to_files)
bln_table = parse_bln_files(bln_files, path_to_files)
write_table_to_file(LOC_OUT, loc_table)
write_table_to_file(RWD_OUT, rwd_table)
write_table_to_file(BLN_OUT, bln_table)
######################
# Analysis Functions #
######################
def roam_first_room(vector_list):
'''
Determine which was the first room that the subject entered in this trial.
If first room is indeterminate (no room was entered), will return NA.
NOTE: vector_list object needs to be like the one from read_vector_string
'''
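    # Coordinate convention (see roam_time_spent/make_loc_graph): y > 0 means
    # inside a room; x < 0 is the left (A) room and x > 0 the right (B) room.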
t = 0
# Make sure no weird lists are there!
assert(len(vector_list) > 0)
assert(len(vector_list[Y]) > 0)
rv = NA
while t < len(vector_list[Y]):
if vector_list[Y][0] >= 0:
rv = NA
break
elif vector_list[Y][t] > 0:
if vector_list[X][t] < 0:
rv = L_ROOM
break
if vector_list[X][t] > 0:
rv = R_ROOM
break
t += 1
return rv
def roam_time_spent(vector_list):
'''
Count the amount of time spent in each room, for a roam task.
Returns a list w/ count of form [L_ROOM, R_ROOM, NEUTRAL]
NOTE: vector_list object needs to be like the one from read_vector_string
'''
zipped = zip(vector_list[X], vector_list[Y])
time_neutral = 0
time_l_room = 0
time_r_room = 0
count = 0
for each_tuple in zipped:
x_loc, y_loc = each_tuple
if y_loc <= 0:
time_neutral = time_neutral + 1
elif x_loc < 0:
time_l_room = time_l_room + 1
elif x_loc > 0:
time_r_room = time_r_room + 1
else: # If at the exact threshold, decide randomly
if random.choice([True, False]):
time_l_room = time_l_room + 1
else:
time_r_room = time_r_room + 1
return [float(time_l_room), float(time_r_room), float(time_neutral)]
def make_loc_graph(x, y, ta, tb, tn, filename):
'''
Make a graph for the loc file provided.
'''
# Create the plot using the data we just prepped
plt.figure()
plt.xlim((-350,350))
plt.ylim((-100,350))
plt.scatter(x, y, c='r', alpha=0.5)
plt.plot(x,y, c=PURPLE, alpha = 0.5)
# Do some formatting/make it pretty
plt.axhline(0, color=RED)
plt.axvline(0, color=BLUE)
plt.text(-300,300,'A ' + ta + 's', fontsize = 20)
plt.text(280,300,'B ' + tb + 's', fontsize = 20)
plt.text(-100,-50, 'Netural ' + tn + 's', fontsize = 20)
plt.ylabel('y coordinates')
plt.xlabel('x coordinates')
plt.title("Subject's movement in conditioning VE")
plt.gcf().set_size_inches(FIGWIDTH, FIGHEIGHT)
filename = filename[:-3] + 'png'
# Generate output
plt.savefig(filename)
plt.close()
####################
# Helper Functions #
####################
def interpret_filename(fname_str):
'''
Extract subject ID, task type, trial no and other information from filename.
'''
subject_name = fname_str.split("_")[0]
file_type = fname_str.split(".")[0][-3:]
task_name = fname_str.split(".")[0][:-5].split("_")[-1]
trial_no = int ( fname_str.split("_")[1][1] )
return [subject_name, file_type, task_name, trial_no]
def file_to_string(path_to_file):
'''
Feed it a filepath, will return a string of all the file's contents.
'''
f = open(path_to_file, 'rU')
s = ''
for line in f:
s = s + line
f.close()
return s
def read_vector_string(v_string):
'''
Parse a vector string of the format used for this app. Return a list of
lists containing position coordinates for X, Y and Z: [[X], [Y], [Z]]
'''
x = []
y = []
z = []
s = v_string[:-2] # Trim the last space & comma.
c = s.split(", ")
for coords in c:
d = coords.split(" ")
x.append(float(d[X]))
y.append(float(d[Y]))
z.append(float(d[Z]))
return [x, y, z]
def write_table_to_file(output_filename, table):
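    """Write a list of rows to output_filename as CSV."""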
with open(output_filename, "wb") as csvfile:
csvwr = csv.writer(csvfile, delimiter=',')
for row in table:
csvwr.writerow(row)
########
# Main #
########
if __name__ == '__main__':
if (len(sys.argv) != 2):
print "Usage: python cpp-full.py <path_to_data_folder>"
print "Example: python cpp-full.py C:/Users/r.psy.dewitlab/Desktop/data"
sys.exit(2)
main(sys.argv[1]) | 29.667513 | 80 | 0.593977 |
600e8f3368a07fa38ce7ac9e5099ff913bc9a718 | 237 | py | Python | root/ilikeit/MySQLCrashCourse/dbcom/tests/test_config.py | ChyiYaqing/chyidlTutorial | 77e7f6f84f21537a58a8a8a42e31cf2e3dd31996 | [
"MIT"
] | 5 | 2018-10-17T05:57:39.000Z | 2021-07-05T15:38:24.000Z | root/ilikeit/MySQLCrashCourse/dbcom/tests/test_config.py | ChyiYaqing/chyidlTutorial | 77e7f6f84f21537a58a8a8a42e31cf2e3dd31996 | [
"MIT"
] | 2 | 2021-04-14T00:48:43.000Z | 2021-04-14T02:20:50.000Z | root/ilikeit/MySQLCrashCourse/dbcom/tests/test_config.py | ChyiYaqing/chyidlTutorial | 77e7f6f84f21537a58a8a8a42e31cf2e3dd31996 | [
"MIT"
] | 3 | 2019-03-02T14:36:19.000Z | 2022-03-18T10:12:09.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# MySQL & MariaDB
mysql_host='192.168.82.56'
mysql_port=3306
mysql_user='xxx'
mysql_passwd='xxx'
# MsSQL
mssql_host='192.168.188.20'
mssql_port=1433
mssql_user='xxx'
mssql_passwd='xxxxxx'
| 14.8125 | 27 | 0.721519 |
58e46a118d48bf2efdf4c14601534eb91d496b0a | 4,179 | py | Python | tests/warehouses/athena_fixture.py | udemy/soda-sql | 53dd2218770c74367787f1d85ee0d48d498b51eb | [
"Apache-2.0"
] | null | null | null | tests/warehouses/athena_fixture.py | udemy/soda-sql | 53dd2218770c74367787f1d85ee0d48d498b51eb | [
"Apache-2.0"
] | null | null | null | tests/warehouses/athena_fixture.py | udemy/soda-sql | 53dd2218770c74367787f1d85ee0d48d498b51eb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import re
import string
from os import path
from typing import List
import boto3
from sodasql.scan.db import sql_updates
from tests.common.boto3_helper import Boto3Helper
from tests.common.warehouse_fixture import WarehouseFixture
class AthenaFixture(WarehouseFixture):
S3_URI_PATTERN = r"(^s3://)([^/]*)/(.*$)"
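    # Capture groups: (1) "s3://", (2) bucket name, (3) key prefix.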
def __init__(self, target: str) -> None:
super().__init__(target)
self.suite_id = 'suite_' + (''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(5)))
def drop_database(self):
pass
super().drop_database()
self.delete_staging_files()
def create_database(self):
self.database = self.create_unique_database_name()
self.warehouse.dialect.database = self.database
sql_updates(self.warehouse.connection, [
f'CREATE DATABASE IF NOT EXISTS {self.database}'])
def sql_create_table(self, columns: List[str], table_name: str):
columns_sql = ", ".join(columns)
table_postfix = (''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(5)))
table_location = path.join(self.warehouse.dialect.athena_staging_dir, self.suite_id, table_name, table_postfix)
return f"CREATE EXTERNAL TABLE " \
f"{self.warehouse.dialect.qualify_writable_table_name(table_name)} ( \n " \
f"{columns_sql} ) \n " \
f"LOCATION '{table_location}';"
def tear_down(self):
pass
def delete_staging_files(self):
database_full_location = path.join(self.warehouse.dialect.athena_staging_dir, self.suite_id)
logging.debug(f"Deleting all files under %s...", database_full_location)
bucket = self._extract_s3_bucket(database_full_location)
folder = self._extract_s3_folder(database_full_location)
s3_client = self._create_s3_client()
AthenaFixture.delete_s3_files(s3_client, bucket, folder)
def _create_s3_client(self):
Boto3Helper.filter_false_positive_boto3_warning()
aws_credentials = self.warehouse.dialect.aws_credentials
aws_credentials = aws_credentials.resolve_role("soda_sql_test_cleanup")
return boto3.client(
's3',
region_name=aws_credentials.region_name,
aws_access_key_id=aws_credentials.access_key_id,
aws_secret_access_key=aws_credentials.secret_access_key,
aws_session_token=aws_credentials.session_token
)
@staticmethod
def delete_s3_files(s3_client, bucket, folder, max_objects=200):
response = s3_client.list_objects_v2(Bucket=bucket, Prefix=folder)
object_keys = AthenaFixture._extract_object_keys(response)
assert len(object_keys) < max_objects, \
f"This method is intended for tests and hence limited to a maximum of {max_objects} objects, " \
f"{len(object_keys)} objects exceeds the limit."
if object_keys:
s3_client.delete_objects(Bucket=bucket, Delete={'Objects': object_keys})
@staticmethod
def _extract_object_keys(response):
object_keys = []
if 'Contents' in response:
objects = response['Contents']
for summary in objects:
key = summary['Key']
object_keys.append({'Key': key})
return object_keys
@classmethod
def _extract_s3_folder(cls, uri):
return re.search(cls.S3_URI_PATTERN, uri).group(3)
@classmethod
def _extract_s3_bucket(cls, uri):
return re.search(cls.S3_URI_PATTERN, uri).group(2)
| 40.572816 | 119 | 0.692989 |
2d9a51d557a41528d1cbef170c56443d5bf7b0ba | 1,167 | py | Python | setup.py | pappacena/pysockmmsg | 28018cd9998e0aad9fe5fea601442d81109f903d | [
"Unlicense"
] | 1 | 2021-09-12T18:20:15.000Z | 2021-09-12T18:20:15.000Z | setup.py | pappacena/pysockmmsg | 28018cd9998e0aad9fe5fea601442d81109f903d | [
"Unlicense"
] | null | null | null | setup.py | pappacena/pysockmmsg | 28018cd9998e0aad9fe5fea601442d81109f903d | [
"Unlicense"
] | null | null | null | """Python setup.py for pysockmmsg package"""
import io
import os
from setuptools import find_packages, setup
def read(*paths, **kwargs):
"""Read the contents of a text file safely.
>>> read("pysockmmsg", "VERSION")
'0.1.0'
>>> read("README.md")
...
"""
content = ""
with io.open(
os.path.join(os.path.dirname(__file__), *paths),
encoding=kwargs.get("encoding", "utf8"),
) as open_file:
content = open_file.read().strip()
return content
def read_requirements(path):
return [
line.strip()
for line in read(path).split("\n")
if not line.startswith(('"', "#", "-", "git+"))
]
setup(
name="pysockmmsg",
version=read("pysockmmsg", "VERSION"),
description="Awesome pysockmmsg created by pappacena",
url="https://github.com/pappacena/pysockmmsg/",
long_description=read("README.md"),
long_description_content_type="text/markdown",
author="pappacena",
packages=find_packages(exclude=["tests", ".github"]),
install_requires=read_requirements("requirements.txt"),
extras_require={"test": read_requirements("requirements-test.txt")},
)
| 26.522727 | 72 | 0.635818 |
81214be47ffdfdde1b96c43cd6d4f3715c8add92 | 1,393 | py | Python | capture_image.py | 6abi/pi08 | fa49d256569ba359ecde0f82a8f03d01db6d6a65 | [
"MIT"
] | null | null | null | capture_image.py | 6abi/pi08 | fa49d256569ba359ecde0f82a8f03d01db6d6a65 | [
"MIT"
] | null | null | null | capture_image.py | 6abi/pi08 | fa49d256569ba359ecde0f82a8f03d01db6d6a65 | [
"MIT"
] | null | null | null | import cv2
def capture_image():
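    """Preview the webcam; press 's' to save a 416x416 frame to ./data, or 'q' to quit."""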
    key = cv2.waitKey(1)
webcam = cv2.VideoCapture(0)
while True:
try:
check, frame = webcam.read()
            print(check)  # prints True as long as the webcam is running
            print(frame)  # prints the pixel matrix of each frame
cv2.imshow("Capturing", frame)
key = cv2.waitKey(1)
if key == ord('s'):
cv2.imwrite(filename='./data/captured_image.jpg', img=frame)
webcam.release()
cv2.waitKey(1650)
cv2.destroyAllWindows()
print("Resizing image to 416x416 scale...")
img_ = cv2.resize(frame,(416,416))
print("Resized...")
img_resized = cv2.imwrite(filename='./data/captured_image.jpg', img=img_)
print("Image saved!")
break
elif key == ord('q'):
print("Turning off camera.")
webcam.release()
print("Camera off.")
print("Program ended.")
cv2.destroyAllWindows()
break
except(KeyboardInterrupt):
print("Turning off camera.")
webcam.release()
print("Camera off.")
print("Program ended.")
cv2.destroyAllWindows()
break
| 34.825 | 89 | 0.487437 |
76c73f47fa14616b9aebc7dcb9af95f1d57decfd | 18,156 | py | Python | barchybrid/src/arc_hybrid.py | maxtrem/uuparser | 989fc73b3a3a48b619d3d1ccf524f6c03882792f | [
"Apache-2.0"
] | null | null | null | barchybrid/src/arc_hybrid.py | maxtrem/uuparser | 989fc73b3a3a48b619d3d1ccf524f6c03882792f | [
"Apache-2.0"
] | null | null | null | barchybrid/src/arc_hybrid.py | maxtrem/uuparser | 989fc73b3a3a48b619d3d1ccf524f6c03882792f | [
"Apache-2.0"
] | null | null | null | from utils import ParseForest, read_conll, write_conll
from operator import itemgetter
from itertools import chain
import utils, time, random
import numpy as np
from copy import deepcopy
from collections import defaultdict
import json
class ArcHybridLSTM:
def __init__(self, vocab, options):
# import here so we don't load Dynet if just running parser.py --help for example
from multilayer_perceptron import MLP
from feature_extractor import FeatureExtractor
import dynet as dy
global dy
global LEFT_ARC, RIGHT_ARC, SHIFT, SWAP
LEFT_ARC, RIGHT_ARC, SHIFT, SWAP = 0,1,2,3
self.model = dy.ParameterCollection()
self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic, 'relu':
dy.rectify, 'tanh3': (lambda x:
dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
self.activation = self.activations[options.activation]
self.oracle = options.oracle
self.headFlag = options.headFlag
self.rlMostFlag = options.rlMostFlag
self.rlFlag = options.rlFlag
self.k = options.k
#dimensions depending on extended features
self.nnvecs = (1 if self.headFlag else 0) + (2 if self.rlFlag or self.rlMostFlag else 0)
self.feature_extractor = FeatureExtractor(self.model, options, vocab, self.nnvecs)
self.irels = self.feature_extractor.irels
if options.no_bilstms > 0:
mlp_in_dims = options.lstm_output_size*2*self.nnvecs*(self.k+1)
else:
mlp_in_dims = self.feature_extractor.lstm_input_size*self.nnvecs*(self.k+1)
self.unlabeled_MLP = MLP(self.model, 'unlabeled', mlp_in_dims, options.mlp_hidden_dims,
options.mlp_hidden2_dims, 4, self.activation)
self.labeled_MLP = MLP(self.model, 'labeled' ,mlp_in_dims, options.mlp_hidden_dims,
options.mlp_hidden2_dims,2*len(self.irels)+2,self.activation)
def __evaluate(self, stack, buf, train):
"""
ret = [left arc,
right arc
shift]
RET[i] = (rel, transition, score1, score2) for shift, l_arc and r_arc
shift = 2 (==> rel=None) ; l_arc = 0; r_acr = 1
ret[i][j][2] ~= ret[i][j][3] except the latter is a dynet
expression used in the loss, the first is used in rest of training
"""
#feature rep
empty = self.feature_extractor.empty
topStack = [ stack.roots[-i-1].lstms if len(stack) > i else [empty] for i in range(self.k) ]
topBuffer = [ buf.roots[i].lstms if len(buf) > i else [empty] for i in range(1) ]
input = dy.concatenate(list(chain(*(topStack + topBuffer))))
output = self.unlabeled_MLP(input)
routput = self.labeled_MLP(input)
#scores, unlabeled scores
scrs, uscrs = routput.value(), output.value()
#transition conditions
left_arc_conditions = len(stack) > 0
right_arc_conditions = len(stack) > 1
shift_conditions = buf.roots[0].id != 0
swap_conditions = len(stack) > 0 and stack.roots[-1].id < buf.roots[0].id
if not train:
#(avoiding the multiple roots problem: disallow left-arc from root
#if stack has more than one element
left_arc_conditions = left_arc_conditions and not (buf.roots[0].id == 0 and len(stack) > 1)
uscrs0 = uscrs[0]
uscrs1 = uscrs[1]
uscrs2 = uscrs[2]
uscrs3 = uscrs[3]
if train:
output0 = output[0]
output1 = output[1]
output2 = output[2]
output3 = output[3]
ret = [ [ (rel, LEFT_ARC, scrs[2 + j * 2] + uscrs2, routput[2 + j * 2 ] + output2) for j, rel in enumerate(self.irels) ] if left_arc_conditions else [],
[ (rel, RIGHT_ARC, scrs[3 + j * 2] + uscrs3, routput[3 + j * 2 ] + output3) for j, rel in enumerate(self.irels) ] if right_arc_conditions else [],
[ (None, SHIFT, scrs[0] + uscrs0, routput[0] + output0) ] if shift_conditions else [] ,
[ (None, SWAP, scrs[1] + uscrs1, routput[1] + output1) ] if swap_conditions else [] ]
else:
s1,r1 = max(zip(scrs[2::2],self.irels))
s2,r2 = max(zip(scrs[3::2],self.irels))
s1 += uscrs2
s2 += uscrs3
ret = [ [ (r1, LEFT_ARC, s1) ] if left_arc_conditions else [],
[ (r2, RIGHT_ARC, s2) ] if right_arc_conditions else [],
[ (None, SHIFT, scrs[0] + uscrs0) ] if shift_conditions else [] ,
[ (None, SWAP, scrs[1] + uscrs1) ] if swap_conditions else [] ]
return ret
def Save(self, filename):
print('Saving model to ' + filename)
self.model.save(filename)
def Load(self, filename):
print('Loading model from ' + filename)
self.model.populate(filename)
def apply_transition(self,best,stack,buf,hoffset):
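        # Mutate the parser state for the chosen transition; for LEFT/RIGHT arcs
        # also record the predicted head and label and refresh the head's
        # composed feature vectors (rlMostFlag / rlFlag).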
if best[1] == SHIFT:
stack.roots.append(buf.roots[0])
del buf.roots[0]
elif best[1] == SWAP:
child = stack.roots.pop()
buf.roots.insert(1,child)
elif best[1] == LEFT_ARC:
child = stack.roots.pop()
parent = buf.roots[0]
elif best[1] == RIGHT_ARC:
child = stack.roots.pop()
parent = stack.roots[-1]
if best[1] == LEFT_ARC or best[1] == RIGHT_ARC:
#attach
child.pred_parent_id = parent.id
child.pred_relation = best[0]
#update head representation
if self.rlMostFlag:
#deepest leftmost/rightmost descendant
parent.lstms[best[1] + hoffset] = child.lstms[best[1] + hoffset]
if self.rlFlag:
#leftmost/rightmost child
parent.lstms[best[1] + hoffset] = child.vec
def calculate_cost(self,scores,s0,s1,b,beta,stack_ids):
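        # Dynamic-oracle costs: for each transition, count the gold arcs that
        # would become unreachable if it were taken (0 means the transition is
        # optimal); unavailable transitions get a cost of 1.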
if len(scores[LEFT_ARC]) == 0:
left_cost = 1
else:
left_cost = len(s0[0].rdeps) + int(s0[0].parent_id != b[0].id and s0[0].id in s0[0].parent_entry.rdeps)
if len(scores[RIGHT_ARC]) == 0:
right_cost = 1
else:
right_cost = len(s0[0].rdeps) + int(s0[0].parent_id != s1[0].id and s0[0].id in s0[0].parent_entry.rdeps)
if len(scores[SHIFT]) == 0:
shift_cost = 1
shift_case = 0
elif len([item for item in beta if item.projective_order < b[0].projective_order and item.id > b[0].id ])> 0:
shift_cost = 0
shift_case = 1
else:
shift_cost = len([d for d in b[0].rdeps if d in stack_ids]) + int(len(s0)>0 and b[0].parent_id in stack_ids[:-1] and b[0].id in b[0].parent_entry.rdeps)
shift_case = 2
if len(scores[SWAP]) == 0 :
swap_cost = 1
elif s0[0].projective_order > b[0].projective_order:
swap_cost = 0
#disable all the others
left_cost = right_cost = shift_cost = 1
else:
swap_cost = 1
costs = (left_cost, right_cost, shift_cost, swap_cost,1)
return costs,shift_case
def oracle_updates(self,best,b,s0,stack_ids,shift_case):
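        # Bookkeeping after choosing a transition: drop the gold dependencies it
        # makes unreachable so later cost computations stay consistent.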
if best[1] == SHIFT:
if shift_case ==2:
if b[0].parent_entry.id in stack_ids[:-1] and b[0].id in b[0].parent_entry.rdeps:
b[0].parent_entry.rdeps.remove(b[0].id)
blocked_deps = [d for d in b[0].rdeps if d in stack_ids]
for d in blocked_deps:
b[0].rdeps.remove(d)
elif best[1] == LEFT_ARC or best[1] == RIGHT_ARC:
s0[0].rdeps = []
if s0[0].id in s0[0].parent_entry.rdeps:
s0[0].parent_entry.rdeps.remove(s0[0].id)
def Predict(self, treebanks, datasplit, options):
reached_max_swap = 0
char_map = {}
if options.char_map_file:
char_map_fh = open(options.char_map_file,encoding='utf-8')
char_map = json.loads(char_map_fh.read())
# should probably use a namedtuple in get_vocab to make this prettier
_, test_words, test_chars, _, _, _, test_treebanks, test_langs = utils.get_vocab(treebanks,datasplit,char_map)
# get external embeddings for the set of words and chars in the
# test vocab but not in the training vocab
test_embeddings = defaultdict(lambda: {})
if options.word_emb_size > 0 and options.ext_word_emb_file:
new_test_words = \
set(test_words) - self.feature_extractor.words.keys()
print("Number of OOV word types at test time: %i (out of %i)" %
(len(new_test_words), len(test_words)))
if len(new_test_words) > 0:
# no point loading embeddings if there are no words to look for
for lang in test_langs:
embeddings = utils.get_external_embeddings(
options,
emb_file=options.ext_word_emb_file,
lang=lang,
words=new_test_words
)
test_embeddings["words"].update(embeddings)
if len(test_langs) > 1 and test_embeddings["words"]:
print("External embeddings found for %i words "\
"(out of %i)" % \
(len(test_embeddings["words"]), len(new_test_words)))
if options.char_emb_size > 0:
new_test_chars = \
set(test_chars) - self.feature_extractor.chars.keys()
print("Number of OOV char types at test time: %i (out of %i)" %
(len(new_test_chars), len(test_chars)))
if len(new_test_chars) > 0:
for lang in test_langs:
embeddings = utils.get_external_embeddings(
options,
emb_file=options.ext_char_emb_file,
lang=lang,
words=new_test_chars,
chars=True
)
test_embeddings["chars"].update(embeddings)
if len(test_langs) > 1 and test_embeddings["chars"]:
print("External embeddings found for %i chars "\
"(out of %i)" % \
(len(test_embeddings["chars"]), len(new_test_chars)))
data = utils.read_conll_dir(treebanks,datasplit,char_map=char_map)
for iSentence, osentence in enumerate(data,1):
sentence = deepcopy(osentence)
reached_swap_for_i_sentence = False
max_swap = 2*len(sentence)
iSwap = 0
self.feature_extractor.Init(options)
conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]
conll_sentence = conll_sentence[1:] + [conll_sentence[0]]
self.feature_extractor.getWordEmbeddings(conll_sentence, False, options, test_embeddings)
stack = ParseForest([])
buf = ParseForest(conll_sentence)
hoffset = 1 if self.headFlag else 0
for root in conll_sentence:
root.lstms = [root.vec] if self.headFlag else []
root.lstms += [root.vec for _ in range(self.nnvecs - hoffset)]
root.relation = root.relation if root.relation in self.irels else 'runk'
while not (len(buf) == 1 and len(stack) == 0):
scores = self.__evaluate(stack, buf, False)
best = max(chain(*(scores if iSwap < max_swap else scores[:3] )), key = itemgetter(2) )
if iSwap == max_swap and not reached_swap_for_i_sentence:
reached_max_swap += 1
reached_swap_for_i_sentence = True
print("reached max swap in %d out of %d sentences"%(reached_max_swap, iSentence))
self.apply_transition(best,stack,buf,hoffset)
if best[1] == SWAP:
iSwap += 1
dy.renew_cg()
#keep in memory the information we need, not all the vectors
oconll_sentence = [entry for entry in osentence if isinstance(entry, utils.ConllEntry)]
oconll_sentence = oconll_sentence[1:] + [oconll_sentence[0]]
for tok_o, tok in zip(oconll_sentence, conll_sentence):
tok_o.pred_relation = tok.pred_relation
tok_o.pred_parent_id = tok.pred_parent_id
yield osentence
def Train(self, trainData, options):
mloss = 0.0
eloss = 0.0
eerrors = 0
lerrors = 0
etotal = 0
ninf = -float('inf')
beg = time.time()
start = time.time()
random.shuffle(trainData) # in certain cases the data will already have been shuffled after being read from file or while creating dev data
print("Length of training data: ", len(trainData))
errs = []
self.feature_extractor.Init(options)
for iSentence, sentence in enumerate(trainData,1):
if iSentence % 100 == 0:
loss_message = 'Processing sentence number: %d'%iSentence + \
' Loss: %.3f'%(eloss / etotal)+ \
' Errors: %.3f'%((float(eerrors)) / etotal)+\
' Labeled Errors: %.3f'%(float(lerrors) / etotal)+\
' Time: %.2gs'%(time.time()-start)
print(loss_message)
start = time.time()
eerrors = 0
eloss = 0.0
etotal = 0
lerrors = 0
sentence = deepcopy(sentence) # ensures we are working with a clean copy of sentence and allows memory to be recycled each time round the loop
conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]
conll_sentence = conll_sentence[1:] + [conll_sentence[0]]
self.feature_extractor.getWordEmbeddings(conll_sentence, True, options)
stack = ParseForest([])
buf = ParseForest(conll_sentence)
hoffset = 1 if self.headFlag else 0
for root in conll_sentence:
root.lstms = [root.vec] if self.headFlag else []
root.lstms += [root.vec for _ in range(self.nnvecs - hoffset)]
root.relation = root.relation if root.relation in self.irels else 'runk'
while not (len(buf) == 1 and len(stack) == 0):
scores = self.__evaluate(stack, buf, True)
#to ensure that we have at least one wrong operation
scores.append([(None, 4, ninf ,None)])
stack_ids = [sitem.id for sitem in stack.roots]
s1 = [stack.roots[-2]] if len(stack) > 1 else []
s0 = [stack.roots[-1]] if len(stack) > 0 else []
b = [buf.roots[0]] if len(buf) > 0 else []
beta = buf.roots[1:] if len(buf) > 1 else []
costs, shift_case = self.calculate_cost(scores,s0,s1,b,beta,stack_ids)
bestValid = list(( s for s in chain(*scores) if costs[s[1]] == 0 and ( s[1] == SHIFT or s[1] == SWAP or s[0] == s0[0].relation ) ))
bestValid = max(bestValid, key=itemgetter(2))
bestWrong = max(( s for s in chain(*scores) if costs[s[1]] != 0 or ( s[1] != SHIFT and s[1] != SWAP and s[0] != s0[0].relation ) ), key=itemgetter(2))
#force swap
if costs[SWAP]== 0:
best = bestValid
else:
#select a transition to follow
# + aggressive exploration
#1: might want to experiment with that parameter
if bestWrong[1] == SWAP:
best = bestValid
else:
best = bestValid if ( (not self.oracle) or (bestValid[2] - bestWrong[2] > 1.0) or (bestValid[2] > bestWrong[2] and random.random() > 0.1) ) else bestWrong
if best[1] == LEFT_ARC or best[1] ==RIGHT_ARC:
child = s0[0]
#updates for the dynamic oracle
if self.oracle:
self.oracle_updates(best,b,s0,stack_ids,shift_case)
self.apply_transition(best,stack,buf,hoffset)
if bestValid[2] < bestWrong[2] + 1.0:
loss = bestWrong[3] - bestValid[3]
mloss += 1.0 + bestWrong[2] - bestValid[2]
eloss += 1.0 + bestWrong[2] - bestValid[2]
errs.append(loss)
#labeled errors
if best[1] == LEFT_ARC or best[1] ==RIGHT_ARC:
if (child.pred_parent_id != child.parent_id or child.pred_relation != child.relation):
lerrors += 1
#attachment error
if child.pred_parent_id != child.parent_id:
eerrors += 1
#??? when did this happen and why?
if best[1] == 0 or best[1] == 2:
etotal += 1
#footnote 8 in Eli's original paper
if len(errs) > 50: # or True:
eerrs = dy.esum(errs)
scalar_loss = eerrs.scalar_value() #forward
eerrs.backward()
self.trainer.update()
errs = []
lerrs = []
dy.renew_cg()
self.feature_extractor.Init(options)
if len(errs) > 0:
eerrs = (dy.esum(errs))
eerrs.scalar_value()
eerrs.backward()
self.trainer.update()
errs = []
lerrs = []
dy.renew_cg()
self.trainer.update()
print("Loss: ", mloss/iSentence)
print("Total Training Time: %.2gs" % (time.time()-beg))
| 41.546911 | 178 | 0.548909 |
f01e8ed041b397b41d4308164bae37880b106177 | 2,310 | py | Python | examples/map_reduce_cos_bucket.py | spitfiredd/pywren-ibm-cloud | 3bc0818ffc510b32770d695b16755c8980feaefd | [
"Apache-2.0"
] | null | null | null | examples/map_reduce_cos_bucket.py | spitfiredd/pywren-ibm-cloud | 3bc0818ffc510b32770d695b16755c8980feaefd | [
"Apache-2.0"
] | null | null | null | examples/map_reduce_cos_bucket.py | spitfiredd/pywren-ibm-cloud | 3bc0818ffc510b32770d695b16755c8980feaefd | [
"Apache-2.0"
] | null | null | null | """
Simple PyWren example using the map_reduce method which
runs a wordcount over all the objects inside the 'bucketname'
COS bucket.
This example processes some objects from COS. Be sure you have
a bucket with some data objects in your COS account. Then change
the value of the 'bucketname' variable to point to your bucket.
Since in this case you are processing objects from COS, the
map_reduce() method will first launch a partitioner to split
the objects into smaller chunks, thus increasing the parallelism
of the execution and reducing the total time needed to process
the data. After creating the partitions, it will launch one
map function for each partition. Finally, one reducer will be
launched for all the objects in the bucket, so in this case you
will get just one result from the reduce method.
Note that when you want to process objects stored in COS by
using a 'bucketname', the 'bucket', 'key' and 'data_stream'
parameters are mandatory in the signature of the map function.
In the reduce function there will always be one parameter
from which you can access the partial results.
"""
import pywren_ibm_cloud as pywren
bucketname = 'pw-sample-data'
def my_map_function(bucket, key, data_stream):
print('I am processing the object {}/{}'.format(bucket, key))
counter = {}
data = data_stream.read()
for line in data.splitlines():
for word in line.decode('utf-8').split():
if word not in counter:
counter[word] = 1
else:
counter[word] += 1
return counter
def my_reduce_function(results):
final_result = {}
for count in results:
for word in count:
if word not in final_result:
final_result[word] = count[word]
else:
final_result[word] += count[word]
return final_result
chunk_size = 4*1024**2 # 4MB
pw = pywren.ibm_cf_executor()
pw.map_reduce(my_map_function, bucketname, my_reduce_function, chunk_size=chunk_size)
print(pw.get_result())
"""
One reducer for each object in the bucket
"""
print()
print('Testing one reducer per object:')
pw = pywren.ibm_cf_executor()
pw.map_reduce(my_map_function, bucketname, my_reduce_function, chunk_size=chunk_size,
reducer_one_per_object=True)
print(pw.get_result())
| 31.216216 | 85 | 0.717749 |
82adad1bc50ed129e2d64425090595b8328d4c01 | 5,108 | py | Python | GetSylly.py | nicholascjones/get-sylly | b4410f5edcdad4ea7097b05075d94b61502df269 | [
"MIT"
] | null | null | null | GetSylly.py | nicholascjones/get-sylly | b4410f5edcdad4ea7097b05075d94b61502df269 | [
"MIT"
] | null | null | null | GetSylly.py | nicholascjones/get-sylly | b4410f5edcdad4ea7097b05075d94b61502df269 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
## GetSylly.py
## Nicholas Jones
## Course Project 2 -- Syllable Counter
#imports
import random
import string
#List Initialization for Syllable Counts
words = []
one = []
two = []
three = []
vowels = ['a','e','i','o','u']
vowels = set(vowels)
#functions
#reading words in from file
def ReadWords(inf='wordlist.10000'):
#ensures readable words
lst = open(inf,'r')
for n in lst:
n = n.rstrip() #sanitization
if len(n) > 3:
vc = 0
ii = 0
while ii < len(n):
if n[ii] in vowels:
words.append(n)
break
ii+=1
#count syllables in a given word
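# Added summary of the heuristics implemented below (comments only):
#   - runs of consecutive vowels are collapsed and counted at most once
#   - a final silent 'e' is not counted, but a word ending in "ed" counts it
#   - 'y' acts as a vowel when it follows a consonant and is word-final or
#     followed by another consonant (never at position 0)
#   - a consonant followed by "le" at (or one letter before) the end of the
#     word, as in "table"/"tables", adds one syllable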
def CountSyllables(word):
word = str(word) #ensuring type check
word = word.lower()
l = len(word) #length of word
vcount = 0 #initializes vowel count to be 0
i = 0 #iterator set to 0
while (i < l):
if (word[i] in vowels): #if is in vowels
if i + 1 == l:
if word[i] == 'e':
break
else:
vcount+=1
break
elif (i + 2 == l) and (word[i] == 'e') and (word[i+1] == 'd'):
vcount += 1
i+=1
continue
elif word[i+1] in vowels:
i+=1
continue
else:
vcount += 1
elif word[i] == 'y':
if i == 0:
i+=1
continue
elif word[i-1] not in vowels:
if i + 1 == l:
vcount+=1
elif word[i+1] not in vowels:
vcount+=1
else:
if word[i] not in vowels:
if (i + 3 == l) or (i + 4 == l):
if word[i+1] == 'l' and word[i+2] == 'e':
vcount+=1
break
i+=1
return vcount
#gets syllables for each word in list and assigns words to appropriate list
def GetSyllableCounts(inlist=words):
for w in inlist:
if isOneSyllable(w):
one.append(w)
elif isTwoSyllables(w):
two.append(w)
elif isThreeSyllables(w):
three.append(w)
# print w
# print syl
return
def MakeOneClause():
n = random.randint(0,len(one)-1)
phrase = one[n] + " "
return phrase
def MakeTwoClause():
rn = random.randint(0,2)
if rn == 0:
n = random.randint(0,len(two)-1)
phrase = two[n] + " "
else:
p1 = MakeOneClause()
p2 = MakeOneClause()
phrase = p1 + p2
return phrase
def MakeThreeClause():
rn = random.randint(0,2)
if rn == 0:
n = random.randint(0,len(three)-1)
phrase = three[n] + " "
elif rn == 1:
p1 = MakeTwoClause()
p2 = MakeOneClause()
phrase = p1 + p2
else:
p1 = MakeOneClause()
p2 = MakeTwoClause()
phrase = p1 + p2
return phrase
def MakeFiveClause():
rn = random.randint(0,2)
if rn == 0:
p1 = MakeThreeClause()
p2 = MakeTwoClause()
phrase = p1 + p2
elif rn == 1:
p1 = MakeTwoClause()
p2 = MakeThreeClause()
phrase = p1 + p2
else:
p1 = MakeOneClause()
p2 = MakeThreeClause()
p3 = MakeOneClause()
phrase = p1 + p2 + p3
return phrase
def MakeSevenClause():
rn = random.randint(0,17)
if rn < 5:
p1 = MakeOneClause()
p2 = MakeFiveClause()
p3 = MakeOneClause()
phrase = p1 + p2 + p3
elif rn < 10:
p1 = MakeTwoClause()
p2 = MakeFiveClause()
phrase = p1 + p2
elif rn < 15:
p1 = MakeFiveClause()
p2 = MakeTwoClause()
phrase = p1 + p2
elif rn == 15:
p2 = MakeOneClause()
p1 = MakeThreeClause()
p3 = MakeThreeClause()
phrase = p1 + p2 + p3
elif rn == 16:
p1 = MakeOneClause()
p2 = MakeThreeClause()
p3 = MakeThreeClause()
phrase = p1 + p2 + p3
else:
p3 = MakeOneClause()
p2 = MakeThreeClause()
p1 = MakeThreeClause()
phrase = p1 + p2 + p3
return phrase
def MakeHaiku():
phrase = ""
phrase += MakeFiveClause()
phrase += "\n"
phrase += MakeSevenClause()
phrase += "\n"
phrase += MakeFiveClause()
phrase += "\n"
return phrase
def isOneSyllable(s):
if CountSyllables(s) == 1:
return True
else:
return False
def isTwoSyllables(s):
if CountSyllables(s) == 2:
return True
else:
return False
def isThreeSyllables(s):
if CountSyllables(s) == 3:
return True
else:
return False
#de-facto main function initialization
#initialization at startup
ReadWords()
GetSyllableCounts()
del words[:] #deletes word list for space sake
#instructions
print "\n"
print "Hello, welcome to Nick's Syllable Counter and Random Haiku Generator!!"
print "\n"
print "Enter an 'h' to receive a Haiku!"
print "Enter a word to see how many syllables it has!"
print "Enter 1 to see if a word has one syllable"
print "Enter 2 to see if a word has two syllables"
print "Enter 3 to see if a word has three syllables"
print "Enter 'Q' to quit."
print "\n"
response = 'x' #generic response so as not to cause problems
while (response != 'Q'): #case switch kills it all
#prompt given with responses used to control flow
response = raw_input("What would you like to do?\n")
if response == 'Q':
break
elif response == 'h':
print MakeHaiku()
elif response == '1':
print isOneSyllable(raw_input("What is your word?\n"))
elif response == '2':
print isTwoSyllables(raw_input("What is your word?\n"))
elif response == '3':
print isThreeSyllables(raw_input("What is your word?\n"))
else:
try:
print CountSyllables(response)
print "\n"
except ValueError:
print "Invalid input. Please try again."
continue
print "\n" #newline easy to print
#end execution
| 17.613793 | 78 | 0.630579 |
22ee46dbb26f2d94100aa9f25ce8cc1cf30618fb | 55 | py | Python | social/backends/shopify.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 1,987 | 2015-01-01T16:12:45.000Z | 2022-03-29T14:24:25.000Z | social/backends/shopify.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 731 | 2015-01-01T22:55:25.000Z | 2022-03-10T15:07:51.000Z | virtual/lib/python3.6/site-packages/social/backends/shopify.py | dennismwaniki67/awards | 80ed10541f5f751aee5f8285ab1ad54cfecba95f | [
"MIT"
] | 1,082 | 2015-01-01T16:27:26.000Z | 2022-03-22T21:18:33.000Z | from social_core.backends.shopify import ShopifyOAuth2
| 27.5 | 54 | 0.890909 |
5f8e0f5f59cb336c89f2b8cf4047a222371be4f9 | 360 | py | Python | authlib/jose/rfc7515/__init__.py | YPCrumble/authlib | 782a0fced780849418dc2a869528d10387e24b65 | [
"BSD-3-Clause"
] | 3,172 | 2017-11-11T05:54:14.000Z | 2022-03-31T23:59:59.000Z | authlib/jose/rfc7515/__init__.py | YPCrumble/authlib | 782a0fced780849418dc2a869528d10387e24b65 | [
"BSD-3-Clause"
] | 397 | 2017-11-11T02:49:06.000Z | 2022-03-31T21:02:37.000Z | authlib/jose/rfc7515/__init__.py | YPCrumble/authlib | 782a0fced780849418dc2a869528d10387e24b65 | [
"BSD-3-Clause"
] | 387 | 2017-11-18T08:59:56.000Z | 2022-03-15T18:37:37.000Z | """
authlib.jose.rfc7515
~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of
JSON Web Signature (JWS).
https://tools.ietf.org/html/rfc7515
"""
from .jws import JsonWebSignature
from .models import JWSAlgorithm, JWSHeader, JWSObject
__all__ = [
'JsonWebSignature',
'JWSAlgorithm', 'JWSHeader', 'JWSObject'
]
| 18.947368 | 54 | 0.655556 |
f59acd47ace51326ba84de036021763db2c8b4c9 | 3,710 | bzl | Python | tools/build_defs/repo/utils.bzl | sevki/bazel | b18915752a69fbbc6ed94e1710198167593565fc | [
"Apache-2.0"
] | 5 | 2019-01-10T09:41:11.000Z | 2020-07-15T12:02:22.000Z | tools/build_defs/repo/utils.bzl | sevki/bazel | b18915752a69fbbc6ed94e1710198167593565fc | [
"Apache-2.0"
] | 1 | 2020-01-19T03:55:41.000Z | 2020-01-19T03:55:41.000Z | tools/build_defs/repo/utils.bzl | sevki/bazel | b18915752a69fbbc6ed94e1710198167593565fc | [
"Apache-2.0"
] | 3 | 2019-05-05T01:52:36.000Z | 2020-11-04T03:16:14.000Z | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for manipulating external repositories, once fetched.
### Setup
These utilities are intended to be used by other repository rules. They
can be loaded as follows.
```python
load(
"@bazel_tools//tools/build_defs/repo:utils.bzl",
"workspace_and_buildfile",
)
```
"""
def workspace_and_buildfile(ctx):
"""Utility function for writing WORKSPACE and, if requested, a BUILD file.
It assumes the parameters name, build_file, build_file_contents,
workspace_file, and workspace_file_content to be
present in ctx.attr, the latter four possibly with value None.
Args:
ctx: The repository context of the repository rule calling this utility
function.
"""
if ctx.attr.build_file and ctx.attr.build_file_content:
ctx.fail("Only one of build_file and build_file_content can be provided.")
if ctx.attr.workspace_file and ctx.attr.workspace_file_content:
ctx.fail("Only one of workspace_file and workspace_file_content can be provided.")
if ctx.attr.workspace_file:
bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
ctx.execute([bash_exe, "-c", "rm -f WORKSPACE"])
ctx.symlink(ctx.attr.workspace_file, "WORKSPACE")
elif ctx.attr.workspace_file_content:
bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
ctx.execute([bash_exe, "-c", "rm -f WORKSPACE"])
ctx.file("WORKSPACE", ctx.attr.workspace_file_content)
else:
ctx.file("WORKSPACE", "workspace(name = \"{name}\")\n".format(name = ctx.name))
if ctx.attr.build_file:
bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
ctx.execute([bash_exe, "-c", "rm -f BUILD BUILD.bazel"])
ctx.symlink(ctx.attr.build_file, "BUILD.bazel")
elif ctx.attr.build_file_content:
bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
ctx.execute([bash_exe, "-c", "rm -f BUILD.bazel"])
ctx.file("BUILD.bazel", ctx.attr.build_file_content)
def patch(ctx):
"""Implementation of patching an already extracted repository"""
bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
if len(ctx.attr.patches) > 0 or len(ctx.attr.patch_cmds) > 0:
ctx.report_progress("Patching repository")
for patchfile in ctx.attr.patches:
command = "{patchtool} {patch_args} < {patchfile}".format(
patchtool = ctx.attr.patch_tool,
patchfile = ctx.path(patchfile),
patch_args = " ".join([
"'%s'" % arg
for arg in ctx.attr.patch_args
]),
)
st = ctx.execute([bash_exe, "-c", command])
if st.return_code:
fail("Error applying patch %s:\n%s%s" %
(str(patchfile), st.stderr, st.stdout))
for cmd in ctx.attr.patch_cmds:
st = ctx.execute([bash_exe, "-c", cmd])
if st.return_code:
fail("Error applying patch command %s:\n%s%s" %
(cmd, st.stdout, st.stderr))
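# --- Added illustrative sketch (not part of the original file) ---
# A hypothetical repository rule wiring the helpers above together, loosely
# modeled on the docstrings; the rule name, attribute set and defaults here
# are assumptions, not part of Bazel itself.
def _example_archive_impl(ctx):
    ctx.download_and_extract(
        url = ctx.attr.urls,
        sha256 = ctx.attr.sha256,
        stripPrefix = ctx.attr.strip_prefix,
    )
    workspace_and_buildfile(ctx)
    patch(ctx)

example_archive = repository_rule(
    implementation = _example_archive_impl,
    attrs = {
        "urls": attr.string_list(mandatory = True),
        "sha256": attr.string(default = ""),
        "strip_prefix": attr.string(default = ""),
        "build_file": attr.label(),
        "build_file_content": attr.string(),
        "workspace_file": attr.label(),
        "workspace_file_content": attr.string(),
        "patches": attr.label_list(default = []),
        "patch_tool": attr.string(default = "patch"),
        "patch_args": attr.string_list(default = ["-p0"]),
        "patch_cmds": attr.string_list(default = []),
    },
)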
| 41.685393 | 90 | 0.666846 |
7a5fe50932cdaed27ee4d95536d004d964f5ca1c | 12,827 | py | Python | autotest/test_gwt_moc3d01.py | mwtoews/modflow6 | 3426f524fba90b8b6186d09272226a941b97cef7 | [
"CC0-1.0"
] | null | null | null | autotest/test_gwt_moc3d01.py | mwtoews/modflow6 | 3426f524fba90b8b6186d09272226a941b97cef7 | [
"CC0-1.0"
] | null | null | null | autotest/test_gwt_moc3d01.py | mwtoews/modflow6 | 3426f524fba90b8b6186d09272226a941b97cef7 | [
"CC0-1.0"
] | 1 | 2020-09-01T18:43:19.000Z | 2020-09-01T18:43:19.000Z | import os
import sys
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = [
"moc3d01a",
"moc3d01b",
"moc3d01c",
"moc3d01d",
"moc3d01e",
"moc3d01f",
"moc3d01g",
"moc3d01h",
]
diffc = [0.0, 0.01, 0.0, 0.1, 0.0, 0.0, 0.0, 0]
alphal = [0.1, 0.0, 1.0, 0.0, 0.1, 0.1, 0.1, 0.1]
retardation = [None, None, None, None, 40.0, 4.0, 2.0, None]
perlens = 4 * [120.0] + 3 * [240.0] + [120.0]
decay = 7 * [None] + [0.01]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
ddir = "data"
def get_model(idx, dir):
nlay, nrow, ncol = 1, 122, 1
nper = 1
perlen = perlens[idx] # [120.]
perlen = [perlen]
nstp = [240]
tsmult = [1.0]
steady = [True]
delr = 0.1
delc = 0.1
top = 1.0
botm = [0.0]
strt = 1.0
hnoflo = 1e30
hdry = -1e30
hk = 0.01
laytyp = 0
# ss = 0.
# sy = 0.1
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-8, 1e-6, 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwfname = "gwf_" + name
gwf = flopy.mf6.MFModel(
sim,
model_type="gwf6",
modelname=gwfname,
model_nam_file="{}.nam".format(gwfname),
)
# create iterative model solution and register the gwf model with it
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="CG",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
sim.register_ims_package(imsgwf, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=np.ones((nlay, nrow, ncol), dtype=int),
filename="{}.dis".format(gwfname),
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(
gwf, strt=strt, filename="{}.ic".format(gwfname)
)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf,
save_flows=False,
save_specific_discharge=True,
icelltype=laytyp,
k=hk,
k33=hk,
)
# storage
# sto = flopy.mf6.ModflowGwfsto(gwf, save_flows=False,
# iconvert=laytyp[idx],
# ss=ss[idx], sy=sy[idx],
# steady_state={0: True, 2: True},
# transient={1: True})
# chd files
c = {0: [[(0, 121, 0), 0.0000000]]}
chd = flopy.mf6.ModflowGwfchd(
gwf, stress_period_data=c, save_flows=False, pname="CHD-1"
)
# wel files
w = {0: [[(0, 0, 0), 0.001, 1.0]]}
wel = flopy.mf6.ModflowGwfwel(
gwf,
print_input=True,
print_flows=True,
stress_period_data=w,
save_flows=False,
auxiliary="CONCENTRATION",
pname="WEL-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
)
# create gwt model
gwtname = "gwt_" + name
gwt = flopy.mf6.MFModel(
sim,
model_type="gwt6",
modelname=gwtname,
model_nam_file="{}.nam".format(gwtname),
)
# create iterative model solution and register the gwt model with it
imsgwt = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwtname),
)
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(
gwt,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=1,
filename="{}.dis".format(gwtname),
)
# initial conditions
strt = np.zeros((nlay, nrow, ncol))
strt[0, 0, 0] = 0.0
ic = flopy.mf6.ModflowGwtic(
gwt, strt=strt, filename="{}.ic".format(gwtname)
)
# advection
adv = flopy.mf6.ModflowGwtadv(
gwt, scheme="tvd", filename="{}.adv".format(gwtname)
)
# dispersion
dsp = flopy.mf6.ModflowGwtdsp(
gwt,
diffc=diffc[idx],
alh=alphal[idx],
alv=alphal[idx],
ath1=0.0,
atv=0.0,
filename="{}.dsp".format(gwtname),
)
# constant concentration
# cncs = {0: [[(0, 0, 0), 1.0]]}
# cnc = flopy.mf6.ModflowGwtcnc(gwt, maxbound=len(cncs),
# stress_period_data=cncs,
# save_flows=False,
# pname='CNC-1')
# storage
porosity = 0.1
rtd = retardation[idx]
sorption = None
kd = None
if rtd is not None:
rhob = 1.0
kd = (rtd - 1.0) * porosity / rhob
decay_rate = decay[idx]
first_order_decay = False
if decay_rate is not None:
first_order_decay = True
# mass storage and transfer
mst = flopy.mf6.ModflowGwtmst(
gwt,
porosity=porosity,
first_order_decay=first_order_decay,
decay=decay_rate,
decay_sorbed=decay_rate,
sorption=sorption,
distcoef=kd,
)
# sources
sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")]
ssm = flopy.mf6.ModflowGwtssm(
gwt, sources=sourcerecarray, filename="{}.ssm".format(gwtname)
)
# output control
oc = flopy.mf6.ModflowGwtoc(
gwt,
budget_filerecord="{}.cbc".format(gwtname),
concentration_filerecord="{}.ucn".format(gwtname),
concentrationprintrecord=[
("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
],
saverecord=[("CONCENTRATION", "ALL")],
printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
)
# GWF GWT exchange
gwfgwt = flopy.mf6.ModflowGwfgwt(
sim,
exgtype="GWF6-GWT6",
exgmnamea=gwfname,
exgmnameb=gwtname,
filename="{}.gwfgwt".format(name),
)
return sim
def eval_transport(sim):
print("evaluating transport...")
name = ex[sim.idxsim]
gwtname = "gwt_" + name
fpth = os.path.join(sim.simpath, "{}.ucn".format(gwtname))
try:
cobj = flopy.utils.HeadFile(
fpth, precision="double", text="CONCENTRATION"
)
station = [(0, 0, 0), (0, 40, 0), (0, 110, 0)]
tssim = cobj.get_ts(station)[::10]
except:
assert False, 'could not load data from "{}"'.format(fpth)
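# Added comment: the hard-coded arrays below are reference time series; each
# row is [time, conc at cell (0,0,0), conc at cell (0,40,0), conc at cell
# (0,110,0)]. `tsresab` is compared against scenarios moc3d01a/b and
# `tsrescd` against moc3d01c/d via `tsreslist` further down.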
tsresab = [
[5.00000000e-01, 2.83603277e-01, 1.98913375e-16, 4.55149741e-41],
[5.50000000e00, 9.17951825e-01, 2.69455937e-10, -2.07960785e-31],
[1.05000000e01, 9.84228070e-01, 1.28040427e-06, 2.48438252e-26],
[1.55000000e01, 9.96457822e-01, 1.90868536e-04, 3.41305818e-22],
[2.05000000e01, 9.99136905e-01, 4.44854016e-03, 4.50363732e-19],
[2.55000000e01, 9.99778638e-01, 3.37898628e-02, 1.68992365e-16],
[3.05000000e01, 9.99941176e-01, 1.25336227e-01, 6.43299718e-14],
[3.55000000e01, 9.99983954e-01, 2.90158229e-01, 1.10351780e-11],
[4.05000000e01, 9.99995534e-01, 4.91078611e-01, 8.14269315e-10],
[4.55000000e01, 9.99998737e-01, 6.75816022e-01, 3.00545939e-08],
[5.05000000e01, 9.99999636e-01, 8.13798122e-01, 6.22095988e-07],
[5.55000000e01, 9.99999894e-01, 9.01968239e-01, 7.99960358e-06],
[6.05000000e01, 9.99999969e-01, 9.51974734e-01, 6.86618749e-05],
[6.55000000e01, 9.99999991e-01, 9.77827407e-01, 4.18059967e-04],
[7.05000000e01, 9.99999997e-01, 9.90251754e-01, 1.89993945e-03],
[7.55000000e01, 9.99999999e-01, 9.95884247e-01, 6.72520577e-03],
[8.05000000e01, 1.00000000e00, 9.98319951e-01, 1.92175257e-02],
[8.55000000e01, 1.00000000e00, 9.99333379e-01, 4.57014056e-02],
[9.05000000e01, 1.00000000e00, 9.99741747e-01, 9.28423497e-02],
[9.55000000e01, 1.00000000e00, 9.99901965e-01, 1.64807939e-01],
[1.00500000e02, 1.00000000e00, 9.99963427e-01, 2.60760778e-01],
[1.05500000e02, 1.00000000e00, 9.99986559e-01, 3.74256690e-01],
[1.10500000e02, 1.00000000e00, 9.99995124e-01, 4.94966053e-01],
[1.15500000e02, 1.00000000e00, 9.99998251e-01, 6.11754824e-01],
]
tsrescd = [
[5.00000000e-01, 1.62245199e-01, 1.99581015e-08, 1.96866573e-20],
[5.50000000e00, 5.69190185e-01, 7.72268573e-04, 1.32726540e-12],
[1.05000000e01, 7.09416269e-01, 1.50275044e-02, 4.42507587e-09],
[1.55000000e01, 7.89741082e-01, 6.02353184e-02, 5.75599816e-07],
[2.05000000e01, 8.42144507e-01, 1.32658034e-01, 1.44051847e-05],
[2.55000000e01, 8.78656387e-01, 2.19297521e-01, 1.38208708e-04],
[3.05000000e01, 9.05151032e-01, 3.09311460e-01, 7.25613436e-04],
[3.55000000e01, 9.24920151e-01, 3.96026115e-01, 2.55150051e-03],
[4.05000000e01, 9.39976345e-01, 4.75987512e-01, 6.79179030e-03],
[4.55000000e01, 9.51625144e-01, 5.47771861e-01, 1.47963272e-02],
[5.05000000e01, 9.60751243e-01, 6.11121931e-01, 2.77842830e-02],
[5.55000000e01, 9.67974420e-01, 6.66402923e-01, 4.65998752e-02],
[6.05000000e01, 9.73740367e-01, 7.14279555e-01, 7.15925045e-02],
[6.55000000e01, 9.78376414e-01, 7.55531394e-01, 1.02616451e-01],
[7.05000000e01, 9.82127171e-01, 7.90951085e-01, 1.39111803e-01],
[7.55000000e01, 9.85178098e-01, 8.21291242e-01, 1.80223847e-01],
[8.05000000e01, 9.87671554e-01, 8.47239438e-01, 2.24927782e-01],
[8.55000000e01, 9.89717967e-01, 8.69409099e-01, 2.72138588e-01],
[9.05000000e01, 9.91403786e-01, 8.88339150e-01, 3.20796786e-01],
[9.55000000e01, 9.92797229e-01, 9.04498187e-01, 3.69928359e-01],
[1.00500000e02, 9.93952511e-01, 9.18290783e-01, 4.18681292e-01],
[1.05500000e02, 9.94912987e-01, 9.30064513e-01, 4.66343157e-01],
[1.10500000e02, 9.95713522e-01, 9.40116964e-01, 5.12344545e-01],
[1.15500000e02, 9.96382294e-01, 9.48702318e-01, 5.56252766e-01],
]
tsresab = np.array(tsresab)
tsrescd = np.array(tsrescd)
tsreslist = [tsresab, tsresab, tsrescd, tsrescd, None, None, None, None]
tsres = tsreslist[sim.idxsim]
if tsres is not None:
assert np.allclose(
tsres, tssim
), "simulated concentrations do not match with known solution."
return
# - No need to change any code below
def build_models():
for idx, dir in enumerate(exdirs):
sim = get_model(idx, dir)
sim.write_simulation()
return
def test_mf6model():
# initialize testing framework
test = testing_framework()
# build the models
build_models()
# run the test models
for idx, dir in enumerate(exdirs):
yield test.run_mf6, Simulation(dir, exfunc=eval_transport, idxsim=idx)
return
def main():
# initialize testing framework
test = testing_framework()
# build the models
build_models()
# run the test models
for idx, dir in enumerate(exdirs):
sim = Simulation(dir, exfunc=eval_transport, idxsim=idx)
test.run_mf6(sim)
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 30.252358 | 79 | 0.590473 |
79553202bd943a3745bcce095b9a9a6b3e58c3a1 | 4,307 | py | Python | word_knn/nlpl_retriever.py | rom1504/word-knn | 56cb113a8f843eaafec6200ec5ed6e88876edf12 | [
"MIT"
] | 4 | 2019-08-26T11:52:23.000Z | 2020-08-10T17:52:40.000Z | word_knn/nlpl_retriever.py | rom1504/word-knn | 56cb113a8f843eaafec6200ec5ed6e88876edf12 | [
"MIT"
] | 5 | 2019-08-26T00:20:36.000Z | 2020-08-10T12:37:51.000Z | word_knn/nlpl_retriever.py | rom1504/word-knn | 56cb113a8f843eaafec6200ec5ed6e88876edf12 | [
"MIT"
] | 1 | 2020-10-08T20:56:14.000Z | 2020-10-08T20:56:14.000Z | import numpy as np
import os.path
import os
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
from word_knn.closest_words import inverse_dict
from word_knn.closest_words import ClosestWords
from word_knn.closest_words import build_knn_index
from pathlib import Path
home = str(Path.home())
def csv_to_embeddings_and_dict(input_file):
d = dict()
def read_func(iter):
next(iter) # skip first row
for i, line in enumerate(iter):
if isinstance(line, str):
stripped_line = line.rstrip()
else:
stripped_line = line.decode("utf-8", "ignore").rstrip()
line = stripped_line.split(" ")
word = line[0].split("_")[0]
d[i] = word.replace("::", " ")
line.pop(0)
for item in line:
yield float(item)
csv_to_embeddings_and_dict.rowlength = len(line)
def iter_func():
csv_to_embeddings_and_dict.rowlength = 0
if isinstance(input_file, str):
with open(input_file, "r") as infile:
yield from read_func(infile)
else:
yield from read_func(input_file)
data = np.fromiter(iter_func(), dtype=float)
embeddings = data.reshape((-1, csv_to_embeddings_and_dict.rowlength)).astype(np.float32)
inv_d = inverse_dict(d)
return embeddings, d, inv_d
def csv_to_dict(input_file):
d = dict()
def read(iter):
next(iter) # skip first row
for i, line in enumerate(iter):
line = line.rstrip().split("_")
d[i] = line[0].replace("::", " ")
if isinstance(input_file, str):
with open(input_file, "r") as infile:
read(infile)
else:
read(input_file)
inv_d = inverse_dict(d)
return d, inv_d
def sizeof_fmt(num, suffix="B"):
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def from_csv(input_file, keep_embeddings=True):
embeddings, word_dict, inverse_word_dict = csv_to_embeddings_and_dict(input_file)
knn_index = build_knn_index(embeddings)
if not keep_embeddings:
embeddings = None
return ClosestWords(embeddings, inverse_word_dict, word_dict, knn_index)
def from_csv_or_cache(word_embedding_dir, input_file=None, keep_embeddings=False):
if input_file is None:
input_file = word_embedding_dir + "/model.txt"
if os.path.exists(word_embedding_dir + "/word_dict.pkl"):
return ClosestWords.from_disk_cache(word_embedding_dir)
closest_words = from_csv(input_file, True)
closest_words.cache_to_disk(word_embedding_dir)
if not keep_embeddings:
del closest_words.embeddings
return closest_words
def from_nlpl(root_word_embedding_dir=home + "/embeddings", embedding_id="0", save_zip=False, keep_embeddings=False):
word_embedding_dir = root_word_embedding_dir + "/" + embedding_id
if not os.path.exists(word_embedding_dir):
os.makedirs(word_embedding_dir)
if os.path.exists(word_embedding_dir + "/word_dict.pkl"):
return ClosestWords.from_disk_cache(word_embedding_dir, keep_embeddings)
zip_file_path = word_embedding_dir + "/model.zip"
if not os.path.exists(word_embedding_dir + "/model.txt"):
if os.path.exists(zip_file_path):
zipfile = ZipFile(zip_file_path, "r")
else:
url = "http://vectors.nlpl.eu/repository/11/" + embedding_id + ".zip"
resp = urlopen(url)
length = resp.getheader("content-length")
print("Downloading " + url + " (" + sizeof_fmt(int(length)) + ")")
content = resp.read()
del resp
if save_zip:
file = open(word_embedding_dir + "/model.zip", "wb")
file.write(content)
file.close()
the_bytes = BytesIO(content)
zipfile = ZipFile(the_bytes)
del content
del the_bytes
zipfile.extract("model.txt", word_embedding_dir)
zipfile.close()
return from_csv_or_cache(word_embedding_dir, open(word_embedding_dir + "/model.txt", "rb"), keep_embeddings)
| 33.913386 | 117 | 0.634781 |
edb1aa04d75571b775e3705cbb84347e7d3bb6ef | 3,276 | py | Python | packages/syft/src/syft/core/node/common/permissions/permissions.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/common/permissions/permissions.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/common/permissions/permissions.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | # future
from __future__ import annotations
# stdlib
from typing import Any
from typing import Optional
from typing import Type
# third party
from nacl.signing import VerifyKey
# relative
from ...abstract.node_service_interface import NodeServiceInterface
from ..node_service.generic_payload.syft_message import NewSyftMessage as SyftMessage
class BinaryOperation:
"""Executes an operation between two operands."""
def __init__(self, op1: Any, op2: Any, operator: Any) -> None:
self.op1 = op1
self.op2 = op2
self.operator = operator
def __call__(self, *args: Any, **kwargs: Any) -> Any:
op1 = self.op1(*args, **kwargs)
op2 = self.op2(*args, **kwargs)
return self.operator(op1, op2)
class UnaryOperation:
"""Executes an operation on a single operand."""
def __init__(self, op1: Any, operator: Any) -> None:
self.op1 = op1
self.operator = operator
def __call__(self, *args: Any, **kwargs: Any) -> Any:
op1 = self.op1(*args, **kwargs)
return self.operator(op1)
class AND:
"""Implements the `AND` functionality on a set of permission classes."""
def __init__(self, op1: Type[BasePermission], op2: Type[BasePermission]):
self.op1 = op1
self.op2 = op2
def has_permission(self, *args: Any, **kwargs: Any) -> bool:
return self.op1.has_permission(*args, **kwargs) and self.op2.has_permission(
*args, **kwargs
)
class OR:
"""Implements the `OR` functionality on a set of permission classes."""
def __init__(self, op1: Type[BasePermission], op2: Type[BasePermission]):
self.op1 = op1
self.op2 = op2
def has_permission(self, *args: Any, **kwargs: Any) -> bool:
return self.op1.has_permission(*args, **kwargs) or self.op2.has_permission(
*args, **kwargs
)
class NOT:
"""Implements the `NOT` functionality on a permission class."""
def __init__(self, op1: Type[BasePermission]):
self.op1 = op1
def has_permission(self, *args: Any, **kwargs: Any) -> bool:
return not self.op1.has_permission(*args, **kwargs)
class BasePermissionMetaclass(type):
"""A metaclass to allow composition between different permission classes."""
def __and__(self, other: Type[BasePermission]) -> BinaryOperation:
return BinaryOperation(self, other, AND)
def __or__(self, other: Type[BasePermission]) -> BinaryOperation: # type: ignore
return BinaryOperation(self, other, OR)
def __rand__(self, other: Type[BasePermission]) -> BinaryOperation:
return BinaryOperation(other, self, AND)
def __ror__(self, other: Type[BasePermission]) -> BinaryOperation: # type: ignore
return BinaryOperation(other, self, OR)
def __invert__(self) -> UnaryOperation:
return UnaryOperation(self, NOT)
class BasePermission(metaclass=BasePermissionMetaclass):
"""A base class from which all permission classes should inherit."""
def has_permission(
self,
msg: SyftMessage,
node: NodeServiceInterface,
verify_key: Optional[VerifyKey],
) -> bool:
"""Return `True` if permission is granted, `False` otherwise."""
raise NotImplementedError
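# --- Added illustrative sketch (not part of the original module) ---
# Hypothetical permission classes showing how the metaclass operators compose;
# the msg/node/verify_key arguments are ignored here purely for brevity, and
# the block is guarded so importing the module is unaffected.
if __name__ == "__main__":
    class AlwaysAllow(BasePermission):
        def has_permission(self, msg, node, verify_key) -> bool:
            return True

    class AlwaysDeny(BasePermission):
        def has_permission(self, msg, node, verify_key) -> bool:
            return False

    # `&`, `|` and `~` on the classes build BinaryOperation/UnaryOperation
    # wrappers; calling the result instantiates the operands and wraps them in
    # AND/OR/NOT, whose has_permission() combines the individual checks.
    combined = (AlwaysAllow & ~AlwaysDeny)()
    assert combined.has_permission(msg=None, node=None, verify_key=None)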
| 30.055046 | 86 | 0.663614 |
96d2dd5abb670e68d0d1f2d3b073503feba2c0b6 | 12,676 | py | Python | stubs/stubs.py | zestyping/stubs | 4a5d2983361184537ffe607d10db4c6afd660016 | [
"Apache-2.0"
] | 1 | 2016-10-31T23:26:00.000Z | 2016-10-31T23:26:00.000Z | stubs/stubs.py | zestyping/stubs | 4a5d2983361184537ffe607d10db4c6afd660016 | [
"Apache-2.0"
] | null | null | null | stubs/stubs.py | zestyping/stubs | 4a5d2983361184537ffe607d10db4c6afd660016 | [
"Apache-2.0"
] | null | null | null | """Handy tools for setting up stubs and mocks.
This module exports two special objects, `stub` and `expect`, which can be
used to stub out functions or methods. To replace a function with a stub,
start a context block using `with`, then `stub` or `expect`, then the function
wrapped in square brackets. By default, the stub returns None. Upon exiting
the context block, the original function is restored. It looks like this:
def f(x): return x * 2
with stub[f]:
print(f(3)) # prints None
print f(3)) # prints 6
To make the stub return a constant, write `>>` and the return value:
with stub[f] >> 5:
print(f(3)) # prints 5
print(f(3)) # prints 6
The function object is replaced in the namespace where it was defined. If
the function has been imported into a different namespace, that namespace
won't be affected; you can specify the namespace by providing a string path
to the function instead of the function itself:
# --- a.py ---
def f(x): return x * 2
# --- b.py ---
from a import f
# --- c.py ---
import a
import b
with stub[b.f] >> 5:
print(a.f(3)) # stub, prints 5
print(b.f(3)) # unaffected, prints 6
with stub['b.f'] >> 5:
print(a.f(3)) # unaffected, prints 6
print(b.f(3)) # stub, prints 5
You can also stub out a method on a class or an instance:
class Foo:
def f(self, x):
return x * 3
foo = Foo()
with stub[foo.f] >> 8:
print(foo.f(3)) # stub, prints 8
bar = Foo()
print(bar.f(3)) # original, prints 9
print(foo.f(3)) # original, prints 9
with stub[Foo.f] >> 8:
print(foo.f(3)) # original, prints 9
bar = Foo()
print(bar.f(3)) # stub, prints 8
print(foo.f(3)) # original, prints 9
In all the examples above, the stub is invoked regardless of any arguments or
keyword arguments. You can specify expected arguments after the function:
with stub[f](3) >> 5:
with stub[f](4) >> 1:
print(f(4)) # prints 1
print(f(4)) # prints 1
print(f(3)) # prints 5
print(f(1)) # AssertionError: Unexpected call
Matchers (see matchers.py) are accepted in the argument specification, giving
you lots of expressive power. The matchers are tested in order starting from
the innermost context and proceeding outward; always specify the most specific
matchers on the inside:
with stub[f] >> 0:
with stub[f](_ > 3) >> 7:
with stub[f](1, ..., foo=[5, 10 < _ < 20]) >> -1:
print(f(1)) # prints 0
print(f(2)) # prints 0
print(f(3)) # prints 0
print(f(4)) # prints 7
print(f(1, 2, 3, foo=[5, 13])) # prints -1
To invoke another function instead of returning a constant, use the `|`
operator instead (mnemonic: the stub pipes its arguments to the new function):
def g(x): return x + 1
with stub[f] | g:
print(f(3)) # prints 4
print(f(3)) # prints 6
To raise an exception instead of returning anything, use the `^` operator
(mnemonic: `^` points up to raise):
with stub[f] | f:
with stub[f](0) ^ ValueError('Zero is not allowed!'):
print(f(2)) # prints 4
print(f(1)) # prints 2
print(f(0)) # ValueError: Zero is not allowed!
When you set up a stub with `stub`, it doesn't care whether the function is
called at all, or called many times. The stub will just replace the function;
it will only complain if called with arguments for which no behaviour has been
specified. In the last example above, the outermost context `with stub[f] | f`
causes the stub to fall back to calling the original `f` for any arguments
other than 0.
`expect` can be used in all the same ways as `stub` above, but unlike `stub`,
it requires exactly one call with arguments matching the specified pattern
to occur within the `expect` context:
with stub[f](5) >> 5:
with expect[f](6) >> 5:
print(f(5)) # prints 5
# AssertionError: Missing expected call to f(6)
When multiple `expect` contexts are present, the calls are expected to occur
in exactly the specified order:
with expect[f](1) >> 1, expect[f](2) >> 2, expect[f](3) >> 3:
print(f(1)) # prints 1
print(f(3)) # AssertionError: Unexpected call to f(3); expecting f(2)
When you use `stub` or `expect` with `|` to replace a method on a class, you
should provide a function that takes the same arguments as in the original
method definition (i.e. with an initial `self` argument for an instance method;
an initial `cls` argument for a class method, or no initial `self` or `cls`
argument for a static method). However, do not annotate your function with
`@staticmethod` or `@classmethod`; it will be automatically converted to the
same kind of method as the method being replaced:
class Foo:
def f(self, x):
return x * 3
@staticmethod
def g(x):
return x + 2
foo = Foo()
with stub[Foo.f] | (lambda self, x: x * 4): # takes a self argument, like f
print(foo.f(2)) # stub, prints 8
print(foo.f(2)) # original, prints 6
with stub[Foo.g] | (lambda x: x + 3): # takes no self argument, like g
print(foo.g(2)) # stub, prints 5
print(foo.g(2)) # original, prints 4
"""
import inspect
from .matchers import match
import sys
import types
__all__ = ['expect', 'stub']
class StubWrapper:
"""Implements the `stub` object, which when indexed with a function
like `stub[foo]`, sets up the function to be replaced with a stub."""
def __getitem__(self, target):
return Invocation(get_pointer(target), False, None, ..., _=...)
class ExpectWrapper:
"""Implements the `expect` object, which when indexed with a function
like `expect[foo]`, sets up the function to be replaced with a stub
that expects exactly one call matching a specified pattern."""
def __getitem__(self, target):
return Invocation(get_pointer(target), True, None, ..., _=...)
def get_pointer(target):
if isinstance(target, str):
parts = target.split('.')
assert len(parts) >= 2
namespace = sys.modules[parts[0]]
for part in parts[1:-1]:
namespace = getattr(namespace, part)
return Pointer(namespace, parts[-1])
if isinstance(target, Stub):
return target.pointer
if inspect.ismethod(target):
namespace = target.__self__
name = target.__name__
elif hasattr(target, '__module__'):
namespace = sys.modules[target.__module__]
for part in target.__qualname__.split('.')[:-1]:
namespace = getattr(namespace, part)
name = target.__name__
assert getattr(namespace, name) is target
else:
raise TypeError('%r cannot be stubbed out' % target)
return Pointer(namespace, name)
class Pointer:
"""A pointer to a function or method to be stubbed out in a namespace.
This is designed to work either on normal read-write namespaces (like
modules and instances) or on namespaces with read-only __dict__s (like
classes), where the values are not data descriptors (i.e. they may or
may not have __get__, but most not have __set__); hence the deliberate
the deliberate asymmetry between Pointer.get and Pointer.setattr.
Functions are never data descriptors, so this works fine for functions,
instance methods, static methods, and class methods."""
# A sentinel value that indicates when an attribute is missing.
MISSING = object()
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
namespace_repr = getattr(self.namespace, __name__, repr(self.namespace))
return '%s.%s' % (namespace_repr, self.name)
def get(self):
# We access the __dict__ to get the underlying value. This lets us
# detect whether a method in a class is an instance method, a static
# method, or a class method.
return (self.namespace.__dict__[self.name]
if self.name in self.namespace.__dict__
else Pointer.MISSING)
def setattr(self, value):
# We're assuming the namespace dict might be read-only (e.g. a class
# __dict__), so we can't write directly into the __dict__, and that
# the value has no __set__, so it is safe to use setattr.
if value is Pointer.MISSING:
delattr(self.namespace, self.name)
else:
setattr(self.namespace, self.name, value)
class Invocation:
"""A context manager that describes a possible way that a function can be
invoked, and stubs out the function to handle that invocation."""
def __init__(self, pointer, required, delegate, *args, **kwargs):
self.pointer = pointer
self.required = required
self.delegate = delegate or (lambda *args, **kwargs: None)
self.args = args
self.kwargs = kwargs
self.original = None
self.invoked = False
def __repr__(self):
return '%r(%s)' % (self.pointer, format_args(self.args, self.kwargs))
def __call__(self, *args, **kwargs):
return Invocation(self.pointer, self.required, None, *args, **kwargs)
def __or__(self, delegate):
return Invocation(
self.pointer, self.required, delegate, *self.args, **self.kwargs)
def __rshift__(self, return_value):
return self | (lambda *args, **kwargs: return_value)
def __xor__(self, exception):
return self | make_raiser(exception)
def __enter__(self):
self.current = getattr(self.pointer.namespace, self.pointer.name)
if isinstance(self.current, Stub):
self.current.add_invocation(self)
else:
self.original = self.pointer.get()
stub = Stub(self.pointer, self)
if isinstance(self.original, staticmethod):
stub = staticmethod(stub)
if isinstance(self.original, classmethod):
stub = classmethod(stub)
self.pointer.setattr(stub)
def __exit__(self, etype, evalue, tb):
if self.original is not None:
self.pointer.setattr(self.original)
else:
self.current.remove_invocation(self)
if self.required and not self.invoked and not evalue:
raise AssertionError('Missing expected call to %r' % self)
def matches(self, args, kwargs):
return match(args, self.args) and match(kwargs, self.kwargs)
class Stub:
"""A stub that takes the place of a function or method."""
def __init__(self, pointer, *invocations):
self.pointer = pointer
self.required_invocations = []
self.optional_invocations = []
for invocation in invocations:
self.add_invocation(invocation)
def __get__(self, obj, objtype=None):
return self if obj is None else types.MethodType(self, obj)
def __call__(self, *args, **kwargs):
for invocation in self.optional_invocations:
if invocation.matches(args, kwargs):
return invocation.delegate(*args, **kwargs)
if self.required_invocations:
invocation = self.required_invocations.pop(0)
if invocation.matches(args, kwargs):
invocation.invoked = True
return invocation.delegate(*args, **kwargs)
raise AssertionError('Unexpected call to %r(%s); expecting %r' % (
self.pointer, format_args(args, kwargs), invocation
))
raise AssertionError('Unexpected call to %r(%s)' % (
self.pointer, format_args(args, kwargs)
))
def add_invocation(self, invocation):
if invocation.required:
self.required_invocations.append(invocation)
else:
self.optional_invocations.insert(0, invocation)
def remove_invocation(self, invocation):
self.optional_invocations = [
i for i in self.optional_invocations if i is not invocation]
self.required_invocations = [
i for i in self.required_invocations if i is not invocation]
def format_args(args, kwargs):
"""Formats args and kwargs the way they look in a function call."""
return ', '.join([repr(arg) for arg in args] +
['%s=%r' % item for item in sorted(kwargs.items())])
def make_raiser(exception):
def raiser(*args, **kwargs):
raise exception
return raiser
stub = StubWrapper()
expect = ExpectWrapper()
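# --- Added usage sketch (illustrative only, based on the module docstring) ---
# `double` is a throwaway function, not part of this module's API. The block
# is guarded so importing the module is unaffected.
if __name__ == '__main__':
    def double(x):
        return x * 2

    with stub[double](5) >> 50:           # optional behaviour for double(5)
        with expect[double](3) >> 7:      # exactly one call double(3) required
            assert double(3) == 7         # satisfies the expectation
            assert double(5) == 50        # handled by the surrounding stub
    assert double(3) == 6                 # original function restored on exit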
| 37.064327 | 80 | 0.634427 |
082053fba2c9f13257a6f1081d9cb9b6601a4eee | 8 | py | Python | bad/listNonExistentList.py | Alberto42/Interpreter | a56c4d905672572734a8470ef607b66727489f15 | [
"BSD-3-Clause"
] | null | null | null | bad/listNonExistentList.py | Alberto42/Interpreter | a56c4d905672572734a8470ef607b66727489f15 | [
"BSD-3-Clause"
] | null | null | null | bad/listNonExistentList.py | Alberto42/Interpreter | a56c4d905672572734a8470ef607b66727489f15 | [
"BSD-3-Clause"
] | null | null | null | x = l[0] | 8 | 8 | 0.375 |
1b5e1b19455cd69a0ff98239d81029e9bb909e77 | 35,257 | py | Python | mars/dataframe/reduction/aggregation.py | BoxFishLab/mars | c2f53b74fd2bfaea918bf1f072a8927eb36e2038 | [
"Apache-2.0"
] | 1 | 2020-11-27T08:44:16.000Z | 2020-11-27T08:44:16.000Z | mars/dataframe/reduction/aggregation.py | BoxFishLab/mars | c2f53b74fd2bfaea918bf1f072a8927eb36e2038 | [
"Apache-2.0"
] | null | null | null | mars/dataframe/reduction/aggregation.py | BoxFishLab/mars | c2f53b74fd2bfaea918bf1f072a8927eb36e2038 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from collections import OrderedDict
from collections.abc import Iterable
from typing import List, Dict, Union
import numpy as np
import pandas as pd
from ... import opcodes, tensor as mars_tensor
from ...config import options
from ...core import OutputType, Base, Entity
from ...custom_log import redirect_custom_log
from ...operands import OperandStage
from ...serialize import BoolField, AnyField, Int32Field, ListField
from ...utils import ceildiv, lazy_import, enter_mode, enter_current_session
from ..merge import DataFrameConcat
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import build_df, build_empty_df, build_series, parse_index, validate_axis
from .core import CustomReduction, ReductionCompiler, ReductionSteps, ReductionPreStep, \
ReductionAggStep, ReductionPostStep
cp = lazy_import('cupy', globals=globals(), rename='cp')
cudf = lazy_import('cudf', globals=globals())
def where_function(cond, var1, var2):
if var1.ndim >= 1:
return var1.where(cond, var2)
else:
if isinstance(var1, (Base, Entity)):
return mars_tensor.where(cond, var1, var2)
else:
return np.where(cond, var1, var2).item()
def mean_function(x, skipna=None):
return x.sum(skipna=skipna) / x.count()
def var_function(x, skipna=None, ddof=1):
cnt = x.count()
if ddof == 0:
return (x ** 2).sum(skipna=skipna) / cnt - (x.sum(skipna=skipna) / cnt) ** 2
return ((x ** 2).sum(skipna=skipna) - x.sum(skipna=skipna) ** 2 / cnt) / (cnt - ddof)
def sem_function(x, skipna=None, ddof=1):
var = var_function(x, skipna=skipna, ddof=ddof)
cnt = x.count()
return (var / cnt) ** 0.5
def skew_function(x, skipna=None, bias=False):
cnt = x.count()
mean = x.sum(skipna=skipna) / cnt
divided = (x ** 3).sum(skipna=skipna) / cnt \
- 3 * (x ** 2).sum(skipna=skipna) / cnt * mean \
+ 2 * mean ** 3
var = var_function(x, skipna=skipna, ddof=0)
val = where_function(var > 0, divided / var ** 1.5, np.nan)
if not bias:
val = where_function((var > 0) & (cnt > 2), val * ((cnt * (cnt - 1)) ** 0.5 / (cnt - 2)), np.nan)
return val
def kurt_function(x, skipna=None, bias=False, fisher=True):
cnt = x.count()
mean = x.sum(skipna=skipna) / cnt
divided = (x ** 4).sum(skipna=skipna) / cnt \
- 4 * (x ** 3).sum(skipna=skipna) / cnt * mean \
+ 6 * (x ** 2).sum(skipna=skipna) / cnt * mean ** 2 \
- 3 * mean ** 4
var = var_function(x, skipna=skipna, ddof=0)
val = where_function(var > 0, divided / var ** 2, np.nan)
if not bias:
val = where_function((var > 0) & (cnt > 3),
(val * (cnt ** 2 - 1) - 3 * (cnt - 1) ** 2) / (cnt - 2) / (cnt - 3), np.nan)
if not fisher:
val += 3
return val
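# Added note (comments only): the reduction helpers above rewrite each
# moment-based statistic in terms of chunk-combinable pieces -- counts and
# sums of powers of x. For example, with ddof=0 the variance becomes
#     var = E[x^2] - E[x]^2 = (x ** 2).sum() / n - (x.sum() / n) ** 2
# so only sum(x), sum(x**2) (plus higher powers for skew/kurt) and count need
# to be combined across chunks, never the raw values themselves.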
_agg_functions = {
'sum': lambda x, skipna=None: x.sum(skipna=skipna),
'prod': lambda x, skipna=None: x.prod(skipna=skipna),
'product': lambda x, skipna=None: x.product(skipna=skipna),
'min': lambda x, skipna=None: x.min(skipna=skipna),
'max': lambda x, skipna=None: x.max(skipna=skipna),
'all': lambda x, skipna=None: x.all(skipna=skipna),
'any': lambda x, skipna=None: x.any(skipna=skipna),
'count': lambda x: x.count(),
'size': lambda x: x._reduction_size(),
'mean': mean_function,
'var': var_function,
'std': lambda x, skipna=None, ddof=1: var_function(x, skipna=skipna, ddof=ddof) ** 0.5,
'sem': sem_function,
'skew': skew_function,
'kurt': kurt_function,
'kurtosis': kurt_function,
}
class DataFrameAggregate(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = opcodes.AGGREGATE
_func = AnyField('func')
_raw_func = AnyField('raw_func')
_axis = AnyField('axis')
_numeric_only = BoolField('numeric_only')
_bool_only = BoolField('bool_only')
_use_inf_as_na = BoolField('use_inf_as_na')
_combine_size = Int32Field('combine_size')
_pre_funcs = ListField('pre_funcs')
_agg_funcs = ListField('agg_funcs')
_post_funcs = ListField('post_funcs')
def __init__(self, func=None, raw_func=None, axis=None, use_inf_as_na=None, numeric_only=None,
bool_only=None, combine_size=None, pre_funcs=None, agg_funcs=None, post_funcs=None,
output_types=None, stage=None, **kw):
super().__init__(_func=func, _raw_func=raw_func, _axis=axis, _use_inf_as_na=use_inf_as_na,
_numeric_only=numeric_only, _bool_only=bool_only, _combine_size=combine_size,
_pre_funcs=pre_funcs, _agg_funcs=agg_funcs, _post_funcs=post_funcs, _stage=stage,
_output_types=output_types, **kw)
@property
def func(self) -> Union[List, Dict[str, List]]:
return self._func
@property
def raw_func(self):
return self._raw_func
@property
def axis(self) -> int:
return self._axis
@property
def numeric_only(self) -> bool:
return self._numeric_only
@property
def bool_only(self) -> bool:
return self._bool_only
@property
def use_inf_as_na(self) -> int:
return self._use_inf_as_na
@property
def combine_size(self) -> int:
return self._combine_size
@property
def pre_funcs(self) -> List[ReductionPreStep]:
return self._pre_funcs
@property
def agg_funcs(self) -> List[ReductionAggStep]:
return self._agg_funcs
@property
def post_funcs(self) -> List[ReductionPostStep]:
return self._post_funcs
@staticmethod
def _filter_dtypes(op: "DataFrameAggregate", dtypes):
if not op.numeric_only and not op.bool_only:
return dtypes
empty_df = build_empty_df(dtypes)
return empty_df.select_dtypes([np.number, np.bool_] if op.numeric_only else [np.bool_]).dtypes
def _calc_result_shape(self, df):
if df.ndim == 2:
if self._numeric_only:
df = df.select_dtypes([np.number, np.bool_])
elif self._bool_only:
df = df.select_dtypes([np.bool_])
if self.output_types[0] == OutputType.dataframe:
test_obj = pd.concat([
build_df(df, size=2, fill_value=1),
build_df(df, size=2, fill_value=2),
])
else:
test_obj = pd.concat([
build_series(df, size=2, fill_value=1, name=df.name),
build_series(df, size=2, fill_value=2, name=df.name),
])
result_df = test_obj.agg(self.raw_func, axis=self.axis)
if isinstance(result_df, pd.DataFrame):
self.output_types = [OutputType.dataframe]
return result_df.dtypes, result_df.index
elif isinstance(result_df, pd.Series):
self.output_types = [OutputType.series]
return pd.Series([result_df.dtype], index=[result_df.name]), result_df.index
else:
self.output_types = [OutputType.scalar]
return np.array(result_df).dtype, None
def _normalize_funcs(self):
self._raw_func = self._func
if isinstance(self._func, dict):
new_func = OrderedDict()
for k, v in self._func.items():
if isinstance(v, str) or callable(v):
new_func[k] = [v]
else:
new_func[k] = v
self._func = new_func
elif isinstance(self._func, Iterable) and not isinstance(self._func, str):
self._func = list(self._func)
else:
self._func = [self._func]
custom_idx = 0
if isinstance(self._func, list):
custom_iter = (f for f in self._func if isinstance(f, CustomReduction))
else:
custom_iter = (f for f in self._func.values() if isinstance(f, CustomReduction))
for r in custom_iter:
if r.name == '<custom>':
r.name = f'<custom_{custom_idx}>'
custom_idx += 1
def __call__(self, df, output_type=None, dtypes=None, index=None):
self._normalize_funcs()
if output_type is None or dtypes is None:
with enter_mode(kernel=False, build=False):
dtypes, index = self._calc_result_shape(df)
else:
self.output_types = [output_type]
if self.output_types[0] == OutputType.dataframe:
if self.axis == 0:
new_shape = (len(index), len(dtypes))
new_index = parse_index(index, store_data=True)
else:
new_shape = (df.shape[0], len(dtypes))
new_index = df.index_value
return self.new_dataframe([df], shape=new_shape, dtypes=dtypes, index_value=new_index,
columns_value=parse_index(dtypes.index, store_data=True))
elif self.output_types[0] == OutputType.series:
if df.ndim == 1:
new_shape = (len(index),)
new_index = parse_index(index, store_data=True)
elif self.axis == 0:
new_shape = (len(index),)
new_index = parse_index(index, store_data=True)
else:
new_shape = (df.shape[0],)
new_index = df.index_value
return self.new_series([df], shape=new_shape, dtype=dtypes[0], name=dtypes.index[0],
index_value=new_index)
elif self.output_types[0] == OutputType.tensor:
return self.new_tileable([df], dtype=dtypes, shape=(np.nan,))
else:
return self.new_scalar([df], dtype=dtypes)
@staticmethod
def _safe_append(d, key, val):
if key not in d:
d[key] = []
if val not in d[key]:
d[key].append(val)
@classmethod
def _gen_map_chunks(cls, op, in_df, out_df, func_infos: List[ReductionSteps],
input_index_to_output: Dict[int, int]):
axis = op.axis
if axis == 0:
agg_chunks_shape = (in_df.chunk_shape[0], len(func_infos)) \
if len(in_df.chunk_shape) == 2 else (in_df.chunk_shape[0], 1)
else:
agg_chunks_shape = (len(func_infos), in_df.chunk_shape[1])
agg_chunks = np.empty(agg_chunks_shape, dtype=np.object)
for chunk in in_df.chunks:
input_index = chunk.index[1 - axis] if len(chunk.index) > 1 else 0
if input_index not in input_index_to_output:
continue
map_op = op.copy().reset_key() # type: "DataFrameAggregate"
new_axis_index = input_index_to_output[input_index]
func_info = func_infos[new_axis_index]
# force as_index=True for map phase
map_op.output_types = [OutputType.dataframe] if chunk.ndim == 2 else [OutputType.series]
map_op._stage = OperandStage.map
map_op._pre_funcs = func_info.pre_funcs
map_op._agg_funcs = func_info.agg_funcs
if axis == 0:
new_index = (chunk.index[0], new_axis_index) if len(chunk.index) == 2 else (chunk.index[0], 0)
else:
new_index = (new_axis_index, chunk.index[1])
if map_op.output_types[0] == OutputType.dataframe:
if axis == 0:
shape = (1, out_df.shape[-1])
if out_df.ndim == 2:
columns_value = out_df.columns_value
index_value = out_df.index_value
else:
columns_value = out_df.index_value
index_value = parse_index(pd.Index([0]), out_df.key)
else:
shape = (out_df.shape[0], 1)
columns_value = parse_index(pd.Index([0]), out_df.key, store_data=True)
index_value = out_df.index_value
agg_chunk = map_op.new_chunk([chunk], shape=shape, index=new_index,
columns_value=columns_value, index_value=index_value)
else:
agg_chunk = map_op.new_chunk([chunk], shape=(1,), index=new_index)
agg_chunks[agg_chunk.index] = agg_chunk
return agg_chunks
@classmethod
def _tile_single_chunk(cls, op: "DataFrameAggregate"):
in_df = op.inputs[0]
out_df = op.outputs[0]
chunk_op = op.copy().reset_key()
if op.output_types[0] == OutputType.dataframe:
chunk = chunk_op.new_chunk(in_df.chunks, index=(0, 0), shape=out_df.shape,
index_value=out_df.index_value, columns_value=out_df.columns_value,
dtypes=out_df.dtypes)
elif op.output_types[0] == OutputType.series:
chunk = chunk_op.new_chunk(in_df.chunks, index=(0,), shape=out_df.shape, dtype=out_df.dtype,
index_value=out_df.index_value, name=out_df.name)
elif op.output_types[0] == OutputType.tensor:
chunk = chunk_op.new_chunk(in_df.chunks, index=(0,), dtype=out_df.dtype, shape=(np.nan,))
else:
chunk = chunk_op.new_chunk(in_df.chunks, dtype=out_df.dtype, index=(), shape=())
tileable_op = op.copy().reset_key()
kw = out_df.params.copy()
kw.update(dict(chunks=[chunk], nsplits=tuple((x,) for x in out_df.shape)))
return tileable_op.new_tileables([in_df], **kw)
@classmethod
def _tile_size(cls, op: "DataFrameAggregate"):
in_df = op.inputs[0]
out_df = op.outputs[0]
chunks = []
for c in in_df.chunks:
chunk_op = op.copy().reset_key()
chunks.append(chunk_op.new_chunk([c], index=c.index, shape=(1,) * len(in_df.shape),
dtype=out_df.dtype))
tileable_op = op.copy().reset_key()
nsplits = tuple((1,) * s for s in in_df.chunk_shape)
tileable = tileable_op.new_tileable(out_df.inputs, chunks=chunks, nsplits=nsplits,
shape=in_df.chunk_shape, dtype=out_df.dtype)
return [tileable.sum()._inplace_tile()]
@staticmethod
def _add_functions(op: "DataFrameAggregate", compiler: ReductionCompiler,
cols=None):
if isinstance(op.func, list):
func_iter = ((None, f) for f in op.func)
cols_set = set(cols) if cols is not None else None
else:
assert cols is not None
cols_set = set(cols) & set(op.func.keys())
if len(cols_set) == 0:
return False
func_iter = ((col, f) for col, funcs in op.func.items() for f in funcs)
for col, f in func_iter:
if cols_set is not None and col is not None and col not in cols_set:
continue
func_name = None
if isinstance(f, str):
f, func_name = _agg_functions[f], f
ndim = 1 if cols is None else 2
func_cols = [col] if col is not None else None
compiler.add_function(f, ndim, cols=func_cols, func_name=func_name)
return True
@classmethod
def _tile_tree(cls, op: "DataFrameAggregate"):
in_df = op.inputs[0]
out_df = op.outputs[0]
combine_size = op.combine_size
axis = op.axis
input_index_to_output = dict()
output_index_to_input = []
axis_func_infos = []
dtypes_list = []
if len(in_df.chunk_shape) > 1:
for col_idx in range(in_df.chunk_shape[1 - axis]):
compiler = ReductionCompiler(axis=op.axis)
idx_chunk = in_df.cix[0, col_idx] if axis == 0 else in_df.cix[col_idx, 0]
new_dtypes = cls._filter_dtypes(op, idx_chunk.dtypes)
if not cls._add_functions(op, compiler, cols=list(new_dtypes.index)):
continue
input_index_to_output[col_idx] = len(axis_func_infos)
output_index_to_input.append(col_idx)
axis_func_infos.append(compiler.compile())
dtypes_list.append(new_dtypes)
else:
compiler = ReductionCompiler(axis=op.axis)
cls._add_functions(op, compiler)
input_index_to_output[0] = 0
axis_func_infos.append(compiler.compile())
chunks = cls._gen_map_chunks(op, in_df, out_df, axis_func_infos, input_index_to_output)
while chunks.shape[axis] > combine_size:
if axis == 0:
new_chunks_shape = (ceildiv(chunks.shape[0], combine_size), chunks.shape[1])
else:
new_chunks_shape = (chunks.shape[0], ceildiv(chunks.shape[1], combine_size))
            new_chunks = np.empty(new_chunks_shape, dtype=object)
for idx0, i in enumerate(range(0, chunks.shape[axis], combine_size)):
for idx1 in range(chunks.shape[1 - axis]):
func_info = axis_func_infos[idx1]
if axis == 0:
chks = chunks[i: i + combine_size, idx1]
chunk_index = (idx0, idx1)
if chks[0].ndim == 1:
concat_shape = (len(chks),)
agg_shape = (1,)
else:
concat_shape = (len(chks), chks[0].shape[1])
agg_shape = (chks[0].shape[1], 1)
else:
chks = chunks[idx1, i: i + combine_size]
chunk_index = (idx1, idx0)
concat_shape = (chks[0].shape[0], len(chks))
agg_shape = (chks[0].shape[0], 1)
chks = chks.reshape((chks.shape[0],)).tolist()
if len(chks) == 1:
chk = chks[0]
else:
concat_op = DataFrameConcat(output_types=[OutputType.dataframe], axis=axis)
# Change index for concatenate
for j, c in enumerate(chks):
c._index = (j, 0) if axis == 0 else (0, j)
chk = concat_op.new_chunk(chks, dtypes=dtypes_list[idx1] if dtypes_list else None,
shape=concat_shape, index_value=chks[0].index_value)
chunk_op = op.copy().reset_key()
chunk_op.output_types = [OutputType.dataframe]
chunk_op._stage = OperandStage.combine
chunk_op._agg_funcs = func_info.agg_funcs
if axis == 0:
new_chunks[chunk_index] = chunk_op.new_chunk(
[chk], index=chunk_index, shape=agg_shape,
index_value=chks[0].index_value)
else:
new_chunks[chunk_index] = chunk_op.new_chunk(
[chk], index=chunk_index, shape=agg_shape,
index_value=chks[0].columns_value)
chunks = new_chunks
agg_chunks = []
for idx in range(chunks.shape[1 - axis]):
func_info = axis_func_infos[idx]
concat_op = DataFrameConcat(output_types=[OutputType.dataframe], axis=axis)
if axis == 0:
chks = chunks[:, idx]
if chks[0].ndim == 1:
concat_shape = (len(chks),)
else:
concat_shape = (len(chks), chks[0].shape[1])
else:
chks = chunks[idx, :]
concat_shape = (chks[0].shape[0], len(chks))
chks = chks.reshape((chks.shape[0],)).tolist()
chk = concat_op.new_chunk(chks, dtypes=dtypes_list[idx] if dtypes_list else None,
shape=concat_shape, index_value=chks[0].index_value)
chunk_op = op.copy().reset_key()
chunk_op._stage = OperandStage.agg
chunk_op._agg_funcs = func_info.agg_funcs
chunk_op._post_funcs = func_info.post_funcs
kw = out_df.params.copy()
if op.output_types[0] == OutputType.dataframe:
if axis == 0:
src_col_chunk = in_df.cix[0, output_index_to_input[idx]]
valid_cols = [c for pre in func_info.pre_funcs for c in pre.columns or ()]
if not valid_cols:
columns_value = src_col_chunk.columns_value
shape_len = src_col_chunk.shape[1]
else:
col_index = pd.Index(valid_cols).unique()
columns_value = parse_index(col_index, store_data=True)
shape_len = len(col_index)
kw.update(dict(shape=(out_df.shape[0], shape_len), columns_value=columns_value,
index=(0, idx), dtypes=out_df.dtypes[columns_value.to_pandas()]))
else:
src_col_chunk = in_df.cix[output_index_to_input[idx], 0]
kw.update(dict(index=(idx, 0), index_value=src_col_chunk.index_value,
shape=(src_col_chunk.shape[0], out_df.shape[1]),
dtypes=out_df.dtypes))
else:
if op.output_types[0] == OutputType.series:
if in_df.ndim == 1:
index_value, shape = out_df.index_value, out_df.shape
elif axis == 0:
out_dtypes = dtypes_list[idx]
index_value = parse_index(out_dtypes.index, store_data=True)
shape = (len(out_dtypes),)
else:
src_chunk = in_df.cix[output_index_to_input[idx], 0]
index_value, shape = src_chunk.index_value, (src_chunk.shape[0],)
kw.update(dict(name=out_df.name, dtype=out_df.dtype, index=(idx,),
index_value=index_value, shape=shape))
elif op.output_types[0] == OutputType.tensor:
kw.update(dict(index=(0,), shape=(np.nan,), dtype=out_df.dtype))
else:
kw.update(dict(index=(), shape=(), dtype=out_df.dtype))
agg_chunks.append(chunk_op.new_chunk([chk], **kw))
new_op = op.copy()
if op.output_types[0] == OutputType.dataframe:
if axis == 0:
nsplits = ((out_df.shape[0],), tuple(c.shape[1] for c in agg_chunks))
else:
nsplits = (tuple(c.shape[0] for c in agg_chunks), (out_df.shape[1],))
return new_op.new_tileables(op.inputs, chunks=agg_chunks, nsplits=nsplits, dtypes=out_df.dtypes,
shape=out_df.shape, index_value=out_df.index_value,
columns_value=out_df.columns_value)
elif op.output_types[0] == OutputType.series:
nsplits = (tuple(c.shape[0] for c in agg_chunks),)
return new_op.new_tileables(op.inputs, chunks=agg_chunks, nsplits=nsplits, dtype=out_df.dtype,
shape=out_df.shape, index_value=out_df.index_value, name=out_df.name)
elif op.output_types[0] == OutputType.tensor: # unique
return new_op.new_tileables(op.inputs, chunks=agg_chunks, dtype=out_df.dtype,
shape=out_df.shape, nsplits=((np.nan,),))
else: # scalar
return new_op.new_tileables(op.inputs, chunks=agg_chunks, dtype=out_df.dtype,
shape=(), nsplits=())
@classmethod
def tile(cls, op: "DataFrameAggregate"):
in_df = op.inputs[0]
if len(in_df.chunks) == 1:
return cls._tile_single_chunk(op)
elif in_df.ndim == 2 and op.raw_func == 'size':
return cls._tile_size(op)
else:
return cls._tile_tree(op)
@classmethod
def _wrap_df(cls, op, value, index=None):
xdf = cudf if op.gpu else pd
axis = op.axis
ndim = op.inputs[0].ndim
if ndim == 2:
if isinstance(value, (np.generic, int, float, complex)):
value = xdf.DataFrame([value], columns=index)
elif not isinstance(value, xdf.DataFrame):
new_index = None if not op.gpu else getattr(value, 'index', None)
value = xdf.DataFrame(value, columns=index, index=new_index)
else:
return value
return value.T if axis == 0 else value
else:
if isinstance(value, (np.generic, int, float, complex)):
value = xdf.Series([value], index=index)
elif isinstance(value, np.ndarray):
# assert value.ndim == 0
value = xdf.Series(value.tolist(), index=index)
return value
@staticmethod
def _pack_inputs(agg_funcs: List[ReductionAggStep], in_data):
pos = 0
out_dict = dict()
for step in agg_funcs:
if step.custom_reduction is None:
out_dict[step.output_key] = in_data[pos]
else:
out_dict[step.output_key] = tuple(in_data[pos:pos + step.output_limit])
pos += step.output_limit
return out_dict
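    # Hedged illustration (hypothetical step values): for agg_funcs holding a
    # plain reduction (output_limit 1, output_key 'k0') followed by a custom
    # reduction (output_limit 2, output_key 'k1'), a flat input tuple
    # (a, b, c) is regrouped as {'k0': a, 'k1': (b, c)}.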
@classmethod
def _execute_map(cls, ctx, op: "DataFrameAggregate"):
in_data = ctx[op.inputs[0].key]
axis = op.axis
axis_index = op.outputs[0].index[op.axis]
if in_data.ndim == 2:
if op.numeric_only:
in_data = in_data.select_dtypes([np.number, np.bool_])
elif op.bool_only:
in_data = in_data.select_dtypes([np.bool_])
# map according to map groups
ret_map_dfs = dict()
for input_key, output_key, cols, func in op.pre_funcs:
src_df = in_data if cols is None else in_data[cols]
if input_key == output_key:
ret_map_dfs[output_key] = src_df
else:
ret_map_dfs[output_key] = func(src_df, gpu=op.is_gpu())
agg_dfs = []
for input_key, map_func_name, _agg_func_name, custom_reduction, \
_output_key, _output_limit, kwds in op.agg_funcs:
input_obj = ret_map_dfs[input_key]
if map_func_name == 'custom_reduction':
pre_result = custom_reduction.pre(input_obj)
if not isinstance(pre_result, tuple):
pre_result = (pre_result,)
if custom_reduction.pre_with_agg:
# when custom_reduction.pre already aggregates, skip
agg_result = pre_result
else:
agg_result = custom_reduction.agg(*pre_result)
if not isinstance(agg_result, tuple):
agg_result = (agg_result,)
agg_dfs.extend([cls._wrap_df(op, r, index=[axis_index]) for r in agg_result])
elif map_func_name == 'size':
agg_dfs.append(cls._wrap_df(op, input_obj.agg(lambda x: x.size, axis=axis),
index=[axis_index]))
else:
agg_dfs.append(cls._wrap_df(op, getattr(input_obj, map_func_name)(**kwds),
index=[axis_index]))
ctx[op.outputs[0].key] = tuple(agg_dfs)
@classmethod
def _execute_combine(cls, ctx, op: "DataFrameAggregate"):
in_data = ctx[op.inputs[0].key]
in_data_dict = cls._pack_inputs(op.agg_funcs, in_data)
axis = op.axis
axis_index = op.outputs[0].index[axis]
combines = []
for _input_key, _map_func_name, agg_func_name, custom_reduction, \
output_key, _output_limit, kwds in op.agg_funcs:
input_obj = in_data_dict[output_key]
if agg_func_name == 'custom_reduction':
agg_result = custom_reduction.agg(*input_obj)
if not isinstance(agg_result, tuple):
agg_result = (agg_result,)
combines.extend([cls._wrap_df(op, r, index=[axis_index])
for r in agg_result])
else:
if op.gpu:
if kwds.pop('numeric_only', None):
raise NotImplementedError('numeric_only not implemented under cudf')
result = cls._wrap_df(op, getattr(input_obj, agg_func_name)(**kwds), index=[axis_index])
combines.append(result)
ctx[op.outputs[0].key] = tuple(combines)
@classmethod
def _execute_agg(cls, ctx, op: "DataFrameAggregate"):
xdf = cudf if op.gpu else pd
xp = cp if op.gpu else np
out = op.outputs[0]
in_data = ctx[op.inputs[0].key]
in_data_dict = cls._pack_inputs(op.agg_funcs, in_data)
axis = op.axis
# perform agg
for _input_key, _map_func_name, agg_func_name, custom_reduction, \
output_key, _output_limit, kwds in op.agg_funcs:
input_obj = in_data_dict[output_key]
if agg_func_name == 'custom_reduction':
agg_result = custom_reduction.agg(*input_obj)
if not isinstance(agg_result, tuple):
agg_result = (agg_result,)
in_data_dict[output_key] = custom_reduction.post(*agg_result)
else:
if op.gpu:
if kwds.pop('numeric_only', None):
raise NotImplementedError('numeric_only not implemented under cudf')
in_data_dict[output_key] = getattr(input_obj, agg_func_name)(**kwds)
aggs = []
# perform post op
for input_keys, _output_key, func_name, cols, func in op.post_funcs:
if cols is None:
func_inputs = [in_data_dict[k] for k in input_keys]
else:
func_inputs = [in_data_dict[k][cols] for k in input_keys]
agg_series = func(*func_inputs, gpu=op.is_gpu())
agg_series_ndim = getattr(agg_series, 'ndim', 0)
ser_index = None
if agg_series_ndim < out.ndim:
ser_index = [func_name]
aggs.append(cls._wrap_df(op, agg_series, index=ser_index))
# concatenate to produce final result
concat_df = xdf.concat(aggs, axis=axis)
if op.output_types[0] == OutputType.series:
if concat_df.ndim > 1:
if op.inputs[0].ndim == 2:
if axis == 0:
concat_df = concat_df.iloc[0, :]
else:
concat_df = concat_df.iloc[:, 0]
else:
concat_df = concat_df.iloc[:, 0]
concat_df.name = op.outputs[0].name
concat_df = concat_df.astype(op.outputs[0].dtype, copy=False)
elif op.output_types[0] == OutputType.scalar:
concat_df = concat_df.iloc[0].astype(op.outputs[0].dtype)
elif op.output_types[0] == OutputType.tensor:
concat_df = xp.array(concat_df).astype(dtype=out.dtype)
else:
if axis == 0:
concat_df = concat_df.reindex(op.outputs[0].index_value.to_pandas())
else:
concat_df = concat_df[op.outputs[0].columns_value.to_pandas()]
concat_df = concat_df.astype(op.outputs[0].dtypes, copy=False)
ctx[op.outputs[0].key] = concat_df
@classmethod
@redirect_custom_log
@enter_current_session
def execute(cls, ctx, op: "DataFrameAggregate"):
try:
pd.set_option('mode.use_inf_as_na', op.use_inf_as_na)
if op.stage == OperandStage.map:
cls._execute_map(ctx, op)
elif op.stage == OperandStage.combine:
cls._execute_combine(ctx, op)
elif op.stage == OperandStage.agg:
cls._execute_agg(ctx, op)
elif op.raw_func == 'size':
xp = cp if op.gpu else np
ctx[op.outputs[0].key] = xp.array(ctx[op.inputs[0].key].agg(op.raw_func, axis=op.axis)) \
.reshape(op.outputs[0].shape)
else:
xp = cp if op.gpu else np
result = ctx[op.inputs[0].key].agg(op.raw_func, axis=op.axis)
if op.output_types[0] == OutputType.tensor:
result = xp.array(result)
ctx[op.outputs[0].key] = result
finally:
pd.reset_option('mode.use_inf_as_na')
def is_funcs_aggregate(func, ndim=2):
to_check = []
if isinstance(func, list):
to_check.extend(func)
elif isinstance(func, dict):
for f in func.values():
if isinstance(f, Iterable) and not isinstance(f, str):
to_check.extend(f)
else:
to_check.append(f)
else:
to_check.append(func)
compiler = ReductionCompiler()
for f in to_check:
if f in _agg_functions:
continue
elif callable(f):
try:
if ndim == 2:
compiler.add_function(f, 2, cols=['A', 'B'])
else:
compiler.add_function(f, 1)
except ValueError:
return False
else:
return False
return True
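# Hedged examples (illustrative; assumes 'sum' is registered in _agg_functions
# and that ReductionCompiler accepts simple reduction lambdas):
#   is_funcs_aggregate('sum')                        # -> True
#   is_funcs_aggregate(lambda x: x.max() - x.min())  # -> True, compilable reduction
#   is_funcs_aggregate(lambda x: x + 1)              # -> False, element-wise, so
#                                                    #    aggregate() below falls back to transform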
def aggregate(df, func, axis=0, **kw):
if not is_funcs_aggregate(func, df.ndim):
return df.transform(func, axis=axis, _call_agg=True)
axis = validate_axis(axis, df)
use_inf_as_na = kw.pop('_use_inf_as_na', options.dataframe.mode.use_inf_as_na)
if (df.op.output_types[0] == OutputType.series or axis == 1) and isinstance(func, dict):
raise NotImplementedError('Currently cannot aggregate dicts over axis=1 on %s'
% type(df).__name__)
combine_size = kw.get('combine_size') or options.combine_size
numeric_only = kw.get('numeric_only')
bool_only = kw.get('bool_only')
op = DataFrameAggregate(func=copy.deepcopy(func), axis=axis, output_types=df.op.output_types,
combine_size=combine_size, numeric_only=numeric_only, bool_only=bool_only,
use_inf_as_na=use_inf_as_na)
output_type = kw.get('_output_type')
dtypes = kw.get('_dtypes')
index = kw.get('_index')
return op(df, output_type=output_type, dtypes=dtypes, index=index)
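# Hedged usage sketch (names below come from Mars' public API as assumed here,
# not from this module):
#   import mars.dataframe as md
#   mdf = md.DataFrame(raw_pandas_df, chunk_size=2)
#   mdf.agg(['sum', 'max']).execute()             # list form, tiled via the tree path above
#   mdf.agg({'a': 'sum', 'b': 'mean'}).execute()  # dict form (column -> funcs), axis=0 only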
| 42.996341 | 110 | 0.565306 |
5f3e4a8bae4803790fd92f4957975f737c03a4df | 1,560 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/rds/apis/ModifyInstanceAzRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/rds/apis/ModifyInstanceAzRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/rds/apis/ModifyInstanceAzRequest.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ModifyInstanceAzRequest(JDCloudRequest):
"""
    Modify an instance's availability zone, for example changing the instance from a single availability zone to multiple availability zones
"""
def __init__(self, parameters, header=None, version="v1"):
super(ModifyInstanceAzRequest, self).__init__(
'/regions/{regionId}/instances/{instanceId}:modifyInstanceAz', 'POST', header, version)
self.parameters = parameters
class ModifyInstanceAzParameters(object):
def __init__(self, regionId, instanceId, newAzId):
"""
        :param regionId: Region code; for the allowed values see [Regions and Availability Zones](../Enum-Definitions/Regions-AZ.md)
        :param instanceId: RDS instance ID, which uniquely identifies an RDS instance
        :param newAzId: New availability zone ID(s). For a standalone instance, supply a single AZ; for a primary/standby instance, supply two AZ IDs: the first for the primary node's availability zone and the second for the standby node's. The two availability zones may be the same or different
"""
self.regionId = regionId
self.instanceId = instanceId
self.newAzId = newAzId
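# Hedged usage sketch (region, instance and AZ identifiers are placeholders):
#   params = ModifyInstanceAzParameters('cn-north-1', 'mysql-xxxxxxxx',
#                                       ['cn-north-1a', 'cn-north-1b'])
#   request = ModifyInstanceAzRequest(params)
# The request object is then executed through the SDK's RDS client (not shown here).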
| 33.913043 | 114 | 0.725641 |
5708edc9841ccdde4639c9912bb02934c8450a94 | 1,067 | py | Python | test/test_parser.py | mactov/nlpy | ad4131d9d8e7d5c5702b943234072ff52fd64ed6 | [
"MIT"
] | null | null | null | test/test_parser.py | mactov/nlpy | ad4131d9d8e7d5c5702b943234072ff52fd64ed6 | [
"MIT"
] | null | null | null | test/test_parser.py | mactov/nlpy | ad4131d9d8e7d5c5702b943234072ff52fd64ed6 | [
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.append('../')
from nlpy.parser import Parser
class TestParser(unittest.TestCase):
def setUp(self):
self.text = 'Hello, World! My name is Chris. What is your name?'
self.p = Parser(self.text)
def test_list_sentences(self):
result = ['Hello, World', 'My name is Chris', 'What is your name']
self.assertEqual(result, self.p.list_sentences())
def test_list_words_flat(self):
result = ['Hello', 'World', 'My', 'name', 'is', 'Chris', 'What', 'is', 'your', 'name']
self.assertEqual(result, self.p.list_words(flat=True))
def test_list_words_embedded(self):
result = [['Hello', 'World'], ['My', 'name', 'is', 'Chris'], ['What', 'is', 'your', 'name']]
self.assertEqual(result, self.p.list_words(flat=False))
def test_list_words_counted(self):
result = {'Hello':1, 'World':1, 'My':1, 'name':2, 'is':2, 'Chris':1, 'What':1, 'your':1}
self.assertEqual(result, self.p.list_words(count=True))
if __name__ == '__main__':
    unittest.main()
| 36.793103 | 100 | 0.619494 |
fe2b717e30e3cb3dc50c1df78846a58bde9ca531 | 3,667 | py | Python | test/functional/interface_bitcoin_cli.py | JoffreyBourdieux/ApsioCoin | 6573a9ab4e6e302f99d203fe0e1e414adb1fd349 | [
"MIT"
] | null | null | null | test/functional/interface_bitcoin_cli.py | JoffreyBourdieux/ApsioCoin | 6573a9ab4e6e302f99d203fe0e1e414adb1fd349 | [
"MIT"
] | null | null | null | test/functional/interface_bitcoin_cli.py | JoffreyBourdieux/ApsioCoin | 6573a9ab4e6e302f99d203fe0e1e414adb1fd349 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
self.log.info("Compare responses from gewalletinfo RPC and `apsiocoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `apsiocoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `apsiocoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| 55.560606 | 160 | 0.707118 |
03277e46ead548df7a20c05d866636d06a399125 | 7,315 | py | Python | lib/urls_base.py | clouserw/olympia | 1d5755b08a526372ec66e6bc64ab636018181969 | [
"BSD-3-Clause"
] | 1 | 2017-07-14T19:22:39.000Z | 2017-07-14T19:22:39.000Z | lib/urls_base.py | clouserw/olympia | 1d5755b08a526372ec66e6bc64ab636018181969 | [
"BSD-3-Clause"
] | 6 | 2021-02-02T23:08:48.000Z | 2021-09-08T02:47:17.000Z | lib/urls_base.py | clouserw/olympia | 1d5755b08a526372ec66e6bc64ab636018181969 | [
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.shortcuts import redirect, render
from django.views.i18n import javascript_catalog
from django.views.decorators.cache import cache_page
from amo.urlresolvers import reverse
from amo.utils import urlparams
import blocklist.views
import versions.urls
admin.autodiscover()
handler403 = 'amo.views.handler403'
handler404 = 'amo.views.handler404'
handler500 = 'amo.views.handler500'
urlpatterns = patterns('',
# Discovery pane is first for undetectable efficiency wins.
('^discovery/', include('discovery.urls')),
# There are many more params but we only care about these three. The end is
# not anchored on purpose!
url('^blocklist/(?P<apiver>\d+)/(?P<app>[^/]+)/(?P<appver>[^/]+)/',
blocklist.views.blocklist, name='blocklist'),
('^blocked/', include('blocklist.urls')),
# AMO homepage or Marketplace Developer Hub? Choose your destiny.
url('^$', settings.HOME, name='home'),
# Add-ons.
('', include('addons.urls')),
# Browse pages.
('', include('browse.urls')),
# Tags.
('', include('tags.urls')),
# Collections.
('', include('bandwagon.urls')),
# Files
('^files/', include('files.urls')),
# Downloads.
('^downloads/', include(versions.urls.download_patterns)),
# Localizer Pages
('^localizers/', include('localizers.urls')),
# Users
('', include('users.urls')),
# Developer Hub.
('^developers/', include('devhub.urls')),
# Developer Hub.
('editors/', include('editors.urls')),
# AMO admin (not django admin).
('^admin/', include('zadmin.urls')),
# Performance wall of shame.
('^performance/', include('perf.urls')),
# Localizable pages.
('', include('pages.urls')),
# App versions.
('pages/appversions/', include('applications.urls')),
# Services
('', include('amo.urls')),
# Paypal
('^services/', include('paypal.urls')),
# Search
('^search/', include('search.urls')),
# Javascript translations.
url('^jsi18n.js$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['zamboni']}, name='jsi18n'),
# SAMO/API
('^api/', include('api.urls')),
('^compatibility/', include('compat.urls')),
# Site events data.
url('^statistics/events-(?P<start>\d{8})-(?P<end>\d{8}).json$',
'stats.views.site_events', name='amo.site_events'),
# Site statistics that we are going to catch, the rest will fall through.
url('^statistics/', include('stats.urls')),
# Fall through for any URLs not matched above stats dashboard.
url('^statistics/', lambda r: redirect('/'), name='statistics.dashboard'),
# Review spam.
url('^reviews/spam/$', 'reviews.views.spam', name='addons.reviews.spam'),
# Redirect patterns.
('^bookmarks/?$',
lambda r: redirect('browse.extensions', 'bookmarks', permanent=True)),
('^reviews/display/(\d+)',
lambda r, id: redirect('addons.reviews.list', id, permanent=True)),
('^reviews/add/(\d+)',
lambda r, id: redirect('addons.reviews.add', id, permanent=True)),
('^users/info/(\d+)',
lambda r, id: redirect('users.profile', id, permanent=True)),
('^pages/about$',
lambda r: redirect('pages.about', permanent=True)),
('^pages/credits$',
lambda r: redirect('pages.credits', permanent=True)),
('^pages/faq$',
lambda r: redirect('pages.faq', permanent=True)),
# Redirect persona/xxx
('^getpersonas$',
lambda r: redirect('http://www.getpersonas.com/gallery/All/Popular',
permanent=True)),
url('^persona/(?P<persona_id>\d+)', 'addons.views.persona_redirect',
name='persona'),
# Redirect top-tags to tags/top
('^top-tags/?',
lambda r: redirect('tags.top_cloud', permanent=True)),
('^personas/film and tv/?$',
lambda r: redirect('browse.personas', 'film-and-tv', permanent=True)),
('^addons/versions/(\d+)/?$',
lambda r, id: redirect('addons.versions', id, permanent=True)),
('^addons/versions/(\d+)/format:rss$',
lambda r, id: redirect('addons.versions.rss', id, permanent=True)),
# Legacy redirect. Requires a view to get extra data not provided in URL.
('^versions/updateInfo/(?P<version_id>\d+)',
'versions.views.update_info_redirect'),
('^addons/reviews/(\d+)/format:rss$',
lambda r, id: redirect('addons.reviews.list.rss', id, permanent=True)),
('^search-engines.*$',
lambda r: redirect(urlparams(reverse('search.search'), atype=4),
permanent=True)),
('^addons/contribute/(\d+)/?$',
lambda r, id: redirect('addons.contribute', id, permanent=True)),
('^recommended$',
lambda r: redirect(reverse('browse.extensions') + '?sort=featured',
permanent=True)),
('^recommended/format:rss$',
lambda r: redirect('browse.featured.rss', permanent=True)),
)
urlpatterns += patterns('piston.authentication.oauth.views',
url(r'^oauth/request_token/$', 'get_request_token',
name='oauth.request_token'),
url(r'^oauth/authorize/$', 'authorize_request_token',
name='oauth.authorize'),
url(r'^oauth/access_token/$', 'get_access_token',
name='oauth.access_token'),
)
if 'django_qunit' in settings.INSTALLED_APPS:
def _zamboni_qunit(request, path, template):
from time import time
import django_qunit.views
import jingo
import mock
# Patch `js` so that CI gets cache-busted JS with TEMPLATE_DEBUG=True.
# (This will be fixed in `jingo-minify` with bug 717094.)
from jingo_minify.helpers import _build_html
import jinja2
def js(bundle, defer=False, async=False):
items = settings.MINIFY_BUNDLES['js'][bundle]
attrs = ['src="%s?v=%s"' % ('%s', time())]
if defer:
attrs.append('defer')
if async:
attrs.append('async')
string = '<script %s></script>' % ' '.join(attrs)
return _build_html(items, string)
ctx = django_qunit.views.get_suite_context(request, path)
ctx.update(timestamp=time(), Mock=mock.Mock, js=js)
response = render(request, template, ctx)
# This allows another site to embed the QUnit suite
# in an iframe (for CI).
response['x-frame-options'] = ''
return response
def zamboni_qunit(request, path):
return _zamboni_qunit(request, path, 'qunit/qunit.html')
urlpatterns += patterns('',
url(r'^qunit/(?P<path>.*)', zamboni_qunit),
url(r'^_qunit/', include('django_qunit.urls')),
)
if settings.TEMPLATE_DEBUG:
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
if settings.SERVE_TMP_PATH and settings.DEBUG:
urlpatterns += patterns('',
(r'^tmp/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.TMP_PATH}),
)
| 31.260684 | 79 | 0.618455 |
4fda65b6e29e75ce48bb623b22e83dc5ee4a6118 | 407 | py | Python | source/interprocedural_analyses/taint/test/integration/recognized_callable_targets.py | rcramblit/pyre-check | fa15d2021ab1bf5b52aae8c09d1520e2b7bcf6c9 | [
"MIT"
] | 1 | 2021-05-17T23:13:52.000Z | 2021-05-17T23:13:52.000Z | source/interprocedural_analyses/taint/test/integration/recognized_callable_targets.py | rcramblit/pyre-check | fa15d2021ab1bf5b52aae8c09d1520e2b7bcf6c9 | [
"MIT"
] | 1 | 2021-08-16T12:11:37.000Z | 2021-08-16T12:11:37.000Z | source/interprocedural_analyses/taint/test/integration/recognized_callable_targets.py | rcramblit/pyre-check | fa15d2021ab1bf5b52aae8c09d1520e2b7bcf6c9 | [
"MIT"
] | 1 | 2021-02-20T13:09:30.000Z | 2021-02-20T13:09:30.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import __test_sink, __test_source, to_callable_target
@to_callable_target
def callable_target(arg):
__test_sink(arg)
def test_callable_target():
x = __test_source()
callable_target.async_schedule(x)
| 23.941176 | 67 | 0.771499 |
3bd7055c37102d5553a4c02c8851e44dcdabd8b1 | 114 | py | Python | shop_app/serializers/__init__.py | syz247179876/e_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
] | 7 | 2021-04-10T13:20:56.000Z | 2022-03-29T15:00:29.000Z | shop_app/serializers/__init__.py | syz247179876/E_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
] | 9 | 2021-05-11T03:53:31.000Z | 2022-03-12T00:58:03.000Z | shop_app/serializers/__init__.py | syz247179876/E_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
] | 2 | 2020-11-24T08:59:22.000Z | 2020-11-24T14:10:59.000Z |
# -*- coding: utf-8 -*-
# @Time : 2020/6/1 16:56
# @Author : 司云中
# @File : __init__.py.py
# @Software: PyCharm
| 22.8 | 25 | 0.561404 |
0e0ec6fe50262f375ee64d31934db3ef49a59a35 | 2,105 | py | Python | cogdl/models/nn/m3s.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/models/nn/m3s.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/models/nn/m3s.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null |
import torch.nn.functional as F
from cogdl.layers import GCNLayer
from cogdl.trainers.m3s_trainer import M3STrainer
from .. import BaseModel, register_model
@register_model("m3s")
class M3S(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--num-features", type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--hidden-size", type=int, default=64)
parser.add_argument("--dropout", type=float, default=0)
parser.add_argument("--num-clusters", type=int, default=50)
parser.add_argument("--num-stages", type=int, default=10)
parser.add_argument("--epochs-per-stage", type=int, default=50)
parser.add_argument("--label-rate", type=float, default=1)
parser.add_argument("--num-new-labels", type=int, default=2)
parser.add_argument("--alpha", type=float, default=1)
parser.add_argument("--approximate", action="store_true")
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_features,
args.hidden_size,
args.num_classes,
args.dropout,
)
def __init__(self, num_features, hidden_size, num_classes, dropout):
super(M3S, self).__init__()
self.dropout = dropout
self.gcn1 = GCNLayer(num_features, hidden_size)
self.gcn2 = GCNLayer(hidden_size, num_classes)
def get_embeddings(self, graph):
graph.sym_norm()
h = graph.x
h = self.gcn1(graph, h)
h = F.relu(F.dropout(h, self.dropout, training=self.training))
return h.detach().cpu().numpy()
def forward(self, graph):
graph.sym_norm()
h = graph.x
h = self.gcn1(graph, h)
h = F.dropout(F.relu(h), self.dropout, training=self.training)
h = self.gcn2(graph, h)
return h
def predict(self, data):
return self.forward(data)
@staticmethod
def get_trainer(args):
return M3STrainer
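# Hedged sketch (cogdl conventions assumed): once registered as "m3s", a
# configured model instance is typically obtained via
#   model = M3S.build_model_from_args(args)
# where `args` carries the hyper-parameters declared in add_args() above, and
# training is delegated to the M3STrainer returned by get_trainer().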
| 33.412698 | 72 | 0.628504 |
2a0fad35ed34ebe1694ef130ee8da74d13444a22 | 737 | py | Python | profiles_api/permissions.py | oyekanmiayo/profiles-rest-api | ed2746932b01d453b15a1be5ba182a49294680dd | [
"MIT"
] | null | null | null | profiles_api/permissions.py | oyekanmiayo/profiles-rest-api | ed2746932b01d453b15a1be5ba182a49294680dd | [
"MIT"
] | 7 | 2020-05-16T17:14:22.000Z | 2022-02-10T10:37:49.000Z | profiles_api/permissions.py | oyekanmiayo/profiles-rest-api | ed2746932b01d453b15a1be5ba182a49294680dd | [
"MIT"
] | null | null | null |
from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
"""Allow users to edit their own profile"""
def has_object_permission(self, request, view, obj):
"""Check user is trying to update their own profile"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
"""Allow users to edit their own status"""
def has_object_permission(self, request, view, obj):
"""Check the user is trying to update their own status"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id
| 36.85 | 65 | 0.696065 |
245ef5f1ca14031aa062c6e24a9d839dc7d1ef6f | 1,894 | py | Python | lib/surface/compute/sole_tenancy/node_groups/update.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/sole_tenancy/node_groups/update.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/sole_tenancy/node_groups/update.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Update node group command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.sole_tenancy import node_groups
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.sole_tenancy.node_groups import flags
class Update(base.UpdateCommand):
"""Updates a Google Compute Engine node group."""
@staticmethod
def Args(parser):
flags.MakeNodeGroupArg().AddArgument(parser)
flags.AddUpdateArgsToParser(parser)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
messages = holder.client.messages
groups_client = node_groups.NodeGroupsClient(
holder.client.apitools_client, messages, holder.resources)
node_group_ref = flags.MakeNodeGroupArg().ResolveAsResource(
args, holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
return groups_client.Update(
node_group_ref,
node_template=args.node_template,
additional_node_count=args.add_nodes,
delete_nodes=args.delete_nodes)
| 37.137255 | 77 | 0.774023 |
517b9302bad1201348c69437a6a12d05423f8514 | 2,579 | py | Python | App/alias.py | Jeffacy99/Blog | d9f45935998a073e4a3c947456be23c74ae64fef | [
"MIT"
] | null | null | null | App/alias.py | Jeffacy99/Blog | d9f45935998a073e4a3c947456be23c74ae64fef | [
"MIT"
] | 5 | 2020-03-24T18:10:26.000Z | 2022-03-12T00:12:33.000Z | App/alias.py | Jeffacy99/Blog-app | d9f45935998a073e4a3c947456be23c74ae64fef | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ********************************************************************************
# Copyright © 2019 jianglin
# File Name: alias.py
# Author: jianglin
# Email: mail@honmaple.com
# Created: 2019-07-13 00:29:40 (CST)
# Last Update: Tuesday 2019-09-24 18:09:14 (CST)
# By:
# Description:
# ********************************************************************************
import hashlib
from functools import wraps
from flask import abort, current_app, request
from werkzeug.routing import MethodNotAllowed, NotFound, RequestRedirect
FUNCTION = dict()
def view_function_cache(func):
@wraps(func)
def _view_function(url, method='GET'):
if len(FUNCTION) > 100:
            # iterate over a snapshot: popping from FUNCTION while iterating
            # FUNCTION.items() directly would raise a RuntimeError on Python 3
            for k, v in list(FUNCTION.items()):
if v is None:
FUNCTION.pop(k)
key = method + url
key = str(hashlib.md5(key.encode("UTF-8")).hexdigest())
if key in FUNCTION:
return FUNCTION[key]
FUNCTION[key] = func(url, method)
return FUNCTION[key]
return _view_function
# https://stackoverflow.com/questions/38488134/get-the-flask-view-function-that-matches-a-url
@view_function_cache
def get_view_function(url, method='GET'):
adapter = current_app.url_map.bind('localhost')
try:
match = adapter.match(url, method=method)
except RequestRedirect as e:
# recursively match redirects
return get_view_function(e.new_url, method)
except (MethodNotAllowed, NotFound):
# no match
return None
try:
# return the view function and arguments
return current_app.view_functions[match[0]], match[1]
except KeyError:
# no view is associated with the endpoint
return None
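# Hedged usage sketch (the route is hypothetical): within an application or
# request context,
#   match = get_view_function('/pages/about')
#   if match is not None:
#       view, view_args = match
#       response = view(**view_args)
# which is essentially what redirect_en() below does for /en/<path> URLs.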
def redirect_en(uri):
view_function = get_view_function(
"/" + uri,
request.method,
)
if view_function is None:
abort(404)
request.environ["HTTP_ACCEPT_LANGUAGE"] = "en-US,en;q=0.5"
return view_function[0](**view_function[1])
def init_app(app):
app.add_url_rule(
"/en",
defaults={"uri": ""},
view_func=redirect_en,
)
app.add_url_rule(
"/en/<path:uri>",
view_func=redirect_en,
)
# @app.before_request
# def before_request():
# if request.path.startswith("/en/"):
# request.environ["HTTP_ACCEPT_LANGUAGE"] = "en-US,en;q=0.5"
# url_map = list(app.url_map.iter_rules())
# for rule in url_map:
# app.add_url_rule("/en" + rule.rule, rule.endpoint, alias=True)
| 28.340659 | 93 | 0.589376 |
5ed61c6b216c04a0b92a5a9c426679773aea8f77 | 3,702 | py | Python | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/tests/credentials.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2019-01-13T09:19:10.000Z | 2019-02-15T01:21:02.000Z | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/tests/credentials.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | null | null | null | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/tests/credentials.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2020-03-08T01:58:25.000Z | 2020-12-20T10:34:54.000Z |
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for the Credentials Python bindings.
Note that this just tests the bindings work. It does not intend to test
the functionality, that's already done in other tests.
"""
from samba import credentials
import samba.tests
class CredentialsTests(samba.tests.TestCase):
def setUp(self):
super(CredentialsTests, self).setUp()
self.creds = credentials.Credentials()
def test_set_username(self):
self.creds.set_username("somebody")
self.assertEquals("somebody", self.creds.get_username())
def test_set_password(self):
self.creds.set_password("S3CreT")
self.assertEquals("S3CreT", self.creds.get_password())
def test_set_domain(self):
self.creds.set_domain("ABMAS")
self.assertEquals("ABMAS", self.creds.get_domain())
def test_set_realm(self):
self.creds.set_realm("myrealm")
self.assertEquals("MYREALM", self.creds.get_realm())
def test_parse_string_anon(self):
self.creds.parse_string("%")
self.assertEquals("", self.creds.get_username())
self.assertEquals(None, self.creds.get_password())
def test_parse_string_user_pw_domain(self):
self.creds.parse_string("dom\\someone%secr")
self.assertEquals("someone", self.creds.get_username())
self.assertEquals("secr", self.creds.get_password())
self.assertEquals("DOM", self.creds.get_domain())
def test_bind_dn(self):
self.assertEquals(None, self.creds.get_bind_dn())
self.creds.set_bind_dn("dc=foo,cn=bar")
self.assertEquals("dc=foo,cn=bar", self.creds.get_bind_dn())
def test_is_anon(self):
self.creds.set_username("")
self.assertTrue(self.creds.is_anonymous())
self.creds.set_username("somebody")
self.assertFalse(self.creds.is_anonymous())
self.creds.set_anonymous()
self.assertTrue(self.creds.is_anonymous())
def test_workstation(self):
# FIXME: This is uninitialised, it should be None
#self.assertEquals(None, self.creds.get_workstation())
self.creds.set_workstation("myworksta")
self.assertEquals("myworksta", self.creds.get_workstation())
def test_get_nt_hash(self):
self.creds.set_password("geheim")
self.assertEquals('\xc2\xae\x1f\xe6\xe6H\x84cRE>\x81o*\xeb\x93',
self.creds.get_nt_hash())
def test_guess(self):
# Just check the method is there and doesn't raise an exception
self.creds.guess()
def test_set_cmdline_callbacks(self):
self.creds.set_cmdline_callbacks()
def test_authentication_requested(self):
self.creds.set_username("")
self.assertFalse(self.creds.authentication_requested())
self.creds.set_username("somebody")
self.assertTrue(self.creds.authentication_requested())
def test_wrong_password(self):
self.assertFalse(self.creds.wrong_password())
| 36.653465 | 73 | 0.69557 |
a2d514dcc02eaf93ab8e5ce43856f0f643cfde4f | 1,409 | py | Python | client/plugins/installer_plugin.py | scudette/grr | d4257c5259af881e28a7d62e9837fa13352e2bf6 | [
"Apache-2.0"
] | 6 | 2015-04-03T02:25:28.000Z | 2021-11-17T21:42:59.000Z | client/plugins/installer_plugin.py | defaultnamehere/grr | ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e | [
"Apache-2.0"
] | null | null | null | client/plugins/installer_plugin.py | defaultnamehere/grr | ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Example Installer plugin.
This is an example plugin to illustrate how GRR installation can be customized.
In this example we want to also uninstall an old service called "old_service" as
part of the GRR installation. This could be because we decided to rename the GRR
service itself (by default GRR will update its own service name).
To include arbitrary plugins into the deployed client, you can repack the client
using the client_builder.py tool:
python grr/client/client_build.py \
--config /etc/grr/grr-server.yaml --verbose \
--platform windows --arch amd64 deploy \
-p grr/client/plugins/installer_plugin.py
"""
import pywintypes
import win32serviceutil
import winerror
from grr.client import installer
class StopOldService(installer.Installer):
def RunOnce(self):
"""Stop and remove an old unneeded service during installation."""
service_name = "My Old Service Name"
try:
win32serviceutil.StopService(service_name)
except pywintypes.error as e:
if e[0] not in [winerror.ERROR_SERVICE_NOT_ACTIVE,
winerror.ERROR_SERVICE_DOES_NOT_EXIST]:
raise OSError("Could not stop service: {0}".format(e))
try:
win32serviceutil.RemoveService(service_name)
except pywintypes.error as e:
if e[0] != winerror.ERROR_SERVICE_DOES_NOT_EXIST:
raise OSError("Could not remove service: {0}".format(e))
| 32.767442 | 80 | 0.74308 |
a644511a5f45b8d6a240ff6e8b70f8774e94e482 | 1,258 | py | Python | epipy/utils/csvmanager.py | ckaus/EpiPy | f69eba6bf21fd9c60a8a6cbf7fe0d5c3eab69dce | [
"MIT"
] | 5 | 2020-04-08T09:18:00.000Z | 2020-05-07T16:18:17.000Z | epipy/utils/csvmanager.py | ckaus/EpiPy | f69eba6bf21fd9c60a8a6cbf7fe0d5c3eab69dce | [
"MIT"
] | 3 | 2016-02-10T14:11:49.000Z | 2017-06-07T07:52:56.000Z | epipy/utils/csvmanager.py | ckaus/EpiPy | f69eba6bf21fd9c60a8a6cbf7fe0d5c3eab69dce | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This file contains functionality for reading a CSV file."""
import csv
from epipy.utils import logging
def read(file_name='', column=[]):
"""Read a CSV file by given header.
:param file_name: the file name
:type file_name: str
:param column: the columns of CSV file
:type column: list
:raises: *IOError*, *csv.Error* or *ValueError* if CSV file is not readable
:returns: a content of CSV file
:rtype: dict
"""
result = {}
if not file_name:
return result
try:
# Read input file
with open(file_name, 'rb') as _csvfile:
dialect = csv.Sniffer().sniff(_csvfile.read(), delimiters=';,')
_csvfile.seek(0)
reader = csv.reader(_csvfile, dialect)
header = reader.next()
# Header
if len(column) == 0:
column = header
for h in column:
result[h] = []
# Content
for row in reader:
# Match content with origin header
[result[h].append(row[header.index(h)]) for h in column]
except (IOError, csv.Error, ValueError) as e:
logging.error('%s' % e)
return {}
return result
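# Hedged example (file name and headers are hypothetical):
#   read('cases.csv', column=['date', 'count'])
#   -> {'date': ['2020-01-01', ...], 'count': ['12', ...]}
# With column=[] (the default) every header found in the file is returned.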
| 28.590909 | 79 | 0.554054 |
364897ba0a33f004dd3ae3325853705dd9be04c9 | 244 | py | Python | setup.py | SangheonOhWDC/neko | 041a35d883ff7f7ad10ab8841c12a739fc2a73bc | [
"MIT"
] | 11 | 2021-05-05T07:03:57.000Z | 2021-12-10T04:48:55.000Z | setup.py | byin-cwi/neko | 9a09cc5585f6a5f1cb25fefc88cc3ab461b8cb12 | [
"MIT"
] | 1 | 2021-08-02T19:02:30.000Z | 2021-08-10T23:13:05.000Z | setup.py | byin-cwi/neko | 9a09cc5585f6a5f1cb25fefc88cc3ab461b8cb12 | [
"MIT"
] | 2 | 2021-06-25T02:37:18.000Z | 2022-02-18T09:29:20.000Z |
from setuptools import setup, find_packages
setup(
name="neko",
version="0.3a1",
packages=find_packages(),
install_requires=["tensorflow>=2.3", "torch>=1.6", "numpy", "scipy", "tqdm", "requests"],
python_requires=">=3.8"
)
| 24.4 | 93 | 0.643443 |
1883d7a11671298ae8473d2408d42b37b721c9fb | 4,249 | py | Python | jsondler/json_tools/sort.py | loven-doo/jsondler | 1466f022fc059a5a58e34dcc3ebddb76b5f82c5c | [
"BSD-3-Clause"
] | null | null | null | jsondler/json_tools/sort.py | loven-doo/jsondler | 1466f022fc059a5a58e34dcc3ebddb76b5f82c5c | [
"BSD-3-Clause"
] | null | null | null | jsondler/json_tools/sort.py | loven-doo/jsondler | 1466f022fc059a5a58e34dcc3ebddb76b5f82c5c | [
"BSD-3-Clause"
] | null | null | null |
import itertools
from copy import deepcopy
from jsondler.json_tools import get_by_path
def sort_dicts_list(in_json, prior_list, reverse=False):
paths_order = get_paths_order(in_json=in_json, prior_list=prior_list, reverse=reverse)
reorder_dict = {path[0]: sorted_i for sorted_i, path in enumerate(paths_order)}
out_json = [None]*len(in_json)
not_sorted_i = len(reorder_dict)
for i, dict_i in enumerate(in_json):
try:
out_json[reorder_dict[i]] = deepcopy(dict_i)
except KeyError:
out_json[not_sorted_i] = deepcopy(dict_i)
not_sorted_i += 1
return out_json
def get_paths_order(in_json, prior_list, reverse=False, preorder=None):
# returns list of paths (no regular expressions): [path_1, path_2, ..., path_n]
paths_order = list()
if preorder is None:
curr_lev = sorted(get_by_path(in_json=in_json, path_list=prior_list[0]), key=lambda p: p[1], reverse=reverse)
if len(prior_list) == 1:
return list(map(lambda p: p[0], curr_lev))
return get_paths_order(in_json=in_json,
prior_list=prior_list[1:],
reverse=reverse,
preorder=_group_by_value(curr_lev))
for group in preorder:
paths_to_get = _superpos_paths(real_paths=group, path_to_superpos=prior_list[0])
curr_lev = sorted(itertools.chain.from_iterable(map(lambda path: get_by_path(in_json=in_json,
path_list=path),
paths_to_get)),
key=lambda p: p[1], reverse=reverse)
if len(prior_list) == 1:
paths_order.__iadd__(map(lambda p: tuple(p[0]), curr_lev))
else:
paths_order.__iadd__(get_paths_order(in_json=in_json,
prior_list=prior_list[1:],
reverse=reverse,
preorder=_group_by_value(curr_lev)))
return paths_order
def _group_by_value(sorted_items):
v = None
groups_list = list()
group_list = list()
for item in sorted_items:
if item[1] != v:
if group_list:
groups_list.append(group_list.copy())
group_list = [item[0]]
v = item[1]
else:
group_list.append(item[0])
groups_list.append(group_list.copy())
return groups_list
def _superpos_paths(real_paths, path_to_superpos):
superposed_paths = [[[], True] for i in range(len(real_paths))]
for i in range(len(path_to_superpos)):
for j in range(len(real_paths)):
if not superposed_paths[j][1]:
continue
if path_to_superpos[i] == "*":
try:
superposed_paths[j][0].append(real_paths[j][i])
except IndexError:
superposed_paths[j][1] = False
superposed_paths[j][0].__iadd__(path_to_superpos[i:])
elif type(path_to_superpos[i]) in (list, tuple, set, frozenset):
try:
if real_paths[j][i] in path_to_superpos[i]:
superposed_paths[j][0].append(real_paths[j][i])
else:
superposed_paths[j][1] = False
superposed_paths[j][0].__iadd__(path_to_superpos[i:])
except IndexError:
superposed_paths[j][1] = False
superposed_paths[j][0].__iadd__(path_to_superpos[i:])
else:
try:
if real_paths[j][i] == path_to_superpos[i]:
superposed_paths[j][0].append(real_paths[j][i])
else:
superposed_paths[j][1] = False
superposed_paths[j][0].__iadd__(path_to_superpos[i:])
except IndexError:
superposed_paths[j][1] = False
superposed_paths[j][0].__iadd__(path_to_superpos[i:])
return set(map(lambda p: tuple(p[0]), superposed_paths))
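# Hedged usage sketch (the '*'-wildcard path format is assumed from the way
# get_by_path and _superpos_paths treat paths above):
#   people = [{'age': 31}, {'age': 27}, {'age': 45}]
#   sort_dicts_list(people, prior_list=[['*', 'age']])
#   -> [{'age': 27}, {'age': 31}, {'age': 45}]
# Additional entries in prior_list act as tie-breakers for equal values.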
| 43.357143 | 117 | 0.546482 |
137346ad1f48bb0f2a5e77d37f30c43ba2e0431a | 379 | py | Python | tests/flask_app.py | vb64/test.helper.flask | cc807dcea7554936fa291e83b0b0f86d91797865 | [
"MIT"
] | null | null | null | tests/flask_app.py | vb64/test.helper.flask | cc807dcea7554936fa291e83b0b0f86d91797865 | [
"MIT"
] | null | null | null | tests/flask_app.py | vb64/test.helper.flask | cc807dcea7554936fa291e83b0b0f86d91797865 | [
"MIT"
] | null | null | null |
"""
Entry point for app
"""
from flask import Flask, redirect, url_for
app = Flask(__name__) # pylint: disable=invalid-name
@app.route('/', methods=['GET', 'POST'])
def main_page():
"""
root page
"""
return "Flask OK"
@app.route('/redirect', methods=['POST'])
def redirect_page():
"""
redirect page
"""
return redirect(url_for('main_page'))
| 16.478261 | 53 | 0.609499 |
5b9a2b231a5837328fcff3e743d2e1a12aecfa10 | 2,117 | py | Python | util/readme-generator.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-01-30T13:21:30.000Z | 2018-01-30T13:21:30.000Z | util/readme-generator.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | null | null | null | util/readme-generator.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-08-29T13:26:50.000Z | 2018-08-29T13:26:50.000Z |
"""
> Author: TISparta
> Description: Script to generate readme files for this repo
"""
import os
# Extensions to include
extensions = ['cpp', 'py']
# Files to ignore
ignore = ['readme-generator.py']
# Mapping problem with urls
urls = {
'exceeding the speed limit': 'exceeding-speed-limit',
'interesting trip': 'marmelade-kingdom'
}
SECTION_NAME = "University Code Spring 5"
LINK_SECTION = "https://www.hackerrank.com/contests/university-codesprint-5/challenges"
HEADERS = ['Problem', 'Difficulty', 'Tags']
fileNames = [file for file in os.listdir('.') if
os.path.isfile(os.path.join('.', file)) and
any(file.endswith(ext) for ext in extensions)]
def parseDate (str):
str = str[str.find(':') + 1:].strip()
return str
def parseTags (str):
str = str[str.find(':') + 1:].strip().split(',')
str = [tag.strip() for tag in str]
return ', '.join(tag for tag in str)
def parseDifficulty (str):
str = str[str.find(':') + 1:].strip().split('/')
dif = int(str[0].strip())
return ':red_circle:' * dif + ':black_circle:' * (10 - dif)
def parseName (str):
str = str[:str.find('.')]
str = str.replace('-', ' ')
name = str.replace('_', ' ')
link = LINK_SECTION + '/' + (urls[name.lower()] if name.lower() in urls else '-'.join(w.lower() for w in name.split()))
return "[%s](%s)" %(name, link)
table = []
table.append(HEADERS)
table.append(['-' * 7 for _ in range(len(HEADERS))])
for file_name in fileNames:
if file_name in ignore:
continue
with open(file_name) as file:
foo = file.readline()
author = file.readline()
date = parseDate(file.readline())
tags = parseTags(file.readline())
difficulty = parseDifficulty(file.readline())
row = [parseName(file_name), difficulty, tags]
table.append(row)
table[2:] = sorted(table[2:], key = lambda row: row[1])
with open('README.md', 'w') as readme:
readme.write('# [%s](%s)\n' %(SECTION_NAME, LINK_SECTION))
readme.write('\n')
readme.write('\n'.join(' | '.join(c for c in r) for r in table))
| 29.816901 | 123 | 0.607936 |
939d8e69eb41838fb2a7af13bfbf10abfea839d1 | 7,945 | py | Python | logbook/helpers.py | Infinidat/logbook | 220d2f3409bc5e8fe13ddc8155e72d5b78ece165 | [
"BSD-3-Clause"
] | null | null | null | logbook/helpers.py | Infinidat/logbook | 220d2f3409bc5e8fe13ddc8155e72d5b78ece165 | [
"BSD-3-Clause"
] | null | null | null | logbook/helpers.py | Infinidat/logbook | 220d2f3409bc5e8fe13ddc8155e72d5b78ece165 | [
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
logbook.helpers
~~~~~~~~~~~~~~~
Various helper functions
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import errno
import time
import random
from datetime import datetime, timedelta
PY2 = sys.version_info[0] == 2
if PY2:
import __builtin__ as _builtins
else:
import builtins as _builtins
try:
import json
except ImportError:
import simplejson as json
if PY2:
from cStringIO import StringIO
iteritems = dict.iteritems
from itertools import izip as zip
xrange = _builtins.xrange
else:
from io import StringIO
zip = _builtins.zip
xrange = range
iteritems = dict.items
_IDENTITY = lambda obj: obj
if PY2:
def u(s):
return unicode(s, "unicode_escape")
else:
u = _IDENTITY
if PY2:
integer_types = (int, long)
string_types = (basestring,)
else:
integer_types = (int,)
string_types = (str,)
if PY2:
import httplib as http_client
else:
from http import client as http_client
if PY2:
#Yucky, but apparently that's the only way to do this
exec("""
def reraise(tp, value, tb=None):
raise tp, value, tb
""", locals(), globals())
else:
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
# this regexp also matches incompatible dates like 20070101 because
# some libraries (like the python xmlrpclib modules) use this
_iso8601_re = re.compile(
# date
r'(\d{4})(?:-?(\d{2})(?:-?(\d{2}))?)?'
# time
r'(?:T(\d{2}):(\d{2})(?::(\d{2}(?:\.\d+)?))?(Z|[+-]\d{2}:\d{2})?)?$'
)
_missing = object()
if PY2:
def b(x): return x
def _is_text_stream(x): return True
else:
import io
def b(x): return x.encode('ascii')
def _is_text_stream(stream): return isinstance(stream, io.TextIOBase)
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
if PY2:
if not isinstance(src, unicode):
src = unicode(src, sys.getfilesystemencoding())
if not isinstance(dst, unicode):
dst = unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Logbook rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError:
e = sys.exc_info()[1]
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
_JSON_SIMPLE_TYPES = (bool, float) + integer_types + string_types
def to_safe_json(data):
"""Makes a data structure safe for JSON silently discarding invalid
objects from nested structures. This also converts dates.
"""
def _convert(obj):
if obj is None:
return None
elif PY2 and isinstance(obj, str):
return obj.decode('utf-8', 'replace')
elif isinstance(obj, _JSON_SIMPLE_TYPES):
return obj
elif isinstance(obj, datetime):
return format_iso8601(obj)
elif isinstance(obj, list):
return [_convert(x) for x in obj]
elif isinstance(obj, tuple):
return tuple(_convert(x) for x in obj)
elif isinstance(obj, dict):
rv = {}
for key, value in iteritems(obj):
if not isinstance(key, string_types):
key = str(key)
if not is_unicode(key):
key = u(key)
rv[key] = _convert(value)
return rv
return _convert(data)
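# Minimal sketch of what to_safe_json() produces; the sample payload is made up.
# Datetimes become ISO 8601 strings and non-string keys are coerced to text.
def _example_to_safe_json():
    sample = {'count': 3, 'when': datetime(2010, 6, 1, 10, 30), 1: ('a', 'b')}
    # returns roughly {'count': 3, 'when': '2010-06-01T10:30:00Z', '1': ('a', 'b')}
    return to_safe_json(sample)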
def format_iso8601(d=None):
"""Returns a date in iso8601 format."""
if d is None:
d = datetime.utcnow()
rv = d.strftime('%Y-%m-%dT%H:%M:%S')
if d.microsecond:
rv += '.' + str(d.microsecond)
return rv + 'Z'
def parse_iso8601(value):
"""Parse an iso8601 date into a datetime object. The timezone is
normalized to UTC.
"""
m = _iso8601_re.match(value)
if m is None:
raise ValueError('not a valid iso8601 date value')
groups = m.groups()
args = []
for group in groups[:-2]:
if group is not None:
group = int(group)
args.append(group)
seconds = groups[-2]
if seconds is not None:
if '.' in seconds:
sec, usec = seconds.split('.')
args.append(int(sec))
args.append(int(usec.ljust(6, '0')))
else:
args.append(int(seconds))
rv = datetime(*args)
tz = groups[-1]
if tz and tz != 'Z':
args = [int(x) for x in tz[1:].split(':')]
delta = timedelta(hours=args[0], minutes=args[1])
if tz[0] == '+':
rv -= delta
else:
rv += delta
return rv
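# Minimal sketch of the intended round trip between format_iso8601() and
# parse_iso8601(); the timestamps are arbitrary examples. Offsets are folded
# back into UTC, as the docstring above states.
def _example_iso8601_roundtrip():
    stamp = format_iso8601(datetime(2010, 6, 1, 10, 30))   # '2010-06-01T10:30:00Z'
    assert parse_iso8601(stamp) == datetime(2010, 6, 1, 10, 30)
    # a +02:00 offset is normalized back to UTC
    assert parse_iso8601('2010-06-01T10:30:00+02:00') == datetime(2010, 6, 1, 8, 30)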
def get_application_name():
if not sys.argv or not sys.argv[0]:
return 'Python'
return os.path.basename(sys.argv[0]).title()
class cached_property(object):
"""A property that is lazily calculated and then cached."""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
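# Minimal sketch of cached_property in use: the wrapped method runs once per
# instance and the cached value is then served from the instance __dict__.
class _CachedPropertyExample(object):
    def __init__(self):
        self.calls = 0
    @cached_property
    def answer(self):
        self.calls += 1     # only incremented on the very first access
        return 42
# obj = _CachedPropertyExample(); obj.answer is 42 on every read, obj.calls stays at 1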
def get_iterator_next_method(it):
return lambda: next(it)
# python 2 support functions and aliases
def is_unicode(x):
if PY2:
return isinstance(x, unicode)
return isinstance(x, str)
| 27.975352 | 77 | 0.559597 |
ac23ec687ca28ddc0da8d8e27b6d22a2f1835ea1 | 1,972 | py | Python | src/notifier.py | AbhirajHinge/IntelligentMedicineBox | c95ee8c722a952bd81476ba3b28ca1c23399a68a | [
"MIT"
] | null | null | null | src/notifier.py | AbhirajHinge/IntelligentMedicineBox | c95ee8c722a952bd81476ba3b28ca1c23399a68a | [
"MIT"
] | null | null | null | src/notifier.py | AbhirajHinge/IntelligentMedicineBox | c95ee8c722a952bd81476ba3b28ca1c23399a68a | [
"MIT"
] | null | null | null | #!/bin/python3
from event_queue import EventQueue
import firebase_admin
from firebase_admin import credentials, db
"""
Notifier
Gathers notifications from EventQueue and sends them to PrescriptionManager.
"""
class Notifier:
_ref = None
_notification_ref= None
_prescription_ref= None
def __init__(self, event_queue, patient_name):
event_queue.register(self, ['alert', 'presc_man'])
# Fetch the service account key JSON file contents
cred = credentials.Certificate('./access_key/intelligent-medicine-box-firebase-adminsdk-iio70-6832e102bb.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://intelligent-medicine-box.firebaseio.com/'
})
# As an admin, the app has access to read and write all data, regardless of Security Rules
self._ref = db.reference('/patients/')
self._prescription_ref = self._ref.child(patient_name + '/prescriptions/')
self._notification_ref = self._ref.child(patient_name + '/notifications/')
print(self._ref.get())
def _send_alert(self, alert_event):
print('sending alert', alert_event)
self._notification_ref.push().set(alert_event.data)
def _send_pres(self, presc):
print('sending prescription', presc)
self._prescription_ref.child('pres'+presc['id']).set(presc)
def notify(self, event):
#print(event.data, 'in notifier')
if event.etype == 'alert' :
self._send_alert(event)
elif event.etype == 'presc_man':
self._send_pres(event.data['prescription'])
# print('Alert: ' + event.data)
#else if event.etype == 'print' :
# print('Print: ' + event.data)
#else if event.etype == 'update' :
# print('Update: ' + event.data)
#else:
# print('Unknown event: ' + event.data)
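# Minimal usage sketch. It assumes EventQueue() can be built without arguments and that
# events are simple objects with .etype and .data (a namedtuple stands in for the
# project's real event type); the patient name and payload fields are placeholders, and
# running it for real needs the Firebase service-account key referenced in __init__.
if __name__ == '__main__':
    from collections import namedtuple
    FakeEvent = namedtuple('FakeEvent', ['etype', 'data'])
    queue = EventQueue()                      # assumed to take no arguments
    notifier = Notifier(queue, 'demo_patient')
    notifier.notify(FakeEvent('alert', {'text': 'Time to take medicine A', 'time': '08:00'}))
    notifier.notify(FakeEvent('presc_man', {'prescription': {'id': '1', 'medicine': 'A'}}))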
| 35.854545 | 119 | 0.649594 |
59158c27c78aae2e2b0306cf3d633296c250f861 | 727 | py | Python | aws_cloudformation_policies/aws_cloudformation_stack_drifted.py | glerb/panther-analysis | bc8518644e0a8fc7735576a700aa7269b3257546 | [
"Apache-2.0"
] | null | null | null | aws_cloudformation_policies/aws_cloudformation_stack_drifted.py | glerb/panther-analysis | bc8518644e0a8fc7735576a700aa7269b3257546 | [
"Apache-2.0"
] | null | null | null | aws_cloudformation_policies/aws_cloudformation_stack_drifted.py | glerb/panther-analysis | bc8518644e0a8fc7735576a700aa7269b3257546 | [
"Apache-2.0"
] | null | null | null | from panther_base_helpers import deep_get
# Drift on Panther's own CloudFormation stacks (matched by the "Stack" tag values below) is ignored and marked as passing.
IGNORE_STACK_TAGS = {
'panther-bootstrap-gateway', 'panther-cloud-security', 'panther-core',
'panther-log-analysis'
}
def policy(resource):
if deep_get(resource, 'DriftInformation', 'StackDriftStatus') != "DRIFTED":
return True
# Some of Panther's own stacks contain Lambda functions which will always show as "drifted."
# Panther stacks have a fixed "Stack" tag, even though the real stack name is dynamic.
tags = resource['Tags']
if tags.get('Application') == 'Panther' and tags.get(
'Stack') in IGNORE_STACK_TAGS:
return True
return False
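# Minimal self-check with made-up resource snippets: a drifted Panther-owned stack
# should pass, while a drifted stack without the ignore tags should fail.
if __name__ == '__main__':
    panther_stack = {
        'DriftInformation': {'StackDriftStatus': 'DRIFTED'},
        'Tags': {'Application': 'Panther', 'Stack': 'panther-core'},
    }
    custom_stack = {
        'DriftInformation': {'StackDriftStatus': 'DRIFTED'},
        'Tags': {'Stack': 'my-app'},
    }
    assert policy(panther_stack) is True
    assert policy(custom_stack) is False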
| 33.045455 | 96 | 0.698762 |
c4c92dceb70efee50e1ed020ec044e9b8561018b | 8,681 | py | Python | train_dance.py | VuongLong/DANCE_W | 8a7dc39a16908bb4726ed57049c6a7d6698a76bc | [
"MIT"
] | null | null | null | train_dance.py | VuongLong/DANCE_W | 8a7dc39a16908bb4726ed57049c6a7d6698a76bc | [
"MIT"
] | null | null | null | train_dance.py | VuongLong/DANCE_W | 8a7dc39a16908bb4726ed57049c6a7d6698a76bc | [
"MIT"
] | null | null | null | from __future__ import print_function
import yaml
import easydict
import os
import numpy as np
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from apex import amp, optimizers
from data_loader.get_loader import get_loader
from utils.utils import *
from utils.lr_schedule import inv_lr_scheduler
from utils.loss import *
from models.LinearAverage import LinearAverage
from eval import test
# Training settings
import argparse
parser = argparse.ArgumentParser(description='Pytorch DA',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config', type=str, default='config.yaml', help='/path/to/config/file')
parser.add_argument('--source_path', type=str, default='./utils/source_list.txt', metavar='B',
help='path to source list')
parser.add_argument('--target_path', type=str, default='./utils/target_list.txt', metavar='B',
help='path to target list')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--exp_name', type=str, default='office_close', help='/path/to/config/file')
parser.add_argument("--gpu_devices", type=int, nargs='+', default=None, help="")
# args = parser.parse_args()
args = parser.parse_args()
config_file = args.config
conf = yaml.load(open(config_file))
save_config = yaml.load(open(config_file))
conf = easydict.EasyDict(conf)
gpu_devices = ','.join([str(id) for id in args.gpu_devices])
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_devices
args.cuda = torch.cuda.is_available()
source_data = args.source_path
target_data = args.target_path
evaluation_data = args.target_path
batch_size = conf.data.dataloader.batch_size
filename = source_data.split("_")[1] + "2" + target_data.split("_")[1]
filename = os.path.join("record", args.exp_name,
config_file.replace(".yaml", ""), filename)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
print("record in %s " % filename)
data_transforms = {
source_data: transforms.Compose([
transforms.Scale((256, 256)),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
target_data: transforms.Compose([
transforms.Scale((256, 256)),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
evaluation_data: transforms.Compose([
transforms.Scale((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
use_gpu = torch.cuda.is_available()
source_loader, target_loader, \
test_loader, target_folder = get_loader(source_data, target_data,
evaluation_data, data_transforms,
batch_size=batch_size, return_id=True,
balanced=conf.data.dataloader.class_balance)
dataset_test = test_loader
n_share = conf.data.dataset.n_share
n_source_private = conf.data.dataset.n_source_private
num_class = n_share + n_source_private
G, C1 = get_model_mme(conf.model.base_model, num_class=num_class,
temp=conf.model.temp)
device = torch.device("cuda")
if args.cuda:
G.cuda()
C1.cuda()
G.to(device)
C1.to(device)
ndata = target_folder.__len__()
## Memory
lemniscate = LinearAverage(2048, ndata, conf.model.temp, conf.train.momentum).cuda()
params = []
for key, value in dict(G.named_parameters()).items():
if value.requires_grad and "features" in key:
if 'bias' in key:
params += [{'params': [value], 'lr': conf.train.multi,
'weight_decay': conf.train.weight_decay}]
else:
params += [{'params': [value], 'lr': conf.train.multi,
'weight_decay': conf.train.weight_decay}]
else:
if 'bias' in key:
params += [{'params': [value], 'lr': 1.0,
'weight_decay': conf.train.weight_decay}]
else:
params += [{'params': [value], 'lr': 1.0,
'weight_decay': conf.train.weight_decay}]
criterion = torch.nn.CrossEntropyLoss().cuda()
opt_g = optim.SGD(params, momentum=conf.train.sgd_momentum,
weight_decay=0.0005, nesterov=True)
opt_c1 = optim.SGD(list(C1.parameters()), lr=1.0,
momentum=conf.train.sgd_momentum, weight_decay=0.0005,
nesterov=True)
[G, C1], [opt_g, opt_c1] = amp.initialize([G, C1],
[opt_g, opt_c1],
opt_level="O1")
G = nn.DataParallel(G)
C1 = nn.DataParallel(C1)
param_lr_g = []
for param_group in opt_g.param_groups:
param_lr_g.append(param_group["lr"])
param_lr_f = []
for param_group in opt_c1.param_groups:
param_lr_f.append(param_group["lr"])
def train():
criterion = nn.CrossEntropyLoss().cuda()
print('train start!')
data_iter_s = iter(source_loader)
data_iter_t = iter(target_loader)
len_train_source = len(source_loader)
len_train_target = len(target_loader)
for step in range(conf.train.min_step + 1):
G.train()
C1.train()
if step % len_train_target == 0:
data_iter_t = iter(target_loader)
if step % len_train_source == 0:
data_iter_s = iter(source_loader)
data_t = next(data_iter_t)
data_s = next(data_iter_s)
inv_lr_scheduler(param_lr_g, opt_g, step,
init_lr=conf.train.lr,
max_iter=conf.train.min_step)
inv_lr_scheduler(param_lr_f, opt_c1, step,
init_lr=conf.train.lr,
max_iter=conf.train.min_step)
img_s = data_s[0]
label_s = data_s[1]
img_t = data_t[0]
index_t = data_t[2]
img_s, label_s = Variable(img_s.cuda()), \
Variable(label_s.cuda())
img_t = Variable(img_t.cuda())
index_t = Variable(index_t.cuda())
if len(img_t) < batch_size:
break
if len(img_s) < batch_size:
break
opt_g.zero_grad()
opt_c1.zero_grad()
        ## Weight normalization
#import pdb; pdb.set_trace()
C1.module.weight_norm()
## Source loss calculation
feat = G(img_s)
out_s = C1(feat)
loss_s = criterion(out_s, label_s)
feat_t = G(img_t)
out_t = C1(feat_t)
feat_t = F.normalize(feat_t)
### Calculate mini-batch x memory similarity
feat_mat = lemniscate(feat_t, index_t)
### We do not use memory features present in mini-batch
feat_mat[:, index_t] = -1 / conf.model.temp
### Calculate mini-batch x mini-batch similarity
feat_mat2 = torch.matmul(feat_t,
feat_t.t()) / conf.model.temp
mask = torch.eye(feat_mat2.size(0),
feat_mat2.size(0)).bool().cuda()
feat_mat2.masked_fill_(mask, -1 / conf.model.temp)
loss_nc = conf.train.eta * entropy(torch.cat([out_t, feat_mat,
feat_mat2], 1))
loss_ent = conf.train.eta * entropy_margin(out_t, conf.train.thr,
conf.train.margin)
all = loss_nc + loss_s + loss_ent
with amp.scale_loss(all, [opt_g, opt_c1]) as scaled_loss:
scaled_loss.backward()
opt_g.step()
opt_c1.step()
opt_g.zero_grad()
opt_c1.zero_grad()
lemniscate.update_weight(feat_t, index_t)
if step % conf.train.log_interval == 0:
print('Train [{}/{} ({:.2f}%)]\tLoss Source: {:.6f} '
'Loss NC: {:.6f} Loss ENS: {:.6f}\t'.format(
step, conf.train.min_step,
100 * float(step / conf.train.min_step),
loss_s.item(), loss_nc.item(), loss_ent.item()))
if step > 0 and step % conf.test.test_interval == 0:
test(step, dataset_test, filename, n_share, num_class, G, C1,
conf.train.thr)
G.train()
C1.train()
train()
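# Minimal CPU-only sketch of the mini-batch similarity term built inside train():
# L2-normalised features, temperature scaling, and a masked diagonal so a sample never
# counts itself as a neighbor. The softmax entropy below is a simple stand-in for the
# entropy() helper imported from utils.loss, not its actual implementation, and the
# random features stand in for G(img_t).
def _sketch_minibatch_similarity(temperature=0.05, batch=4, dim=8):
    import torch
    import torch.nn.functional as F
    feat_t = F.normalize(torch.randn(batch, dim))                 # unit-norm features
    feat_mat2 = torch.matmul(feat_t, feat_t.t()) / temperature    # batch x batch similarity
    mask = torch.eye(batch, batch).bool()
    feat_mat2.masked_fill_(mask, -1 / temperature)                # drop self-similarity
    probs = F.softmax(feat_mat2, dim=1)
    return -(probs * torch.log(probs + 1e-8)).sum(dim=1).mean()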
| 37.908297 | 96 | 0.603617 |
92e41928627e8c2b941faadb24f8b7517f7f2971 | 4,355 | py | Python | segnlp/layers/encoders/lstm.py | AxlAlm/SegNLP | 89b8d077952397dfcea089376b373b117bcf6a65 | [
"Apache-2.0"
] | 1 | 2021-01-21T17:16:55.000Z | 2021-01-21T17:16:55.000Z | segnlp/layers/encoders/lstm.py | AxlAlm/SegNLP | 89b8d077952397dfcea089376b373b117bcf6a65 | [
"Apache-2.0"
] | 2 | 2021-01-24T20:07:54.000Z | 2021-01-26T16:59:28.000Z | segnlp/layers/encoders/lstm.py | AxlAlm/SegNLP | 89b8d077952397dfcea089376b373b117bcf6a65 | [
"Apache-2.0"
] | 1 | 2021-01-21T17:16:57.000Z | 2021-01-21T17:16:57.000Z |
#basics
from typing import Sequence, Union
import numpy as np
#pytorch
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from torch.nn.functional import pad
from torch import Tensor
#segnlp
from segnlp import utils
class LSTM(nn.Module):
def __init__(
self,
input_size:int,
hidden_size:int,
num_layers:int,
bidir:bool,
dropout:float=0.0,
weight_init : Union[str, dict] = None,
):
super().__init__()
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidir,
batch_first=True,
dropout = dropout
)
self.bidir = bidir
self.hidden_size = hidden_size
self.output_size = hidden_size * (2 if bidir else 1)
utils.init_weights(self, weight_init)
def forward(self, input:Union[Tensor, Sequence[Tensor]], lengths:Tensor, padding_value=0.0):
        # if the input is a sequence we concatenate the tensors;
        # if the second element is a tuple it is assumed to be the states (h0, c0)
pass_states = False
if not isinstance(input, Tensor):
#to take care of given states
if isinstance(input[1], tuple):
input, (h_0, c_0) = input
                # If the states are bidirectional but this LSTM is not, the hidden dim of the
                # states needs to be 1/2 of this LSTM's hidden dim. The states are concatenated
                # along the direction axis and passed as initial states to the LSTM
# from (NUM_LAYER*DIRECTIONS, BATCH_SIZE, HIDDEN_SIZE) -> (1, BATCH_SIZE, HIDDEN_SIZE*NR_DIRECTIONS)
if h_0.shape[-1] == (self.hidden_size/2) and not self.bidir:
# The cell state and last hidden state is used to start the decoder (first states and hidden of the decoder)
# -2 will pick the last layer forward and -1 will pick the last layer backwards
h_0 = torch.cat((h_0[-2], h_0[-1]), dim=1).unsqueeze(0)
c_0 = torch.cat((c_0[-2], c_0[-1]), dim=1).unsqueeze(0)
pass_states = True
else:
input = torch.cat(input, dim = -1)
sorted_lengths, sorted_idxs = torch.sort(lengths, descending=True)
sorted = torch.equal(sorted_lengths, lengths)
if not sorted:
lengths = sorted_lengths
_ , original_idxs = torch.sort(sorted_idxs, descending=False)
input = input[sorted_idxs]
# if a sample is length == 0, we assume its filled with zeros. So, we remove the sample,
# and then extend the dims later
non_zero_lens = lengths != 0
packed_embs = pack_padded_sequence(
input[non_zero_lens],
utils.ensure_numpy(lengths[non_zero_lens]),
batch_first=True
)
#if we are given states this are also passed
if pass_states:
lstm_packed, states = self.lstm(packed_embs, (h_0, c_0))
else:
lstm_packed, states = self.lstm(packed_embs)
output, _ = pad_packed_sequence(
lstm_packed,
batch_first=True,
padding_value=padding_value,
)
# we pad on the ends of dim 0 and dim 1
output = pad(
output,
(0, 0, 0, abs(output.size(1) - input.size(1)), 0, abs(output.size(0) - input.size(0))),
mode = 'constant',
value = 0
)
if not sorted:
output = output[original_idxs]
return output, states | 35.696721 | 128 | 0.507692 |
180b19909116d122c1406576ac6c0842f4d0fa12 | 4,403 | py | Python | chimp/simulators/atari/atari.py | sisl/Chimp | 39aecc18a635ce2608b3f604310dedd738946574 | [
"Apache-2.0"
] | 76 | 2016-03-19T17:12:24.000Z | 2022-02-27T16:33:35.000Z | chimp/simulators/atari/atari.py | afcarl/Chimp | 39aecc18a635ce2608b3f604310dedd738946574 | [
"Apache-2.0"
] | 2 | 2016-03-24T20:45:56.000Z | 2018-05-04T22:57:36.000Z | chimp/simulators/atari/atari.py | afcarl/Chimp | 39aecc18a635ce2608b3f604310dedd738946574 | [
"Apache-2.0"
] | 18 | 2016-04-06T13:03:59.000Z | 2019-04-28T03:34:09.000Z | from ale_python_interface import ALEInterface
import pygame
import numpy as np
import scipy.misc as spm
class AtariSimulator(object):
def __init__(self, settings):
'''Initiate Arcade Learning Environment (ALE) using Python interface
https://github.com/bbitmaster/ale_python_interface/wiki
- Set number of frames to be skipped, random seed, ROM and title for display.
- Retrieve a set of legal actions and their number.
- Retrieve dimensions of the original screen (width/height), and set the dimensions
of the cropped screen, together with the padding used to crop the screen rectangle.
- Set dimensions of the pygame display that will show visualization of the simulation.
(May be cropped --- showing what the learner sees, or not --- showing full Atari screen)
- Allocate memory for generated grayscale screenshots. Accepts dims in (height/width) format
'''
self.ale = ALEInterface()
self.ale.setInt("frame_skip",settings["frame_skip"])
self.ale.setInt("random_seed",settings["seed_simulator"])
self.ale.loadROM(settings["rom_dir"] + '/' + settings["rom"])
self.title = "ALE Simulator: " + str(settings["rom"])
self.actions = self.ale.getLegalActionSet()
self.n_actions = self.actions.size
self.screen_dims = self.ale.getScreenDims()
self.model_dims = settings['model_dims']
self.pad = settings['pad']
print("Original screen width/height: " + str(self.screen_dims[0]) + "/" + str(self.screen_dims[1]))
print("Cropped screen width/height: " + str(self.model_dims[0]) + "/" + str(self.model_dims[1]))
self.viz_cropped = settings['viz_cropped']
if self.viz_cropped:
self.display_dims = (int(self.model_dims[0]*2), int(self.model_dims[1]*2))
else:
self.display_dims = (int(self.screen_dims[0]*2), int(self.screen_dims[1]*2))
# preallocate an array to accept ALE screen data (height/width) !
self.screen_data = np.empty((self.screen_dims[1],self.screen_dims[0]),dtype=np.uint8)
def get_screenshot(self):
'''returns a cropped snapshot of the simulator
- store grayscale values in a preallocated array
- cut out a square from the rectangle, using provided padding value
- downsample to the desired size and transpose from (height/width) to (width/height)
'''
self.ale.getScreenGrayscale(self.screen_data)
self.tmp = self.screen_data[(self.screen_dims[1]-self.screen_dims[0]-self.pad):(self.screen_dims[1]-self.pad),:]
self.frame = spm.imresize(self.tmp,self.model_dims[::-1],interp='nearest').T #, interp='nearest'
return self.frame
def act(self,action_index):
'''function to transition the simulator from s to s' using provided action
the action that is provided is in form of an index
simulator deals with translating the index into an actual action'''
self.last_reward = self.ale.act(self.actions[action_index])
def reward(self):
'''return reward - has to be called after the "act" function'''
return self.last_reward
def episode_over(self):
'''return a boolean indicator on whether the game is still running'''
return self.ale.game_over()
def reset_episode(self):
'''reset the game that ended'''
self.ale.reset_game()
def init_viz_display(self):
'''initialize display that will show visualization'''
pygame.init()
self.screen = pygame.display.set_mode(self.display_dims)
if self.title:
pygame.display.set_caption(self.title)
def refresh_viz_display(self):
'''if display is shut down, shut the game down
else move the current simulator's frame (cropped or not cropped) into the pygame display,
after expanding it 2x along x and y dimensions'''
for event in pygame.event.get():
if event.type == pygame.QUIT:
                exit()
if self.viz_cropped:
self.surface = pygame.surfarray.make_surface(self.frame) # has already been transposed
else:
self.surface = pygame.surfarray.make_surface(self.screen_data.T)
self.screen.blit(pygame.transform.scale2x(self.surface),(0,0))
pygame.display.flip()
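# Minimal usage sketch. The settings keys mirror the ones read in __init__ above, but
# the ROM directory, ROM name and the numeric values are placeholders; a real run needs
# an ALE-supported ROM on disk and a working pygame display.
if __name__ == '__main__':
    demo_settings = {
        'frame_skip': 4,
        'seed_simulator': 123,
        'rom_dir': './roms',          # placeholder path
        'rom': 'breakout.bin',        # placeholder ROM file
        'model_dims': (84, 84),
        'pad': 15,
        'viz_cropped': True,
    }
    sim = AtariSimulator(demo_settings)
    sim.get_screenshot()
    sim.act(0)                        # index into the legal action set
    print(sim.reward(), sim.episode_over())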
| 37.956897 | 120 | 0.660005 |
529d887307c44d88fb62e4d85c058e8e76ebe953 | 9,436 | py | Python | web.py | ASionville/Python-Audio-Assistant | 0b10ace64522af3eee15b956e2bf74401dd7fa13 | [
"MIT"
] | 1 | 2020-03-24T09:28:27.000Z | 2020-03-24T09:28:27.000Z | web.py | ASionville/Python-Audio-Assistant | 0b10ace64522af3eee15b956e2bf74401dd7fa13 | [
"MIT"
] | null | null | null | web.py | ASionville/Python-Audio-Assistant | 0b10ace64522af3eee15b956e2bf74401dd7fa13 | [
"MIT"
] | null | null | null | # importing speech recognition package from google api
import speech_recognition as sr
import playsound # to play saved mp3 file
from gtts import gTTS # google text to speech
import os, sys # to save/open files
import wolframalpha # to calculate strings into formula
#from selenium import webdriver # to control browser operations
from googlesearch import search as gsearch
import webbrowser
import googletrans
import requests
from bs4 import BeautifulSoup
languages = googletrans.LANGCODES
del languages["chinese (simplified)"]
del languages["chinese (traditional)"]
languages["chinese"] = "zh-cn"
num = 1
def assistant_speaks(output, lang='fr'):
global num
# num to rename every audio file
# with different name to remove ambiguity b
num += 1
print("Mrs Nobody : ", output)
toSpeak = gTTS(text = output, lang =lang, slow = False)
# saving the audio file given by google text to speech
file = str(num)+".mp3 "
toSpeak.save(file)
# playsound package is used to play the same file.
playsound.playsound(file, True)
os.remove(file)
def scan_for_wake_up():
rObject = sr.Recognizer()
audio = ''
with sr.Microphone() as source:
# recording the audio using speech recognition
rObject.adjust_for_ambient_noise(source)
audio = rObject.listen(source)
    try:
        text = rObject.recognize_google(audio, language ='fr-FR')
    except sr.UnknownValueError:
        # nothing intelligible was heard: return an empty string so the main
        # loop simply keeps waiting for the wake word
        return ''
    return text
def get_audio():
rObject = sr.Recognizer()
audio = ''
with sr.Microphone() as source:
print("Je vous écoute...")
# recording the audio using speech recognition
rObject.adjust_for_ambient_noise(source)
audio = rObject.listen(source)
print("Stop.") # limit 5 secs
try:
text = rObject.recognize_google(audio, language ='fr-FR')
print("Vous avez dit : ", text)
return text
except:
assistant_speaks("Je n'ai pas compris, réessayez !")
return 0
def traduire(text="", src="", dest=""):
translator = googletrans.Translator() # Create object of Translator.
translated = translator.translate(str(text), src=src, dest=dest)
return(translated.text)
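# Minimal sketch of traduire(): it goes through googletrans, so it needs network
# access, and the phrases and target languages below are arbitrary examples.
def _example_traduire():
    print(traduire('bonjour tout le monde', src='fr', dest='en'))   # -> roughly 'hello everyone'
    print(traduire('good morning', src='en', dest='es'))            # -> roughly 'buenos dias'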
def get_answer(url):
res = requests.get(url)
html_page = res.content
soup = BeautifulSoup(html_page, 'html.parser')
print(soup)
return (soup)
def search(text=''):
client = wolframalpha.Client("Y3UAYV-J5KAG7RULW")
shortquery = text.strip().replace(" ", "+")
url = 'https://api.wolframalpha.com/v1/result?i=' + str(shortquery) + '%3F&appid=Y3UAYV-J5KAG7RULW'
res = client.query(text)
print(shortquery)
# Wolfram cannot resolve the question
if res['@success'] == 'false':
print("success : no")
#for keys,values in res.items():
# print(str(keys) + " : " + str(values))
        try:
            # Wolfram|Alpha could not answer directly, so fall back to its
            # "did you mean" suggestion and query that instead
            mean = res['didyoumeans']
            shortquery = (str(mean['didyoumean']['#text']))
            shortquery = shortquery.strip().replace(" ", "+")
            url = 'https://api.wolframalpha.com/v1/result?i=' + str(shortquery) + '%3F&appid=Y3UAYV-J5KAG7RULW'
            print(url)
            result = get_answer(str(url))
        except Exception as e:
            assistant_speaks("Je crois que je n'ai pas bien compris ce que vous vouliez.")
            pass
else:
print("success : yes")
result=get_answer(str(url))
try:
print(result)
result = traduire(result, 'en', 'fr')
assistant_speaks(str(result))
except Exception as e:
print("Pas de résultat")
def resolveListOrDict(variable):
if isinstance(variable, list):
return variable[0]['plaintext']
else:
return variable['plaintext']
def removeBrackets(variable):
return variable.split('(')[0]
def process_text(voix_user):
if 'cherche' in voix_user or 'joue' in voix_user:
# a basic web crawler using selenium
search_web(voix_user)
return
elif "qui es-tu" in voix_user:
speak = '''Je suis Madame Nobody, je suis là pour te servir.'''
assistant_speaks(speak)
return
# elif "calcul" in voix_user:
# query = voix_user.split("calcul ")[1]
# res = client.query(query)
# output = next(res.results).text
# print(output)
# assistant_speaks("La réponse est " + output)
# return
elif "traduire" in voix_user:
txt_to_translate = voix_user.split("en ")[0]
txt_to_translate = txt_to_translate.split("traduire ")[1]
want = voix_user.split("en ")[1]
want = traduire(text=want, src="fr", dest="en").lower()
want=languages[want]
translated = traduire(text=txt_to_translate, src="fr", dest=want)
assistant_speaks(translated, lang=want)
elif "traduis" in voix_user:
txt_to_translate = voix_user.split("en ")[0]
txt_to_translate = txt_to_translate.split("traduis ")[1]
want = voix_user.split("en ")[1]
want = traduire(text=want, src="fr", dest="en").lower()
want=languages[want]
translated = traduire(text=txt_to_translate, src="fr", dest=want)
assistant_speaks(translated, lang=want)
elif 'ouvre' in voix_user:
# another function to open
# different application availaible
open_application(voix_user)
return
else:
query = traduire(voix_user, 'fr', 'en')
search(query)
def search_web(voix_user):
for j in gsearch(voix_user, tld="fr", num=10, stop=5, pause=2):
if 'youtube' in j:
if 'channel' in j or 'user' in j:
assistant_speaks("J'ai trouvé une chaîne Youtube")
elif 'watch' in j:
assistant_speaks("J'ai trouvé une vidéo Youtube")
else:
assistant_speaks("J'ai trouvé quelque chose sur Youtube")
elif 'wikipedia' in j:
if 'en.' in j:
assistant_speaks("J'ai trouvé un article Wikipédia anglais")
elif 'fr.' in j:
assistant_speaks("J'ai trouvé un article Wikipédia français")
else:
assistant_speaks("J'ai trouvé un article Wikipédia")
else:
print(str(j))
url = j.split("//")[-1].split("/")[0].split('?')[0]
assistant_speaks("J'ai trouvé le site " + str(url))
assistant_speaks("Est-ce que c'est ce que vous cherchez ?")
ans = get_audio()
if 'oui' in str(ans) or 'ouais' in str(ans):
webbrowser.open(j, new=0, autoraise=True)
sys.exit()
sys.exit()
# function used to open application
# present inside the system.
def open_application(voix_user):
if "chrome" in voix_user:
assistant_speaks("Google Chrome")
try:
os.startfile('C:\Program Files (x86)\Google\Chrome\Application\chrome.exe')
except FileNotFoundError:
assistant_speaks("Chrome n'est pas installé sur votre ordinateur")
return
elif "firefox" in voix_user or "mozilla" in voix_user:
assistant_speaks("Ouverture de Mozilla Firefox")
try:
os.startfile('C:\Program Files\Mozilla Firefox\\firefox.exe')
except FileNotFoundError:
assistant_speaks("Firefox n'est pas installé sur votre ordinateur")
return
elif "word" in voix_user:
assistant_speaks("Ouverture de Word")
try:
os.startfile('C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Microsoft Office 2013\\Word 2013.lnk')
except FileNotFoundError:
assistant_speaks("Word n'est pas installé sur votre ordinateur")
return
elif "excel" in voix_user:
assistant_speaks("Ouverture d'Excel")
try:
os.startfile('C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Microsoft Office 2013\\Excel 2013.lnk')
except FileNotFoundError:
assistant_speaks("Excel n'est pas installé sur votre ordinateur")
return
else:
assistant_speaks("Je ne connais pas cette application")
return
# Driver Code
if __name__ == "__main__":
name ='Humain'
while(1):
text0 = str(scan_for_wake_up()).lower()
if 'alexia' in text0:
playsound.playsound('listen.mp3', True)
text = str(get_audio()).lower()
            if text == '0':  # get_audio() returns 0 on failure, str() above turns it into '0'
continue
elif "stop" in str(text) or "bye" in str(text) or "quitte" in str(text):
playsound.playsound('close.mp3', True)
break
else:
# calling process text to process the query
process_text(text)
break
| 31.141914 | 119 | 0.585524 |
932ad9ddf0958ae3c162c7a3f4dcdbffcfd73ceb | 2,963 | py | Python | python/en/_numpy/python_numpy_tutorial/python_numpy_tutorial-scipy_image_operations_imageio.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | [
"MIT"
] | null | null | null | python/en/_numpy/python_numpy_tutorial/python_numpy_tutorial-scipy_image_operations_imageio.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | [
"MIT"
] | null | null | null | python/en/_numpy/python_numpy_tutorial/python_numpy_tutorial-scipy_image_operations_imageio.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
python_numpy_tutorial-scipy_image_operations_imageio.py
This Python script reworks the deprecated scipy.misc image operations using
imageio and PIL (Python Imaging Library).
An earlier draft stopped at
    img_tinted = Image.resize( (300,300), img_tinted )
which raised "AttributeError: module 'PIL.Image' has no attribute 'resize'";
the resize step below now goes through Image.fromarray(...).resize(...) instead.
The scipy.misc based version of this tutorial also works.
2019-07-03 (Wed)
"""
#elif python_package == 'PIL': # Python Imaging Library
# from PIL import Image
# from matplotlib.pyplot import imshow
#
# img = Image.open('assets/cat.jpg')
# print( img.size)
# # img.dtype
# # print( img.dtype, img.size)
# # AttributeError: 'JpegImageFile' object has no attribute 'dtype'
# imshow( img )
#
# img_tinted = img * [1,0.95,0.9]
from imageio import imread, imsave
import numpy as np # used below to move between numpy arrays and PIL images
from PIL import Image
#from scipy.misc import imresize
from matplotlib.pyplot import imshow
# Read an JPEG image into a numpy array
img = imread('cat.jpg') # AIMLDL: assets/cat.jpg is changed to cat.jpg
print( img.dtype,img.shape )
# uint8 (400, 248, 3)
#img = imread('sample_images/surfing-01.jpg')
#print( img.dtype,img.shape )
#uint8 (2048, 2048, 3)
imshow( img )
# The image is tinted by scaling each of the color channels by a different scalar constant.
# For examples, when [1, 0.95, 0.9] is multiplied by numpy broadcasting:
# the red channel is unchanged.
# the green channel is multiplied by 0.95
# the blue channel is multiplied by 0.9.
# If [1,1,1] is multiplied, it's the same as the original image.
img_tinted = img * [1,0.95,0.9]
#img_tinted = img * [1,0.99,0.98]
imshow( img_tinted )
# Clipping input data to the valid range for imshow with RGB data
# ([0..1] for floats or [0..255] for integers).
# The resize step further below now goes through PIL (Image.fromarray(...).resize(...))
# after clipping the tinted array back to the 0..255 range.
# Processing individual bands
# https://pillow.readthedocs.io/en/latest/handbook/tutorial.html
# split the image into individual bands
# source = im.split()
#
# R, G, B = 0, 1, 2
#
# # select regions where red is less than 100
# mask = source[R].point(lambda i: i < 100 and 255)
#
# # process the green band
# out = source[G].point(lambda i: i * 0.7)
#
# # paste the processed band back, but only where red was < 100
# source[G].paste(out, None, mask)
#
# # build a new multiband image
# im = Image.merge(im.mode, source)
# Resize the tinted image to be 300x300 pixels
img_tinted = np.uint8(np.clip(img_tinted, 0, 255))
img_tinted = np.array(Image.fromarray(img_tinted).resize((300, 300)))
# Write the tinted image back to disk
imsave('cat_tinted_by_imageio.jpg', img_tinted) # AIMLDL: directory assets is removed.
imshow( img_tinted ) | 34.453488 | 92 | 0.673304 |
10ab36821d5533ad8febd89df9e98c79920a3eb7 | 33,969 | py | Python | king_phisher/client/graphs.py | tanc7/king-phisher | 457e42440603bd9ba241a688bbaad6eb32ed118e | [
"BSD-3-Clause"
] | null | null | null | king_phisher/client/graphs.py | tanc7/king-phisher | 457e42440603bd9ba241a688bbaad6eb32ed118e | [
"BSD-3-Clause"
] | null | null | null | king_phisher/client/graphs.py | tanc7/king-phisher | 457e42440603bd9ba241a688bbaad6eb32ed118e | [
"BSD-3-Clause"
] | 1 | 2019-11-03T23:47:56.000Z | 2019-11-03T23:47:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/graphs.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import string
from king_phisher import color
from king_phisher import ipaddress
from king_phisher import its
from king_phisher import ua_parser
from king_phisher import utilities
from king_phisher.client import client_rpc
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.constants import ColorHexCode
from king_phisher.constants import OSFamily
from boltons import iterutils
from gi.repository import Gtk
from smoke_zephyr.requirements import check_requirements
from smoke_zephyr.utilities import unique
try:
import matplotlib
matplotlib.rcParams['backend'] = 'GTK3Cairo'
from matplotlib import dates
from matplotlib import patches
from matplotlib import pyplot
from matplotlib import ticker
from matplotlib import lines
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.backends.backend_gtk3cairo import FigureManagerGTK3Cairo as FigureManager
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
except ImportError:
has_matplotlib = False
"""Whether the :py:mod:`matplotlib` module is available."""
else:
if not its.frozen and check_requirements(['matplotlib>=1.5.1']):
has_matplotlib = False
else:
has_matplotlib = True
try:
import mpl_toolkits.basemap
except ImportError:
has_matplotlib_basemap = False
"""Whether the :py:mod:`mpl_toolkits.basemap` module is available."""
else:
if not its.frozen and check_requirements(['basemap>=1.0.7']):
has_matplotlib_basemap = False
else:
has_matplotlib_basemap = True
EXPORTED_GRAPHS = {}
MPL_COLOR_NULL = 'darkcyan'
__all__ = ('export_graph_provider', 'get_graph', 'get_graphs', 'CampaignGraph')
def export_graph_provider(cls):
"""
Decorator to mark classes as valid graph providers. This decorator also sets
the :py:attr:`~.CampaignGraph.name` attribute.
:param class cls: The class to mark as a graph provider.
:return: The *cls* parameter is returned.
"""
if not issubclass(cls, CampaignGraph):
raise RuntimeError("{0} is not a subclass of CampaignGraph".format(cls.__name__))
if not cls.is_available:
return None
graph_name = cls.__name__[13:]
cls.name = graph_name
EXPORTED_GRAPHS[graph_name] = cls
return cls
def get_graph(graph_name):
"""
Return the graph providing class for *graph_name*. The class providing the
specified graph must have been previously exported using
:py:func:`.export_graph_provider`.
:param str graph_name: The name of the graph provider.
:return: The graph provider class.
:rtype: :py:class:`.CampaignGraph`
"""
return EXPORTED_GRAPHS.get(graph_name)
def get_graphs():
"""
Get a list of all registered graph providers.
:return: All registered graph providers.
:rtype: list
"""
return sorted(EXPORTED_GRAPHS.keys())
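# Minimal sketch of how the registry above is typically consumed. It assumes the module
# has been fully imported, so the @export_graph_provider classes further down have
# already registered themselves in EXPORTED_GRAPHS.
def _example_registry_lookup():
    for graph_name in get_graphs():
        provider = get_graph(graph_name)        # a CampaignGraph subclass
        print(graph_name, provider.name_human)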
class GraphBase(object):
"""
A basic graph provider for using :py:mod:`matplotlib` to create graph
representations of campaign data. This class is meant to be subclassed
by real providers.
"""
name = 'Unknown'
"""The name of the graph provider."""
name_human = 'Unknown'
"""The human readable name of the graph provider used for UI identification."""
graph_title = 'Unknown'
"""The title that will be given to the graph."""
table_subscriptions = []
"""A list of tables from which information is needed to produce the graph."""
is_available = True
def __init__(self, application, size_request=None, style_context=None):
"""
:param tuple size_request: The size to set for the canvas.
"""
self.application = application
self.style_context = style_context
self.config = application.config
"""A reference to the King Phisher client configuration."""
self.figure, _ = pyplot.subplots()
self.figure.set_facecolor(self.get_color('bg', ColorHexCode.WHITE))
self.axes = self.figure.get_axes()
self.canvas = FigureCanvas(self.figure)
self.manager = None
self.minimum_size = (380, 200)
"""An absolute minimum size for the canvas."""
if size_request is not None:
self.resize(*size_request)
self.canvas.mpl_connect('button_press_event', self.mpl_signal_canvas_button_pressed)
self.canvas.show()
self.navigation_toolbar = NavigationToolbar(self.canvas, self.application.get_active_window())
self.popup_menu = Gtk.Menu.new()
menu_item = Gtk.MenuItem.new_with_label('Export')
menu_item.connect('activate', self.signal_activate_popup_menu_export)
self.popup_menu.append(menu_item)
menu_item = Gtk.MenuItem.new_with_label('Refresh')
menu_item.connect('activate', self.signal_activate_popup_refresh)
self.popup_menu.append(menu_item)
menu_item = Gtk.CheckMenuItem.new_with_label('Show Toolbar')
menu_item.connect('toggled', self.signal_toggled_popup_menu_show_toolbar)
self._menu_item_show_toolbar = menu_item
self.popup_menu.append(menu_item)
self.popup_menu.show_all()
self.navigation_toolbar.hide()
self._legend = None
@property
def rpc(self):
return self.application.rpc
@staticmethod
def _ax_hide_ticks(ax):
for tick in ax.yaxis.get_major_ticks():
tick.tick1On = False
tick.tick2On = False
@staticmethod
def _ax_set_spine_color(ax, spine_color):
for pos in ('top', 'right', 'bottom', 'left'):
ax.spines[pos].set_color(spine_color)
def add_legend_patch(self, legend_rows, fontsize=None):
if self._legend is not None:
self._legend.remove()
self._legend = None
fontsize = fontsize or self.fontsize_scale
legend_bbox = self.figure.legend(
tuple(patches.Patch(color=patch_color) for patch_color, _ in legend_rows),
tuple(label for _, label in legend_rows),
borderaxespad=1.25,
fontsize=fontsize,
frameon=True,
handlelength=1.5,
handletextpad=0.75,
labelspacing=0.3,
loc='lower right'
)
legend_bbox.legendPatch.set_linewidth(0)
self._legend = legend_bbox
def get_color(self, color_name, default):
"""
Get a color by its style name such as 'fg' for foreground. If the
specified color does not exist, default will be returned. The underlying
logic for this function is provided by
:py:func:`~.gui_utilities.gtk_style_context_get_color`.
:param str color_name: The style name of the color.
:param default: The default color to return if the specified one was not found.
:return: The desired color if it was found.
:rtype: tuple
"""
color_name = 'theme_color_graph_' + color_name
sc_color = gui_utilities.gtk_style_context_get_color(self.style_context, color_name, default)
return (sc_color.red, sc_color.green, sc_color.blue)
def make_window(self):
"""
Create a window from the figure manager.
:return: The graph in a new, dedicated window.
:rtype: :py:class:`Gtk.Window`
"""
if self.manager is None:
self.manager = FigureManager(self.canvas, 0)
self.navigation_toolbar.destroy()
self.navigation_toolbar = self.manager.toolbar
self._menu_item_show_toolbar.set_active(True)
window = self.manager.window
window.set_transient_for(self.application.get_active_window())
window.set_title(self.graph_title)
return window
@property
def fontsize_scale(self):
scale = self.markersize_scale
if scale < 5:
fontsize = 'xx-small'
elif scale < 7:
fontsize = 'x-small'
elif scale < 9:
fontsize = 'small'
else:
fontsize = 'medium'
return fontsize
@property
def markersize_scale(self):
bbox = self.axes[0].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())
return bbox.width * self.figure.dpi * 0.01
def mpl_signal_canvas_button_pressed(self, event):
if event.button != 3:
return
self.popup_menu.popup(None, None, None, None, event.button, Gtk.get_current_event_time())
return True
def signal_activate_popup_menu_export(self, action):
dialog = extras.FileChooserDialog('Export Graph', self.application.get_active_window())
file_name = self.config['campaign_name'] + '.png'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
return
destination_file = response['target_path']
self.figure.savefig(destination_file, format='png')
def signal_activate_popup_refresh(self, event):
self.refresh()
def signal_toggled_popup_menu_show_toolbar(self, widget):
if widget.get_property('active'):
self.navigation_toolbar.show()
else:
self.navigation_toolbar.hide()
def resize(self, width=0, height=0):
"""
Attempt to resize the canvas. Regardless of the parameters the canvas
will never be resized to be smaller than :py:attr:`.minimum_size`.
:param int width: The desired width of the canvas.
:param int height: The desired height of the canvas.
"""
min_width, min_height = self.minimum_size
width = max(width, min_width)
height = max(height, min_height)
self.canvas.set_size_request(width, height)
class CampaignGraph(GraphBase):
"""
Graph format used for the graphs generated in the dashboard and
in the create graphs tab.
"""
def _load_graph(self, info_cache):
raise NotImplementedError()
def load_graph(self):
"""Load the graph information via :py:meth:`.refresh`."""
self.refresh()
def refresh(self, info_cache=None, stop_event=None):
"""
Refresh the graph data by retrieving the information from the
remote server.
:param dict info_cache: An optional cache of data tables.
:param stop_event: An optional object indicating that the operation should stop.
:type stop_event: :py:class:`threading.Event`
:return: A dictionary of cached tables from the server.
:rtype: dict
"""
info_cache = (info_cache or {})
if not self.rpc:
return info_cache
for table in self.table_subscriptions:
if stop_event and stop_event.is_set():
return info_cache
if not table in info_cache:
query_filter = None
if 'campaign_id' in client_rpc.database_table_objects[table].__slots__:
query_filter = {'campaign_id': self.config['campaign_id']}
info_cache[table] = tuple(self.rpc.remote_table(table, query_filter=query_filter))
for ax in self.axes:
ax.clear()
if self._legend is not None:
self._legend.remove()
self._legend = None
self._load_graph(info_cache)
self.figure.suptitle(
self.graph_title,
color=self.get_color('fg', ColorHexCode.BLACK),
size=14,
weight='bold',
y=0.97
)
self.canvas.draw()
return info_cache
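# Minimal sketch of the contract export_graph_provider() expects from a provider: a
# CampaignGraph subclass with a human readable name, a title, the tables it needs and a
# _load_graph() implementation. It is deliberately left undecorated so nothing extra
# lands in EXPORTED_GRAPHS; applying @export_graph_provider would register it under the
# name 'ExampleSketch'.
class CampaignGraphExampleSketch(CampaignGraph):
    """A do-nothing provider illustrating the registration contract."""
    name_human = 'Example - Registration Sketch'
    graph_title = 'Example Registration Sketch'
    table_subscriptions = ('messages',)
    def _load_graph(self, info_cache):
        # a real provider would draw info_cache['messages'] onto self.axes here
        pass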
class CampaignBarGraph(CampaignGraph):
yticklabel_fmt = "{0:,}"
def __init__(self, *args, **kwargs):
super(CampaignBarGraph, self).__init__(*args, **kwargs)
self.figure.subplots_adjust(top=0.85, right=0.85, bottom=0.05, left=0.225)
ax = self.axes[0]
ax.tick_params(
axis='both',
top='off',
right='off',
bottom='off',
left='off',
labelbottom='off'
)
ax.invert_yaxis()
self.axes.append(ax.twinx())
def _barh(self, ax, bars, height, max_bars=None):
# define the necessary colors
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_bar_bg = self.get_color('bar_bg', ColorHexCode.GRAY)
color_bar_fg = self.get_color('bar_fg', ColorHexCode.BLACK)
ax.set_axis_bgcolor(color_bg)
self.resize(height=60 + 20 * len(bars))
# draw the foreground / filled bar
bar_container = ax.barh(
range(len(bars)),
bars,
height=height,
color=color_bar_fg,
linewidth=0
)
# draw the background / unfilled bar
largest_bar = (max(bars) if len(bars) else 0)
ax.barh(
range(len(bars)),
[largest_bar - bar for bar in bars],
left=bars,
height=height,
color=color_bar_bg,
linewidth=0
)
return bar_container
def _load_graph(self, info_cache):
raise NotImplementedError()
def _graph_null_bar(self, title):
return self.graph_bar([0], 1, [''], xlabel=title)
def graph_bar(self, bars, max_bars, yticklabels, xlabel=None):
"""
Create a horizontal bar graph with better defaults for the standard use
cases.
:param list bars: The values of the bars to graph.
:param int max_bars: The number to treat as the logical maximum number of plotted bars.
:param list yticklabels: The labels to use on the x-axis.
:param str xlabel: The label to give to the y-axis.
:return: The bars created using :py:mod:`matplotlib`
:rtype: `matplotlib.container.BarContainer`
"""
height = 0.275
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_fg = self.get_color('fg', ColorHexCode.BLACK)
ax1, ax2 = self.axes # primary axis
bar_container = self._barh(ax1, bars, height, max_bars)
yticks = [float(y) + (height / 2) for y in range(len(bars))]
# this makes the top bar shorter than the rest
# ax1.set_ybound(0, max(len(bars), max_bars))
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticklabels, color=color_fg, size=10)
ax2.set_yticks(yticks)
ax2.set_yticklabels([self.yticklabel_fmt.format(bar) for bar in bars], color=color_fg, size=12)
ax2.set_ylim(ax1.get_ylim())
# remove the y-axis tick marks
self._ax_hide_ticks(ax1)
self._ax_hide_ticks(ax2)
self._ax_set_spine_color(ax1, color_bg)
self._ax_set_spine_color(ax2, color_bg)
if xlabel:
ax1.set_xlabel(xlabel, color=color_fg, size=12)
return bar_container
class CampaignLineGraph(CampaignGraph):
def __init__(self, *args, **kwargs):
super(CampaignLineGraph, self).__init__(*args, **kwargs)
def _load_graph(self, info_cache):
raise NotImplementedError()
class CampaignPieGraph(CampaignGraph):
def __init__(self, *args, **kwargs):
super(CampaignPieGraph, self).__init__(*args, **kwargs)
self.figure.subplots_adjust(top=0.85, right=0.75, bottom=0.05, left=0.05)
def _load_graph(self, info_cache):
raise NotImplementedError()
def _graph_null_pie(self, title):
ax = self.axes[0]
ax.pie(
(100,),
autopct='%1.0f%%',
colors=(self.get_color('pie_low', ColorHexCode.GRAY),),
labels=(title,),
shadow=True,
startangle=225,
textprops={'color': self.get_color('fg', ColorHexCode.BLACK)}
)
ax.axis('equal')
return
def graph_pie(self, parts, autopct=None, labels=None, legend_labels=None):
colors = color.get_scale(
self.get_color('pie_low', ColorHexCode.BLACK),
self.get_color('pie_high', ColorHexCode.GRAY),
len(parts),
ascending=False
)
ax = self.axes[0]
pie = ax.pie(
parts,
autopct=autopct,
colors=colors,
explode=[0.1] + ([0] * (len(parts) - 1)),
labels=labels or tuple("{0:.1f}%".format(p) for p in parts),
labeldistance=1.15,
shadow=True,
startangle=45,
textprops={'color': self.get_color('fg', ColorHexCode.BLACK)},
wedgeprops={'linewidth': 0}
)
ax.axis('equal')
if legend_labels is not None:
self.add_legend_patch(tuple(zip(colors, legend_labels)), fontsize='x-small')
return pie
@export_graph_provider
class CampaignGraphDepartmentComparison(CampaignBarGraph):
"""Display a graph which compares the different departments."""
graph_title = 'Department Comparison'
name_human = 'Bar - Department Comparison'
table_subscriptions = ('company_departments', 'messages', 'visits')
yticklabel_fmt = "{0:.01f}%"
def _load_graph(self, info_cache):
departments = info_cache['company_departments']
departments = dict((department.id, department.name) for department in departments)
messages = info_cache['messages']
message_departments = dict((message.id, departments[message.company_department_id]) for message in messages if message.company_department_id is not None)
if not len(message_departments):
self._graph_null_bar('')
return
messages = [message for message in messages if message.id in message_departments]
visits = info_cache['visits']
visits = [visit for visit in visits if visit.message_id in message_departments]
visits = unique(visits, key=lambda visit: visit.message_id)
department_visits = collections.Counter()
department_visits.update(message_departments[visit.message_id] for visit in visits)
department_totals = collections.Counter()
department_totals.update(message_departments[message.id] for message in messages)
department_scores = dict((department, (department_visits[department] / total) * 100) for department, total in department_totals.items())
department_scores = sorted(department_scores.items(), key=lambda x: (x[1], x[0]), reverse=True)
department_scores = collections.OrderedDict(department_scores)
yticklabels, bars = zip(*department_scores.items())
self.graph_bar(bars, len(yticklabels), yticklabels)
return
@export_graph_provider
class CampaignGraphOverview(CampaignBarGraph):
"""Display a graph which represents an overview of the campaign."""
graph_title = 'Campaign Overview'
name_human = 'Bar - Campaign Overview'
table_subscriptions = ('credentials', 'visits')
def _load_graph(self, info_cache):
rpc = self.rpc
visits = info_cache['visits']
creds = info_cache['credentials']
messages_count = rpc('db/table/count', 'messages', query_filter={'campaign_id': self.config['campaign_id']})
messages_not_opened = rpc('db/table/count', 'messages', query_filter={'campaign_id': self.config['campaign_id'], 'opened': None})
bars = []
bars.append(messages_count)
bars.append(messages_count - messages_not_opened)
bars.append(len(visits))
bars.append(len(unique(visits, key=lambda visit: visit.message_id)))
if len(creds):
bars.append(len(creds))
bars.append(len(unique(creds, key=lambda cred: cred.message_id)))
yticklabels = ('Messages', 'Opened', 'Visits', 'Unique\nVisits', 'Credentials', 'Unique\nCredentials')
self.graph_bar(bars, len(yticklabels), yticklabels[:len(bars)])
return
@export_graph_provider
class CampaignGraphVisitorInfo(CampaignBarGraph):
"""Display a graph which shows the different operating systems seen from visitors."""
graph_title = 'Campaign Visitor OS Information'
name_human = 'Bar - Visitor OS Information'
table_subscriptions = ('visits',)
def _load_graph(self, info_cache):
visits = info_cache['visits']
operating_systems = collections.Counter()
for visit in visits:
user_agent = None
if visit.visitor_details:
user_agent = ua_parser.parse_user_agent(visit.visitor_details)
operating_systems.update([user_agent.os_name if user_agent and user_agent.os_name else 'Unknown OS'])
os_names = sorted(operating_systems.keys())
bars = [operating_systems[os_name] for os_name in os_names]
self.graph_bar(bars, len(OSFamily), os_names)
return
@export_graph_provider
class CampaignGraphVisitorInfoPie(CampaignPieGraph):
"""Display a graph which compares the different operating systems seen from visitors."""
graph_title = 'Campaign Visitor OS Information'
name_human = 'Pie - Visitor OS Information'
table_subscriptions = ('visits',)
def _load_graph(self, info_cache):
visits = info_cache['visits']
if not len(visits):
self._graph_null_pie('No Visitor Information')
return
operating_systems = collections.Counter()
for visit in visits:
ua = ua_parser.parse_user_agent(visit.visitor_details)
operating_systems.update([ua.os_name or 'Unknown OS' if ua else 'Unknown OS'])
(os_names, count) = tuple(zip(*reversed(sorted(operating_systems.items(), key=lambda item: item[1]))))
self.graph_pie(count, labels=tuple("{0:,}".format(os) for os in count), legend_labels=os_names)
return
@export_graph_provider
class CampaignGraphVisitsTimeline(CampaignLineGraph):
"""Display a graph which represents the visits of a campaign over time."""
graph_title = 'Campaign Visits Timeline'
name_human = 'Line - Visits Timeline'
table_subscriptions = ('visits',)
def _load_graph(self, info_cache):
# define the necessary colors
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_fg = self.get_color('fg', ColorHexCode.BLACK)
color_line_bg = self.get_color('line_bg', ColorHexCode.WHITE)
color_line_fg = self.get_color('line_fg', ColorHexCode.BLACK)
visits = info_cache['visits']
first_visits = [utilities.datetime_utc_to_local(visit.first_visit) for visit in visits]
ax = self.axes[0]
ax.tick_params(
axis='both',
which='both',
colors=color_fg,
top='off',
bottom='off'
)
ax.set_axis_bgcolor(color_line_bg)
ax.set_ylabel('Number of Visits', color=self.get_color('fg', ColorHexCode.WHITE), size=10)
self._ax_hide_ticks(ax)
self._ax_set_spine_color(ax, color_bg)
if not len(first_visits):
ax.set_yticks((0,))
ax.set_xticks((0,))
return
first_visits.sort()
ax.plot_date(
first_visits,
range(1, len(first_visits) + 1),
'-',
color=color_line_fg,
linewidth=6
)
self.figure.autofmt_xdate()
self.figure.subplots_adjust(top=0.85, right=0.95, bottom=0.25, left=0.1)
locator = dates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(dates.AutoDateFormatter(locator))
return
@export_graph_provider
class CampaignGraphMessageResults(CampaignPieGraph):
"""Display the percentage of messages which resulted in a visit."""
graph_title = 'Campaign Message Results'
name_human = 'Pie - Message Results'
table_subscriptions = ('credentials', 'visits')
def _load_graph(self, info_cache):
rpc = self.rpc
messages_count = rpc('db/table/count', 'messages', query_filter={'campaign_id': self.config['campaign_id']})
if not messages_count:
self._graph_null_pie('No Messages Sent')
return
visits_count = len(unique(info_cache['visits'], key=lambda visit: visit.message_id))
credentials_count = len(unique(info_cache['credentials'], key=lambda cred: cred.message_id))
if not credentials_count <= visits_count <= messages_count:
raise ValueError('credential visit and message counts are inconsistent')
labels = ['Without Visit', 'With Visit', 'With Credentials']
sizes = []
sizes.append((float(messages_count - visits_count) / float(messages_count)) * 100)
sizes.append((float(visits_count - credentials_count) / float(messages_count)) * 100)
sizes.append((float(credentials_count) / float(messages_count)) * 100)
if not credentials_count:
labels.pop()
sizes.pop()
if not visits_count:
labels.pop()
sizes.pop()
self.graph_pie(sizes, legend_labels=labels)
return
class CampaignGraphVisitsMap(CampaignGraph):
"""A base class to display a map which shows the locations of visit origins."""
graph_title = 'Campaign Visit Locations'
table_subscriptions = ('credentials', 'visits')
is_available = has_matplotlib_basemap
draw_states = False
def _load_graph(self, info_cache):
visits = unique(info_cache['visits'], key=lambda visit: visit.message_id)
cred_ips = set(cred.message_id for cred in info_cache['credentials'])
cred_ips = set([visit.visitor_ip for visit in visits if visit.message_id in cred_ips])
color_fg = self.get_color('fg', ColorHexCode.BLACK)
color_land = self.get_color('map_land', ColorHexCode.GRAY)
color_water = self.get_color('map_water', ColorHexCode.WHITE)
ax = self.axes[0]
bm = mpl_toolkits.basemap.Basemap(resolution='c', ax=ax, **self.basemap_args)
if self.draw_states:
bm.drawstates()
bm.drawcoastlines()
bm.drawcountries()
bm.fillcontinents(color=color_land, lake_color=color_water)
parallels = bm.drawparallels(
(-60, -30, 0, 30, 60),
labels=(1, 1, 0, 0)
)
self._map_set_line_color(parallels, color_fg)
meridians = bm.drawmeridians(
(0, 90, 180, 270),
labels=(0, 0, 0, 1)
)
self._map_set_line_color(meridians, color_fg)
bm.drawmapboundary(
fill_color=color_water,
linewidth=0
)
if not visits:
return
ctr = collections.Counter()
ctr.update([visit.visitor_ip for visit in visits])
base_markersize = self.markersize_scale
base_markersize = max(base_markersize, 3.05)
base_markersize = min(base_markersize, 9)
self._plot_visitor_map_points(bm, ctr, base_markersize, cred_ips)
self.add_legend_patch(((self.color_with_creds, 'With Credentials'), (self.color_without_creds, 'Without Credentials')))
return
def _resolve_geolocations(self, all_ips):
geo_locations = {}
public_ips = []
for visitor_ip in all_ips:
ip = ipaddress.ip_address(visitor_ip)
if ip.is_private or ip.is_loopback:
continue
public_ips.append(visitor_ip)
public_ips.sort()
for ip_chunk in iterutils.chunked(public_ips, 100):
geo_locations.update(self.rpc.geoip_lookup_multi(ip_chunk))
return geo_locations
def _plot_visitor_map_points(self, bm, ctr, base_markersize, cred_ips):
o_high = float(max(ctr.values()))
o_low = float(min(ctr.values()))
color_with_creds = self.color_with_creds
color_without_creds = self.color_without_creds
geo_locations = self._resolve_geolocations(ctr.keys())
for visitor_ip, geo_location in geo_locations.items():
if not (geo_location.coordinates.longitude and geo_location.coordinates.latitude):
continue
occurrences = ctr[visitor_ip]
pts = bm(geo_location.coordinates.longitude, geo_location.coordinates.latitude)
if o_high == o_low:
markersize = 2.0
else:
markersize = 1.0 + (float(occurrences) - o_low) / (o_high - o_low)
markersize = markersize * base_markersize
bm.plot(
pts[0],
pts[1],
'o',
markeredgewidth=0,
markerfacecolor=(color_with_creds if visitor_ip in cred_ips else color_without_creds),
markersize=markersize
)
return
def _map_set_line_color(self, map_lines, line_color):
for sub_lines, texts in map_lines.values():
for line in sub_lines:
line.set_color(line_color)
for text in texts:
text.set_color(line_color)
@property
def color_with_creds(self):
return self.get_color('map_marker1', ColorHexCode.RED)
@property
def color_without_creds(self):
return self.get_color('map_marker2', ColorHexCode.YELLOW)
@export_graph_provider
class CampaignGraphVisitsMapUSA(CampaignGraphVisitsMap):
"""Display a map of the USA which shows the locations of visit origins."""
name_human = 'Map - Visit Locations (USA)'
draw_states = True
basemap_args = dict(projection='lcc', lat_1=30, lon_0=-90, llcrnrlon=-122.5, llcrnrlat=12.5, urcrnrlon=-45, urcrnrlat=50)
@export_graph_provider
class CampaignGraphVisitsMapWorld(CampaignGraphVisitsMap):
"""Display a map of the world which shows the locations of visit origins."""
name_human = 'Map - Visit Locations (World)'
basemap_args = dict(projection='kav7', lon_0=0)
@export_graph_provider
class CampaignGraphPasswordComplexityPie(CampaignPieGraph):
"""Display a graph which displays the number of passwords which meet standard complexity requirements."""
graph_title = 'Campaign Password Complexity'
name_human = 'Pie - Password Complexity'
table_subscriptions = ('credentials',)
def _load_graph(self, info_cache):
passwords = set(cred.password for cred in info_cache['credentials'])
if not len(passwords):
self._graph_null_pie('No Credential Information')
return
ctr = collections.Counter()
ctr.update(self._check_complexity(password) for password in passwords)
self.graph_pie((ctr[True], ctr[False]), autopct='%1.1f%%', legend_labels=('Complex', 'Not Complex'))
return
def _check_complexity(self, password):
if len(password) < 8:
return False
met = 0
for char_set in (string.ascii_uppercase, string.ascii_lowercase, string.digits, string.punctuation):
for char in password:
if char in char_set:
met += 1
break
return met >= 3
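# --- Illustrative note (not part of the original provider) ---
# _check_complexity above requires a length of at least 8 and at least 3 of the 4
# character classes; e.g. 'Passw0rd' (upper + lower + digit) passes while
# 'password' (lowercase only) does not.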
class CampaignCompGraph(GraphBase):
""" Display selected campaigns data by order of campaign start date."""
graph_title = 'Campaign Comparison Graph'
name_human = 'Graph'
def __init__(self, *args, **kwargs):
super(CampaignCompGraph, self).__init__(*args, **kwargs)
ax = self.axes[0]
self.axes.append(ax.twinx())
ax2 = self.axes[1]
self._config_axes(ax, ax2)
self._campaigns = []
def _calc(self, stats, key, comp_key='messages'):
return 0 if stats[comp_key] == 0 else (float(stats[key]) / stats[comp_key]) * 100
def _config_axes(self, ax, ax2):
# define the necessary colors
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_fg = self.get_color('fg', ColorHexCode.BLACK)
color_line_bg = self.get_color('line_bg', ColorHexCode.WHITE)
ax.tick_params(
axis='both',
which='both',
colors=color_fg,
top='off',
bottom='off'
)
ax2.tick_params(
axis='both',
which='both',
colors=color_fg,
top='off',
bottom='off'
)
ax.set_axis_bgcolor(color_line_bg)
ax2.set_axis_bgcolor(color_line_bg)
title = pyplot.title('Campaign Comparison', color=color_fg, size=self.markersize_scale * 1.75, loc='left')
title.set_position([0.075, 1.05])
ax.set_ylabel('Percent Visits/Credentials', color=color_fg, size=self.markersize_scale * 1.5)
ax.set_xlabel('Campaign Name', color=color_fg, size=self.markersize_scale * 1.5)
self._ax_hide_ticks(ax)
self._ax_hide_ticks(ax2)
ax2.set_ylabel('Messages', color=color_fg, size=self.markersize_scale * 1.25, rotation=270, labelpad=20)
self._ax_set_spine_color(ax, color_bg)
self._ax_set_spine_color(ax2, color_bg)
ax2.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax.tick_params(axis='x', labelsize=10, pad=5)
def load_graph(self, campaigns):
"""
		Load the information to compare the specified campaigns and paint it to the
canvas. Campaigns are graphed on the X-axis in the order that they are
provided. No sorting of campaigns is done by this method.
:param tuple campaigns: A tuple containing campaign IDs to compare.
"""
ax = self.axes[0]
ax2 = self.axes[1]
ax.clear()
ax2.clear()
self._config_axes(ax, ax2)
rpc = self.rpc
ellipsize = lambda text: (text if len(text) < 20 else text[:17] + '...')
visits_line_color = self.get_color('line_fg', ColorHexCode.RED)
creds_line_color = self.get_color('map_marker1', ColorHexCode.BLACK)
messages_color = '#046D8B'
trained_color = '#77c67f'
ax.grid(True)
ax.set_xticks(range(len(campaigns)))
ax.set_xticklabels([ellipsize(rpc.remote_table_row('campaigns', cid).name) for cid in campaigns])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(self.markersize_scale * 1.25)
labels = ax.get_xticklabels()
pyplot.setp(labels, rotation=15)
self._campaigns = campaigns
campaigns = [rpc('/campaign/stats', cid) for cid in campaigns]
ax2.plot([stats['messages'] for stats in campaigns], label='Messages', color=messages_color, lw=3)
if sum(stats['messages-trained'] for stats in campaigns):
ax.plot([self._calc(stats, 'messages-trained', 'visits-unique') for stats in campaigns], label='Trained (Visited)', color=trained_color, lw=3)
ax.plot([self._calc(stats, 'messages-trained') for stats in campaigns], label='Trained (All)', color=trained_color, lw=3, ls='dashed')
ax.plot([self._calc(stats, 'visits') for stats in campaigns], label='Visits', color=visits_line_color, lw=3)
ax.plot([self._calc(stats, 'visits-unique') for stats in campaigns], label='Unique Visits', color=visits_line_color, lw=3, ls='dashed')
if sum(stats['credentials'] for stats in campaigns):
ax.plot([self._calc(stats, 'credentials') for stats in campaigns], label='Credentials', color=creds_line_color, lw=3)
ax.plot([self._calc(stats, 'credentials-unique') for stats in campaigns], label='Unique Credentials', color=creds_line_color, lw=3, ls='dashed')
ax.set_ylim((0, 100))
ax2.set_ylim(bottom=0)
self.canvas.set_size_request(500 + 50 * (len(campaigns) - 1), 500)
legend_patch = [
(visits_line_color, 'solid', 'Visits'),
(visits_line_color, 'dotted', 'Unique Visits')
]
if sum(stats['credentials'] for stats in campaigns):
legend_patch.extend([
(creds_line_color, 'solid', 'Credentials'),
(creds_line_color, 'dotted', 'Unique Credentials')
])
if sum(stats['messages-trained'] for stats in campaigns):
legend_patch.extend([
(trained_color, 'solid', 'Trained (Visited)'),
(trained_color, 'dotted', 'Trained (All)')
])
legend_patch.append(
(messages_color, 'solid', 'Messages')
)
self.add_legend_patch(legend_patch)
pyplot.tight_layout()
def add_legend_patch(self, legend_rows, fontsize=None):
if self._legend is not None:
self._legend.remove()
self._legend = None
legend_bbox = self.figure.legend(
tuple(lines.Line2D([], [], color=patch_color, lw=3, ls=style) for patch_color, style, _ in legend_rows),
tuple(label for _, _, label in legend_rows),
borderaxespad=1,
columnspacing=1.5,
fontsize=self.fontsize_scale,
ncol=3,
frameon=True,
handlelength=2,
handletextpad=0.5,
labelspacing=0.5,
loc='upper right'
)
legend_bbox.get_frame().set_facecolor(self.get_color('line_bg', ColorHexCode.GRAY))
for text in legend_bbox.get_texts():
text.set_color('white')
legend_bbox.legendPatch.set_linewidth(0)
self._legend = legend_bbox
def refresh(self):
self.load_graph(self._campaigns)
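# --- Illustrative sketch (not part of the original module) ---
# The add_legend_patch methods above rely on matplotlib "proxy artists": Line2D
# handles that are never drawn on the axes and only carry the color / line style
# shown in the legend. A minimal standalone version of that technique (the colors
# and labels below are made up):
def _demo_proxy_legend():
    import matplotlib.pyplot as plt
    from matplotlib import lines
    legend_rows = (('red', 'solid', 'Visits'), ('red', 'dashed', 'Unique Visits'))
    fig, ax = plt.subplots()
    handles = [lines.Line2D([], [], color=color, lw=3, ls=style) for color, style, _ in legend_rows]
    fig.legend(handles, [label for _, _, label in legend_rows], loc='upper right')
    return fig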
| 35.310811 | 155 | 0.741765 |
f8452017a584849906650f870fe946c58404dfc1 | 4,010 | py | Python | lane_detection_utils/determination_of_parallelism_with_previously_tracked_lanes.py | syeda27/MonoRARP | 71415d9fc71bc636ac1f5de1a90f033b4e519538 | [
"MIT"
] | 2 | 2020-07-22T07:05:01.000Z | 2021-11-27T13:28:03.000Z | lane_detection_utils/determination_of_parallelism_with_previously_tracked_lanes.py | syeda27/MonoRARP | 71415d9fc71bc636ac1f5de1a90f033b4e519538 | [
"MIT"
] | null | null | null | lane_detection_utils/determination_of_parallelism_with_previously_tracked_lanes.py | syeda27/MonoRARP | 71415d9fc71bc636ac1f5de1a90f033b4e519538 | [
"MIT"
] | null | null | null | """
Determination of Parallelism and Distance
Author: Juan Carlos Aragon - Allstate
Minor Editing: djp42 - Stanford
See README for more details.
We assess in this function whether or not the white marking is sufficiently parallel to a previously tracked lane.
"""
import math
def determination_of_parallelism_w(lane_detector_object, scan_args, top_left):
"""
A wrapper to make this function ~relatively~ modular and work with a class.
"""
lane_detector_object.aligned_to_tracked_lane = determination_of_parallelism(
top_left,
scan_args.rx1,
scan_args.ry1,
lane_detector_object.count_lane_group1,
lane_detector_object.count_lane_group2,
lane_detector_object.whitemarkings_average,
lane_detector_object.road1_average,
lane_detector_object.base_ptx_lane_vec_final1,
lane_detector_object.base_pty_lane_vec_final1,
lane_detector_object.mux_lane_vec_final1,
lane_detector_object.muy_lane_vec_final1,
lane_detector_object.base_ptx_lane_vec_final2,
lane_detector_object.base_pty_lane_vec_final2,
lane_detector_object.mux_lane_vec_final2,
lane_detector_object.muy_lane_vec_final2,
scan_args.angles)
def determination_of_parallelism(top_left,
rx1,
ry1,
count_lane_group1,
count_lane_group2,
whitemarkings_average,
road1_average,
base_ptx_lane_vec_final1,
base_pty_lane_vec_final1,
mux_lane_vec_final1,
muy_lane_vec_final1,
base_ptx_lane_vec_final2,
base_pty_lane_vec_final2,
mux_lane_vec_final2,
muy_lane_vec_final2,
angles):
aligned_to_tracked_lane = 0
if whitemarkings_average / road1_average > 1:
if count_lane_group1 != 0 or count_lane_group2 != 0:
for selection in range(0, 2): #we compare against just two previous final lanes
proceed = 0
if selection == 0 and count_lane_group1 != 0:
proceed = 1
xb = base_ptx_lane_vec_final1
yb = base_pty_lane_vec_final1
mux_previous_lane = mux_lane_vec_final1
muy_previous_lane = muy_lane_vec_final1
if selection == 1 and count_lane_group2 != 0:
proceed = 1
xb = base_ptx_lane_vec_final2
yb = base_pty_lane_vec_final2
mux_previous_lane = mux_lane_vec_final2
muy_previous_lane = muy_lane_vec_final2
if proceed == 1:
if abs(
angles[top_left] - 180 * (
math.atan(muy_previous_lane / mux_previous_lane)
/ 3.14159
)
) < 6:
x0 = rx1[top_left]
y0 = ry1[top_left]
Lproj = ((x0 - xb) * mux_previous_lane +
(y0 - yb) * muy_previous_lane
) / \
((mux_previous_lane)**2 +
(muy_previous_lane)**2
)
xp = xb + Lproj * mux_previous_lane
yp = yb + Lproj * muy_previous_lane
distance_to_Lane = ((x0 - xp)**2 + (y0 - yp)**2)**0.5
if distance_to_Lane < 10:
aligned_to_tracked_lane = 1
return aligned_to_tracked_lane
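# --- Illustrative sketch (not part of the original module) ---
# The alignment test above projects the candidate point (x0, y0) onto the line through
# (xb, yb) with direction (mux, muy) and thresholds the residual distance at 10 pixels.
# A standalone check of that projection formula with made-up numbers:
def _projection_distance_example():
    xb, yb = 0.0, 0.0        # base point of the tracked lane
    mux, muy = 1.0, 0.0      # direction vector (here simply the x-axis)
    x0, y0 = 5.0, 3.0        # candidate white-marking point
    lproj = ((x0 - xb) * mux + (y0 - yb) * muy) / (mux**2 + muy**2)
    xp, yp = xb + lproj * mux, yb + lproj * muy
    distance = ((x0 - xp)**2 + (y0 - yp)**2)**0.5
    return distance          # 3.0, i.e. within the 10-pixel threshold used above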
| 40.505051 | 113 | 0.518953 |
e15e1d5f160598edff676fc498f0d1510b434d48 | 406 | py | Python | api/hosts/models.py | mingweiarthurli/CMPUT-404-Project | b88bafc3e107f2b5271b58056d8ef778c861eaf9 | [
"MIT"
] | null | null | null | api/hosts/models.py | mingweiarthurli/CMPUT-404-Project | b88bafc3e107f2b5271b58056d8ef778c861eaf9 | [
"MIT"
] | null | null | null | api/hosts/models.py | mingweiarthurli/CMPUT-404-Project | b88bafc3e107f2b5271b58056d8ef778c861eaf9 | [
"MIT"
] | 2 | 2020-01-31T22:09:39.000Z | 2020-02-01T03:25:28.000Z | from django.db import models
# Create your models here.
import uuid
import json
from config.settings import DEFAULT_HOST
class Host(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
baseURL = models.URLField(null=False, blank=False)
username = models.CharField(blank=False, max_length=200)
password = models.CharField(blank=False, max_length=200) | 31.230769 | 79 | 0.770936 |
f8f93e9bb1d86f10bd61f821b40415e644666109 | 5,069 | py | Python | fvaForward.py | AlessandroGnoatto/Deep-xVA-Solver | 4b6efc0caef32424a70739c59b1867e80ab7fd6f | [
"MIT"
] | 5 | 2020-07-02T08:57:35.000Z | 2021-12-17T14:13:32.000Z | fvaForward.py | AlessandroGnoatto/Deep-xVA-Solver | 4b6efc0caef32424a70739c59b1867e80ab7fd6f | [
"MIT"
] | null | null | null | fvaForward.py | AlessandroGnoatto/Deep-xVA-Solver | 4b6efc0caef32424a70739c59b1867e80ab7fd6f | [
"MIT"
] | 1 | 2020-06-25T09:42:19.000Z | 2020-06-25T09:42:19.000Z | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from solver import BSDESolver
from XvaSolver import XvaSolver
import xvaEquation as eqn
import RecursiveEquation as receqn
import munch
from scipy.stats import norm
if __name__ == "__main__":
dim = 1 #dimension of brownian motion
P = 2048 #number of outer Monte Carlo Loops
batch_size = 64
total_time = 1.0
num_time_interval=100
strike = 100
r = 0.02
sigma=0.25
x_init=100
config = {
"eqn_config": {
"_comment": "a forward contract",
"eqn_name": "PricingForward",
"total_time": total_time,
"dim": dim,
"num_time_interval": num_time_interval,
"strike":strike,
"r":r,
"sigma":sigma,
"x_init":x_init
},
"net_config": {
"y_init_range": [-5, 5],
"num_hiddens": [dim+20, dim+20],
"lr_values": [5e-2, 5e-3],
"lr_boundaries": [2000],
"num_iterations": 4000,
"batch_size": batch_size,
"valid_size": 256,
"logging_frequency": 100,
"dtype": "float64",
"verbose": True
}
}
config = munch.munchify(config)
bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
tf.keras.backend.set_floatx(config.net_config.dtype)
#apply algorithm 1
bsde_solver = BSDESolver(config, bsde)
training_history = bsde_solver.train()
#apply trained model to evaluate value of the forward contract via Monte Carlo
simulations = bsde_solver.model.simulate_path(bsde.sample(P))
    #estimated expected positive and negative exposure
time_stamp = np.linspace(0,1,num_time_interval+1)
epe = np.mean(np.exp(-r*time_stamp)*np.maximum(simulations,0),axis=0)
ene = np.mean(np.exp(-r*time_stamp)*np.minimum(simulations,0),axis=0)
#exact solution
rv = norm()
d1 = np.array([(-r * s + np.log(x_init/strike) + (r+sigma**2/2)*s)/sigma/np.sqrt(s)
for s in time_stamp[1:]])
d2 = np.array([d1[i]-sigma*np.sqrt(s) for i,s in enumerate(time_stamp[1:])])
epe_exact = x_init*rv.cdf(d1) - strike*np.exp(-r)*rv.cdf(d2)
ene_exact = x_init*rv.cdf(-d1) - strike*np.exp(-r)*rv.cdf(-d2)
plt.figure()
plt.plot(time_stamp,[epe_exact[0]]+list(epe_exact),'b--',label='DEPE = exact solution')
plt.plot(time_stamp,epe[0],'b',label='DEPE = deep solver approximation')
plt.plot(time_stamp,[ene_exact[0]]+list(ene_exact),'r--',label='DNPE = exact solution')
plt.plot(time_stamp,ene[0],'r',label='DNPE = deep solver approximation')
plt.xlabel('t')
plt.legend()
plt.show()
# bsde_solver.model.save('testmodel.tf',save_format='tf')
# XVA computation step.
r_f = 0.04
configFVA = {
"eqn_config": {
"_comment": "XVA on a forward",
"eqn_name": "FVA",
"total_time": total_time,
"num_time_interval": num_time_interval,
"r":r,
"r_fl": r_f,
"r_fb": r_f,
"r_cl": 0.00,
"r_cl": 0.00,
"clean_value": bsde,
"clean_value_model": bsde_solver.model
},
"net_config": {
"y_init_range": [-5, 5],
"num_hiddens": [dim+20, dim+20],
"lr_values": [5e-2, 5e-3],
"lr_boundaries": [2000],
"num_iterations": 4000,
"batch_size": batch_size,
"valid_size": 256,
"logging_frequency": 100,
"dtype": "float64",
"verbose": True
}
}
configFVA = munch.munchify(configFVA)
fvabsde = getattr(receqn, configFVA.eqn_config.eqn_name)(configFVA.eqn_config)
tf.keras.backend.set_floatx(configFVA.net_config.dtype)
#apply algorithm 3
xva_solver = XvaSolver(config, fvabsde)
xva_training_history = xva_solver.train()
fva_simulations = xva_solver.model.simulate_path(fvabsde.sample(P))
print("Exact Values from analytic formulas")
exactVhat = x_init - strike*np.exp(-r * total_time)
exactV = np.exp(-(r_f - r) * total_time)*x_init - strike*np.exp(-r_f * total_time)
exactFVA = exactVhat - exactV
print("exactV = " + str(exactV))
print("exactVhat = " + str(exactVhat))
print("exactFVA = " + str(exactFVA))
print("FVA from Algorithm 3")
fvaFromSolver = fva_simulations[0,0,0]
print("fvaFromSolver = " +str(fvaFromSolver) )
fvaError = fva_simulations[0,0,0] - exactFVA
print("error = "+ str(fvaError))
| 34.719178 | 91 | 0.541724 |
0a1e878b3b2d6c0ac227d0610ea4a66a7d3ae0bb | 244 | py | Python | datawrapper/__init__.py | gka/datawrapper-api.py | 92966511f10740d9394d6bf900f08a861f50fa32 | [
"MIT"
] | null | null | null | datawrapper/__init__.py | gka/datawrapper-api.py | 92966511f10740d9394d6bf900f08a861f50fa32 | [
"MIT"
] | null | null | null | datawrapper/__init__.py | gka/datawrapper-api.py | 92966511f10740d9394d6bf900f08a861f50fa32 | [
"MIT"
] | null | null | null | """Top-level package for datawrapper."""
__author__ = """Sergio Sanchez"""
__email__ = "chekos@tacosdedatos.com"
__version__ = "0.4.2"
import datawrapper.datawrapper as dw
from IPython.display import HTML, Image
Datawrapper = dw.Datawrapper
| 22.181818 | 40 | 0.758197 |
a6ae885b50a4e6b8c687cbd28211b5ed07dab8c5 | 915 | py | Python | tools/env_utils/utils.py | fangchengji/DSGN | 9a70e1574f042d5de856ed540cfa9111099437f1 | [
"MIT"
] | 192 | 2019-08-30T08:09:51.000Z | 2021-05-18T06:24:40.000Z | tools/env_utils/utils.py | fangchengji/DSGN | 9a70e1574f042d5de856ed540cfa9111099437f1 | [
"MIT"
] | 24 | 2019-08-29T22:30:30.000Z | 2021-04-28T12:08:08.000Z | tools/env_utils/utils.py | fangchengji/DSGN | 9a70e1574f042d5de856ed540cfa9111099437f1 | [
"MIT"
] | 44 | 2019-09-17T23:00:41.000Z | 2021-04-30T07:30:03.000Z | import os
import os.path as osp
import shutil
import sys
import numpy as np
from datetime import datetime
from glob import glob
from itertools import chain
import gc
import torch
def mem_info():
import subprocess
dev = subprocess.check_output(
"nvidia-smi | grep MiB | awk -F '|' '{print $3}' | awk -F '/' '{print $1}' | grep -Eo '[0-9]{1,10}'",
shell=True)
dev = dev.decode()
dev_mem = list(map(lambda x: int(x), dev.split('\n')[:-1]))
return dev_mem
def random_int(obj=None):
return (id(obj) + os.getpid() + int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
def cmd(command):
import subprocess
output = subprocess.check_output(command, shell=True)
output = output.decode()
return output
def reset_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
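# --- Illustrative sketch (not part of the original utilities) ---
# reset_seed above makes torch/numpy sampling repeatable; two runs with the same
# seed should reproduce the same draws:
def _reset_seed_example():
    reset_seed(42)
    a = torch.rand(3)
    reset_seed(42)
    b = torch.rand(3)
    return bool(torch.equal(a, b))  # expected: True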
| 25.416667 | 109 | 0.665574 |
15726f6ce14ee60a821d6fcea38bd45ca2ffcc4a | 23,622 | py | Python | eiseg/util/coco/coco.py | He-jerry/EISeg | b562a98620fc50f4aedc130de17da6f02b1771db | [
"Apache-2.0"
] | null | null | null | eiseg/util/coco/coco.py | He-jerry/EISeg | b562a98620fc50f4aedc130de17da6f02b1771db | [
"Apache-2.0"
] | null | null | null | eiseg/util/coco/coco.py | He-jerry/EISeg | b562a98620fc50f4aedc130de17da6f02b1771db | [
"Apache-2.0"
] | 1 | 2021-11-05T07:22:25.000Z | 2021-11-05T07:22:25.000Z | import json
import time
from matplotlib.collections import PatchCollection
import numpy as np
import itertools
# from . import mask as maskUtils
import os
from collections import defaultdict
import sys
from datetime import datetime
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
def _isArrayLike(obj):
return hasattr(obj, "__iter__") and hasattr(obj, "__len__")
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# dataset, anns, cats, imgs, imgToAnns, catToImgs, imgNameToId, maxAnnId, maxImgId
self.dataset = {
"categories": [],
"images": [],
"annotations": [],
"info": "info",
"licenses": "licenses",
} # the complete json
self.anns = dict() # anns[annId]={}
self.cats = dict() # cats[catId] = {}
self.imgs = dict() # imgs[imgId] = {}
self.imgToAnns = defaultdict(list) # imgToAnns[imgId] = [ann]
self.catToImgs = defaultdict(list) # catToImgs[catId] = [imgId]
self.imgNameToId = defaultdict(list) # imgNameToId[name] = imgId
self.maxAnnId = 0
self.maxImgId = 0
if annotation_file is not None:
print("loading annotations into memory...")
tic = time.time()
dataset = json.load(open(annotation_file, "r"))
assert (
type(dataset) == dict
), "annotation file format {} not supported".format(type(dataset))
print("Done (t={:0.2f}s)".format(time.time() - tic))
self.dataset = dataset
self.createIndex()
print(
f"load coco with {len(self.dataset['images'])} images and {len(self.dataset['annotations'])} annotations."
)
def hasImage(self, imageName):
imgId = self.imgNameToId.get(imageName, None)
return imgId is not None
def hasCat(self, catIdx):
res = self.cats.get(catIdx)
return res is not None
def createIndex(self):
# create index
print("creating index...")
anns, cats, imgs = {}, {}, {}
imgNameToId, imgToAnns, catToImgs, imgNameToId = [
defaultdict(list) for _ in range(4)
]
if "annotations" in self.dataset:
for ann in self.dataset["annotations"]:
imgToAnns[ann["image_id"]].append(ann)
anns[ann["id"]] = ann
self.maxAnnId = max(self.maxAnnId, ann["id"])
if "images" in self.dataset:
for img in self.dataset["images"]:
imgs[img["id"]] = img
imgNameToId[img["file_name"]] = img["id"]
try:
imgId = int(img["id"])
self.maxImgId = max(self.maxImgId, imgId)
except:
pass
if "categories" in self.dataset:
for cat in self.dataset["categories"]:
cats[cat["id"]] = cat
if "annotations" in self.dataset and "categories" in self.dataset:
for ann in self.dataset["annotations"]:
catToImgs[ann["category_id"]].append(ann["image_id"])
# TODO: read license
print("index created!")
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgNameToId = imgNameToId
self.imgs = imgs
self.cats = cats
def setInfo(
self,
year: int = "",
version: str = "",
description: str = "",
contributor: str = "",
url: str = "",
date_created: datetime = "",
):
self.dataset["info"] = {
"year": year,
"version": version,
"description": description,
"contributor": contributor,
"url": url,
"date_created": date_created,
}
def addCategory(
self,
id: int,
name: str,
color: list,
supercategory: str = "",
):
cat = {
"id": id,
"name": name,
"color": color,
"supercategory": supercategory,
}
self.cats[id] = cat
self.dataset["categories"].append(cat)
def updateCategory(
self,
id: int,
name: str,
color: list,
supercategory: str = "",
):
cat = {
"id": id,
"name": name,
"color": color,
"supercategory": supercategory,
}
self.cats[id] = cat
for idx in range(len(self.dataset["categories"])):
if self.dataset["categories"][idx]["id"] == id:
self.dataset["categories"][idx] = cat
def addImage(
self,
file_name: str,
width: int,
height: int,
id: int = None,
license: int = "",
flickr_url: str = "",
coco_url: str = "",
date_captured: datetime = "",
):
if self.hasImage(file_name):
print(f"{file_name}图片已存在")
return
if not id:
self.maxImgId += 1
id = self.maxImgId
image = {
"id": id,
"width": width,
"height": height,
"file_name": file_name,
"license": license,
"flickr_url": flickr_url,
"coco_url": coco_url,
"date_captured": date_captured,
}
self.dataset["images"].append(image)
self.imgs[id] = image
self.imgNameToId[file_name] = id
return id
def addAnnotation(
self,
image_id: int,
category_id: int,
segmentation: list,
bbox: list = None,
area: float = None,
id: int = None,
):
if id is not None and self.anns.get(id, None) is not None:
print("标签已经存在")
return
if not id:
self.maxAnnId += 1
id = self.maxAnnId
if not bbox:
x, y, width, height = 0, 0, 0, 0
else:
x, y, width, height = bbox[:]
# TODO: cal area
if not area:
area = 0
ann = {
"id": id,
"image_id": image_id,
"category_id": category_id,
"segmentation": [segmentation],
"area": area,
"bbox": [x, y, width, height],
}
self.dataset["annotations"].append(ann)
self.anns[id] = ann
self.imgToAnns[image_id].append(ann)
self.catToImgs[category_id].append(image_id)
return id
def delAnnotation(self, annId, imgId):
if "annotations" in self.dataset:
for idx, ann in enumerate(self.dataset["annotations"]):
if ann["id"] == annId:
del self.dataset["annotations"][idx]
if annId in self.anns.keys():
del self.anns[annId]
for idx, ann in enumerate(self.imgToAnns[imgId]):
if ann["id"] == annId:
del self.imgToAnns[imgId][idx]
def updateAnnotation(self, id, imgId, points, bbox=None):
self.anns[id]["segmentation"] = [points]
for rec in self.dataset["annotations"]:
if rec["id"] == id:
rec["segmentation"] = [points]
if bbox is not None:
rec["bbox"] = bbox
break
for rec in self.dataset["annotations"]:
if rec["id"] == id:
# @todo TODO move into debug codes or controls
print("record point : ", rec["segmentation"][0][0], rec["segmentation"][0][1])
break
for rec in self.imgToAnns[imgId]:
if rec["id"] == id:
rec["segmentation"] = [points]
break
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset["info"].items():
print("{}: {}".format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset["annotations"]
else:
if not len(imgIds) == 0:
lists = [
self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns
]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(catIds) == 0
else [ann for ann in anns if ann["category_id"] in catIds]
)
anns = (
anns
if len(areaRng) == 0
else [
ann
for ann in anns
if ann["area"] > areaRng[0] and ann["area"] < areaRng[1]
]
)
if not iscrowd == None:
ids = [ann["id"] for ann in anns if ann["iscrowd"] == iscrowd]
else:
ids = [ann["id"] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset["categories"]
else:
cats = self.dataset["categories"]
cats = (
cats
if len(catNms) == 0
else [cat for cat in cats if cat["name"] in catNms]
)
cats = (
cats
if len(supNms) == 0
else [cat for cat in cats if cat["supercategory"] in supNms]
)
cats = (
cats
if len(catIds) == 0
else [cat for cat in cats if cat["id"] in catIds]
)
ids = [cat["id"] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
"""
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
# def showAnns(self, anns, draw_bbox=False):
# """
# Display the specified annotations.
# :param anns (array of object): annotations to display
# :return: None
# """
# if len(anns) == 0:
# return 0
# if "segmentation" in anns[0] or "keypoints" in anns[0]:
# datasetType = "instances"
# elif "caption" in anns[0]:
# datasetType = "captions"
# else:
# raise Exception("datasetType not supported")
# if datasetType == "instances":
# ax = plt.gca()
# ax.set_autoscale_on(False)
# polygons = []
# color = []
# for ann in anns:
# c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
# if "segmentation" in ann:
# if type(ann["segmentation"]) == list:
# # polygon
# for seg in ann["segmentation"]:
# poly = np.array(seg).reshape((int(len(seg) / 2), 2))
# polygons.append(Polygon(poly))
# color.append(c)
# else:
# # mask
# t = self.imgs[ann["image_id"]]
# if type(ann["segmentation"]["counts"]) == list:
# rle = maskUtils.frPyObjects(
# [ann["segmentation"]], t["height"], t["width"]
# )
# else:
# rle = [ann["segmentation"]]
# m = maskUtils.decode(rle)
# img = np.ones((m.shape[0], m.shape[1], 3))
# if ann["iscrowd"] == 1:
# color_mask = np.array([2.0, 166.0, 101.0]) / 255
# if ann["iscrowd"] == 0:
# color_mask = np.random.random((1, 3)).tolist()[0]
# for i in range(3):
# img[:, :, i] = color_mask[i]
# ax.imshow(np.dstack((img, m * 0.5)))
# if "keypoints" in ann and type(ann["keypoints"]) == list:
# # turn skeleton into zero-based index
# sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1
# kp = np.array(ann["keypoints"])
# x = kp[0::3]
# y = kp[1::3]
# v = kp[2::3]
# for sk in sks:
# if np.all(v[sk] > 0):
# plt.plot(x[sk], y[sk], linewidth=3, color=c)
# plt.plot(
# x[v > 0],
# y[v > 0],
# "o",
# markersize=8,
# markerfacecolor=c,
# markeredgecolor="k",
# markeredgewidth=2,
# )
# plt.plot(
# x[v > 1],
# y[v > 1],
# "o",
# markersize=8,
# markerfacecolor=c,
# markeredgecolor=c,
# markeredgewidth=2,
# )
# if draw_bbox:
# [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"]
# poly = [
# [bbox_x, bbox_y],
# [bbox_x, bbox_y + bbox_h],
# [bbox_x + bbox_w, bbox_y + bbox_h],
# [bbox_x + bbox_w, bbox_y],
# ]
# np_poly = np.array(poly).reshape((4, 2))
# polygons.append(Polygon(np_poly))
# color.append(c)
# p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
# ax.add_collection(p)
# p = PatchCollection(
# polygons, facecolor="none", edgecolors=color, linewidths=2
# )
# ax.add_collection(p)
# elif datasetType == "captions":
# for ann in anns:
# print(ann["caption"])
# def loadRes(self, resFile):
# """
# Load result file and return a result api object.
# :param resFile (str) : file name of result file
# :return: res (obj) : result api object
# """
# res = COCO()
# res.dataset["images"] = [img for img in self.dataset["images"]]
# print("Loading and preparing results...")
# tic = time.time()
# if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
# anns = json.load(open(resFile))
# elif type(resFile) == np.ndarray:
# anns = self.loadNumpyAnnotations(resFile)
# else:
# anns = resFile
# assert type(anns) == list, "results in not an array of objects"
# annsImgIds = [ann["image_id"] for ann in anns]
# assert set(annsImgIds) == (
# set(annsImgIds) & set(self.getImgIds())
# ), "Results do not correspond to current coco set"
# if "caption" in anns[0]:
# imgIds = set([img["id"] for img in res.dataset["images"]]) & set(
# [ann["image_id"] for ann in anns]
# )
# res.dataset["images"] = [
# img for img in res.dataset["images"] if img["id"] in imgIds
# ]
# for id, ann in enumerate(anns):
# ann["id"] = id + 1
# elif "bbox" in anns[0] and not anns[0]["bbox"] == []:
# res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
# for id, ann in enumerate(anns):
# bb = ann["bbox"]
# x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
# if not "segmentation" in ann:
# ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
# ann["area"] = bb[2] * bb[3]
# ann["id"] = id + 1
# ann["iscrowd"] = 0
# elif "segmentation" in anns[0]:
# res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
# for id, ann in enumerate(anns):
# # now only support compressed RLE format as segmentation results
# ann["area"] = maskUtils.area(ann["segmentation"])
# if not "bbox" in ann:
# ann["bbox"] = maskUtils.toBbox(ann["segmentation"])
# ann["id"] = id + 1
# ann["iscrowd"] = 0
# elif "keypoints" in anns[0]:
# res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
# for id, ann in enumerate(anns):
# s = ann["keypoints"]
# x = s[0::3]
# y = s[1::3]
# x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
# ann["area"] = (x1 - x0) * (y1 - y0)
# ann["id"] = id + 1
# ann["bbox"] = [x0, y0, x1 - x0, y1 - y0]
# print("DONE (t={:0.2f}s)".format(time.time() - tic))
# res.dataset["annotations"] = anns
# res.createIndex()
# return res
def download(self, tarDir=None, imgIds=[]):
"""
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
"""
if tarDir is None:
print("Please specify target directory")
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img["file_name"])
if not os.path.exists(fname):
urlretrieve(img["coco_url"], fname)
print(
"downloaded {}/{} images (t={:0.1f}s)".format(i, N, time.time() - tic)
)
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print("Converting ndarray to lists...")
assert type(data) == np.ndarray
print(data.shape)
assert data.shape[1] == 7
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print("{}/{}".format(i, N))
ann += [
{
"image_id": int(data[i, 0]),
"bbox": [data[i, 1], data[i, 2], data[i, 3], data[i, 4]],
"score": data[i, 5],
"category_id": int(data[i, 6]),
}
]
return ann
# def annToRLE(self, ann):
# """
# Convert annotation which can be polygons, uncompressed RLE to RLE.
# :return: binary mask (numpy 2D array)
# """
# t = self.imgs[ann["image_id"]]
# h, w = t["height"], t["width"]
# segm = ann["segmentation"]
# if type(segm) == list:
# # polygon -- a single object might consist of multiple parts
# # we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
# elif type(segm["counts"]) == list:
# # uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
# else:
# # rle
# rle = ann["segmentation"]
# return rle
# def annToMask(self, ann):
# """
# Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
# :return: binary mask (numpy 2D array)
# """
# rle = self.annToRLE(ann)
# m = maskUtils.decode(rle)
# return m
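# --- Illustrative usage sketch (not part of the original module) ---
# The COCO helper above can also be populated in memory instead of from a file.
# The concrete values below (category id 1, a 4x4 image, a square polygon) are made up.
def _example_build_and_query():
    coco = COCO()                                    # start from an empty dataset
    coco.addCategory(id=1, name="person", color=[255, 0, 0])
    img_id = coco.addImage(file_name="demo.jpg", width=4, height=4)
    ann_id = coco.addAnnotation(image_id=img_id, category_id=1,
                                segmentation=[0, 0, 4, 0, 4, 4, 0, 4],
                                bbox=[0, 0, 4, 4])
    ann_ids = coco.getAnnIds(imgIds=[img_id], catIds=[1])
    return ann_id, coco.loadAnns(ann_ids)            # the single annotation added above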
| 36.623256 | 126 | 0.475235 |
9629e3a3bbfc6898d40d90e6ab8f8963b8e6005e | 394 | py | Python | backend/apps/config/asgi.py | ambient-innovation/workshop-django | 3105f6684097a4443919edc0ac300f64937a1371 | [
"MIT"
] | null | null | null | backend/apps/config/asgi.py | ambient-innovation/workshop-django | 3105f6684097a4443919edc0ac300f64937a1371 | [
"MIT"
] | null | null | null | backend/apps/config/asgi.py | ambient-innovation/workshop-django | 3105f6684097a4443919edc0ac300f64937a1371 | [
"MIT"
] | null | null | null | """
ASGI config for the apps.config project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apps.config.settings')
application = get_asgi_application()
| 23.176471 | 78 | 0.784264 |
91aebf10d40f594a5d310060ce91eb99bf3fc647 | 324 | py | Python | tests/conftest.py | esadek/adroa | 9033a3aa0a082c229e8d7275b75e1d28ae6c0c54 | [
"MIT"
] | null | null | null | tests/conftest.py | esadek/adroa | 9033a3aa0a082c229e8d7275b75e1d28ae6c0c54 | [
"MIT"
] | null | null | null | tests/conftest.py | esadek/adroa | 9033a3aa0a082c229e8d7275b75e1d28ae6c0c54 | [
"MIT"
] | null | null | null | import sys
import os
import shutil
import typing
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../adroa')))
import adroa
@pytest.fixture
def directories() -> typing.Generator[None, None, None]:
adroa.create_directories('my-app')
yield None
shutil.rmtree('my-app')
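# --- Illustrative sketch (not part of the original conftest) ---
# A test opts into the fixture above by naming it as a parameter; 'my-app' then
# exists for the test body and is removed again afterwards. (That the created
# project root is literally named 'my-app' is an assumption taken from the
# fixture's own cleanup call.)
#
# def test_creates_project_root(directories):
#     assert os.path.isdir('my-app')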
| 19.058824 | 88 | 0.725309 |
29beb5a410ca3aebad36facbadd71bf346ef93b5 | 27,303 | py | Python | tests/python_client/common/common_func.py | drow931/milvus | d6bb86dd46a238efaa42c46f9770aec19cfbcc5b | [
"Apache-2.0"
] | null | null | null | tests/python_client/common/common_func.py | drow931/milvus | d6bb86dd46a238efaa42c46f9770aec19cfbcc5b | [
"Apache-2.0"
] | null | null | null | tests/python_client/common/common_func.py | drow931/milvus | d6bb86dd46a238efaa42c46f9770aec19cfbcc5b | [
"Apache-2.0"
] | null | null | null | import os
import random
import math
import string
import numpy as np
import pandas as pd
from sklearn import preprocessing
from pymilvus import DataType
from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper
from common import common_type as ct
from utils.util_log import test_log as log
"""" Methods of processing data """
class ParamInfo:
def __init__(self):
self.param_host = ""
self.param_port = ""
self.param_handler = ""
self.param_replica_num = ct.default_replica_num
def prepare_param_info(self, host, port, handler, replica_num):
self.param_host = host
self.param_port = port
self.param_handler = handler
self.param_replica_num = replica_num
param_info = ParamInfo()
def gen_unique_str(str_value=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return "test_" + prefix if str_value is None else str_value + "_" + prefix
def gen_str_by_length(length=8):
return "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def gen_digits_by_length(length=8):
return "".join(random.choice(string.digits) for _ in range(length))
def gen_bool_field(name=ct.default_bool_field_name, description=ct.default_desc, is_primary=False, **kwargs):
bool_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.BOOL, description=description,
is_primary=is_primary, **kwargs)
return bool_field
def gen_string_field(name=ct.default_string_field_name, description=ct.default_desc, is_primary=False, max_length_per_row=ct.default_length, **kwargs):
string_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.VARCHAR, description=description, max_length_per_row=max_length_per_row,
is_primary=is_primary, **kwargs)
return string_field
def gen_int8_field(name=ct.default_int8_field_name, description=ct.default_desc, is_primary=False, **kwargs):
int8_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.INT8, description=description,
is_primary=is_primary, **kwargs)
return int8_field
def gen_int16_field(name=ct.default_int16_field_name, description=ct.default_desc, is_primary=False, **kwargs):
int16_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.INT16, description=description,
is_primary=is_primary, **kwargs)
return int16_field
def gen_int32_field(name=ct.default_int32_field_name, description=ct.default_desc, is_primary=False, **kwargs):
int32_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.INT32, description=description,
is_primary=is_primary, **kwargs)
return int32_field
def gen_int64_field(name=ct.default_int64_field_name, description=ct.default_desc, is_primary=False, **kwargs):
int64_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.INT64, description=description,
is_primary=is_primary, **kwargs)
return int64_field
def gen_float_field(name=ct.default_float_field_name, is_primary=False, description=ct.default_desc):
float_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT, description=description,
is_primary=is_primary)
return float_field
def gen_double_field(name=ct.default_double_field_name, is_primary=False, description=ct.default_desc):
double_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.DOUBLE,
description=description, is_primary=is_primary)
return double_field
def gen_float_vec_field(name=ct.default_float_vec_field_name, is_primary=False, dim=ct.default_dim,
description=ct.default_desc):
float_vec_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT_VECTOR,
description=description, dim=dim,
is_primary=is_primary)
return float_vec_field
def gen_binary_vec_field(name=ct.default_binary_vec_field_name, is_primary=False, dim=ct.default_dim,
description=ct.default_desc):
binary_vec_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.BINARY_VECTOR,
description=description, dim=dim,
is_primary=is_primary)
return binary_vec_field
def gen_default_collection_schema(description=ct.default_desc, primary_field=ct.default_int64_field_name,
auto_id=False, dim=ct.default_dim):
fields = [gen_int64_field(), gen_float_field(), gen_string_field(), gen_float_vec_field(dim=dim)]
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field, auto_id=auto_id)
return schema
def gen_string_pk_default_collection_schema(description=ct.default_desc, primary_field=ct.default_string_field_name,
auto_id=False, dim=ct.default_dim):
fields = [gen_int64_field(), gen_float_field(), gen_string_field(), gen_float_vec_field(dim=dim)]
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field, auto_id=auto_id)
return schema
def gen_collection_schema_all_datatype(description=ct.default_desc,
primary_field=ct.default_int64_field_name,
auto_id=False, dim=ct.default_dim):
fields = [gen_int64_field(), gen_int32_field(), gen_int16_field(), gen_int8_field(),
gen_bool_field(), gen_float_field(), gen_double_field(), gen_string_field(), gen_float_vec_field(dim=dim)]
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field, auto_id=auto_id)
return schema
def gen_collection_schema(fields, primary_field=None, description=ct.default_desc, auto_id=False):
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, primary_field=primary_field,
description=description, auto_id=auto_id)
return schema
def gen_default_binary_collection_schema(description=ct.default_desc, primary_field=ct.default_int64_field_name,
auto_id=False, dim=ct.default_dim):
fields = [gen_int64_field(), gen_float_field(), gen_string_field(), gen_binary_vec_field(dim=dim)]
binary_schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field,
auto_id=auto_id)
return binary_schema
def gen_schema_multi_vector_fields(vec_fields):
fields = [gen_int64_field(), gen_float_field(),gen_string_field(), gen_float_vec_field()]
fields.extend(vec_fields)
primary_field = ct.default_int64_field_name
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=ct.default_desc,
primary_field=primary_field, auto_id=False)
return schema
def gen_schema_multi_string_fields(string_fields):
fields =[gen_int64_field(), gen_float_field(),gen_string_field(),gen_float_vec_field()]
fields.extend(string_fields)
primary_field = ct.default_int64_field_name
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=ct.default_desc,
primary_field=primary_field, auto_id=False)
return schema
def gen_vectors(nb, dim):
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors = preprocessing.normalize(vectors, axis=1, norm='l2')
return vectors.tolist()
def gen_string(nb):
string_values = [str(random.random()) for _ in range(nb)]
return string_values
def gen_binary_vectors(num, dim):
raw_vectors = []
binary_vectors = []
for _ in range(num):
raw_vector = [random.randint(0, 1) for _ in range(dim)]
raw_vectors.append(raw_vector)
# packs a binary-valued array into bits in a unit8 array, and bytes array_of_ints
binary_vectors.append(bytes(np.packbits(raw_vector, axis=-1).tolist()))
return raw_vectors, binary_vectors
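# --- Illustrative sketch (not part of the original helpers) ---
# gen_binary_vectors above relies on np.packbits: every 8 raw 0/1 values are packed
# into one byte, which is why binary vector dims are expected to be multiples of 8.
def _packbits_example():
    raw = [1, 0, 0, 0, 0, 0, 0, 0]                      # 8 bits -> 1 byte
    packed = bytes(np.packbits(raw, axis=-1).tolist())  # b'\x80' (0b10000000 == 128)
    return packed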
def gen_default_dataframe_data(nb=ct.default_nb, dim=ct.default_dim, start=0):
int_values = pd.Series(data=[i for i in range(start, start + nb)])
float_values = pd.Series(data=[np.float32(i) for i in range(start, start + nb)], dtype="float32")
string_values = pd.Series(data=[str(i) for i in range(start, start + nb)], dtype="string")
float_vec_values = gen_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_string_field_name: string_values,
ct.default_float_vec_field_name: float_vec_values
})
return df
def gen_dataframe_multi_vec_fields(vec_fields, nb=ct.default_nb):
"""
gen dataframe data for fields: int64, float, float_vec and vec_fields
:param nb: num of entities, default default_nb
:param vec_fields: list of FieldSchema
:return: dataframe
"""
int_values = pd.Series(data=[i for i in range(0, nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
string_values = pd.Series(data=[str(i) for i in range(nb)], dtype="string")
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_string_field_name: string_values,
ct.default_float_vec_field_name: gen_vectors(nb, ct.default_dim)
})
for field in vec_fields:
dim = field.params['dim']
if field.dtype == DataType.FLOAT_VECTOR:
vec_values = gen_vectors(nb, dim)
elif field.dtype == DataType.BINARY_VECTOR:
vec_values = gen_binary_vectors(nb, dim)[1]
df[field.name] = vec_values
return df
def gen_dataframe_multi_string_fields(string_fields, nb=ct.default_nb):
"""
    gen dataframe data for fields: int64, float, float_vec and the extra string_fields
    :param nb: num of entities, default default_nb
    :param string_fields: list of FieldSchema
:return: dataframe
"""
int_values = pd.Series(data=[i for i in range(0, nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
string_values = pd.Series(data=[str(i) for i in range(nb)], dtype="string")
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_string_field_name: string_values,
ct.default_float_vec_field_name: gen_vectors(nb, ct.default_dim)
})
for field in string_fields:
if field.dtype == DataType.VARCHAR:
string_values = gen_string(nb)
df[field.name] = string_values
return df
def gen_dataframe_all_data_type(nb=ct.default_nb, dim=ct.default_dim, start=0):
int64_values = pd.Series(data=[i for i in range(start, start + nb)])
int32_values = pd.Series(data=[np.int32(i) for i in range(start, start + nb)], dtype="int32")
int16_values = pd.Series(data=[np.int16(i) for i in range(start, start + nb)], dtype="int16")
int8_values = pd.Series(data=[np.int8(i) for i in range(start, start + nb)], dtype="int8")
bool_values = pd.Series(data=[np.bool(i) for i in range(start, start + nb)], dtype="bool")
float_values = pd.Series(data=[np.float32(i) for i in range(start, start + nb)], dtype="float32")
double_values = pd.Series(data=[np.double(i) for i in range(start, start + nb)], dtype="double")
string_values = pd.Series(data=[str(i) for i in range(start, start + nb)], dtype="string")
float_vec_values = gen_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int64_values,
ct.default_int32_field_name: int32_values,
ct.default_int16_field_name: int16_values,
ct.default_int8_field_name: int8_values,
ct.default_bool_field_name: bool_values,
ct.default_float_field_name: float_values,
ct.default_double_field_name: double_values,
ct.default_string_field_name: string_values,
ct.default_float_vec_field_name: float_vec_values
})
return df
def gen_default_binary_dataframe_data(nb=ct.default_nb, dim=ct.default_dim, start=0):
int_values = pd.Series(data=[i for i in range(start, start + nb)])
float_values = pd.Series(data=[np.float32(i) for i in range(start, start + nb)], dtype="float32")
string_values = pd.Series(data=[str(i) for i in range(start, start + nb)], dtype="string")
binary_raw_values, binary_vec_values = gen_binary_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_string_field_name: string_values,
ct.default_binary_vec_field_name: binary_vec_values
})
return df, binary_raw_values
def gen_default_list_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
string_values = [str(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = [int_values, float_values, string_values, float_vec_values]
return data
def gen_default_tuple_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
string_values = [str(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = (int_values, float_values, string_values, float_vec_values)
return data
def gen_numpy_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = np.arange(nb, dtype='int64')
float_values = np.arange(nb, dtype='float32')
string_values = [np.str(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = [int_values, float_values, string_values, float_vec_values]
return data
def gen_default_binary_list_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
string_values = [str(i) for i in range(nb)]
binary_raw_values, binary_vec_values = gen_binary_vectors(nb, dim)
data = [int_values, float_values, string_values, binary_vec_values]
return data, binary_raw_values
def gen_simple_index():
index_params = []
for i in range(len(ct.all_index_types)):
if ct.all_index_types[i] in ct.binary_support:
continue
dic = {"index_type": ct.all_index_types[i], "metric_type": "L2"}
dic.update({"params": ct.default_index_params[i]})
index_params.append(dic)
return index_params
def gen_invalid_field_types():
field_types = [
6,
1.0,
[[]],
{},
(),
"",
"a"
]
return field_types
def gen_invaild_search_params_type():
invalid_search_key = 100
search_params = []
for index_type in ct.all_index_types:
if index_type == "FLAT":
continue
search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}})
if index_type in ["IVF_FLAT", "IVF_SQ8", "IVF_SQ8H", "IVF_PQ"]:
for nprobe in ct.get_invalid_ints:
ivf_search_params = {"index_type": index_type, "search_params": {"nprobe": nprobe}}
search_params.append(ivf_search_params)
elif index_type in ["HNSW", "RHNSW_FLAT", "RHNSW_PQ", "RHNSW_SQ"]:
for ef in ct.get_invalid_ints:
hnsw_search_param = {"index_type": index_type, "search_params": {"ef": ef}}
search_params.append(hnsw_search_param)
elif index_type in ["NSG", "RNSG"]:
for search_length in ct.get_invalid_ints:
nsg_search_param = {"index_type": index_type, "search_params": {"search_length": search_length}}
search_params.append(nsg_search_param)
search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}})
elif index_type == "ANNOY":
for search_k in ct.get_invalid_ints:
if isinstance(search_k, int):
continue
annoy_search_param = {"index_type": index_type, "search_params": {"search_k": search_k}}
search_params.append(annoy_search_param)
return search_params
def gen_search_param(index_type, metric_type="L2"):
search_params = []
if index_type in ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_SQ8H", "IVF_PQ"] \
or index_type in ["BIN_FLAT", "BIN_IVF_FLAT"]:
for nprobe in [64, 128]:
ivf_search_params = {"metric_type": metric_type, "params": {"nprobe": nprobe}}
search_params.append(ivf_search_params)
elif index_type in ["HNSW", "RHNSW_FLAT", "RHNSW_PQ", "RHNSW_SQ"]:
for ef in [64, 32768]:
hnsw_search_param = {"metric_type": metric_type, "params": {"ef": ef}}
search_params.append(hnsw_search_param)
elif index_type in ["NSG", "RNSG"]:
for search_length in [100, 300]:
nsg_search_param = {"metric_type": metric_type, "params": {"search_length": search_length}}
search_params.append(nsg_search_param)
elif index_type == "ANNOY":
for search_k in [1000, 5000]:
annoy_search_param = {"metric_type": metric_type, "params": {"search_k": search_k}}
search_params.append(annoy_search_param)
else:
log.error("Invalid index_type.")
raise Exception("Invalid index_type.")
return search_params
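# Illustrative note (not part of the original helpers): for example,
# gen_search_param("IVF_FLAT") returns
#   [{'metric_type': 'L2', 'params': {'nprobe': 64}},
#    {'metric_type': 'L2', 'params': {'nprobe': 128}}]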
def gen_all_type_fields():
fields = []
for k, v in DataType.__members__.items():
if v != DataType.UNKNOWN:
field, _ = ApiFieldSchemaWrapper().init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
return fields
def gen_normal_expressions():
expressions = [
"",
"int64 > 0",
"(int64 > 0 && int64 < 400) or (int64 > 500 && int64 < 1000)",
"int64 not in [1, 2, 3]",
"int64 in [1, 2, 3] and float != 2",
"int64 == 0 || int64 == 1 || int64 == 2",
"0 < int64 < 400",
"500 <= int64 < 1000",
"200+300 < int64 <= 500+500"
]
return expressions
def gen_normal_string_expressions(field):
expressions = [
f"\"0\"< {field} < \"3\"",
f"{field} >= \"0\"",
f"({field} > \"0\" && {field} < \"100\") or ({field} > \"200\" && {field} < \"300\")",
f"\"0\" <= {field} <= \"100\"",
f"{field} == \"0\"|| {field} == \"1\"|| {field} ==\"2\"",
f"{field} != \"0\"",
f"{field} not in [\"0\", \"1\", \"2\"]",
f"{field} in [\"0\", \"1\", \"2\"]"
]
return expressions
def gen_invaild_string_expressions():
expressions = [
"varchar in [0, \"1\"]",
"varchar not in [\"0\", 1, 2]"
]
return expressions
def gen_normal_expressions_field(field):
expressions = [
"",
f"{field} > 0",
f"({field} > 0 && {field} < 400) or ({field} > 500 && {field} < 1000)",
f"{field} not in [1, 2, 3]",
f"{field} in [1, 2, 3] and {field} != 2",
f"{field} == 0 || {field} == 1 || {field} == 2",
f"0 < {field} < 400",
f"500 <= {field} <= 1000",
f"200+300 <= {field} <= 500+500"
]
return expressions
def l2(x, y):
return np.linalg.norm(np.array(x) - np.array(y))
def ip(x, y):
return np.inner(np.array(x), np.array(y))
def jaccard(x, y):
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return 1 - np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum())
def hamming(x, y):
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return np.bitwise_xor(x, y).sum()
def tanimoto(x, y):
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return -np.log2(np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum()))
def tanimoto_calc(x, y):
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return np.double((len(x) - np.bitwise_xor(x, y).sum())) / (len(y) + np.bitwise_xor(x, y).sum())
def substructure(x, y):
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(y)
def superstructure(x, y):
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(x)
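# Worked example (illustrative, not used by the helpers above): for the bit
# vectors x = [1, 0, 1, 1] and y = [1, 1, 0, 1]:
#     hamming(x, y)  -> 2                 (two positions differ)
#     jaccard(x, y)  -> 1 - 2/4 = 0.5     (intersection 2, union 4)
#     tanimoto(x, y) -> -log2(2/4) = 1.0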
def compare_distance_2d_vector(x, y, distance, metric, sqrt):
for i in range(len(x)):
for j in range(len(y)):
if metric == "L2":
distance_i = l2(x[i], y[j])
if not sqrt:
distance_i = math.pow(distance_i, 2)
elif metric == "IP":
distance_i = ip(x[i], y[j])
elif metric == "HAMMING":
distance_i = hamming(x[i], y[j])
elif metric == "TANIMOTO":
distance_i = tanimoto_calc(x[i], y[j])
elif metric == "JACCARD":
distance_i = jaccard(x[i], y[j])
else:
raise Exception("metric type is invalid")
assert abs(distance_i - distance[i][j]) < ct.epsilon
return True
def modify_file(file_path_list, is_modify=False, input_content=""):
"""
    file_path_list : list of file paths -> list[<file_path>]
    is_modify : whether the file content should be reset (overwritten)
    input_content : the content to write into the file
"""
if not isinstance(file_path_list, list):
log.error("[modify_file] file is not a list.")
for file_path in file_path_list:
folder_path, file_name = os.path.split(file_path)
if not os.path.isdir(folder_path):
log.debug("[modify_file] folder(%s) is not exist." % folder_path)
os.makedirs(folder_path)
if not os.path.isfile(file_path):
log.error("[modify_file] file(%s) is not exist." % file_path)
else:
if is_modify is True:
log.debug("[modify_file] start modifying file(%s)..." % file_path)
with open(file_path, "r+") as f:
f.seek(0)
f.truncate()
f.write(input_content)
f.close()
log.info("[modify_file] file(%s) modification is complete." % file_path_list)
def index_to_dict(index):
return {
"collection_name": index.collection_name,
"field_name": index.field_name,
# "name": index.name,
"params": index.params
}
def assert_equal_index(index_1, index_2):
return index_to_dict(index_1) == index_to_dict(index_2)
def gen_partitions(collection_w, partition_num=1):
"""
    target: create extra partitions besides _default
    method: create one or more partitions
    expected: the collection has partition_num + 1 partitions afterwards
"""
log.info("gen_partitions: creating partitions")
for i in range(partition_num):
partition_name = "search_partition_" + str(i)
collection_w.create_partition(partition_name=partition_name,
description="search partition")
par = collection_w.partitions
assert len(par) == (partition_num + 1)
log.info("gen_partitions: created partitions %s" % par)
def insert_data(collection_w, nb=3000, is_binary=False, is_all_data_type=False,
auto_id=False, dim=ct.default_dim, insert_offset=0):
"""
target: insert non-binary/binary data
method: insert non-binary/binary data into partitions if any
expected: return collection and raw data
"""
par = collection_w.partitions
num = len(par)
vectors = []
binary_raw_vectors = []
insert_ids = []
start = insert_offset
log.info("insert_data: inserting data into collection %s (num_entities: %s)"
% (collection_w.name, nb))
for i in range(num):
default_data = gen_default_dataframe_data(nb // num, dim=dim, start=start)
if is_binary:
default_data, binary_raw_data = gen_default_binary_dataframe_data(nb // num, dim=dim, start=start)
binary_raw_vectors.extend(binary_raw_data)
if is_all_data_type:
default_data = gen_dataframe_all_data_type(nb // num, dim=dim, start=start)
if auto_id:
default_data.drop(ct.default_int64_field_name, axis=1, inplace=True)
insert_res = collection_w.insert(default_data, par[i].name)[0]
time_stamp = insert_res.timestamp
insert_ids.extend(insert_res.primary_keys)
vectors.append(default_data)
start += nb // num
return collection_w, vectors, binary_raw_vectors, insert_ids, time_stamp
def _check_primary_keys(primary_keys, nb):
if primary_keys is None:
raise Exception("The primary_keys is None")
assert len(primary_keys) == nb
for i in range(nb - 1):
if primary_keys[i] >= primary_keys[i + 1]:
return False
return True
def get_segment_distribution(res):
"""
Get segment distribution
"""
from collections import defaultdict
segment_distribution = defaultdict(lambda: {"growing": [], "sealed": []})
for r in res:
if r.nodeID not in segment_distribution:
segment_distribution[r.nodeID] = {
"growing": [],
"sealed": []
}
if r.state == 3:
segment_distribution[r.nodeID]["sealed"].append(r.segmentID)
if r.state == 2:
segment_distribution[r.nodeID]["growing"].append(r.segmentID)
return segment_distribution
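# Shape of the returned mapping (node IDs and segment IDs below are made up):
#     {1: {"growing": [446], "sealed": [447, 448]},
#      2: {"growing": [], "sealed": [449]}}
# The loop above treats state 3 as sealed and state 2 as growing.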
def percent_to_int(string):
"""
    Convert a percent string ("0%"--"100%") to an int; return -1 for invalid input.
"""
new_int = -1
if not isinstance(string, str):
log.error("%s is not a string" % string)
return new_int
if "%" not in string:
log.error("%s is not a percent" % string)
else:
new_int = int(string.strip("%"))
return new_int
| 41.243202 | 163 | 0.636047 |
5ee76cce8c69c53892802819a6cde59da8aad897 | 17,872 | py | Python | tests/unit/test_config.py | sgillies/warehouse | 081c3dd5c85b2377d8f37ce163b179510c746ccd | [
"Apache-2.0"
] | null | null | null | tests/unit/test_config.py | sgillies/warehouse | 081c3dd5c85b2377d8f37ce163b179510c746ccd | [
"Apache-2.0"
] | null | null | null | tests/unit/test_config.py | sgillies/warehouse | 081c3dd5c85b2377d8f37ce163b179510c746ccd | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pretend
import pytest
from pyramid import renderers
from pyramid.authorization import Allow, Authenticated
from pyramid.httpexceptions import HTTPForbidden, HTTPUnauthorized
from pyramid.tweens import EXCVIEW
from warehouse import config
from warehouse.errors import BasicAuthBreachedPassword, BasicAuthFailedPassword
from warehouse.utils.wsgi import HostRewrite, ProxyFixer, VhmRootRemover
class TestRequireHTTPSTween:
def test_noops_when_disabled(self):
handler = pretend.stub()
registry = pretend.stub(
settings=pretend.stub(get=pretend.call_recorder(lambda k, v: False))
)
assert config.require_https_tween_factory(handler, registry) is handler
assert registry.settings.get.calls == [pretend.call("enforce_https", True)]
@pytest.mark.parametrize(
("params", "scheme"),
[({}, "https"), ({":action": "thing"}, "https"), ({}, "http")],
)
def test_allows_through(self, params, scheme):
request = pretend.stub(params=params, scheme=scheme)
response = pretend.stub()
handler = pretend.call_recorder(lambda req: response)
registry = pretend.stub(settings=pretend.stub(get=lambda k, v: True))
tween = config.require_https_tween_factory(handler, registry)
assert tween(request) is response
assert handler.calls == [pretend.call(request)]
@pytest.mark.parametrize(("params", "scheme"), [({":action": "thing"}, "http")])
def test_rejects(self, params, scheme):
request = pretend.stub(params=params, scheme=scheme)
handler = pretend.stub()
registry = pretend.stub(settings=pretend.stub(get=lambda k, v: True))
tween = config.require_https_tween_factory(handler, registry)
resp = tween(request)
assert resp.status == "403 SSL is required"
assert resp.headers["X-Fastly-Error"] == "803"
assert resp.content_type == "text/plain"
assert resp.body == b"SSL is required."
@pytest.mark.parametrize(
("path", "expected"),
[("/foo/bar/", True), ("/static/wat/", False), ("/_debug_toolbar/thing/", False)],
)
def test_activate_hook(path, expected):
request = pretend.stub(path=path)
assert config.activate_hook(request) == expected
@pytest.mark.parametrize(
("exc_info", "expected"),
[
(None, False),
((ValueError, ValueError(), None), True),
((HTTPForbidden, HTTPForbidden(), None), True),
((HTTPUnauthorized, HTTPUnauthorized(), None), True),
((BasicAuthBreachedPassword, BasicAuthBreachedPassword(), None), False),
((BasicAuthFailedPassword, BasicAuthFailedPassword(), None), False),
],
)
def test_commit_veto(exc_info, expected):
request = pretend.stub(exc_info=exc_info)
response = pretend.stub()
assert bool(config.commit_veto(request, response)) == expected
@pytest.mark.parametrize("route_kw", [None, {}, {"foo": "bar"}])
def test_template_view(route_kw):
configobj = pretend.stub(
add_route=pretend.call_recorder(lambda *a, **kw: None),
add_view=pretend.call_recorder(lambda *a, **kw: None),
)
config.template_view(configobj, "test", "/test/", "test.html", route_kw=route_kw)
assert configobj.add_route.calls == [
pretend.call("test", "/test/", **({} if route_kw is None else route_kw))
]
assert configobj.add_view.calls == [
pretend.call(renderer="test.html", route_name="test")
]
@pytest.mark.parametrize(
("environ", "name", "envvar", "coercer", "default", "expected"),
[
({}, "test.foo", "TEST_FOO", None, None, {}),
({"TEST_FOO": "bar"}, "test.foo", "TEST_FOO", None, None, {"test.foo": "bar"}),
({"TEST_INT": "1"}, "test.int", "TEST_INT", int, None, {"test.int": 1}),
({}, "test.foo", "TEST_FOO", None, "lol", {"test.foo": "lol"}),
({"TEST_FOO": "bar"}, "test.foo", "TEST_FOO", None, "lol", {"test.foo": "bar"}),
],
)
def test_maybe_set(monkeypatch, environ, name, envvar, coercer, default, expected):
for key, value in environ.items():
monkeypatch.setenv(key, value)
settings = {}
config.maybe_set(settings, name, envvar, coercer=coercer, default=default)
assert settings == expected
@pytest.mark.parametrize(
("environ", "base", "name", "envvar", "expected"),
[
({}, "test", "foo", "TEST_FOO", {}),
({"TEST_FOO": "bar"}, "test", "foo", "TEST_FOO", {"test.foo": "bar"}),
(
{"TEST_FOO": "bar thing=other"},
"test",
"foo",
"TEST_FOO",
{"test.foo": "bar", "test.thing": "other"},
),
(
{"TEST_FOO": 'bar thing=other wat="one two"'},
"test",
"foo",
"TEST_FOO",
{"test.foo": "bar", "test.thing": "other", "test.wat": "one two"},
),
],
)
def test_maybe_set_compound(monkeypatch, environ, base, name, envvar, expected):
for key, value in environ.items():
monkeypatch.setenv(key, value)
settings = {}
config.maybe_set_compound(settings, base, name, envvar)
assert settings == expected
@pytest.mark.parametrize(
("settings", "environment", "other_settings"),
[
(None, config.Environment.production, {}),
({}, config.Environment.production, {}),
({"my settings": "the settings value"}, config.Environment.production, {}),
(None, config.Environment.development, {}),
({}, config.Environment.development, {}),
({"my settings": "the settings value"}, config.Environment.development, {}),
],
)
def test_configure(monkeypatch, settings, environment, other_settings):
json_renderer_obj = pretend.stub()
json_renderer_cls = pretend.call_recorder(lambda **kw: json_renderer_obj)
monkeypatch.setattr(renderers, "JSON", json_renderer_cls)
xmlrpc_renderer_obj = pretend.stub()
xmlrpc_renderer_cls = pretend.call_recorder(lambda **kw: xmlrpc_renderer_obj)
monkeypatch.setattr(config, "XMLRPCRenderer", xmlrpc_renderer_cls)
if environment == config.Environment.development:
monkeypatch.setenv("WAREHOUSE_ENV", "development")
class FakeRegistry(dict):
def __init__(self):
self.settings = {
"warehouse.token": "insecure token",
"warehouse.env": environment,
"camo.url": "http://camo.example.com/",
"pyramid.reload_assets": False,
"dirs.packages": "/srv/data/pypi/packages/",
"warehouse.xmlrpc.client.ratelimit_string": "3600 per hour",
}
configurator_settings = other_settings.copy()
configurator_obj = pretend.stub(
registry=FakeRegistry(),
set_root_factory=pretend.call_recorder(lambda rf: None),
include=pretend.call_recorder(lambda include: None),
add_directive=pretend.call_recorder(lambda name, fn, **k: None),
add_wsgi_middleware=pretend.call_recorder(lambda m, *a, **kw: None),
add_renderer=pretend.call_recorder(lambda name, renderer: None),
add_request_method=pretend.call_recorder(lambda fn: None),
add_jinja2_renderer=pretend.call_recorder(lambda renderer: None),
add_jinja2_search_path=pretend.call_recorder(lambda path, name: None),
get_settings=lambda: configurator_settings,
add_settings=pretend.call_recorder(lambda d: configurator_settings.update(d)),
add_tween=pretend.call_recorder(lambda tween_factory, **kw: None),
add_static_view=pretend.call_recorder(lambda *a, **kw: None),
add_cache_buster=pretend.call_recorder(lambda spec, buster: None),
whitenoise_serve_static=pretend.call_recorder(lambda *a, **kw: None),
whitenoise_add_files=pretend.call_recorder(lambda *a, **kw: None),
whitenoise_add_manifest=pretend.call_recorder(lambda *a, **kw: None),
scan=pretend.call_recorder(lambda categories, ignore: None),
commit=pretend.call_recorder(lambda: None),
)
configurator_cls = pretend.call_recorder(lambda settings: configurator_obj)
monkeypatch.setattr(config, "Configurator", configurator_cls)
cachebuster_obj = pretend.stub()
cachebuster_cls = pretend.call_recorder(lambda p, **kw: cachebuster_obj)
monkeypatch.setattr(config, "ManifestCacheBuster", cachebuster_cls)
transaction_manager = pretend.stub()
transaction = pretend.stub(
TransactionManager=pretend.call_recorder(lambda: transaction_manager)
)
monkeypatch.setattr(config, "transaction", transaction)
result = config.configure(settings=settings)
expected_settings = {
"warehouse.env": environment,
"warehouse.commit": "null",
"site.name": "Warehouse",
"token.two_factor.max_age": 300,
"token.default.max_age": 21600,
"warehouse.xmlrpc.client.ratelimit_string": "3600 per hour",
"warehouse.xmlrpc.search.enabled": True,
"github.token_scanning_meta_api.url": (
"https://api.github.com/meta/public_keys/token_scanning"
),
"warehouse.account.user_login_ratelimit_string": "10 per 5 minutes",
"warehouse.account.ip_login_ratelimit_string": "10 per 5 minutes",
"warehouse.account.global_login_ratelimit_string": "1000 per 5 minutes",
"warehouse.account.email_add_ratelimit_string": "2 per day",
"warehouse.account.password_reset_ratelimit_string": "5 per day",
}
if environment == config.Environment.development:
expected_settings.update(
{
"enforce_https": False,
"pyramid.reload_templates": True,
"pyramid.reload_assets": True,
"pyramid.prevent_http_cache": True,
"debugtoolbar.hosts": ["0.0.0.0/0"],
"debugtoolbar.panels": [
"pyramid_debugtoolbar.panels.versions.VersionDebugPanel",
"pyramid_debugtoolbar.panels.settings.SettingsDebugPanel",
"pyramid_debugtoolbar.panels.headers.HeaderDebugPanel",
(
"pyramid_debugtoolbar.panels.request_vars."
"RequestVarsDebugPanel"
),
"pyramid_debugtoolbar.panels.renderings.RenderingsDebugPanel",
"pyramid_debugtoolbar.panels.logger.LoggingPanel",
(
"pyramid_debugtoolbar.panels.performance."
"PerformanceDebugPanel"
),
"pyramid_debugtoolbar.panels.routes.RoutesDebugPanel",
"pyramid_debugtoolbar.panels.sqla.SQLADebugPanel",
"pyramid_debugtoolbar.panels.tweens.TweensDebugPanel",
(
"pyramid_debugtoolbar.panels.introspection."
"IntrospectionDebugPanel"
),
],
}
)
if settings is not None:
expected_settings.update(settings)
assert configurator_cls.calls == [pretend.call(settings=expected_settings)]
assert result is configurator_obj
assert configurator_obj.set_root_factory.calls == [pretend.call(config.RootFactory)]
assert configurator_obj.add_wsgi_middleware.calls == [
pretend.call(ProxyFixer, token="insecure token", num_proxies=1),
pretend.call(VhmRootRemover),
pretend.call(HostRewrite),
]
assert configurator_obj.include.calls == (
[
pretend.call("pyramid_services"),
pretend.call(".metrics"),
pretend.call(".csrf"),
]
+ [
pretend.call(x)
for x in [
(
"pyramid_debugtoolbar"
if environment == config.Environment.development
else None
)
]
if x is not None
]
+ [
pretend.call(".logging"),
pretend.call("pyramid_jinja2"),
pretend.call(".filters"),
pretend.call("pyramid_mailer"),
pretend.call("pyramid_retry"),
pretend.call("pyramid_tm"),
pretend.call(".legacy.api.xmlrpc"),
pretend.call(".legacy.api.xmlrpc.cache"),
pretend.call("pyramid_rpc.xmlrpc"),
pretend.call(".legacy.action_routing"),
pretend.call(".predicates"),
pretend.call(".i18n"),
pretend.call(".db"),
pretend.call(".tasks"),
pretend.call(".rate_limiting"),
pretend.call(".static"),
pretend.call(".policy"),
pretend.call(".search"),
pretend.call(".aws"),
pretend.call(".gcloud"),
pretend.call(".sessions"),
pretend.call(".cache.http"),
pretend.call(".cache.origin"),
pretend.call(".email"),
pretend.call(".accounts"),
pretend.call(".macaroons"),
pretend.call(".oidc"),
pretend.call(".malware"),
pretend.call(".manage"),
pretend.call(".packaging"),
pretend.call(".redirects"),
pretend.call(".routes"),
pretend.call(".sponsors"),
pretend.call(".banners"),
pretend.call(".admin"),
pretend.call(".forklift"),
pretend.call(".sentry"),
pretend.call(".csp"),
pretend.call(".referrer_policy"),
pretend.call(".http"),
]
+ [pretend.call(x) for x in [configurator_settings.get("warehouse.theme")] if x]
+ [pretend.call(".sanity")]
)
assert configurator_obj.add_jinja2_renderer.calls == [
pretend.call(".html"),
pretend.call(".txt"),
pretend.call(".xml"),
]
assert configurator_obj.add_jinja2_search_path.calls == [
pretend.call("warehouse:templates", name=".html"),
pretend.call("warehouse:templates", name=".txt"),
pretend.call("warehouse:templates", name=".xml"),
]
assert configurator_obj.add_settings.calls == [
pretend.call({"jinja2.newstyle": True}),
pretend.call({"jinja2.i18n.domain": "messages"}),
pretend.call({"retry.attempts": 3}),
pretend.call(
{
"tm.manager_hook": mock.ANY,
"tm.activate_hook": config.activate_hook,
"tm.commit_veto": config.commit_veto,
"tm.annotate_user": False,
}
),
pretend.call({"http": {"verify": "/etc/ssl/certs/"}}),
]
add_settings_dict = configurator_obj.add_settings.calls[3].args[0]
assert add_settings_dict["tm.manager_hook"](pretend.stub()) is transaction_manager
assert configurator_obj.add_tween.calls == [
pretend.call("warehouse.config.require_https_tween_factory"),
pretend.call(
"warehouse.utils.compression.compression_tween_factory",
over=[
"warehouse.cache.http.conditional_http_tween_factory",
"pyramid_debugtoolbar.toolbar_tween_factory",
EXCVIEW,
],
),
]
assert configurator_obj.add_static_view.calls == [
pretend.call("static", "warehouse:static/dist/", cache_max_age=315360000)
]
assert configurator_obj.add_cache_buster.calls == [
pretend.call("warehouse:static/dist/", cachebuster_obj)
]
assert cachebuster_cls.calls == [
pretend.call("warehouse:static/dist/manifest.json", reload=False, strict=True)
]
assert configurator_obj.whitenoise_serve_static.calls == [
pretend.call(autorefresh=False, max_age=315360000)
]
assert configurator_obj.whitenoise_add_files.calls == [
pretend.call("warehouse:static/dist/", prefix="/static/")
]
assert configurator_obj.whitenoise_add_manifest.calls == [
pretend.call("warehouse:static/dist/manifest.json", prefix="/static/")
]
assert configurator_obj.add_directive.calls == [
pretend.call("add_template_view", config.template_view, action_wrap=False)
]
assert configurator_obj.scan.calls == [
pretend.call(
categories=(
"pyramid",
"warehouse",
),
ignore=["warehouse.migrations.env", "warehouse.celery", "warehouse.wsgi"],
)
]
assert configurator_obj.commit.calls == [pretend.call()]
assert configurator_obj.add_renderer.calls == [
pretend.call("json", json_renderer_obj),
pretend.call("xmlrpc", xmlrpc_renderer_obj),
]
assert json_renderer_cls.calls == [
pretend.call(sort_keys=True, separators=(",", ":"))
]
assert xmlrpc_renderer_cls.calls == [pretend.call(allow_none=True)]
def test_root_factory_access_control_list():
acl = config.RootFactory.__acl__
assert len(acl) == 5
assert acl[0] == (Allow, "group:admins", "admin")
assert acl[1] == (Allow, "group:moderators", "moderator")
assert acl[2] == (Allow, "group:psf_staff", "psf_staff")
assert acl[3] == (
Allow,
"group:with_admin_dashboard_access",
"admin_dashboard_access",
)
assert acl[4] == (Allow, Authenticated, "manage:user")
| 40.071749 | 88 | 0.618901 |
9991307d006dceca91ab709e2882287f944a26de | 6,204 | py | Python | src/_pytest/debugging.py | oss-qm/python-pytest | 68bbd42213c33884ea5df2ac0df21570f057c1f8 | [
"MIT"
] | 1 | 2020-05-16T05:14:01.000Z | 2020-05-16T05:14:01.000Z | src/_pytest/debugging.py | oss-qm/python-pytest | 68bbd42213c33884ea5df2ac0df21570f057c1f8 | [
"MIT"
] | 5 | 2020-03-24T16:37:25.000Z | 2021-06-10T21:24:54.000Z | src/_pytest/debugging.py | oss-qm/python-pytest | 68bbd42213c33884ea5df2ac0df21570f057c1f8 | [
"MIT"
] | null | null | null | """ interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import, division, print_function
import pdb
import sys
import os
from doctest import UnexpectedException
from _pytest.config import hookimpl
try:
from builtins import breakpoint # noqa
SUPPORTS_BREAKPOINT_BUILTIN = True
except ImportError:
SUPPORTS_BREAKPOINT_BUILTIN = False
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--pdb",
dest="usepdb",
action="store_true",
help="start the interactive Python debugger on errors or KeyboardInterrupt.",
)
group._addoption(
"--pdbcls",
dest="usepdb_cls",
metavar="modulename:classname",
help="start a custom interactive Python debugger on errors. "
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
)
group._addoption(
"--trace",
dest="trace",
action="store_true",
help="Immediately break when running each test.",
)
def pytest_configure(config):
if config.getvalue("usepdb_cls"):
modname, classname = config.getvalue("usepdb_cls").split(":")
__import__(modname)
pdb_cls = getattr(sys.modules[modname], classname)
else:
pdb_cls = pdb.Pdb
if config.getvalue("trace"):
config.pluginmanager.register(PdbTrace(), "pdbtrace")
if config.getvalue("usepdb"):
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
# Use custom Pdb class set_trace instead of default Pdb on breakpoint() call
if SUPPORTS_BREAKPOINT_BUILTIN:
_environ_pythonbreakpoint = os.environ.get("PYTHONBREAKPOINT", "")
if _environ_pythonbreakpoint == "":
sys.breakpointhook = pytestPDB.set_trace
old = (pdb.set_trace, pytestPDB._pluginmanager)
def fin():
pdb.set_trace, pytestPDB._pluginmanager = old
pytestPDB._config = None
pytestPDB._pdb_cls = pdb.Pdb
if SUPPORTS_BREAKPOINT_BUILTIN:
sys.breakpointhook = sys.__breakpointhook__
pdb.set_trace = pytestPDB.set_trace
pytestPDB._pluginmanager = config.pluginmanager
pytestPDB._config = config
pytestPDB._pdb_cls = pdb_cls
config._cleanup.append(fin)
class pytestPDB(object):
""" Pseudo PDB that defers to the real pdb. """
_pluginmanager = None
_config = None
_pdb_cls = pdb.Pdb
@classmethod
def set_trace(cls, set_break=True):
""" invoke PDB set_trace debugging, dropping any IO capturing. """
import _pytest.config
frame = sys._getframe().f_back
if cls._pluginmanager is not None:
capman = cls._pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config)
if set_break:
cls._pdb_cls().set_trace(frame)
class PdbInvoke(object):
def pytest_exception_interact(self, node, call, report):
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
out, err = capman.suspend_global_capture(in_=True)
sys.stdout.write(out)
sys.stdout.write(err)
_enter_pdb(node, call.excinfo, report)
def pytest_internalerror(self, excrepr, excinfo):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" % line)
sys.stderr.flush()
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
class PdbTrace(object):
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(self, pyfuncitem):
_test_pytest_function(pyfuncitem)
yield
def _test_pytest_function(pyfuncitem):
pytestPDB.set_trace(set_break=False)
testfunction = pyfuncitem.obj
pyfuncitem.obj = pdb.runcall
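    # pdb.runcall(func, *args, **kwargs) executes `func` under the debugger, so
    # the original test function is smuggled in below as runcall's first
    # argument (either prepended to the positional args or injected as a fake
    # "func" fixture); every test then starts inside pdb when --trace is used.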
if pyfuncitem._isyieldedfunction():
arg_list = list(pyfuncitem._args)
arg_list.insert(0, testfunction)
pyfuncitem._args = tuple(arg_list)
else:
if "func" in pyfuncitem._fixtureinfo.argnames:
raise ValueError("--trace can't be used with a fixture named func!")
pyfuncitem.funcargs["func"] = testfunction
new_list = list(pyfuncitem._fixtureinfo.argnames)
new_list.append("func")
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
def _enter_pdb(node, excinfo, rep):
# XXX we re-use the TerminalReporter's terminalwriter
# because this seems to avoid some encoding related troubles
# for not completely clear reasons.
tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
tw.line()
showcapture = node.config.option.showcapture
for sectionname, content in (
("stdout", rep.capstdout),
("stderr", rep.capstderr),
("log", rep.caplog),
):
if showcapture in (sectionname, "all") and content:
tw.sep(">", "captured " + sectionname)
if content[-1:] == "\n":
content = content[:-1]
tw.line(content)
tw.sep(">", "traceback")
rep.toterminal(tw)
tw.sep(">", "entering PDB")
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
rep._pdbshown = True
return rep
def _postmortem_traceback(excinfo):
if isinstance(excinfo.value, UnexpectedException):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def _find_last_non_hidden_frame(stack):
i = max(0, len(stack) - 1)
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
i -= 1
return i
def post_mortem(t):
class Pdb(pytestPDB._pdb_cls):
def get_stack(self, f, t):
stack, i = pdb.Pdb.get_stack(self, f, t)
if f is None:
i = _find_last_non_hidden_frame(stack)
return stack, i
p = Pdb()
p.reset()
p.interaction(None, t)
| 31.333333 | 85 | 0.651838 |
20dbd76d5cda31a9c33263a62a7c2bc2fdd350ca | 1,140 | py | Python | setup.py | smearle/gym-micropolis | 42b338ee050fb654e1c74dfe59bf5f03f03d39a4 | [
"MIT"
] | 8 | 2018-08-22T21:10:33.000Z | 2019-07-08T05:39:37.000Z | setup.py | smearle/gym-micropolis | 42b338ee050fb654e1c74dfe59bf5f03f03d39a4 | [
"MIT"
] | 4 | 2018-11-08T13:55:53.000Z | 2019-07-07T17:49:12.000Z | setup.py | smearle/gym-micropolis | 42b338ee050fb654e1c74dfe59bf5f03f03d39a4 | [
"MIT"
] | 8 | 2018-09-11T17:57:54.000Z | 2019-07-05T21:32:25.000Z | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='gym_city',
version='0.0.0',
install_requires=['gym',
'numpy',
'pillow',
'baselines',
'imutils',
'visdom',
'graphviz'],
author="Sam Earle",
author_email="smearle93@gmail.com",
description="An OpenAI Gym interface for Micropolis.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smearle/gym-city",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Ubuntu",
]
)
setup(name='MicropolisEnv-v0',
version='0.0.1',
install_requires=['gym'] # And any other dependencies foo needs
)
setup(name='MicropolisWalkEnv-v0',
version='0.0.1',
install_requires=['gym'] # And any other dependencies foo needs
)
setup(name='MicropolisArcadeEnv-v0',
version='0.0.1',
install_requires=['gym'] # And any other dependencies foo needs
)
| 25.909091 | 70 | 0.608772 |
9e48a7b9240a132dd3ec3c323ddb78080776eabb | 7,632 | py | Python | pypy/jit/codegen/llvm/test/test_llvmjit.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/jit/codegen/llvm/test/test_llvmjit.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/jit/codegen/llvm/test/test_llvmjit.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | import py
from os.path import dirname, join
from pypy.translator.c.test.test_genc import compile
from pypy.jit.codegen.llvm import llvmjit
from pypy.jit.codegen.llvm.compatibility import define, globalprefix, icmp, i1, i32
py.test.skip("doesn't work right now since it is using rctypes")
try:
from pypy.jit.codegen.llvm import llvmjit
except OSError:
py.test.skip("can not load libllvmjit library (see ../README.TXT)")
#
def skip_unsupported_platform():
from sys import platform
if platform == 'darwin':
py.test.skip('dynamic vs. static library issue on Darwin. see: http://www.cocoadev.com/index.pl?ApplicationLinkingIssues for more information (FIXME)')
#
llsquare = '''%(define)s %(i32)s %(globalprefix)ssquare(%(i32)s %%n) {
%%n2 = mul %(i32)s %%n, %%n
ret %(i32)s %%n2
}''' % vars()
llmul2 = '''%(define)s %(i32)s %(globalprefix)smul2(%(i32)s %%n) {
%%n2 = mul %(i32)s %%n, 2
ret %(i32)s %%n2
}''' % vars()
#
lldeadcode = '''%(define)s %(i32)s %(globalprefix)sdeadcode(%(i32)s %%n) {
Test:
%%cond = %(icmp)seq %(i32)s %%n, %%n
br %(i1)s %%cond, label %%IfEqual, label %%IfUnequal
IfEqual:
%%n2 = mul %(i32)s %%n, 2
ret %(i32)s %%n2
IfUnequal:
ret %(i32)s -1
}''' % vars()
#
llfuncA = '''%(define)s %(i32)s %(globalprefix)sfunc(%(i32)s %%n) {
%%n2 = add %(i32)s %%n, %%n
ret %(i32)s %%n2
}''' % vars()
llfuncB = '''%(define)s %(i32)s %(globalprefix)sfunc(%(i32)s %%n) {
%%n2 = mul %(i32)s %%n, %%n
ret %(i32)s %%n2
}''' % vars()
#
llacross1 = '''declare %(i32)s %(globalprefix)sacross2(%(i32)s)
implementation
%(define)s %(i32)s %(globalprefix)sacross1(%(i32)s %%n) {
%%n2 = mul %(i32)s %%n, 3
ret %(i32)s %%n2
}
%(define)s %(i32)s %(globalprefix)sacross1to2(%(i32)s %%n) {
%%n2 = add %(i32)s %%n, 5
%%n3 = call %(i32)s %(globalprefix)sacross2(%(i32)s %%n2)
ret %(i32)s %%n3
}''' % vars()
llacross2 = '''declare %(i32)s %(globalprefix)sacross1(%(i32)s %%dsf)
implementation
%(define)s %(i32)s %(globalprefix)sacross2(%(i32)s %%n) {
%%n2 = mul %(i32)s %%n, 7
ret %(i32)s %%n2
}
%(define)s %(i32)s %(globalprefix)sacross2to1(%(i32)s %%n) {
%%n2 = add %(i32)s %%n, 9
%%n3 = call %(i32)s %(globalprefix)sacross1(%(i32)s %%n2)
ret %(i32)s %%n3
}''' % vars()
#
llglobalmul4 = '''%(globalprefix)smy_global_data = external global %(i32)s
implementation
%(define)s %(i32)s %(globalprefix)sglobalmul4(%(i32)s %%a) {
%%v0 = load %(i32)s* %(globalprefix)smy_global_data
%%v1 = mul %(i32)s %%v0, 4
%%v2 = add %(i32)s %%v1, %%a
store %(i32)s %%v2, %(i32)s* %(globalprefix)smy_global_data
ret %(i32)s %%v2
}''' % vars()
#
llcall_global_function = '''declare %(i32)s %(globalprefix)smy_global_function(%(i32)s, %(i32)s, %(i32)s)
implementation
%(define)s %(i32)s %(globalprefix)scall_global_function(%(i32)s %%n) {
%%v = call %(i32)s %(globalprefix)smy_global_function(%(i32)s 3, %(i32)s %%n, %(i32)s 7) ;note: maybe tail call?
ret %(i32)s %%v
}''' % vars()
#helpers
def execute(llsource, function_name, param):
assert llvmjit.parse(llsource)
function = llvmjit.getNamedFunction(function_name)
assert function
return llvmjit.execute(function, param)
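#note: execute() parses the given LLVM IR into the JIT, looks up the named
#      function and calls it with a single integer argument; the tests below
#      reuse it with the IR snippets defined at module level.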
#tests...
def test_restart():
for i in range(3):
llvmjit.restart()
assert not llvmjit.getNamedFunction('square')
assert llvmjit.parse(llsquare)
assert llvmjit.getNamedFunction('square')
def test_getNamedFunction():
for i in range(3):
llvmjit.restart()
assert not llvmjit.getNamedFunction('square')
assert not llvmjit.getNamedFunction('square')
assert llvmjit.parse(llsquare)
assert llvmjit.getNamedFunction('square')
assert llvmjit.getNamedFunction('square')
def test_parse():
llvmjit.restart()
assert llvmjit.parse(llsquare)
def test_execute():
llvmjit.restart()
assert execute(llsquare, 'square', 4) == 4 * 4
def test_execute_nothing():
llvmjit.restart()
assert llvmjit.execute(None, 4) == -1 #-1 == no function supplied
def test_execute_multiple():
llvmjit.restart()
llvmjit.parse(llsquare)
llvmjit.parse(llmul2)
square = llvmjit.getNamedFunction('square')
mul2 = llvmjit.getNamedFunction('mul2')
for i in range(5):
assert llvmjit.execute(square, i) == i * i
assert llvmjit.execute(mul2 , i) == i * 2
def test_execute_across_module():
def my_across1(n):
return n * 3
def my_across1to2(n):
return my_across2(n + 5)
def my_across2(n):
return n * 7
def my_across2to1(n):
return my_across1(n + 9)
llvmjit.restart()
llvmjit.parse(llacross1)
llvmjit.parse(llacross2)
across1to2 = llvmjit.getNamedFunction('across1to2')
across2to1 = llvmjit.getNamedFunction('across2to1')
for i in range(5):
assert llvmjit.execute(across1to2, i) == my_across1to2(i)
assert llvmjit.execute(across2to1, i) == my_across2to1(i)
def test_recompile():
py.test.skip("recompile new function implementation test is work in progress")
def funcA(n):
return n + n
def funcB(n):
return n * n
llvmjit.restart()
llvmjit.parse(llfuncA)
_llfuncA = llvmjit.getNamedFunction('func')
print '_llfuncA', _llfuncA
for i in range(5):
assert llvmjit.execute(_llfuncA, i) == funcA(i)
llvmjit.freeMachineCodeForFunction(_llfuncA)
llvmjit.parse(llfuncB)
_llfuncB = llvmjit.getNamedFunction('func')
print '_llfuncB', _llfuncB
llvmjit.recompile(_llfuncB) #note: because %func has changed because of the 2nd parse
for i in range(5):
assert llvmjit.execute(_llfuncB, i) == funcB(i)
def test_transform(): #XXX This uses Module transforms, think about Function transforms too.
llvmjit.restart()
llvmjit.parse(lldeadcode)
deadcode = llvmjit.getNamedFunction('deadcode')
assert llvmjit.execute(deadcode, 10) == 10 * 2
assert llvmjit.transform(3) #optlevel = [0123]
assert llvmjit.execute(deadcode, 20) == 20 * 2
def test_modify_global_data():
llvmjit.restart()
llvmjit.set_global_data(10)
assert llvmjit.get_global_data() == 10
gp_data = llvmjit.get_pointer_to_global_data()
llvmjit.parse(llglobalmul4)
p = llvmjit.getNamedGlobal('my_global_data...')
assert not p
p = llvmjit.getNamedGlobal('my_global_data')
assert p
llvmjit.addGlobalMapping(p, gp_data) #note: should be prior to execute()
globalmul4 = llvmjit.getNamedFunction('globalmul4')
assert llvmjit.execute(globalmul4, 5) == 10 * 4 + 5
assert llvmjit.get_global_data() == 10 * 4 + 5
def test_call_global_function(): #used by PyPy JIT for adding case(s) to a flexswitch
llvmjit.restart()
gp_function = llvmjit.get_pointer_to_global_function()
llvmjit.parse(llcall_global_function)
p = llvmjit.getNamedFunction('my_global_function...')
assert not p
p = llvmjit.getNamedFunction('my_global_function')
assert p
llvmjit.addGlobalMapping(p, gp_function) #prior to execute()!
call_global_function = llvmjit.getNamedFunction('call_global_function')
assert llvmjit.execute(call_global_function, 5) == 3 + 5 + 7
def DONTtest_functions_with_different_signatures():
pass
def DONTtest_layers_of_codegenerators(): #e.g. i386 code until function stabilizes then llvm
pass
def test_execute_translation(): #put this one last because it takes the most time
skip_unsupported_platform()
llvmjit.restart()
def f(x):
return execute(llsquare, 'square', x + 5)
fn = compile(f, [int])
res = fn(1)
assert res == 36
| 30.650602 | 159 | 0.654088 |
bbdbc09ecc4a230821e3a1c0fd6cde72247a6776 | 31,127 | py | Python | tests/sensors/test_external_task_sensor.py | rliuamzn/airflow | 177dfbd12a42a5c229640c6c830f43f280ea5caa | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2021-07-30T17:29:38.000Z | 2022-03-06T08:44:23.000Z | tests/sensors/test_external_task_sensor.py | rliuamzn/airflow | 177dfbd12a42a5c229640c6c830f43f280ea5caa | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 22 | 2020-12-13T07:33:35.000Z | 2022-02-27T17:55:01.000Z | tests/sensors/test_external_task_sensor.py | rliuamzn/airflow | 177dfbd12a42a5c229640c6c830f43f280ea5caa | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-09-18T17:04:21.000Z | 2021-09-18T17:04:21.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import time, timedelta
import pytest
from airflow import exceptions, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import DagBag, TaskInstance
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskMarker, ExternalTaskSensor
from airflow.sensors.time_sensor import TimeSensor
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
TEST_TASK_ID = 'time_sensor_check'
DEV_NULL = '/dev/null'
@pytest.fixture(autouse=True)
def clean_db():
clear_db_runs()
class TestExternalTaskSensor(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
def test_time_sensor(self):
op = TimeSensor(task_id=TEST_TASK_ID, target_time=time(0), dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_catch_overlap_allowed_failed_state(self):
with pytest.raises(AirflowException):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=[State.SUCCESS],
failed_states=[State.SUCCESS],
dag=self.dag,
)
def test_external_task_sensor_wrong_failed_states(self):
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["invalid_state"],
dag=self.dag,
)
def test_external_task_sensor_failed_states(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["failed"],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_failed_states_as_success(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
with pytest.raises(AirflowException) as ctx:
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        assert str(ctx.value) == "The external task time_sensor_check in DAG unit_test_dag failed."
def test_external_dag_sensor(self):
other_dag = DAG('other_dag', default_args=self.args, end_date=DEFAULT_DATE, schedule_interval='@once')
other_dag.create_dagrun(
run_id='test', start_date=DEFAULT_DATE, execution_date=DEFAULT_DATE, state=State.SUCCESS
)
op = ExternalTaskSensor(
task_id='test_external_dag_sensor_check',
external_dag_id='other_dag',
external_task_id=None,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_templated_sensor(self):
with self.dag:
sensor = ExternalTaskSensor(
task_id='templated_task', external_dag_id='dag_{{ ds }}', external_task_id='task_{{ ds }}'
)
instance = TaskInstance(sensor, DEFAULT_DATE)
instance.render_templates()
assert sensor.external_dag_id == f"dag_{DEFAULT_DATE.date()}"
assert sensor.external_task_id == f"task_{DEFAULT_DATE.date()}"
def test_external_task_sensor_fn_multiple_execution_dates(self):
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
dag_external_id = TEST_DAG_ID + '_external'
dag_external = DAG(dag_external_id, default_args=self.args, schedule_interval=timedelta(seconds=1))
task_external_with_failure = BashOperator(
task_id="task_external_with_failure", bash_command=bash_command_code, retries=0, dag=dag_external
)
task_external_without_failure = DummyOperator(
task_id="task_external_without_failure", retries=0, dag=dag_external
)
task_external_without_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
session = settings.Session()
TI = TaskInstance
try:
task_external_with_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
            # The task_external_with_failure task is expected to fail
            # once per minute (the run on the first second of
            # each minute).
except Exception as e:
failed_tis = (
session.query(TI)
.filter(
TI.dag_id == dag_external_id,
TI.state == State.FAILED,
TI.execution_date == DEFAULT_DATE + timedelta(seconds=1),
)
.all()
)
if len(failed_tis) == 1 and failed_tis[0].task_id == 'task_external_with_failure':
pass
else:
raise e
dag_id = TEST_DAG_ID
dag = DAG(dag_id, default_args=self.args, schedule_interval=timedelta(minutes=1))
task_without_failure = ExternalTaskSensor(
task_id='task_without_failure',
external_dag_id=dag_external_id,
external_task_id='task_external_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
dag=dag,
)
task_with_failure = ExternalTaskSensor(
task_id='task_with_failure',
external_dag_id=dag_external_id,
external_task_id='task_external_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
dag=dag,
)
task_without_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with pytest.raises(AirflowSensorTimeout):
task_with_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_delta(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
allowed_states=['success'],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn(self):
self.test_time_sensor()
# check that the execution_fn works
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta_1',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(0),
allowed_states=['success'],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# double check that the execution is being called by failing the test
op2 = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta_2',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(days=1),
allowed_states=['success'],
timeout=1,
poke_interval=1,
dag=self.dag,
)
with pytest.raises(exceptions.AirflowSensorTimeout):
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_multiple_args(self):
"""Check this task sensor passes multiple args with full context. If no failure, means clean run."""
self.test_time_sensor()
def my_func(dt, context):
assert context['execution_date'] == dt
return dt + timedelta(0)
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_arg_fn',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=my_func,
allowed_states=['success'],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_kwargs(self):
"""Check this task sensor passes multiple args with full context. If no failure, means clean run."""
self.test_time_sensor()
def my_func(dt, ds_nodash, tomorrow_ds_nodash):
assert ds_nodash == dt.strftime("%Y%m%d")
assert tomorrow_ds_nodash == (dt + timedelta(days=1)).strftime("%Y%m%d")
return dt + timedelta(0)
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_fn_kwargs',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=my_func,
allowed_states=['success'],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_error_delta_and_fn(self):
self.test_time_sensor()
# Test that providing execution_delta and a function raises an error
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
execution_date_fn=lambda dt: dt,
allowed_states=['success'],
dag=self.dag,
)
def test_catch_invalid_allowed_states(self):
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_1',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=['invalid_state'],
dag=self.dag,
)
with pytest.raises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_2',
external_dag_id=TEST_DAG_ID,
external_task_id=None,
allowed_states=['invalid_state'],
dag=self.dag,
)
def test_external_task_sensor_waits_for_task_check_existence(self):
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id="example_bash_operator",
external_task_id="non-existing-task",
check_existence=True,
dag=self.dag,
)
with pytest.raises(AirflowException):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_waits_for_dag_check_existence(self):
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id="non-existing-dag",
external_task_id=None,
check_existence=True,
dag=self.dag,
)
with pytest.raises(AirflowException):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
class TestExternalTaskMarker(unittest.TestCase):
def test_serialized_fields(self):
assert {"recursion_depth"}.issubset(ExternalTaskMarker.get_serialized_fields())
def test_serialized_external_task_marker(self):
dag = DAG('test_serialized_external_task_marker', start_date=DEFAULT_DATE)
task = ExternalTaskMarker(
task_id="parent_task",
external_dag_id="external_task_marker_child",
external_task_id="child_task1",
dag=dag,
)
serialized_op = SerializedBaseOperator.serialize_operator(task)
deserialized_op = SerializedBaseOperator.deserialize_operator(serialized_op)
assert deserialized_op.task_type == 'ExternalTaskMarker'
assert getattr(deserialized_op, 'external_dag_id') == 'external_task_marker_child'
assert getattr(deserialized_op, 'external_task_id') == 'child_task1'
@pytest.fixture
def dag_bag_ext():
"""
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
set up using ExternalTaskMarker and ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
|
|
dag_1: ---> task_a_1 >> task_b_1
|
|
dag_2: ---> task_a_2 >> task_b_2
|
|
dag_3: ---> task_a_3 >> task_b_3
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_0 = DummyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0
)
task_a_0 >> task_b_0
dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_1 = ExternalTaskSensor(
task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1
)
task_b_1 = ExternalTaskMarker(
task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1
)
task_a_1 >> task_b_1
dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_2 = ExternalTaskSensor(
task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2
)
task_b_2 = ExternalTaskMarker(
task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2
)
task_a_2 >> task_b_2
dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_3 = ExternalTaskSensor(
task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3
)
task_b_3 = DummyOperator(task_id="task_b_3", dag=dag_3)
task_a_3 >> task_b_3
for dag in [dag_0, dag_1, dag_2, dag_3]:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
@pytest.fixture
def dag_bag_parent_child():
"""
Create a DagBag with two DAGs looking like this. task_1 of child_dag_1 on day 1 depends on
task_0 of parent_dag_0 on day 1. Therefore, when task_0 of parent_dag_0 on day 1 and day 2
are cleared, parent_dag_0 DagRuns need to be set to running on both days, but child_dag_1
only needs to be set to running on day 1.
day 1 day 2
parent_dag_0 task_0 task_0
|
|
v
child_dag_1 task_1 task_1
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
day_1 = DEFAULT_DATE
dag_0 = DAG("parent_dag_0", start_date=day_1, schedule_interval=None)
task_0 = ExternalTaskMarker(
task_id="task_0",
external_dag_id="child_dag_1",
external_task_id="task_1",
execution_date=day_1.isoformat(),
recursion_depth=3,
dag=dag_0,
)
dag_1 = DAG("child_dag_1", start_date=day_1, schedule_interval=None)
_ = ExternalTaskSensor(
task_id="task_1",
external_dag_id=dag_0.dag_id,
external_task_id=task_0.task_id,
execution_date_fn=lambda execution_date: day_1 if execution_date == day_1 else [],
mode='reschedule',
dag=dag_1,
)
for dag in [dag_0, dag_1]:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
def run_tasks(dag_bag, execution_date=DEFAULT_DATE):
"""
Run all tasks in the DAGs in the given dag_bag. Return the TaskInstance objects as a dict
keyed by task_id.
"""
tis = {}
for dag in dag_bag.dags.values():
for task in dag.tasks:
ti = TaskInstance(task=task, execution_date=execution_date)
tis[task.task_id] = ti
ti.run()
assert_ti_state_equal(ti, State.SUCCESS)
return tis
def assert_ti_state_equal(task_instance, state):
"""
Assert state of task_instances equals the given state.
"""
task_instance.refresh_from_db()
assert task_instance.state == state
def clear_tasks(dag_bag, dag, task, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, dry_run=False):
"""
Clear the task and its downstream tasks recursively for the dag in the given dagbag.
"""
partial: DAG = dag.partial_subset(task_ids_or_regex=[task.task_id], include_downstream=True)
return partial.clear(start_date=start_date, end_date=end_date, dag_bag=dag_bag, dry_run=dry_run)
def test_external_task_marker_transitive(dag_bag_ext):
"""
Test clearing tasks across DAGs.
"""
tis = run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
clear_tasks(dag_bag_ext, dag_0, task_a_0)
ti_a_0 = tis["task_a_0"]
ti_b_3 = tis["task_b_3"]
assert_ti_state_equal(ti_a_0, State.NONE)
assert_ti_state_equal(ti_b_3, State.NONE)
def test_external_task_marker_clear_activate(dag_bag_parent_child):
"""
Test clearing tasks across DAGs and make sure the right DagRuns are activated.
"""
from airflow.utils.session import create_session
from airflow.utils.types import DagRunType
dag_bag = dag_bag_parent_child
day_1 = DEFAULT_DATE
day_2 = DEFAULT_DATE + timedelta(days=1)
run_tasks(dag_bag, execution_date=day_1)
run_tasks(dag_bag, execution_date=day_2)
with create_session() as session:
for dag in dag_bag.dags.values():
for execution_date in [day_1, day_2]:
dagrun = dag.create_dagrun(
State.RUNNING, execution_date, run_type=DagRunType.MANUAL, session=session
)
dagrun.set_state(State.SUCCESS)
session.add(dagrun)
session.commit()
# Assert that dagruns of all the affected dags are set to SUCCESS before tasks are cleared.
for dag in dag_bag.dags.values():
for execution_date in [day_1, day_2]:
dagrun = dag.get_dagrun(execution_date=execution_date)
assert dagrun.state == State.SUCCESS
dag_0 = dag_bag.get_dag("parent_dag_0")
task_0 = dag_0.get_task("task_0")
clear_tasks(dag_bag, dag_0, task_0, start_date=day_1, end_date=day_2)
# Assert that dagruns of all the affected dags are set to QUEUED after tasks are cleared.
# Unaffected dagruns should be left as SUCCESS.
dagrun_0_1 = dag_bag.get_dag('parent_dag_0').get_dagrun(execution_date=day_1)
dagrun_0_2 = dag_bag.get_dag('parent_dag_0').get_dagrun(execution_date=day_2)
dagrun_1_1 = dag_bag.get_dag('child_dag_1').get_dagrun(execution_date=day_1)
dagrun_1_2 = dag_bag.get_dag('child_dag_1').get_dagrun(execution_date=day_2)
assert dagrun_0_1.state == State.QUEUED
assert dagrun_0_2.state == State.QUEUED
assert dagrun_1_1.state == State.QUEUED
assert dagrun_1_2.state == State.SUCCESS
def test_external_task_marker_future(dag_bag_ext):
"""
Test clearing tasks with no end_date. This is the case when users clear tasks with
Future, Downstream and Recursive selected.
"""
date_0 = DEFAULT_DATE
date_1 = DEFAULT_DATE + timedelta(days=1)
tis_date_0 = run_tasks(dag_bag_ext, execution_date=date_0)
tis_date_1 = run_tasks(dag_bag_ext, execution_date=date_1)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
# This should clear all tasks on dag_0 to dag_3 on both date_0 and date_1
clear_tasks(dag_bag_ext, dag_0, task_a_0, end_date=None)
ti_a_0_date_0 = tis_date_0["task_a_0"]
ti_b_3_date_0 = tis_date_0["task_b_3"]
ti_b_3_date_1 = tis_date_1["task_b_3"]
assert_ti_state_equal(ti_a_0_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_0, State.NONE)
assert_ti_state_equal(ti_b_3_date_1, State.NONE)
def test_external_task_marker_exception(dag_bag_ext):
"""
Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
than allowed by the recursion_depth of the first ExternalTaskMarker being cleared.
"""
run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
task_b_0 = dag_0.get_task("task_b_0")
task_b_0.recursion_depth = 2
with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
clear_tasks(dag_bag_ext, dag_0, task_a_0)
@pytest.fixture
def dag_bag_cyclic():
"""
Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
^ |
| |
dag_1: | ---> task_a_1 >> task_b_1
| ^
| |
dag_n: | ---> task_a_n >> task_b_n
| |
-----------------------------------------------------
"""
def _factory(depth: int) -> DagBag:
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dags = []
with DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
dags.append(dag)
task_a_0 = DummyOperator(task_id="task_a_0")
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3
)
task_a_0 >> task_b_0
for n in range(1, depth):
with DAG(f"dag_{n}", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{n}",
external_dag_id=f"dag_{n-1}",
external_task_id=f"task_b_{n-1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{n}",
external_dag_id=f"dag_{n+1}",
external_task_id=f"task_a_{n+1}",
recursion_depth=3,
)
task_a >> task_b
# Create the last dag wich loops back
with DAG(f"dag_{depth}", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
dags.append(dag)
task_a = ExternalTaskSensor(
task_id=f"task_a_{depth}",
external_dag_id=f"dag_{depth-1}",
external_task_id=f"task_b_{depth-1}",
)
task_b = ExternalTaskMarker(
task_id=f"task_b_{depth}",
external_dag_id="dag_0",
external_task_id="task_a_0",
recursion_depth=2,
)
task_a >> task_b
for dag in dags:
dag_bag.bag_dag(dag=dag, root_dag=dag)
return dag_bag
return _factory
def test_external_task_marker_cyclic_deep(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
raised.
"""
dag_bag = dag_bag_cyclic(10)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
clear_tasks(dag_bag, dag_0, task_a_0)
def test_external_task_marker_cyclic_shallow(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies shallower
than recursion_depth
"""
dag_bag = dag_bag_cyclic(2)
run_tasks(dag_bag)
dag_0 = dag_bag.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
tis = clear_tasks(dag_bag, dag_0, task_a_0, dry_run=True)
assert [
("dag_0", "task_a_0"),
("dag_0", "task_b_0"),
("dag_1", "task_a_1"),
("dag_1", "task_b_1"),
("dag_2", "task_a_2"),
("dag_2", "task_b_2"),
] == sorted((ti.dag_id, ti.task_id) for ti in tis)
@pytest.fixture
def dag_bag_multiple():
"""
Create a DagBag containing two DAGs, linked by multiple ExternalTaskMarker.
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
daily_dag = DAG("daily_dag", start_date=DEFAULT_DATE, schedule_interval="@daily")
agg_dag = DAG("agg_dag", start_date=DEFAULT_DATE, schedule_interval="@daily")
dag_bag.bag_dag(dag=daily_dag, root_dag=daily_dag)
dag_bag.bag_dag(dag=agg_dag, root_dag=agg_dag)
daily_task = DummyOperator(task_id="daily_tas", dag=daily_dag)
start = DummyOperator(task_id="start", dag=agg_dag)
for i in range(25):
task = ExternalTaskMarker(
task_id=f"{daily_task.task_id}_{i}",
external_dag_id=daily_dag.dag_id,
external_task_id=daily_task.task_id,
execution_date="{{ macros.ds_add(ds, -1 * %s) }}" % i,
dag=agg_dag,
)
start >> task
yield dag_bag
@pytest.mark.quarantined
@pytest.mark.backend("postgres", "mysql")
def test_clear_multiple_external_task_marker(dag_bag_multiple):
"""
Test clearing a dag that has multiple ExternalTaskMarker.
sqlite3 parser stack size is 100 lexical items by default so this puts a hard limit on
the level of nesting in the sql. This test is intentionally skipped in sqlite.
"""
agg_dag = dag_bag_multiple.get_dag("agg_dag")
for delta in range(len(agg_dag.tasks)):
execution_date = DEFAULT_DATE + timedelta(days=delta)
run_tasks(dag_bag_multiple, execution_date=execution_date)
# There used to be some slowness caused by calling count() inside DAG.clear().
# That has since been fixed. It should take no more than a few seconds to call
# dag.clear() here.
assert agg_dag.clear(start_date=execution_date, end_date=execution_date, dag_bag=dag_bag_multiple) == 51
@pytest.fixture
def dag_bag_head_tail():
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501 20200502 20200510
+------+ +------+ +------+
| head | -->head | --> -->head |
| | | / | | | / / | | |
| v | / | v | / / | v |
| body | / | body | / ... / | body |
| | |/ | | |/ / | | |
| v / | v / / | v |
| tail/| | tail/| / | tail |
+------+ +------+ +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule_interval="@daily") as dag:
head = ExternalTaskSensor(
task_id='head',
external_dag_id=dag.dag_id,
external_task_id="tail",
execution_delta=timedelta(days=1),
mode="reschedule",
)
body = DummyOperator(task_id="body")
tail = ExternalTaskMarker(
task_id="tail",
external_dag_id=dag.dag_id,
external_task_id=head.task_id,
execution_date="{{ tomorrow_ds_nodash }}",
)
head >> body >> tail
dag_bag.bag_dag(dag=dag, root_dag=dag)
yield dag_bag
def test_clear_overlapping_external_task_marker(dag_bag_head_tail):
dag = dag_bag_head_tail.get_dag("head_tail")
# Mark first head task success.
first = TaskInstance(task=dag.get_task("head"), execution_date=DEFAULT_DATE)
first.run(mark_success=True)
for delta in range(10):
execution_date = DEFAULT_DATE + timedelta(days=delta)
run_tasks(dag_bag_head_tail, execution_date=execution_date)
# The next two lines are doing the same thing. Clearing the first "head" with "Future"
# selected is the same as not selecting "Future". They should take similar amount of
# time too because dag.clear() uses visited_external_tis to keep track of visited ExternalTaskMarker.
assert dag.clear(start_date=DEFAULT_DATE, dag_bag=dag_bag_head_tail) == 30
assert dag.clear(start_date=DEFAULT_DATE, end_date=execution_date, dag_bag=dag_bag_head_tail) == 30
| 38.239558 | 110 | 0.640055 |
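# Hedged, illustrative sketch of the ExternalTaskMarker / ExternalTaskSensor
# pairing exercised by the tests in the file above: clearing the marker task in
# the parent DAG also clears the sensor task in the child DAG for the same
# execution date. DAG and task names are illustrative, and the import paths
# assume Airflow 2.x (they may differ in other versions).
from datetime import datetime

from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskMarker, ExternalTaskSensor

START = datetime(2021, 1, 1)

with DAG("parent_dag", start_date=START, schedule_interval="@daily") as parent_dag:
    # Clearing this task with "Downstream" + "Recursive" selected also clears
    # the sensor task in child_dag, which is what the fixtures above rely on.
    parent_task = ExternalTaskMarker(
        task_id="parent_task",
        external_dag_id="child_dag",
        external_task_id="wait_for_parent",
    )

with DAG("child_dag", start_date=START, schedule_interval="@daily") as child_dag:
    wait_for_parent = ExternalTaskSensor(
        task_id="wait_for_parent",
        external_dag_id=parent_dag.dag_id,
        external_task_id=parent_task.task_id,
        mode="reschedule",
    )
    wait_for_parent >> DummyOperator(task_id="downstream_work")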
f48113bb1d01455fca0863eb34cc0bbe71da72f4 | 16,277 | py | Python | fibo/wallet/wallet_transaction_store.py | Fibo-Network/fibo-blockchain | 34471efc081a52443e874749bb8ea3dc50b59891 | [
"Apache-2.0"
] | null | null | null | fibo/wallet/wallet_transaction_store.py | Fibo-Network/fibo-blockchain | 34471efc081a52443e874749bb8ea3dc50b59891 | [
"Apache-2.0"
] | null | null | null | fibo/wallet/wallet_transaction_store.py | Fibo-Network/fibo-blockchain | 34471efc081a52443e874749bb8ea3dc50b59891 | [
"Apache-2.0"
] | null | null | null | import time
from typing import Dict, List, Optional, Tuple
import aiosqlite
from fibo.types.blockchain_format.sized_bytes import bytes32
from fibo.types.mempool_inclusion_status import MempoolInclusionStatus
from fibo.util.db_wrapper import DBWrapper
from fibo.util.errors import Err
from fibo.util.ints import uint8, uint32
from fibo.wallet.transaction_record import TransactionRecord
from fibo.wallet.util.transaction_type import TransactionType
class WalletTransactionStore:
"""
WalletTransactionStore stores transaction history for the wallet.
"""
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
tx_record_cache: Dict[bytes32, TransactionRecord]
tx_submitted: Dict[bytes32, Tuple[int, int]] # tx_id: [time submitted: count]
unconfirmed_for_wallet: Dict[int, Dict[bytes32, TransactionRecord]]
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = self.db_wrapper.db
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS transaction_record("
" transaction_record blob,"
" bundle_id text PRIMARY KEY," # NOTE: bundle_id is being stored as bytes, not hex
" confirmed_at_height bigint,"
" created_at_time bigint,"
" to_puzzle_hash text,"
" amount blob,"
" fee_amount blob,"
" confirmed int,"
" sent int,"
" wallet_id bigint,"
" trade_id text,"
" type int)"
)
)
# Useful for reorg lookups
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_confirmed_index on transaction_record(confirmed_at_height)"
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_created_index on transaction_record(created_at_time)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS tx_confirmed on transaction_record(confirmed)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS tx_sent on transaction_record(sent)")
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_created_time on transaction_record(created_at_time)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS tx_type on transaction_record(type)")
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_to_puzzle_hash on transaction_record(to_puzzle_hash)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on transaction_record(wallet_id)")
await self.db_connection.commit()
self.tx_record_cache = {}
self.tx_submitted = {}
self.unconfirmed_for_wallet = {}
await self.rebuild_tx_cache()
return self
async def rebuild_tx_cache(self):
# init cache here
all_records = await self.get_all_transactions()
self.tx_record_cache = {}
self.unconfirmed_for_wallet = {}
for record in all_records:
self.tx_record_cache[record.name] = record
if record.wallet_id not in self.unconfirmed_for_wallet:
self.unconfirmed_for_wallet[record.wallet_id] = {}
if not record.confirmed:
self.unconfirmed_for_wallet[record.wallet_id][record.name] = record
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM transaction_record")
await cursor.close()
await self.db_connection.commit()
async def add_transaction_record(self, record: TransactionRecord, in_transaction: bool) -> None:
"""
Store TransactionRecord in DB and Cache.
"""
self.tx_record_cache[record.name] = record
if record.wallet_id not in self.unconfirmed_for_wallet:
self.unconfirmed_for_wallet[record.wallet_id] = {}
unconfirmed_dict = self.unconfirmed_for_wallet[record.wallet_id]
if record.confirmed and record.name in unconfirmed_dict:
unconfirmed_dict.pop(record.name)
if not record.confirmed:
unconfirmed_dict[record.name] = record
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO transaction_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
bytes(record),
record.name,
record.confirmed_at_height,
record.created_at_time,
record.to_puzzle_hash.hex(),
bytes(record.amount),
bytes(record.fee_amount),
int(record.confirmed),
record.sent,
record.wallet_id,
record.trade_id,
record.type,
),
)
await cursor.close()
if not in_transaction:
await self.db_connection.commit()
except BaseException:
if not in_transaction:
await self.rebuild_tx_cache()
raise
finally:
if not in_transaction:
self.db_wrapper.lock.release()
async def set_confirmed(self, tx_id: bytes32, height: uint32):
"""
Updates transaction to be confirmed.
"""
current: Optional[TransactionRecord] = await self.get_transaction_record(tx_id)
if current is None:
return None
tx: TransactionRecord = TransactionRecord(
confirmed_at_height=height,
created_at_time=current.created_at_time,
to_puzzle_hash=current.to_puzzle_hash,
amount=current.amount,
fee_amount=current.fee_amount,
confirmed=True,
sent=current.sent,
spend_bundle=current.spend_bundle,
additions=current.additions,
removals=current.removals,
wallet_id=current.wallet_id,
sent_to=current.sent_to,
trade_id=None,
type=current.type,
name=current.name,
)
await self.add_transaction_record(tx, True)
async def increment_sent(
self,
tx_id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
err: Optional[Err],
) -> bool:
"""
Updates transaction sent count (Full Node has received spend_bundle and sent ack).
"""
current: Optional[TransactionRecord] = await self.get_transaction_record(tx_id)
if current is None:
return False
sent_to = current.sent_to.copy()
current_peers = set()
err_str = err.name if err is not None else None
append_data = (name, uint8(send_status.value), err_str)
for peer_id, status, error in sent_to:
current_peers.add(peer_id)
if name in current_peers:
sent_count = uint32(current.sent)
else:
sent_count = uint32(current.sent + 1)
sent_to.append(append_data)
tx: TransactionRecord = TransactionRecord(
confirmed_at_height=current.confirmed_at_height,
created_at_time=current.created_at_time,
to_puzzle_hash=current.to_puzzle_hash,
amount=current.amount,
fee_amount=current.fee_amount,
confirmed=current.confirmed,
sent=sent_count,
spend_bundle=current.spend_bundle,
additions=current.additions,
removals=current.removals,
wallet_id=current.wallet_id,
sent_to=sent_to,
trade_id=None,
type=current.type,
name=current.name,
)
await self.add_transaction_record(tx, False)
return True
async def tx_reorged(self, record: TransactionRecord):
"""
Updates transaction sent count to 0 and resets confirmation data
"""
tx: TransactionRecord = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=record.created_at_time,
to_puzzle_hash=record.to_puzzle_hash,
amount=record.amount,
fee_amount=record.fee_amount,
confirmed=False,
sent=uint32(0),
spend_bundle=record.spend_bundle,
additions=record.additions,
removals=record.removals,
wallet_id=record.wallet_id,
sent_to=[],
trade_id=None,
type=record.type,
name=record.name,
)
await self.add_transaction_record(tx, True)
async def get_transaction_record(self, tx_id: bytes32) -> Optional[TransactionRecord]:
"""
Checks DB and cache for TransactionRecord with id: id and returns it.
"""
if tx_id in self.tx_record_cache:
return self.tx_record_cache[tx_id]
# NOTE: bundle_id is being stored as bytes, not hex
cursor = await self.db_connection.execute("SELECT * from transaction_record WHERE bundle_id=?", (tx_id,))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
record = TransactionRecord.from_bytes(row[0])
return record
return None
async def get_not_sent(self) -> List[TransactionRecord]:
"""
Returns the list of transaction that have not been received by full node yet.
"""
current_time = int(time.time())
cursor = await self.db_connection.execute(
"SELECT * from transaction_record WHERE confirmed=?",
(0,),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
if record.name in self.tx_submitted:
time_submitted, count = self.tx_submitted[record.name]
if time_submitted < current_time - (60 * 10):
records.append(record)
self.tx_submitted[record.name] = current_time, 1
else:
if count < 5:
records.append(record)
self.tx_submitted[record.name] = time_submitted, (count + 1)
else:
records.append(record)
self.tx_submitted[record.name] = current_time, 1
return records
async def get_farming_rewards(self) -> List[TransactionRecord]:
"""
Returns the list of all farming rewards.
"""
fee_int = TransactionType.FEE_REWARD.value
pool_int = TransactionType.COINBASE_REWARD.value
cursor = await self.db_connection.execute(
"SELECT * from transaction_record WHERE confirmed=? and (type=? or type=?)", (1, fee_int, pool_int)
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_unconfirmed(self) -> List[TransactionRecord]:
"""
Returns the list of all transaction that have not yet been confirmed.
"""
cursor = await self.db_connection.execute("SELECT * from transaction_record WHERE confirmed=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def get_unconfirmed_for_wallet(self, wallet_id: int) -> List[TransactionRecord]:
"""
Returns the list of transaction that have not yet been confirmed.
"""
if wallet_id in self.unconfirmed_for_wallet:
return list(self.unconfirmed_for_wallet[wallet_id].values())
else:
return []
async def get_transactions_between(self, wallet_id: int, start, end) -> List[TransactionRecord]:
"""Return a list of transaction between start and end index. List is in reverse chronological order.
start = 0 is most recent transaction
"""
limit = end - start
cursor = await self.db_connection.execute(
f"SELECT * from transaction_record where wallet_id=? and confirmed_at_height not in"
f" (select confirmed_at_height from transaction_record order by confirmed_at_height"
f" ASC LIMIT {start})"
f" order by confirmed_at_height DESC LIMIT {limit}",
(wallet_id,),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
records.reverse()
return records
async def get_transaction_count_for_wallet(self, wallet_id) -> int:
cursor = await self.db_connection.execute(
"SELECT COUNT(*) FROM transaction_record where wallet_id=?", (wallet_id,)
)
count_result = await cursor.fetchone()
if count_result is not None:
count = count_result[0]
else:
count = 0
await cursor.close()
return count
async def get_all_transactions_for_wallet(self, wallet_id: int, type: int = None) -> List[TransactionRecord]:
"""
Returns all stored transactions.
"""
if type is None:
cursor = await self.db_connection.execute(
"SELECT * from transaction_record where wallet_id=?", (wallet_id,)
)
else:
cursor = await self.db_connection.execute(
"SELECT * from transaction_record where wallet_id=? and type=?",
(
wallet_id,
type,
),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
cache_set = set()
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
cache_set.add(record.name)
return records
async def get_all_transactions(self) -> List[TransactionRecord]:
"""
Returns all stored transactions.
"""
cursor = await self.db_connection.execute("SELECT * from transaction_record")
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def get_transaction_above(self, height: int) -> List[TransactionRecord]:
# Can be -1 (get all tx)
cursor = await self.db_connection.execute(
"SELECT * from transaction_record WHERE confirmed_at_height>?", (height,)
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def rollback_to_block(self, height: int):
# Delete from storage
to_delete = []
for tx in self.tx_record_cache.values():
if tx.confirmed_at_height > height:
to_delete.append(tx)
for tx in to_delete:
self.tx_record_cache.pop(tx.name)
c1 = await self.db_connection.execute("DELETE FROM transaction_record WHERE confirmed_at_height>?", (height,))
await c1.close()
async def delete_unconfirmed_transactions(self, wallet_id: int):
cursor = await self.db_connection.execute(
"DELETE FROM transaction_record WHERE confirmed=0 AND wallet_id=?", (wallet_id,)
)
await cursor.close()
| 36.25167 | 118 | 0.605947 |
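# Hedged usage sketch for the WalletTransactionStore file above: open an
# aiosqlite database, wrap it in the project's DBWrapper, build the store, and
# run a couple of queries. The in-memory database path, the asyncio boilerplate
# and the DBWrapper(connection) constructor signature are assumptions; creating
# a TransactionRecord is omitted because its constructor is not shown here.
import asyncio

import aiosqlite

from fibo.util.db_wrapper import DBWrapper
from fibo.wallet.wallet_transaction_store import WalletTransactionStore


async def main() -> None:
    connection = await aiosqlite.connect(":memory:")
    store = await WalletTransactionStore.create(DBWrapper(connection))

    # Both calls go through the methods defined above and return [] on an
    # empty database.
    print(await store.get_all_transactions())
    print(await store.get_unconfirmed_for_wallet(1))

    await connection.close()


asyncio.run(main())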
69de4afb5f510516f6b948782ac78b234c03ee18 | 15,057 | py | Python | tests/solvers/cpp/test_cpp_solvers.py | POPOGO/scikit-decide | 1128499c57aae1c989815b6fca4a29b96994dc12 | [
"MIT"
] | null | null | null | tests/solvers/cpp/test_cpp_solvers.py | POPOGO/scikit-decide | 1128499c57aae1c989815b6fca4a29b96994dc12 | [
"MIT"
] | null | null | null | tests/solvers/cpp/test_cpp_solvers.py | POPOGO/scikit-decide | 1128499c57aae1c989815b6fca4a29b96994dc12 | [
"MIT"
] | 1 | 2021-02-26T17:31:51.000Z | 2021-02-26T17:31:51.000Z | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import pytest
import inspect
from enum import Enum
from typing import NamedTuple, Optional
from math import sqrt
from copy import deepcopy
from pathos.helpers import mp
from skdecide import DeterministicPlanningDomain, Value, \
Value, Space, ImplicitSpace, \
EnvironmentOutcome, TransitionOutcome, \
SingleValueDistribution
from skdecide.builders.domain import UnrestrictedActions
from skdecide.hub.space.gym import EnumSpace, MultiDiscreteSpace
from skdecide.utils import load_registered_solver
# Must be defined outside the grid_domain() fixture
# so that parallel domains can pickle it
# /!\ Is it worth defining the domain as a fixture?
class State(NamedTuple):
x: int
y: int
s: int # step => to make the domain cycle-free for algorithms like AO*
# Must be defined outside the grid_domain() fixture
# so that parallel domains can pickle it
# /!\ Is it worth defining the domain as a fixture?
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_predicate = bool # Type of logical checks
T_info = None # Type of additional information given as part of an environment outcome
class GridDomain(D):
def __init__(self, num_cols=10, num_rows=10):
self.num_cols = num_cols
self.num_rows = num_rows
def _get_next_state(self, memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]]) -> D.T_state:
if action == Action.left:
next_state = State(max(memory.x - 1, 0), memory.y, memory.s + 1)
if action == Action.right:
next_state = State(min(memory.x + 1, self.num_cols - 1), memory.y, memory.s + 1)
if action == Action.up:
next_state = State(memory.x, max(memory.y - 1, 0), memory.s + 1)
if action == Action.down:
next_state = State(memory.x, min(memory.y + 1, self.num_rows - 1), memory.s + 1)
return next_state
def _get_transition_value(self, memory: D.T_memory[D.T_state], action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None) -> D.T_agent[Value[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(next_state.y - memory.y) # every move costs 1
return Value(cost=cost)
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
return self._is_goal(state) or state.s >= 100
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ImplicitSpace(lambda state: state.x == (self.num_cols - 1) and state.y == (self.num_rows - 1))
def _get_initial_state_(self) -> D.T_state:
return State(x=0, y=0, s=0)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self.num_cols, self.num_rows, 100])
# FIXTURES
@pytest.fixture(params=[{'entry': 'Astar',
'config': {'heuristic': lambda d, s: Value(cost=sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2)),
'debug_logs': False},
'optimal': True},
{'entry': 'AOstar',
'config': {'heuristic': lambda d, s: Value(cost=sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2)),
'debug_logs': False},
'optimal': True},
{'entry': 'BFWS',
'config': {'state_features': lambda d, s: (s.x, s.y),
'heuristic': lambda d, s: Value(cost=sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2)),
'termination_checker': lambda d, s: d.is_goal(s),
'debug_logs': False},
'optimal': True},
{'entry': 'IW',
'config': {'state_features': lambda d, s: (s.x, s.y),
'debug_logs': False},
'optimal': True},
{'entry': 'RIW',
'config': {'state_features': lambda d, s: (s.x, s.y),
'time_budget': 20,
'rollout_budget': 10,
'max_depth': 10,
'exploration': 0.25,
'use_simulation_domain': True,
'online_node_garbage': True,
'continuous_planning': True,
'debug_logs': False},
'optimal': False},
{'entry': 'UCT',
'config': {'time_budget': 20,
'rollout_budget': 10,
'max_depth': 10,
'continuous_planning': True,
'debug_logs': False},
'optimal': False},
{'entry': 'LRTDP',
'config': {'heuristic': lambda d, s: Value(cost=sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2)),
'use_labels': True,
'time_budget': 60000,
'rollout_budget': 10000,
'max_depth': 500,
'discount': 1.0,
'epsilon': 0.001,
'online_node_garbage': True,
'continuous_planning': False,
'debug_logs': False},
'optimal': True},
{'entry': 'ILAOstar',
'config': {'heuristic': lambda d, s: Value(cost=sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2)),
'discount': 1.0,
'epsilon': 0.001,
'debug_logs': False},
'optimal': True}])
def solver_cpp(request):
return request.param
@pytest.fixture(params=[False, True])
def parallel(request):
return request.param
@pytest.fixture(params=[False, True])
def shared_memory(request):
return request.param
# HELPER FUNCTION
def get_plan(domain, solver):
plan = []
cost = 0
observation = domain.reset()
nb_steps = 0
while (not domain.is_goal(observation)) and nb_steps < 20:
plan.append(solver.sample_action(observation))
outcome = domain.step(plan[-1])
cost += outcome.value.cost
observation = outcome.observation
nb_steps += 1
return plan, cost
# SHARED MEMORY PROXY FOR PARALLEL TESTS
class GridShmProxy:
_register_ = [(State, 2), (Action, 1), (EnumSpace, 1), (SingleValueDistribution, 1),
(Value, 1), (EnvironmentOutcome, 1), (TransitionOutcome, 1),
(bool, 1), (float, 1), (int, 2)]
def __init__(self):
self._proxies_ = {State: GridShmProxy.StateProxy, Action: GridShmProxy.ActionProxy,
EnumSpace: GridShmProxy.EnumSpaceProxy,
SingleValueDistribution: GridShmProxy.SingleValueDistributionProxy,
Value: GridShmProxy.ValueProxy,
EnvironmentOutcome: GridShmProxy.EnvironmentOutcomeProxy,
TransitionOutcome: GridShmProxy.TransitionOutcomeProxy,
bool: GridShmProxy.BoolProxy,
float: GridShmProxy.FloatProxy,
int: GridShmProxy.IntProxy}
def copy(self):
p = GridShmProxy()
p._proxies_ = dict(self._proxies_)
return p
def register(self):
return GridShmProxy._register_
def initialize(self, t):
return self._proxies_[t].initialize()
def encode(self, value, shm_value):
self._proxies_[type(value)].encode(value, shm_value)
def decode(self, t, shm_value):
return self._proxies_[t].decode(shm_value)
class StateProxy:
@staticmethod
def initialize():
return mp.Array('d', [0, 0, 0], lock=True)
@staticmethod
def encode(state, shm_state):
shm_state[0] = state.x
shm_state[1] = state.y
shm_state[2] = state.s
@staticmethod
def decode(shm_state):
return State(int(shm_state[0]), int(shm_state[1]), int(shm_state[2]))
class ActionProxy:
@staticmethod
def initialize():
return mp.Value('I', 0, lock=True)
@staticmethod
def encode(action, shm_action):
shm_action.value = action.value
@staticmethod
def decode(shm_action):
return Action(shm_action.value)
class EnumSpaceProxy: # Always used with Action as enum class
@staticmethod
def initialize():
return mp.Array('c', b'')
@staticmethod
def encode(val, shm_val):
pass
@staticmethod
def decode(val):
return EnumSpace(Action)
class SingleValueDistributionProxy: # Always used with State
@staticmethod
def initialize():
return GridShmProxy.StateProxy.initialize()
@staticmethod
def encode(svd, shm_svd):
GridShmProxy.StateProxy.encode(svd._value, shm_svd)
@staticmethod
def decode(svd):
return SingleValueDistribution(GridShmProxy.StateProxy.decode(svd))
class ValueProxy:
@staticmethod
def initialize():
return [mp.Value('d', 0), mp.Value('b', False)]
@staticmethod
def encode(value, shm_value):
if value.reward is not None:
shm_value[0].value = value.reward
shm_value[1].value = True
elif value.cost is not None:
shm_value[0].value = value.cost
shm_value[1].value = False
else:
shm_value[0].value = 0
shm_value[1].value = True
@staticmethod
def decode(value):
if value[1].value:
return Value(reward=value[0].value)
else:
return Value(cost=value[0].value)
class EnvironmentOutcomeProxy:
@staticmethod
def initialize():
return [GridShmProxy.StateProxy.initialize()] + \
GridShmProxy.ValueProxy.initialize() + \
[GridShmProxy.BoolProxy.initialize()]
@staticmethod
def encode(outcome, shm_outcome):
GridShmProxy.StateProxy.encode(outcome.observation, shm_outcome[0])
GridShmProxy.ValueProxy.encode(outcome.value, shm_outcome[1:3])
GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])
@staticmethod
def decode(outcome):
return EnvironmentOutcome(observation=GridShmProxy.StateProxy.decode(outcome[0]),
value=GridShmProxy.ValueProxy.decode(outcome[1:3]),
termination=GridShmProxy.BoolProxy.decode(outcome[3]))
class TransitionOutcomeProxy:
@staticmethod
def initialize():
return [GridShmProxy.StateProxy.initialize()] + \
GridShmProxy.ValueProxy.initialize() + \
[GridShmProxy.BoolProxy.initialize()]
@staticmethod
def encode(outcome, shm_outcome):
GridShmProxy.StateProxy.encode(outcome.state, shm_outcome[0])
GridShmProxy.ValueProxy.encode(outcome.value, shm_outcome[1:3])
GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])
@staticmethod
def decode(outcome):
return TransitionOutcome(state=GridShmProxy.StateProxy.decode(outcome[0]),
value=GridShmProxy.ValueProxy.decode(outcome[1:3]),
termination=GridShmProxy.BoolProxy.decode(outcome[3]))
class BoolProxy:
@staticmethod
def initialize():
return mp.Value('b', False)
@staticmethod
def encode(val, shm_val):
shm_val.value = val
@staticmethod
def decode(val):
return bool(val.value)
class FloatProxy:
@staticmethod
def initialize():
return mp.Value('d', False)
@staticmethod
def encode(val, shm_val):
shm_val.value = val
@staticmethod
def decode(val):
return float(val.value)
class IntProxy:
@staticmethod
def initialize():
return mp.Value('i', False)
@staticmethod
def encode(val, shm_val):
shm_val.value = val
@staticmethod
def decode(val):
return int(val.value)
# TESTS
def test_solver_cpp(solver_cpp, parallel, shared_memory):
noexcept = True
try:
dom = GridDomain()
solver_type = load_registered_solver(solver_cpp['entry'])
solver_args = deepcopy(solver_cpp['config'])
if 'parallel' in inspect.signature(solver_type.__init__).parameters:
solver_args['parallel'] = parallel
if 'shared_memory_proxy' in inspect.signature(solver_type.__init__).parameters and shared_memory:
solver_args['shared_memory_proxy'] = GridShmProxy()
solver_args['domain_factory'] = lambda: GridDomain()
with solver_type(**solver_args) as slv:
GridDomain.solve_with(slv)
plan, cost = get_plan(dom, slv)
except Exception as e:
print(e)
noexcept = False
assert solver_type.check_domain(dom) and noexcept and \
((not solver_cpp['optimal']) or parallel or (cost == 18 and len(plan) == 18))
| 37.831658 | 136 | 0.538686 |
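# Hedged sketch of running one registered solver on the GridDomain defined in
# the test module above, outside the pytest fixtures. It reuses GridDomain and
# get_plan from that module; the solver options mirror the 'Astar' fixture and
# are otherwise assumptions.
from math import sqrt

from skdecide import Value
from skdecide.utils import load_registered_solver

solver_type = load_registered_solver("Astar")
solver_args = {
    "domain_factory": lambda: GridDomain(),
    "heuristic": lambda d, s: Value(
        cost=sqrt((d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2)
    ),
    "debug_logs": False,
}
with solver_type(**solver_args) as slv:
    GridDomain.solve_with(slv)
    plan, cost = get_plan(GridDomain(), slv)
    print(len(plan), cost)  # the Astar fixture is marked optimal: 18 steps, cost 18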
0f277f042cfb7d7bb5584982fffc3e390b9f8242 | 7,455 | py | Python | execute_cora_sparse.py | 1049451037/FA-GAT | d8552cb79c30be50c93c869eeb042470b485e2fc | [
"MIT"
] | 5 | 2020-08-05T11:34:18.000Z | 2020-08-05T13:42:09.000Z | execute_cora_sparse.py | 1049451037/FA-GAT | d8552cb79c30be50c93c869eeb042470b485e2fc | [
"MIT"
] | null | null | null | execute_cora_sparse.py | 1049451037/FA-GAT | d8552cb79c30be50c93c869eeb042470b485e2fc | [
"MIT"
] | null | null | null | import time
import scipy.sparse as sp
import numpy as np
import tensorflow as tf
import argparse
from models import GAT
from models import SpGAT
from utils import process
checkpt_file = 'pre_trained/cora/mod_cora.ckpt'
dataset = 'cora'
# training params
batch_size = 1
nb_epochs = 100000
patience = 100
lr = 0.005 # learning rate
l2_coef = 0.0005 # weight decay
hid_units = [8] # numbers of hidden units per each attention head in each layer
n_heads = [8, 1] # additional entry for the output layer
residual = False
nonlinearity = tf.nn.elu
# model = GAT
model = SpGAT
print('Dataset: ' + dataset)
print('----- Opt. hyperparams -----')
print('lr: ' + str(lr))
print('l2_coef: ' + str(l2_coef))
print('----- Archi. hyperparams -----')
print('nb. layers: ' + str(len(hid_units)))
print('nb. units per layer: ' + str(hid_units))
print('nb. attention heads: ' + str(n_heads))
print('residual: ' + str(residual))
print('nonlinearity: ' + str(nonlinearity))
print('model: ' + str(model))
sparse = True
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = process.load_data(dataset)
features, spars = process.preprocess_features(features)
nb_nodes = features.shape[0]
ft_size = features.shape[1]
nb_classes = y_train.shape[1]
features = features[np.newaxis]
y_train = y_train[np.newaxis]
y_val = y_val[np.newaxis]
y_test = y_test[np.newaxis]
train_mask = train_mask[np.newaxis]
val_mask = val_mask[np.newaxis]
test_mask = test_mask[np.newaxis]
if sparse:
biases = process.preprocess_adj_bias(adj)
else:
adj = adj.todense()
adj = adj[np.newaxis]
biases = process.adj_to_bias(adj, [nb_nodes], nhood=1)
with tf.Graph().as_default():
with tf.name_scope('input'):
ftr_in = tf.placeholder(dtype=tf.float32, shape=(batch_size, nb_nodes, ft_size))
if sparse:
#bias_idx = tf.placeholder(tf.int64)
#bias_val = tf.placeholder(tf.float32)
#bias_shape = tf.placeholder(tf.int64)
bias_in = tf.sparse_placeholder(dtype=tf.float32)
else:
bias_in = tf.placeholder(dtype=tf.float32, shape=(batch_size, nb_nodes, nb_nodes))
lbl_in = tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes, nb_classes))
msk_in = tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes))
attn_drop = tf.placeholder(dtype=tf.float32, shape=())
ffd_drop = tf.placeholder(dtype=tf.float32, shape=())
is_train = tf.placeholder(dtype=tf.bool, shape=())
logits = model.inference(ftr_in, nb_classes, nb_nodes, is_train,
attn_drop, ffd_drop,
bias_mat=bias_in,
hid_units=hid_units, n_heads=n_heads,
residual=residual, activation=nonlinearity)
log_resh = tf.reshape(logits, [-1, nb_classes])
lab_resh = tf.reshape(lbl_in, [-1, nb_classes])
msk_resh = tf.reshape(msk_in, [-1])
loss = model.masked_softmax_cross_entropy(log_resh, lab_resh, msk_resh)
accuracy = model.masked_accuracy(log_resh, lab_resh, msk_resh)
train_op = model.training(loss, lr, l2_coef)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
vlss_mn = np.inf
vacc_mx = 0.0
curr_step = 0
with tf.Session() as sess:
sess.run(init_op)
train_loss_avg = 0
train_acc_avg = 0
val_loss_avg = 0
val_acc_avg = 0
for epoch in range(nb_epochs):
tr_step = 0
tr_size = features.shape[0]
while tr_step * batch_size < tr_size:
if sparse:
bbias = biases
else:
bbias = biases[tr_step*batch_size:(tr_step+1)*batch_size]
_, loss_value_tr, acc_tr = sess.run([train_op, loss, accuracy],
feed_dict={
ftr_in: features[tr_step*batch_size:(tr_step+1)*batch_size],
bias_in: bbias,
lbl_in: y_train[tr_step*batch_size:(tr_step+1)*batch_size],
msk_in: train_mask[tr_step*batch_size:(tr_step+1)*batch_size],
is_train: True,
attn_drop: 0.6, ffd_drop: 0.8})
train_loss_avg += loss_value_tr
train_acc_avg += acc_tr
tr_step += 1
vl_step = 0
vl_size = features.shape[0]
while vl_step * batch_size < vl_size:
if sparse:
bbias = biases
else:
bbias = biases[vl_step*batch_size:(vl_step+1)*batch_size]
loss_value_vl, acc_vl = sess.run([loss, accuracy],
feed_dict={
ftr_in: features[vl_step*batch_size:(vl_step+1)*batch_size],
bias_in: bbias,
lbl_in: y_val[vl_step*batch_size:(vl_step+1)*batch_size],
msk_in: val_mask[vl_step*batch_size:(vl_step+1)*batch_size],
is_train: False,
attn_drop: 0.0, ffd_drop: 0.0})
val_loss_avg += loss_value_vl
val_acc_avg += acc_vl
vl_step += 1
print('Training: loss = %.5f, acc = %.5f | Val: loss = %.5f, acc = %.5f' %
(train_loss_avg/tr_step, train_acc_avg/tr_step,
val_loss_avg/vl_step, val_acc_avg/vl_step))
if val_acc_avg/vl_step >= vacc_mx or val_loss_avg/vl_step <= vlss_mn:
if val_acc_avg/vl_step >= vacc_mx and val_loss_avg/vl_step <= vlss_mn:
vacc_early_model = val_acc_avg/vl_step
vlss_early_model = val_loss_avg/vl_step
saver.save(sess, checkpt_file)
vacc_mx = np.max((val_acc_avg/vl_step, vacc_mx))
vlss_mn = np.min((val_loss_avg/vl_step, vlss_mn))
curr_step = 0
else:
curr_step += 1
if curr_step == patience:
print('Early stop! Min loss: ', vlss_mn, ', Max accuracy: ', vacc_mx)
print('Early stop model validation loss: ', vlss_early_model, ', accuracy: ', vacc_early_model)
break
train_loss_avg = 0
train_acc_avg = 0
val_loss_avg = 0
val_acc_avg = 0
saver.restore(sess, checkpt_file)
ts_size = features.shape[0]
ts_step = 0
ts_loss = 0.0
ts_acc = 0.0
while ts_step * batch_size < ts_size:
if sparse:
bbias = biases
else:
bbias = biases[ts_step*batch_size:(ts_step+1)*batch_size]
loss_value_ts, acc_ts = sess.run([loss, accuracy],
feed_dict={
ftr_in: features[ts_step*batch_size:(ts_step+1)*batch_size],
bias_in: bbias,
lbl_in: y_test[ts_step*batch_size:(ts_step+1)*batch_size],
msk_in: test_mask[ts_step*batch_size:(ts_step+1)*batch_size],
is_train: False,
attn_drop: 0.0, ffd_drop: 0.0})
ts_loss += loss_value_ts
ts_acc += acc_ts
ts_step += 1
print('Test loss:', ts_loss/ts_step, '; Test accuracy:', ts_acc/ts_step)
sess.close()
| 36.905941 | 115 | 0.580282 |
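# Conceptual sketch of what the dense attention-bias matrix used in the script
# above encodes. This is an assumption about utils.process.adj_to_bias, not its
# actual implementation: edges (plus self-loops) keep a bias of 0.0 while every
# non-edge gets a large negative value so that softmax attention ignores it.
import numpy as np


def dense_adj_to_bias(adj: np.ndarray) -> np.ndarray:
    reachable = np.minimum(adj + np.eye(adj.shape[0], dtype=adj.dtype), 1.0)
    return -1e9 * (1.0 - reachable)  # 0 for edges/self-loops, -1e9 elsewhere


adj = np.array([[0, 1, 0],
                [1, 0, 0],
                [0, 0, 0]], dtype=np.float32)
print(dense_adj_to_bias(adj))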
008df23f3e67be8cb6d8eb6c002b38eae8709108 | 7,364 | py | Python | Map.py | lmtri1998/Super-Zombie-Flag-Capturing-Game | e472282b4b0f4c1d570b5ad63374a022c6aa3a43 | [
"MIT"
] | null | null | null | Map.py | lmtri1998/Super-Zombie-Flag-Capturing-Game | e472282b4b0f4c1d570b5ad63374a022c6aa3a43 | [
"MIT"
] | null | null | null | Map.py | lmtri1998/Super-Zombie-Flag-Capturing-Game | e472282b4b0f4c1d570b5ad63374a022c6aa3a43 | [
"MIT"
] | null | null | null | from Grid import Wall
class Map:
def __init__(self, grid_list):
self.grid_list = grid_list
def make_map(self):
wall_A = Wall([(0, 0), (1, 0), (2, 0)])
wall_B = Wall([(0, 0)])
wall_C = Wall([(0, 0), (0, 1), (0, 2)])
wall_D = Wall([(0, 0), (0, 1), (1, 0), (1, 1)])
wall_E = Wall([(0, 0), (0, 1), (1, 1), (1, 2)])
wall_F = Wall([(1, 0), (1, 1), (0, 1), (0, 2)])
wall_G = Wall([(0, 0), (1, 0), (1, 1), (2, 1)])
wall_H = Wall([(0, 1), (1, 1), (1, 0), (2, 0)])
wall_1 = Wall([(0, 2), (0, 1), (0, 0), (1, 0), (2, 0)])
wall_2 = Wall([(2, 2), (2, 1), (0, 0), (1, 0), (2, 0)])
wall_3 = Wall([(0, 2), (0, 1), (0, 0), (1, 2), (2, 2)])
wall_4 = Wall([(2, 2), (2, 1), (0, 2), (1, 2), (2, 0)])
wall_5 = Wall([(0, 0), (0, 1), (1, 1)])
wall_6 = Wall([(0, 1), (1, 0), (1, 1)])
wall_7 = Wall([(0, 0), (0, 1), (1, 0)])
wall_8 = Wall([(0, 0), (1, 0), (1, 1)])
sprites = []
sprites.extend(wall_1.get_obsticle(self.grid_list[3][9], 36))
sprites.extend(wall_3.get_obsticle(self.grid_list[10][9], 36))
sprites.extend(wall_4.get_obsticle(self.grid_list[10][21], 36))
sprites.extend(wall_2.get_obsticle(self.grid_list[3][21], 36))
sprites.extend(wall_E.get_obsticle(self.grid_list[7][14], 36))
sprites.extend(wall_E.get_obsticle(self.grid_list[6][17], 36))
sprites.extend(wall_D.get_obsticle(self.grid_list[1][15], 36))
sprites.extend(wall_D.get_obsticle(self.grid_list[1][16], 36))
sprites.extend(wall_A.get_obsticle(self.grid_list[2][4], 36))
sprites.extend(wall_C.get_obsticle(self.grid_list[1][5], 36))
sprites.extend(wall_A.get_obsticle(self.grid_list[14][26], 36))
sprites.extend(wall_C.get_obsticle(self.grid_list[13][27], 36))
sprites.extend(wall_4.get_obsticle(self.grid_list[12][13], 36))
sprites.extend(wall_3.get_obsticle(self.grid_list[12][17], 36))
sprites.extend(wall_G.get_obsticle(self.grid_list[5][19], 36))
sprites.extend(wall_H.get_obsticle(self.grid_list[9][19], 36))
sprites.extend(wall_G.get_obsticle(self.grid_list[9][11], 36))
sprites.extend(wall_H.get_obsticle(self.grid_list[5][11], 36))
sprites.extend(wall_A.get_obsticle(self.grid_list[14][4], 36))
sprites.extend(wall_C.get_obsticle(self.grid_list[13][5], 36))
sprites.extend(wall_A.get_obsticle(self.grid_list[2][26], 36))
sprites.extend(wall_C.get_obsticle(self.grid_list[1][27], 36))
sprites.extend(wall_5.get_obsticle(self.grid_list[5][6], 36))
sprites.extend(wall_7.get_obsticle(self.grid_list[9][6], 36))
sprites.extend(wall_6.get_obsticle(self.grid_list[5][3], 36))
sprites.extend(wall_8.get_obsticle(self.grid_list[9][3], 36))
sprites.extend(wall_5.get_obsticle(self.grid_list[5][28], 36))
sprites.extend(wall_7.get_obsticle(self.grid_list[9][28], 36))
sprites.extend(wall_6.get_obsticle(self.grid_list[5][25], 36))
sprites.extend(wall_8.get_obsticle(self.grid_list[9][25], 36))
return sprites
def get_box_position(self):
pos_lst = []
for i in range(33):
pos_lst.append(self.grid_list[0][i])
for j in range(33):
pos_lst.append(self.grid_list[16][j])
for k in range(0, 14):
pos_lst.append(self.grid_list[7][k])
pos_lst.append(self.grid_list[8][k])
for h in range(19, 31):
pos_lst.append(self.grid_list[7][h])
pos_lst.append(self.grid_list[8][h])
for l in range(6, 27):
pos_lst.append(self.grid_list[15][l])
pos_lst.extend([self.grid_list[1][0], self.grid_list[1][1],
self.grid_list[1][2], self.grid_list[1][3],
self.grid_list[1][4], self.grid_list[1][18],
self.grid_list[1][19], self.grid_list[1][20],
self.grid_list[1][21], self.grid_list[1][22],
self.grid_list[1][23], self.grid_list[1][24],
self.grid_list[1][25], self.grid_list[1][26],
self.grid_list[1][28], self.grid_list[1][29],
self.grid_list[1][30], self.grid_list[1][31],
self.grid_list[1][32], self.grid_list[2][7],
self.grid_list[2][8], self.grid_list[2][9],
self.grid_list[2][10], self.grid_list[2][11],
self.grid_list[2][12], self.grid_list[2][13],
self.grid_list[2][14], self.grid_list[2][18],
self.grid_list[2][19], self.grid_list[2][20],
self.grid_list[2][21], self.grid_list[2][22],
self.grid_list[2][23], self.grid_list[2][24],
self.grid_list[2][25], self.grid_list[1][6],
self.grid_list[1][7], self.grid_list[1][8],
self.grid_list[1][9], self.grid_list[1][10],
self.grid_list[1][11], self.grid_list[1][12],
self.grid_list[1][13], self.grid_list[1][14],
self.grid_list[4][0], self.grid_list[4][1],
self.grid_list[4][2], self.grid_list[4][3],
self.grid_list[4][4], self.grid_list[4][5],
self.grid_list[4][6], self.grid_list[4][7],
self.grid_list[4][8], self.grid_list[4][10],
self.grid_list[4][22], self.grid_list[4][24],
self.grid_list[4][25], self.grid_list[4][26],
self.grid_list[4][27], self.grid_list[4][28],
self.grid_list[4][29], self.grid_list[4][29],
self.grid_list[4][30], self.grid_list[4][31],
self.grid_list[4][32], self.grid_list[11][0],
self.grid_list[11][1], self.grid_list[11][2],
self.grid_list[11][3], self.grid_list[11][4],
self.grid_list[11][5], self.grid_list[11][6],
self.grid_list[11][7], self.grid_list[11][8],
self.grid_list[11][10], self.grid_list[11][22],
self.grid_list[11][24], self.grid_list[11][25],
self.grid_list[11][26], self.grid_list[11][27],
self.grid_list[11][28], self.grid_list[11][29],
self.grid_list[11][30], self.grid_list[11][31],
self.grid_list[11][32], self.grid_list[12][0],
self.grid_list[12][1], self.grid_list[12][2],
self.grid_list[12][3], self.grid_list[12][4],
self.grid_list[12][5], self.grid_list[12][6],
self.grid_list[12][7], self.grid_list[12][8],
self.grid_list[12][24], self.grid_list[12][25],
self.grid_list[12][26], self.grid_list[12][27],
self.grid_list[12][28], self.grid_list[12][29],
self.grid_list[12][30], self.grid_list[12][31],
self.grid_list[12][32]])
return pos_lst
| 51.859155 | 71 | 0.519147 |
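# Hedged illustration of the wall-shape convention used in make_map() above:
# each Wall is a list of cell offsets, and get_obsticle() is assumed to turn
# those offsets into sprites anchored at a grid cell with a 36-pixel tile.
# The stand-in function below is NOT the real Grid.Wall implementation, and
# the (column, row) reading of the offsets is an assumption.
TILE = 36


def wall_cells(offsets, anchor_col, anchor_row):
    """Return absolute pixel positions for a wall anchored at a grid cell."""
    return [((anchor_col + dc) * TILE, (anchor_row + dr) * TILE) for dc, dr in offsets]


wall_A = [(0, 0), (1, 0), (2, 0)]  # the 3-tile bar shape from make_map()
print(wall_cells(wall_A, anchor_col=9, anchor_row=3))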
962cf04b5951e8242d273a0681e355a8cc06d6e5 | 8,887 | py | Python | pcdswidgets/icons/valves.py | klauer/pcdswidgets | a6f50fdb41b4d7a991c86fec9bce06a4f09a80af | [
"BSD-3-Clause-LBNL"
] | null | null | null | pcdswidgets/icons/valves.py | klauer/pcdswidgets | a6f50fdb41b4d7a991c86fec9bce06a4f09a80af | [
"BSD-3-Clause-LBNL"
] | null | null | null | pcdswidgets/icons/valves.py | klauer/pcdswidgets | a6f50fdb41b4d7a991c86fec9bce06a4f09a80af | [
"BSD-3-Clause-LBNL"
] | null | null | null | import math
from qtpy.QtCore import (QPointF, QRectF, Qt, Property, QLineF)
from qtpy.QtGui import (QPainterPath, QBrush, QColor, QPolygonF, QTransform)
from .base import BaseSymbolIcon
class PneumaticValveSymbolIcon(BaseSymbolIcon):
"""
A widget with a pneumatic valve symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def __init__(self, parent=None, **kwargs):
super(PneumaticValveSymbolIcon, self).__init__(parent, **kwargs)
self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
@Property(QBrush)
def interlockBrush(self):
return self._interlock_brush
@interlockBrush.setter
def interlockBrush(self, new_brush):
if new_brush != self._interlock_brush:
self._interlock_brush = new_brush
self.update()
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0.3))
path.lineTo(0, 0.9)
path.lineTo(1, 0.3)
path.lineTo(1, 0.9)
path.closeSubpath()
painter.drawPath(path)
painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.3))
painter.setBrush(self._interlock_brush)
painter.drawRect(QRectF(0.2, 0, 0.6, 0.3))
class FastShutterSymbolIcon(BaseSymbolIcon):
"""
A widget with a fast shutter symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0.3))
path.lineTo(0, 0.9)
path.lineTo(1, 0.3)
path.lineTo(1, 0.9)
path.closeSubpath()
painter.drawPath(path)
painter.drawLine(QPointF(0.4, 0), QPointF(0.5, 0.15))
painter.drawLine(QPointF(0.4, 0.10), QPointF(0.5, 0.25))
painter.drawLine(QPointF(0.5, 0.15), QPointF(0.6, 0))
painter.drawLine(QPointF(0.5, 0.25), QPointF(0.6, 0.10))
painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.0))
class RightAngleManualValveSymbolIcon(BaseSymbolIcon):
"""
A widget with a right angle manual valve symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0))
path.lineTo(1, 1)
path.lineTo(0.005, 1)
path.lineTo(0.5, 0.5)
path.lineTo(0, 0.9)
path.closeSubpath()
painter.drawPath(path)
painter.drawEllipse(QPointF(0.5, 0.5), 0.05, 0.05)
class ApertureValveSymbolIcon(BaseSymbolIcon):
"""
A widget with an aperture valve symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def __init__(self, parent=None, **kwargs):
super(ApertureValveSymbolIcon, self).__init__(parent, **kwargs)
self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
@Property(QBrush)
def interlockBrush(self):
return self._interlock_brush
@interlockBrush.setter
def interlockBrush(self, new_brush):
if new_brush != self._interlock_brush:
self._interlock_brush = new_brush
self.update()
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0.3))
path.lineTo(0, 0.9)
path.lineTo(1, 0.3)
path.lineTo(1, 0.9)
path.closeSubpath()
painter.drawPath(path)
painter.drawEllipse(QPointF(0.5, 0.6), 0.1, 0.1)
painter.drawLine(QPointF(0.5, 0.5), QPointF(0.5, 0.3))
painter.setBrush(self._interlock_brush)
painter.drawRect(QRectF(0.2, 0, 0.6, 0.3))
class NeedleValveSymbolIcon(BaseSymbolIcon):
"""
A widget with a needle valve symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def __init__(self, parent=None, **kwargs):
super(NeedleValveSymbolIcon, self).__init__(parent, **kwargs)
self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
@Property(QBrush)
def interlockBrush(self):
return self._interlock_brush
@interlockBrush.setter
def interlockBrush(self, new_brush):
if new_brush != self._interlock_brush:
self._interlock_brush = new_brush
self.update()
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0.3))
path.lineTo(0, 0.9)
path.lineTo(1, 0.3)
path.lineTo(1, 0.9)
path.closeSubpath()
painter.drawPath(path)
painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.15))
# Draw the arrow end-caps
painter.setBrush(QBrush(QColor(0, 0, 0)))
top_arrow_point = QPointF(0.65, 0.36)
arrow = QPolygonF(
[QPointF(-0.09, 0.0),
QPointF(-0.005, 0.0),
QPointF(-0.005, 0.8),
QPointF(0.005, 0.8),
QPointF(0.005, 0.0),
QPointF(0.09, 0.0),
QPointF(0.00, -0.25)]
)
t = QTransform()
t.rotate(35)
top_arrow_r = t.map(arrow)
arrow_l = top_arrow_r.translated(top_arrow_point)
painter.drawPolygon(arrow_l)
painter.setBrush(self._interlock_brush)
painter.drawRect(QRectF(0.3, 0, 0.4, 0.15))
class ProportionalValveSymbolIcon(BaseSymbolIcon):
"""
A widget with a proportional valve symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def __init__(self, parent=None, **kwargs):
super(ProportionalValveSymbolIcon, self).__init__(parent, **kwargs)
self._interlock_brush = QBrush(QColor(0, 255, 0), Qt.SolidPattern)
@Property(QBrush)
def interlockBrush(self):
return self._interlock_brush
@interlockBrush.setter
def interlockBrush(self, new_brush):
if new_brush != self._interlock_brush:
self._interlock_brush = new_brush
self.update()
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0.3))
path.lineTo(0, 0.9)
path.lineTo(1, 0.3)
path.lineTo(1, 0.9)
path.closeSubpath()
painter.drawPath(path)
painter.drawLine(QPointF(0.5, 0.6), QPointF(0.5, 0.15))
painter.setBrush(self._interlock_brush)
painter.drawRect(QRectF(0.35, 0, 0.3, 0.3))
# Draw the arrow end-caps
painter.setBrush(QBrush(QColor(0, 0, 0)))
top_arrow_point = QPointF(0.65, 0.42)
arrow = QPolygonF(
[QPointF(-0.07, 0.0),
QPointF(-0.005, 0.0),
QPointF(-0.005, 0.8),
QPointF(0.005, 0.8),
QPointF(0.005, 0.0),
QPointF(0.07, 0.0),
QPointF(0.00, -0.25)]
)
t = QTransform()
t.rotate(40)
top_arrow_r = t.map(arrow)
arrow_l = top_arrow_r.translated(top_arrow_point)
painter.drawPolygon(arrow_l)
t_x = 0.4
t_y = 0.05
painter.drawLines([QLineF(0.0+t_x, 0.0+t_y, 0.0+t_x, 0.2+t_y),
QLineF(0.0+t_x, 0.0+t_y, 0.1+t_x, 0.2+t_y),
QLineF(0.1+t_x, 0.2+t_y, 0.2+t_x, 0.0+t_y),
QLineF(0.2+t_x, 0.0+t_y, 0.2+t_x, 0.2+t_y)])
class ControlValveSymbolIcon(PneumaticValveSymbolIcon):
"""Icon for a Control Valve with readback"""
def draw_icon(self, painter):
pen = painter.pen()
pen.setWidthF(pen.width()*2)
pen.setCapStyle(Qt.FlatCap)
painter.setPen(pen)
# Circle parameters
radius = 0.3
center = (0.5, 1 - radius)
# Draw circle
painter.drawEllipse(QPointF(*center),
radius, radius)
# X pattern
quad = math.cos(math.radians(45)) * radius
painter.drawLine(QLineF(center[0] + quad,
center[1] + quad,
center[0] - quad,
center[1] - quad))
painter.drawLine(QLineF(center[0] + quad,
center[1] - quad,
center[0] - quad,
center[1] + quad))
# Interlock Icon
square_dims = (0.4, 0.2)
painter.drawLine(QPointF(center[0], center[1] - radius),
QPointF(center[0], square_dims[1]))
painter.setBrush(self._interlock_brush)
painter.drawRect(QRectF((1 - square_dims[0])/2., 0, *square_dims))
class ControlOnlyValveSymbolIcon(BaseSymbolIcon):
"""Icon for a Control Valve with no readback"""
def draw_icon(self, painter):
path = QPainterPath(QPointF(0, 0.3))
path.lineTo(0, 0.9)
path.lineTo(1, 0.3)
path.lineTo(1, 0.9)
path.closeSubpath()
painter.drawPath(path)
| 31.182456 | 76 | 0.579498 |
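# Hedged usage sketch: the icon classes above subclass BaseSymbolIcon, which is
# assumed to be a QWidget, so an icon can be shown directly and recoloured
# through the interlockBrush property defined on it. The Qt application
# boilerplate is an assumption.
import sys

from qtpy.QtGui import QBrush, QColor
from qtpy.QtWidgets import QApplication

from pcdswidgets.icons.valves import PneumaticValveSymbolIcon

app = QApplication(sys.argv)
icon = PneumaticValveSymbolIcon()
icon.interlockBrush = QBrush(QColor(255, 0, 0))  # uses the Property setter above
icon.resize(100, 100)
icon.show()
sys.exit(app.exec_())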
ec0e63d23c581c03939e4974709025752469b436 | 941 | py | Python | openDAM/model/StepCurve.py | bcornelusse/openDAM | ad74541d5b5955b6ebb7212aecc6942ff5f71575 | [
"BSD-2-Clause"
] | 15 | 2018-09-18T23:28:41.000Z | 2022-01-11T12:44:10.000Z | openDAM/model/StepCurve.py | bcornelusse/openDAM | ad74541d5b5955b6ebb7212aecc6942ff5f71575 | [
"BSD-2-Clause"
] | 2 | 2018-07-06T13:18:59.000Z | 2018-07-11T08:53:29.000Z | openDAM/model/StepCurve.py | bcornelusse/openDAM | ad74541d5b5955b6ebb7212aecc6942ff5f71575 | [
"BSD-2-Clause"
] | 3 | 2020-06-02T04:16:54.000Z | 2021-02-22T10:52:06.000Z | ## Step curve of the energy market.
from openDAM.model.Bid import *
from openDAM.model.SinglePeriodBid import *
class StepCurve(Bid):
def __init__(self, points=[], period=0, location=None):
"""
Volumes are negative for demand bids.
:param points: List of volume-price pairs. First pair must have a 0 volume.
:param period: Period of the bid.
:param location: Location of the curve
"""
Bid.__init__(self, location=location)
assert(len(points) > 0)
assert(points[0][0] == 0.0)
self.period = period
self.bids = self.__points2bids(points)
def __points2bids(self, points):
bids = []
while(len(points) > 1):
p1 = points.pop(0)
p2 = points.pop(0)
bids.append(SinglePeriodBid(p2[0] - p1[0], p1[1], self.period, self.location))
return bids
def collect(self):
return self.bids | 29.40625 | 90 | 0.5983 |
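# Hedged usage sketch for StepCurve. Because __points2bids pops two points per
# step, the points are assumed to come in (start, end) pairs per step, with the
# first volume equal to 0 and negative volumes for demand; the concrete numbers
# below are illustrative only.
from openDAM.model.StepCurve import StepCurve

demand = StepCurve(
    points=[(0.0, 100.0), (-50.0, 100.0),   # first step: 50 units demanded at price 100
            (-50.0, 80.0), (-80.0, 80.0)],  # second step: 30 more units at price 80
    period=1,
)
for bid in demand.collect():
    print(bid)  # one SinglePeriodBid per step (volume delta, step price, period)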
e5ac56cd17222f1d7390a3b6aba0a1da6faabca9 | 1,958 | py | Python | torchbiggraph/types.py | stillmatic/PyTorch-BigGraph | d7d6576281faa54ec5850e204ffc07b1268fdb04 | [
"BSD-3-Clause"
] | 3,189 | 2019-04-01T23:25:40.000Z | 2022-03-29T10:26:22.000Z | torchbiggraph/types.py | KonstantinKlepikov/PyTorch-BigGraph | db9d1478211dcf74a24b88dae1348588c5f645fb | [
"BSD-3-Clause"
] | 238 | 2019-04-02T07:19:55.000Z | 2022-03-22T11:03:06.000Z | torchbiggraph/types.py | KonstantinKlepikov/PyTorch-BigGraph | db9d1478211dcf74a24b88dae1348588c5f645fb | [
"BSD-3-Clause"
] | 457 | 2019-04-01T23:50:09.000Z | 2022-03-28T14:48:12.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, NamedTuple, TypeVar
import torch
# torch.FloatTensor and torch.LongTensor are defined as empty subclasses of
# torch.Tensor by PyTorch's type stub, which means that any operation on them
# returns plain untyped torch.Tensors. This makes it impossible to use the typed
# subtypes to annotate functions as they wouldn't get the type they expect.
# Thus for type checking to work functions must be annotated with torch.Tensor.
# To preserve and expose that information, at least to humans, we use more
# informative aliases for torch.Tensor. (PS: FloatTensor and LongTensor are in
# fact instances of the torch.tensortype metaclass).
ByteTensorType = torch.Tensor # uint8
CharTensorType = torch.Tensor # int8
FloatTensorType = torch.Tensor # float32
LongTensorType = torch.Tensor # int64
T = TypeVar("T")
class Side(Enum):
LHS = 0
RHS = 1
def pick(self, lhs: T, rhs: T) -> T:
if self is Side.LHS:
return lhs
elif self is Side.RHS:
return rhs
else:
raise NotImplementedError("Unknown side: %s" % self)
EntityName = str
Rank = int
GPURank = int
Partition = int
SubPartition = int
ModuleStateDict = Dict[str, torch.Tensor]
OptimizerStateDict = Dict[str, Any]
class Bucket(NamedTuple):
lhs: Partition
rhs: Partition
def get_partition(self, side: Side) -> Partition:
return side.pick(self.lhs, self.rhs)
def __str__(self) -> str:
return "( %d , %d )" % (self.lhs, self.rhs)
# Use as partition index for unpartitioned entities, which have a single partition.
UNPARTITIONED: Partition = 0
# Use as rank for single-machine training.
SINGLE_TRAINER: Rank = 0
| 28.376812 | 83 | 0.712972 |
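# Small, hedged example exercising the helpers defined in the file above:
# Side.pick selects the left- or right-hand value, and a Bucket is a named pair
# of partitions with get_partition() dispatching on a Side.
import torch

from torchbiggraph.types import Bucket, Side

lhs_embeddings = torch.zeros(2, 4)
rhs_embeddings = torch.ones(2, 4)

side = Side.LHS
print(side.pick(lhs_embeddings, rhs_embeddings))  # the zeros tensor

bucket = Bucket(lhs=0, rhs=2)
print(str(bucket))                     # "( 0 , 2 )"
print(bucket.get_partition(Side.RHS))  # 2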
fb776dc05e223f063b94c681d2fd9b93ad4731f3 | 46 | py | Python | venv/lib/python3.7/imp.py | OseiasBeu/PyECom | 2ea4e7e3be4ca015fb1bbc1083aa3f2d44accc5f | [
"CC0-1.0"
] | 1 | 2020-08-16T04:04:23.000Z | 2020-08-16T04:04:23.000Z | venv/lib/python3.7/imp.py | OseiasBeu/PyECom | 2ea4e7e3be4ca015fb1bbc1083aa3f2d44accc5f | [
"CC0-1.0"
] | null | null | null | venv/lib/python3.7/imp.py | OseiasBeu/PyECom | 2ea4e7e3be4ca015fb1bbc1083aa3f2d44accc5f | [
"CC0-1.0"
] | null | null | null | /home/oseiasbeu/anaconda3/lib/python3.7/imp.py | 46 | 46 | 0.826087 |
78e87c53aca97af19253848d2681c7d48cbe5635 | 184 | py | Python | pypint/multi_level_providers/__init__.py | DiMoser/PyPinT | 3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73 | [
"MIT"
] | null | null | null | pypint/multi_level_providers/__init__.py | DiMoser/PyPinT | 3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73 | [
"MIT"
] | null | null | null | pypint/multi_level_providers/__init__.py | DiMoser/PyPinT | 3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73 | [
"MIT"
] | null | null | null | # coding=utf-8
"""Multi-Level Providers for Iterative Time Solvers
.. moduleauthor:: Torbjörn Klatt <t.klatt@fz-juelich.de>
.. moduleauthor:: Dieter Moser <d.moser@fz-juelich.de>
"""
| 26.285714 | 56 | 0.728261 |
044b013039760ad1ec24b0bf0a7c270aa8637817 | 2,715 | py | Python | infcomp/protocol/MultivariateNormal.py | tuananhle7/pyprob | 6a46bc43a5af9f598b44bac7a74c80949c94fcec | [
"MIT"
] | 1 | 2018-02-11T16:32:23.000Z | 2018-02-11T16:32:23.000Z | infcomp/protocol/MultivariateNormal.py | tuananhle7/pyprob | 6a46bc43a5af9f598b44bac7a74c80949c94fcec | [
"MIT"
] | null | null | null | infcomp/protocol/MultivariateNormal.py | tuananhle7/pyprob | 6a46bc43a5af9f598b44bac7a74c80949c94fcec | [
"MIT"
] | 1 | 2020-06-24T15:29:15.000Z | 2020-06-24T15:29:15.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: protocol
import flatbuffers
class MultivariateNormal(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsMultivariateNormal(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MultivariateNormal()
x.Init(buf, n + offset)
return x
# MultivariateNormal
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# MultivariateNormal
def PriorMean(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .NDArray import NDArray
obj = NDArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# MultivariateNormal
def PriorCov(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .NDArray import NDArray
obj = NDArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# MultivariateNormal
def ProposalMean(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .NDArray import NDArray
obj = NDArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# MultivariateNormal
def ProposalVars(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .NDArray import NDArray
obj = NDArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
def MultivariateNormalStart(builder): builder.StartObject(4)
def MultivariateNormalAddPriorMean(builder, priorMean): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(priorMean), 0)
def MultivariateNormalAddPriorCov(builder, priorCov): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(priorCov), 0)
def MultivariateNormalAddProposalMean(builder, proposalMean): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(proposalMean), 0)
def MultivariateNormalAddProposalVars(builder, proposalVars): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(proposalVars), 0)
def MultivariateNormalEnd(builder): return builder.EndObject()
| 38.239437 | 165 | 0.672192 |
abc1cd272c9036b17829510f98567fbc20b7cba7 | 1,508 | py | Python | Classification Based Machine Learning for Algorithmic Trading/default_predictions/Naive Bayes.py | sangamkotalwar/Classification-and-Regression-based-ML-for-Finance | c1d73391c0ae8eba923da1e98eda3ede47013869 | [
"MIT"
] | 1 | 2018-04-02T11:53:48.000Z | 2018-04-02T11:53:48.000Z | Classification Based Machine Learning for Algorithmic Trading/default_predictions/Naive Bayes.py | sangamkotalwar/Classification-and-Regression-based-ML-for-Finance | c1d73391c0ae8eba923da1e98eda3ede47013869 | [
"MIT"
] | null | null | null | Classification Based Machine Learning for Algorithmic Trading/default_predictions/Naive Bayes.py | sangamkotalwar/Classification-and-Regression-based-ML-for-Finance | c1d73391c0ae8eba923da1e98eda3ede47013869 | [
"MIT"
] | 1 | 2019-07-03T08:47:53.000Z | 2019-07-03T08:47:53.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 22:02:07 2017
@author: HellRider
"""
import numpy as np
import pandas as pd
df = pd.read_csv("dataset_2.csv")
df['default'].describe()
print(sum(df['default'] == 0))
print(sum(df['default'] == 1))
X = df.iloc[:, 1:6].values
y = df['default'].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
shuffle_index = np.random.permutation(len(X_train))
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train, y_train)
# Cross Validation
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
cross_val_score(clf, X_train, y_train, cv=3, scoring='accuracy')
y_train_pred = cross_val_predict(clf, X_train, y_train, cv=3)
cm = confusion_matrix(y_train, y_train_pred)
print(cm)
from sklearn.metrics import precision_score, recall_score
print("precision score = {0:.4f}".format(precision_score(y_train, y_train_pred)))
print("recall score = {0:.4f}".format(recall_score(y_train, y_train_pred)))
| 30.16 | 81 | 0.745358 |
f0a6e8a74226cbbc4c6c7a34ca80d8feb0e68361 | 652 | py | Python | stockvis/extract_scripts/sort_by_time.py | visdata/DeepClue | 8d80ecd783919c97ba225db67664a0dfe5f3fb37 | [
"Apache-2.0"
] | 1 | 2020-12-06T08:04:32.000Z | 2020-12-06T08:04:32.000Z | stockvis/extract_scripts/sort_by_time.py | visdata/DeepClue | 8d80ecd783919c97ba225db67664a0dfe5f3fb37 | [
"Apache-2.0"
] | null | null | null | stockvis/extract_scripts/sort_by_time.py | visdata/DeepClue | 8d80ecd783919c97ba225db67664a0dfe5f3fb37 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
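# Reads "|||"-delimited records, sorts them by the timestamp in their second
# field, and writes back only the records whose title starts with "Update".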
import sys
import traceback
import time
from datetime import timedelta
from time import mktime
from datetime import datetime
import os
from dateutil.parser import parse
if len(sys.argv) != 3:
print 'Usage: %s input output' % sys.argv[0]
sys.exit(0)
input_file = open(sys.argv[1],"r")
output_file = open(sys.argv[2],"w")
titles = [ line.strip().split("|||") for line in input_file ]
sorted_titles = sorted(titles, key=lambda dt: time.strptime(dt[1].strip(),"%Y-%m-%d %H:%M:%S"))
for title in sorted_titles:
if title[0].strip().find("Update") ==0:
print title
output_file.write(" ||| ".join(title)+"\n")
| 26.08 | 95 | 0.67638 |
34fe3f2455205a426b236f44f07c6dc1cb21b6d9 | 484 | py | Python | regexlib/2021-5-15/python_re2_test_file/regexlib_1840.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/python_re2_test_file/regexlib_1840.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/python_re2_test_file/regexlib_1840.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 1840
# (\d*)'*-*(\d*)/*(\d*)"
# EXPONENT
# nums:5
# EXPONENT AttackString:""+"1"*256+"! _1SLQ_1"
import re2 as re
from time import perf_counter
regex = """(\d*)'*-*(\d*)/*(\d*)""""
REGEX = re.compile(regex)
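# Time REGEX.search on progressively longer attack strings; this is the
# benchmark's way of checking whether matching time blows up with input size.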
for i in range(0, 150000):
ATTACK = "" + "1" * i * 1 + "! _1SLQ_1"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | 25.473684 | 46 | 0.568182 |
a0420edcfc0f00efbf19ec6d5d341114dbd6c2a7 | 3,282 | py | Python | htdocs/plotting/auto/scripts/p67.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | htdocs/plotting/auto/scripts/p67.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | htdocs/plotting/auto/scripts/p67.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """Wind Speed by Temperature"""
import datetime
import calendar
import matplotlib.patheffects as PathEffects
import psycopg2.extras
import pandas as pd
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.network import Table as NetworkTable
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['cache'] = 86400
desc['description'] = """This plot displays the frequency of having a
reported wind speed be above a given threshold by reported temperature
and by month."""
desc['arguments'] = [
dict(type='zstation', name='zstation', default='DSM',
network='IA_ASOS', label='Select Station:'),
dict(type='int', name='threshold', default=10,
label='Wind Speed Threshold (knots)'),
dict(type='month', name='month', default='3',
label='Select Month:'),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn('asos')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx['zstation']
network = ctx['network']
threshold = ctx['threshold']
month = ctx['month']
nt = NetworkTable(network)
cursor.execute("""
WITH data as (
SELECT tmpf::int as t, sknt from alldata where station = %s
and extract(month from valid) = %s and tmpf is not null
and sknt >= 0)
SELECT t, sum(case when sknt >= %s then 1 else 0 end), count(*) from data
GROUP by t ORDER by t ASC
""", (station, month, threshold))
tmpf = []
events = []
total = []
hits = 0
cnt = 0
for row in cursor:
if row[2] < 3:
continue
tmpf.append(row[0])
hits += row[1]
cnt += row[2]
events.append(row[1])
total.append(row[2])
df = pd.DataFrame(dict(tmpf=pd.Series(tmpf), events=pd.Series(events),
total=pd.Series(total)))
(fig, ax) = plt.subplots(1, 1)
ax.bar(tmpf, df['events'] / df['total'] * 100., width=1.1, ec='green',
fc='green')
avgval = hits / float(cnt) * 100.
ax.axhline(avgval, lw=2, zorder=2)
txt = ax.text(tmpf[10], avgval + 1, "Average: %.1f%%" % (avgval,),
va='bottom', zorder=2, color='yellow', fontsize=14)
txt.set_path_effects([PathEffects.withStroke(linewidth=2,
foreground="k")])
ax.grid(True, zorder=11)
ax.set_title(("%s [%s]\nFrequency of %s+ knot Wind Speeds by Temperature "
"for %s (%s-%s)\n"
"(must have 3+ hourly observations at the given temperature)"
) % (nt.sts[station]['name'], station, threshold,
calendar.month_name[month],
nt.sts[station]['archive_begin'].year,
datetime.datetime.now().year), size=10)
ax.set_ylabel("Frequency [%]")
ax.set_ylim(0, 100)
ax.set_xlim(min(tmpf)-3, max(tmpf)+3)
ax.set_xlabel(r"Air Temperature $^\circ$F")
ax.set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
return fig, df
if __name__ == '__main__':
plotter(dict())
| 33.151515 | 79 | 0.585619 |
bd2e36ca1f925593c1d67d1823717b35bf8e289f | 2,152 | py | Python | mopack/arguments.py | jimporter/mopack | e912be11528645f5463e7873b5470c420b698418 | [
"BSD-3-Clause"
] | null | null | null | mopack/arguments.py | jimporter/mopack | e912be11528645f5463e7873b5470c420b698418 | [
"BSD-3-Clause"
] | 17 | 2020-07-23T20:28:36.000Z | 2022-03-04T04:33:55.000Z | mopack/arguments.py | jimporter/mopack | e912be11528645f5463e7873b5470c420b698418 | [
"BSD-3-Clause"
] | 1 | 2020-11-04T03:51:20.000Z | 2020-11-04T03:51:20.000Z | from argparse import *
import yaml
from .iterutils import merge_into_dict
_ArgumentParser = ArgumentParser
_Action = Action
# Add some simple wrappers to make it easier to specify shell-completion
# behaviors.
def _add_complete(argument, complete):
if complete is not None:
argument.complete = complete
return argument
class Action(_Action):
def __init__(self, *args, complete=None, **kwargs):
super().__init__(*args, **kwargs)
_add_complete(self, complete)
class ArgumentParser(_ArgumentParser):
@staticmethod
def _wrap_complete(action):
def wrapper(*args, complete=None, **kwargs):
return _add_complete(action(*args, **kwargs), complete)
return wrapper
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for k, v in self._registries['action'].items():
self._registries['action'][k] = self._wrap_complete(v)
class KeyValueAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
key, value = values.split('=', 1)
except ValueError:
raise ArgumentError(self, 'expected TYPE=PATH')
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, {})
getattr(namespace, self.dest)[key] = value
class ConfigOptionAction(Action):
def __init__(self, *args, key=None, **kwargs):
super().__init__(*args, **kwargs)
self.key = key or []
def __call__(self, parser, namespace, values, option_string=None):
try:
key, value = values.split('=', 1)
except ValueError:
raise ArgumentError(self, 'expected OPTION=VALUE')
key = self.key + key.split(':')
try:
value = yaml.safe_load(value)
except yaml.parser.ParserError:
raise ArgumentError(self, 'invalid yaml: {!r}'.format(value))
for i in reversed(key):
value = {i: value}
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, {})
merge_into_dict(getattr(namespace, self.dest), value)
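# Illustrative sketch only (not part of the original module): one way these
# pieces could be wired together. The option names below are invented for the
# example; `complete` is simply forwarded to the wrapped action class.
def _completion_example():
    parser = ArgumentParser(prog='example')
    parser.add_argument('--directory', complete='directory')
    parser.add_argument('-P', '--deploy-path', action=KeyValueAction,
                        dest='deploy_paths', metavar='TYPE=PATH')
    parser.add_argument('-o', '--option', action=ConfigOptionAction,
                        dest='options', metavar='OPTION=VALUE')
    args = parser.parse_args(['-P', 'prefix=/usr/local',
                              '-o', 'builders:ninja:toolchain=clang'])
    # args.deploy_paths == {'prefix': '/usr/local'}
    # args.options == {'builders': {'ninja': {'toolchain': 'clang'}}}
    return args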
| 29.479452 | 73 | 0.631506 |
3ae27059367239a3cc223eeb25128133d3a5af17 | 4,290 | py | Python | greendoge/protocols/full_node_protocol.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 44 | 2021-07-06T10:09:06.000Z | 2022-02-09T04:30:14.000Z | greendoge/protocols/full_node_protocol.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 67 | 2021-07-06T11:57:18.000Z | 2022-02-02T16:14:15.000Z | greendoge/protocols/full_node_protocol.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 16 | 2021-07-06T10:36:37.000Z | 2022-03-15T08:35:16.000Z | from dataclasses import dataclass
from typing import List, Optional
from greendoge.types.blockchain_format.sized_bytes import bytes32
from greendoge.types.blockchain_format.vdf import VDFInfo, VDFProof
from greendoge.types.end_of_slot_bundle import EndOfSubSlotBundle
from greendoge.types.full_block import FullBlock
from greendoge.types.peer_info import TimestampedPeerInfo
from greendoge.types.spend_bundle import SpendBundle
from greendoge.types.unfinished_block import UnfinishedBlock
from greendoge.types.weight_proof import WeightProof
from greendoge.util.ints import uint8, uint32, uint64, uint128
from greendoge.util.streamable import Streamable, streamable
"""
Protocol between full nodes.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeak(Streamable):
header_hash: bytes32
height: uint32
weight: uint128
fork_point_with_previous_peak: uint32
unfinished_reward_block_hash: bytes32
@dataclass(frozen=True)
@streamable
class NewTransaction(Streamable):
transaction_id: bytes32
cost: uint64
fees: uint64
@dataclass(frozen=True)
@streamable
class RequestTransaction(Streamable):
transaction_id: bytes32
@dataclass(frozen=True)
@streamable
class RespondTransaction(Streamable):
transaction: SpendBundle
@dataclass(frozen=True)
@streamable
class RequestProofOfWeight(Streamable):
total_number_of_blocks: uint32
tip: bytes32
@dataclass(frozen=True)
@streamable
class RespondProofOfWeight(Streamable):
wp: WeightProof
tip: bytes32
@dataclass(frozen=True)
@streamable
class RequestBlock(Streamable):
height: uint32
include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RejectBlock(Streamable):
height: uint32
@dataclass(frozen=True)
@streamable
class RequestBlocks(Streamable):
start_height: uint32
end_height: uint32
include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RespondBlocks(Streamable):
start_height: uint32
end_height: uint32
blocks: List[FullBlock]
@dataclass(frozen=True)
@streamable
class RejectBlocks(Streamable):
start_height: uint32
end_height: uint32
@dataclass(frozen=True)
@streamable
class RespondBlock(Streamable):
block: FullBlock
@dataclass(frozen=True)
@streamable
class NewUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RequestUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RespondUnfinishedBlock(Streamable):
unfinished_block: UnfinishedBlock
@dataclass(frozen=True)
@streamable
class NewSignagePointOrEndOfSubSlot(Streamable):
prev_challenge_hash: Optional[bytes32]
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RequestSignagePointOrEndOfSubSlot(Streamable):
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RespondSignagePoint(Streamable):
index_from_challenge: uint8
challenge_chain_vdf: VDFInfo
challenge_chain_proof: VDFProof
reward_chain_vdf: VDFInfo
reward_chain_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RespondEndOfSubSlot(Streamable):
end_of_slot_bundle: EndOfSubSlotBundle
@dataclass(frozen=True)
@streamable
class RequestMempoolTransactions(Streamable):
filter: bytes
@dataclass(frozen=True)
@streamable
class NewCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RequestCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RespondCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
vdf_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RequestPeers(Streamable):
"""
Return full list of peers
"""
@dataclass(frozen=True)
@streamable
class RespondPeers(Streamable):
peer_list: List[TimestampedPeerInfo]
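# Illustrative sketch only (not part of the original module): each message is
# a frozen streamable dataclass, so building one is just filling in its fields.
# The numbers below are made up.
def _example_request_blocks() -> RequestBlocks:
    return RequestBlocks(
        start_height=uint32(1000),
        end_height=uint32(1031),
        include_transaction_block=True,
    )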
| 21.029412 | 116 | 0.791142 |
4955d6d5e198a490b94b088dfd09ec3fa2e24aca | 3,950 | py | Python | pipng/multiplexer1.py | nwiizo/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | 1 | 2017-01-11T06:12:24.000Z | 2017-01-11T06:12:24.000Z | pipng/multiplexer1.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | pipng/multiplexer1.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import collections
import random
random.seed(917) # Not truly random for ease of regression testing
def main():
totalCounter = Counter()
carCounter = Counter("cars")
commercialCounter = Counter("vans", "trucks")
multiplexer = Multiplexer()
for eventName, callback in (("cars", carCounter),
("vans", commercialCounter), ("trucks", commercialCounter)):
multiplexer.connect(eventName, callback)
multiplexer.connect(eventName, totalCounter)
for event in generate_random_events(100):
multiplexer.send(event)
print("After 100 active events: cars={} vans={} trucks={} total={}"
.format(carCounter.cars, commercialCounter.vans,
commercialCounter.trucks, totalCounter.count))
multiplexer.state = Multiplexer.DORMANT
for event in generate_random_events(100):
multiplexer.send(event)
print("After 100 dormant events: cars={} vans={} trucks={} total={}"
.format(carCounter.cars, commercialCounter.vans,
commercialCounter.trucks, totalCounter.count))
multiplexer.state = Multiplexer.ACTIVE
for event in generate_random_events(100):
multiplexer.send(event)
print("After 100 active events: cars={} vans={} trucks={} total={}"
.format(carCounter.cars, commercialCounter.vans,
commercialCounter.trucks, totalCounter.count))
def generate_random_events(count):
vehicles = (("cars",) * 11) + (("vans",) * 3) + ("trucks",)
for _ in range(count):
yield Event(random.choice(vehicles), random.randint(1, 3))
class Counter:
def __init__(self, *names):
self.anonymous = not bool(names)
if self.anonymous:
self.count = 0
else:
for name in names:
if not name.isidentifier():
raise ValueError("names must be valid identifiers")
setattr(self, name, 0)
def __call__(self, event):
if self.anonymous:
self.count += event.count
else:
count = getattr(self, event.name)
setattr(self, event.name, count + event.count)
class Event:
def __init__(self, name, count=1):
if not name.isidentifier():
raise ValueError("names must be valid identifiers")
self.name = name
self.count = count
class Multiplexer:
ACTIVE, DORMANT = ("ACTIVE", "DORMANT")
def __init__(self):
self.callbacksForEvent = collections.defaultdict(list)
self.state = Multiplexer.ACTIVE
def connect(self, eventName, callback):
if self.state == Multiplexer.ACTIVE:
self.callbacksForEvent[eventName].append(callback)
def disconnect(self, eventName, callback=None):
if self.state == Multiplexer.ACTIVE:
if callback is None:
del self.callbacksForEvent[eventName]
else:
self.callbacksForEvent[eventName].remove(callback)
def send(self, event):
if self.state == Multiplexer.ACTIVE:
for callback in self.callbacksForEvent.get(event.name, ()):
callback(event)
if __name__ == "__main__":
main()
| 33.760684 | 75 | 0.627342 |
0bbc9ea3e9fb55bb32aeb3815a4c69d782252f0e | 2,858 | py | Python | ML/Advanced.py | ronnie7z7z/Autumn-of-Automation-Ronit-Shukla | 3bdafcb512ea40b7caccf9a1b49653cce86f9291 | [
"MIT"
] | null | null | null | ML/Advanced.py | ronnie7z7z/Autumn-of-Automation-Ronit-Shukla | 3bdafcb512ea40b7caccf9a1b49653cce86f9291 | [
"MIT"
] | null | null | null | ML/Advanced.py | ronnie7z7z/Autumn-of-Automation-Ronit-Shukla | 3bdafcb512ea40b7caccf9a1b49653cce86f9291 | [
"MIT"
] | null | null | null | #This code is supposed to be the ideal multi-feature classification model
#But it is incomplete, I'm adding to it at the moment as I gather some useful methods that I can find
import os
import numpy as np
import glob
import shutil
import scipy
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#Storing address of folder containing the images
root = os.path.join(os.getcwd())
base_dir = os.path.join(root,'Advanced_Assignment_Dataset')
#Conversion of pgm files to jpg or png
# for filename in os.listdir(base_dir):
# if filename.endswith(".pgm"):
# os.chdir(base_dir)
# name = filename[:-4]
# os.rename(name + '.pgm',name + '.png')
# os.chdir(root)
# continue
# else:
# continue
#Defining different levels of labels
classes1 = ['left', 'right', 'straight', 'up']
classes2 = ['angry','happy','neutral','sad']
classes3 = ['open','sunglasses']
tp = ['train','val']
#Creation and distribution of images in their respective directories
for cl1 in classes1:
for cl2 in classes2:
for cl3 in classes3:
code = base_dir +'/'+ cl1 +'_'+ cl2 +'_'+ cl3
images = glob.glob(code + '_[1-9].pgm') + glob.glob(code + '_[1-9]?.pgm') #The duplicates are eliminated here itself! hehe!
num_train = int(round(len(images)*0.75))
train, val = images[:num_train], images[num_train:]
for t in train:
if not os.path.exists(os.path.join(base_dir, 'train', cl1, cl2, cl3)):
os.makedirs(os.path.join(base_dir, 'train', cl1, cl2, cl3))
shutil.move(t, os.path.join(base_dir, 'train', cl1, cl2, cl3))
for v in val:
if not os.path.exists(os.path.join(base_dir, 'val', cl1, cl2, cl3)):
os.makedirs(os.path.join(base_dir, 'val', cl1, cl2, cl3))
shutil.move(v, os.path.join(base_dir, 'val', cl1, cl2, cl3))
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'val')
batchsize = 100
ImgShape = 100
image_gen_train = ImageDataGenerator( #No need of different orientations/zoom or such as people are almost in the same position and dist from the camera
rescale=1./255
)
train_data_gen = image_gen_train.flow_from_directory(
batch_size=batchsize,
directory=train_dir,
shuffle=True,
target_size=(ImgShape,ImgShape),
class_mode='sparse'
)
image_gen_val = ImageDataGenerator(rescale=1./255)
val_data_gen = image_gen_val.flow_from_directory(batch_size=batchsize,
directory=val_dir,
target_size=(ImgShape, ImgShape),
class_mode='sparse'
)
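#The original file stops after building the generators. Below is only a rough
#sketch (my assumption, not the author's code) of a small CNN that could
#consume them; layer sizes and hyper-parameters are placeholders.
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(ImgShape, ImgShape, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.2),
    Dense(train_data_gen.num_classes, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy', #matches class_mode='sparse'
              metrics=['accuracy'])
#model.fit(train_data_gen, validation_data=val_data_gen, epochs=10)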
| 33.232558 | 152 | 0.6676 |
a8c01ae5bfee7bf364de9465066d8bc4cf92479e | 1,397 | py | Python | context_click_example.py | itsvinayak/selenium-python-examples | 4de79f6189c2aaf1ec27d7f17675c4342c2a95cc | [
"MIT"
] | null | null | null | context_click_example.py | itsvinayak/selenium-python-examples | 4de79f6189c2aaf1ec27d7f17675c4342c2a95cc | [
"MIT"
] | null | null | null | context_click_example.py | itsvinayak/selenium-python-examples | 4de79f6189c2aaf1ec27d7f17675c4342c2a95cc | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver import ActionChains
## constants
username = "<YOUR_LAMBDATEST_USERNAME>"
access_key = "<YOUR__LAMBDATEST_ACCESS_KEY>"
## your desired capabilities from lambdatest
desired_caps = {
"build": "Build_Name",
"name": "Test_Name",
"platform": "Windows 10",
"browserName": "Chrome",
"version": "92.0",
"selenium_version": "3.13.0",
"geoLocation": "IN",
"chrome.driver": "91.0",
}
"""
Setup remote driver
-------
username and access_key can be found on the lt platform
"""
driver = webdriver.Remote(
command_executor="https://{}:{}@hub.lambdatest.com/wd/hub".format(
username, access_key
),
desired_capabilities=desired_caps,
)
## to maximize the browser window
driver.maximize_window()
## opening webpage
driver.get("https://www.lambdatest.com/")
## selecting element
email_input_field = driver.find_element_by_xpath(
"//body[1]/div[1]/div[1]/section[1]/div[1]/div[1]/div[1]/div[1]/form[1]/input[1]"
)
## creating a Action chain object
action = ActionChains(driver)
## calling function to right click on email form
action.context_click(email_input_field)
## perform the action
action.perform()
## to save screenshot for verification
## this screenshort can be found in you local storage
driver.save_screenshot("screenshot.png")
# to close the browser
driver.close()
| 24.086207 | 85 | 0.702935 |
6515a6c2d89327ba89e03f24a0c3cea2a0b8b6a3 | 9,763 | py | Python | CIM100/IEC61970/Informative/InfWork/DesignLocation.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM100/IEC61970/Informative/InfWork/DesignLocation.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM100/IEC61970/Informative/InfWork/DesignLocation.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM100.IEC61970.Base.Core.IdentifiedObject import IdentifiedObject
class DesignLocation(IdentifiedObject):
"""A logical part of the design (e.g., pole and all equipment on a pole). This includes points and spans.A logical part of the design (e.g., pole and all equipment on a pole). This includes points and spans.
"""
def __init__(self, spanLength=0.0, status=None, MaterialItems=None, Designs=None, DesignLocationCUs=None, WorkLocations=None, Diagrams=None, MiscCostItems=None, ErpBomItemDatas=None, ConditionFactors=None, *args, **kw_args):
"""Initialises a new 'DesignLocation' instance.
        @param spanLength: The length of the span from the previous pole to this pole.
@param status:
@param MaterialItems:
@param Designs:
@param DesignLocationCUs:
@param WorkLocations:
@param Diagrams:
@param MiscCostItems:
@param ErpBomItemDatas:
@param ConditionFactors:
"""
        #: The length of the span from the previous pole to this pole.
self.spanLength = spanLength
self.status = status
self._MaterialItems = []
self.MaterialItems = [] if MaterialItems is None else MaterialItems
self._Designs = []
self.Designs = [] if Designs is None else Designs
self._DesignLocationCUs = []
self.DesignLocationCUs = [] if DesignLocationCUs is None else DesignLocationCUs
self._WorkLocations = []
self.WorkLocations = [] if WorkLocations is None else WorkLocations
self._Diagrams = []
self.Diagrams = [] if Diagrams is None else Diagrams
self._MiscCostItems = []
self.MiscCostItems = [] if MiscCostItems is None else MiscCostItems
self._ErpBomItemDatas = []
self.ErpBomItemDatas = [] if ErpBomItemDatas is None else ErpBomItemDatas
self._ConditionFactors = []
self.ConditionFactors = [] if ConditionFactors is None else ConditionFactors
super(DesignLocation, self).__init__(*args, **kw_args)
_attrs = ["spanLength"]
_attr_types = {"spanLength": float}
_defaults = {"spanLength": 0.0}
_enums = {}
_refs = ["status", "MaterialItems", "Designs", "DesignLocationCUs", "WorkLocations", "Diagrams", "MiscCostItems", "ErpBomItemDatas", "ConditionFactors"]
_many_refs = ["MaterialItems", "Designs", "DesignLocationCUs", "WorkLocations", "Diagrams", "MiscCostItems", "ErpBomItemDatas", "ConditionFactors"]
status = None
def getMaterialItems(self):
return self._MaterialItems
def setMaterialItems(self, value):
for x in self._MaterialItems:
x.DesignLocation = None
for y in value:
y._DesignLocation = self
self._MaterialItems = value
MaterialItems = property(getMaterialItems, setMaterialItems)
def addMaterialItems(self, *MaterialItems):
for obj in MaterialItems:
obj.DesignLocation = self
def removeMaterialItems(self, *MaterialItems):
for obj in MaterialItems:
obj.DesignLocation = None
def getDesigns(self):
return self._Designs
def setDesigns(self, value):
for p in self._Designs:
filtered = [q for q in p.DesignLocations if q != self]
            p._DesignLocations = filtered
for r in value:
if self not in r._DesignLocations:
r._DesignLocations.append(self)
self._Designs = value
Designs = property(getDesigns, setDesigns)
def addDesigns(self, *Designs):
for obj in Designs:
if self not in obj._DesignLocations:
obj._DesignLocations.append(self)
self._Designs.append(obj)
def removeDesigns(self, *Designs):
for obj in Designs:
if self in obj._DesignLocations:
obj._DesignLocations.remove(self)
self._Designs.remove(obj)
def getDesignLocationCUs(self):
return self._DesignLocationCUs
def setDesignLocationCUs(self, value):
for x in self._DesignLocationCUs:
x.DesignLocation = None
for y in value:
y._DesignLocation = self
self._DesignLocationCUs = value
DesignLocationCUs = property(getDesignLocationCUs, setDesignLocationCUs)
def addDesignLocationCUs(self, *DesignLocationCUs):
for obj in DesignLocationCUs:
obj.DesignLocation = self
def removeDesignLocationCUs(self, *DesignLocationCUs):
for obj in DesignLocationCUs:
obj.DesignLocation = None
def getWorkLocations(self):
return self._WorkLocations
def setWorkLocations(self, value):
for p in self._WorkLocations:
filtered = [q for q in p.DesignLocations if q != self]
            p._DesignLocations = filtered
for r in value:
if self not in r._DesignLocations:
r._DesignLocations.append(self)
self._WorkLocations = value
WorkLocations = property(getWorkLocations, setWorkLocations)
def addWorkLocations(self, *WorkLocations):
for obj in WorkLocations:
if self not in obj._DesignLocations:
obj._DesignLocations.append(self)
self._WorkLocations.append(obj)
def removeWorkLocations(self, *WorkLocations):
for obj in WorkLocations:
if self in obj._DesignLocations:
obj._DesignLocations.remove(self)
self._WorkLocations.remove(obj)
def getDiagrams(self):
return self._Diagrams
def setDiagrams(self, value):
for p in self._Diagrams:
filtered = [q for q in p.DesignLocations if q != self]
            p._DesignLocations = filtered
for r in value:
if self not in r._DesignLocations:
r._DesignLocations.append(self)
self._Diagrams = value
Diagrams = property(getDiagrams, setDiagrams)
def addDiagrams(self, *Diagrams):
for obj in Diagrams:
if self not in obj._DesignLocations:
obj._DesignLocations.append(self)
self._Diagrams.append(obj)
def removeDiagrams(self, *Diagrams):
for obj in Diagrams:
if self in obj._DesignLocations:
obj._DesignLocations.remove(self)
self._Diagrams.remove(obj)
def getMiscCostItems(self):
return self._MiscCostItems
def setMiscCostItems(self, value):
for x in self._MiscCostItems:
x.DesignLocation = None
for y in value:
y._DesignLocation = self
self._MiscCostItems = value
MiscCostItems = property(getMiscCostItems, setMiscCostItems)
def addMiscCostItems(self, *MiscCostItems):
for obj in MiscCostItems:
obj.DesignLocation = self
def removeMiscCostItems(self, *MiscCostItems):
for obj in MiscCostItems:
obj.DesignLocation = None
def getErpBomItemDatas(self):
return self._ErpBomItemDatas
def setErpBomItemDatas(self, value):
for x in self._ErpBomItemDatas:
x.DesignLocation = None
for y in value:
y._DesignLocation = self
self._ErpBomItemDatas = value
ErpBomItemDatas = property(getErpBomItemDatas, setErpBomItemDatas)
def addErpBomItemDatas(self, *ErpBomItemDatas):
for obj in ErpBomItemDatas:
obj.DesignLocation = self
def removeErpBomItemDatas(self, *ErpBomItemDatas):
for obj in ErpBomItemDatas:
obj.DesignLocation = None
def getConditionFactors(self):
return self._ConditionFactors
def setConditionFactors(self, value):
for p in self._ConditionFactors:
filtered = [q for q in p.DesignLocations if q != self]
            p._DesignLocations = filtered
for r in value:
if self not in r._DesignLocations:
r._DesignLocations.append(self)
self._ConditionFactors = value
ConditionFactors = property(getConditionFactors, setConditionFactors)
def addConditionFactors(self, *ConditionFactors):
for obj in ConditionFactors:
if self not in obj._DesignLocations:
obj._DesignLocations.append(self)
self._ConditionFactors.append(obj)
def removeConditionFactors(self, *ConditionFactors):
for obj in ConditionFactors:
if self in obj._DesignLocations:
obj._DesignLocations.remove(self)
self._ConditionFactors.remove(obj)
| 35.761905 | 228 | 0.662092 |
401e4c30ee328ec730070fe3e48cfd834cd79c10 | 71,578 | py | Python | my_env/Lib/site-packages/sklearn/linear_model/_least_angle.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | null | null | null | my_env/Lib/site-packages/sklearn/linear_model/_least_angle.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | 7 | 2021-06-08T21:46:24.000Z | 2022-03-12T00:35:31.000Z | my_env/Lib/site-packages/sklearn/linear_model/_least_angle.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | null | null | null | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from joblib import Parallel, delayed
from ._base import LinearModel
from ..base import RegressorMixin, MultiOutputMixin
# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs'
from ..utils import arrayfuncs, as_float_array # type: ignore
from ..utils import check_random_state
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..utils.validation import _deprecate_positional_args
SOLVE_TRIANGULAR_ARGS = {'check_finite': False}
@_deprecate_positional_args
def lars_path(X, y, Xy=None, *, Gram=None, max_iter=500, alpha_min=0,
method='lar', copy_X=True, eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Input data. Note that if X is None then the Gram matrix must be
specified, i.e., cannot be None or False.
y : None or array-like of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto', array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. By default, ``np.finfo(np.float).eps`` is used.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lars_path_gram
lasso_path
lasso_path_gram
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if X is None and Gram is not None:
raise ValueError(
'X cannot be None if Gram is not None'
'Use lars_path_gram to avoid passing X and y.'
)
return _lars_path_solver(
X=X, y=y, Xy=Xy, Gram=Gram, n_samples=None, max_iter=max_iter,
alpha_min=alpha_min, method=method, copy_X=copy_X,
eps=eps, copy_Gram=copy_Gram, verbose=verbose, return_path=return_path,
return_n_iter=return_n_iter, positive=positive)
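# Illustrative usage sketch (an addition, not part of the scikit-learn
# source): with the defaults, lars_path returns (alphas, active, coefs).
# The toy data below is made up.
def _lars_path_usage_example():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(20, 5)
    y_demo = X_demo[:, 0] - 2 * X_demo[:, 2] + 0.01 * rng.randn(20)
    alphas, active, coefs = lars_path(X_demo, y_demo, method='lasso')
    return alphas, active, coefs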
@_deprecate_positional_args
def lars_path_gram(Xy, Gram, *, n_samples, max_iter=500, alpha_min=0,
method='lar', copy_X=True, eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""lars_path in the sufficient stats mode [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
Xy : array-like of shape (n_samples,) or (n_samples, n_targets)
Xy = np.dot(X.T, y).
Gram : array-like of shape (n_features, n_features)
Gram = np.dot(X.T * X).
n_samples : int or float
Equivalent size of sample.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. By default, ``np.finfo(np.float).eps`` is used.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lars_path
lasso_path
lasso_path_gram
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
return _lars_path_solver(
X=None, y=None, Xy=Xy, Gram=Gram, n_samples=n_samples,
max_iter=max_iter, alpha_min=alpha_min, method=method,
copy_X=copy_X, eps=eps, copy_Gram=copy_Gram,
verbose=verbose, return_path=return_path,
return_n_iter=return_n_iter, positive=positive)
def _lars_path_solver(X, y, Xy=None, Gram=None, n_samples=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps, copy_Gram=True, verbose=0,
return_path=True, return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or ndarray, of shape (n_samples, n_features)
Input data. Note that if X is None then Gram must be specified,
i.e., cannot be None or False.
y : None or ndarray, of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
n_samples : int or float, default=None
Equivalent size of sample.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. By default, ``np.finfo(np.float).eps`` is used
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if method == 'lar' and positive:
raise ValueError(
"Positive constraint not supported for 'lar' "
"coding method."
)
n_samples = n_samples if n_samples is not None else y.size
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if Gram is None or Gram is False:
Gram = None
if X is None:
raise ValueError('X and Gram cannot both be unspecified.')
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif isinstance(Gram, str) and Gram == 'auto' or Gram is True:
if Gram is True or X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
else:
Gram = None
elif copy_Gram:
Gram = Gram.copy()
if Gram is None:
n_features = X.shape[1]
else:
n_features = Cov.shape[0]
if Gram.shape != (n_features, n_features):
raise ValueError('The shapes of the inputs Gram and Xy'
' do not match.')
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
if Gram is None:
L = np.empty((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
else:
L = np.empty((max_features, max_features), dtype=Gram.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (Cov,))
solve_cholesky, = get_lapack_funcs(('potrs',), (L,))
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
if Gram is not None:
Gram_copy = Gram.copy()
Cov_copy = Cov.copy()
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**SOLVE_TRIANGULAR_ARGS)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e.'
' Reduce max_iter or increase eps parameters.'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
# than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, _ = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, _ = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each unactive variables and
# eqiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
coefs[-add_features:] = 0
alphas = np.resize(alphas, n_iter + add_features)
alphas[-add_features:] = 0
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
for ii in idx:
arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
n_active -= 1
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
n_nonzero_coefs : int, default=500
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
By default, ``np.finfo(np.float).eps`` is used.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
random_state : int, RandomState instance or None (default)
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) | list of n_targets such \
arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array-like of shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.Lars(n_nonzero_coefs=1)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
Lars(n_nonzero_coefs=1)
>>> print(reg.coef_)
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
method = 'lar'
positive = False
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
jitter=None, random_state=None):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
@staticmethod
def _get_gram(precompute, X, y):
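        # Precompute X.T @ X unless a Gram matrix was passed explicitly:
        # always when precompute=True, and under 'auto' when there are more
        # samples than features or several targets (the Gram matrix is then
        # reused for every target).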
if (not hasattr(precompute, '__array__')) and (
(precompute is True) or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
precompute = np.dot(X.T, X)
return precompute
def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
"""Auxiliary method to fit the model using X, y as training data"""
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
Gram = self._get_gram(self.precompute, X, y)
self.alphas_ = []
self.n_iter_ = []
self.coef_ = np.empty((n_targets, n_features))
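        # lars_path is run once per target column; per-target results are
        # collected in lists and unwrapped to plain arrays further down when
        # there is a single target.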
if fit_path:
self.active_ = []
self.coef_path_ = []
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_[k] = coef_path[:, -1]
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
if self.jitter is not None:
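            # Small uniform noise breaks exact ties in y, which the
            # one-at-a-time LARS updates assume do not occur.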
rng = check_random_state(self.random_state)
noise = rng.uniform(high=self.jitter, size=len(y))
y = y + noise
self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path,
Xy=Xy)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
By default, ``np.finfo(np.float).eps`` is used.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
random_state : int, RandomState instance or None (default)
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) | list of n_targets such \
arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLars(alpha=0.01)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
LassoLars(alpha=0.01)
>>> print(reg.coef_)
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
LassoLarsIC
sklearn.decomposition.sparse_encode
"""
method = 'lasso'
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False, jitter=None, random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
By default, ``np.finfo(np.float).eps`` is used
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
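    # Fit the full LARS path on the (centered, normalized) training fold; the
    # residues of every point of the path on the left-out fold are returned
    # for model selection in LarsCV / LassoLarsCV.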
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount
max_iter : int, default=500
Maximum number of iterations to perform.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. By default, ``np.finfo(np.float).eps`` is used.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Examples
--------
>>> from sklearn.linear_model import LarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
>>> reg = LarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9996...
>>> reg.alpha_
0.0254...
>>> reg.predict(X[:1,])
array([154.0842...])
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,
copy_X=True):
self.max_iter = max_iter
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
super().__init__(fit_intercept=fit_intercept,
verbose=verbose, normalize=normalize,
precompute=precompute,
n_nonzero_coefs=500,
eps=eps, copy_X=copy_X, fit_path=True)
def _more_tags(self):
return {'multioutput': False}
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = self._validate_data(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
# As we use cross-validation, the Gram matrix is not precomputed here
Gram = self.precompute
if hasattr(Gram, '__array__'):
warnings.warn('Parameter "precompute" cannot be an array in '
'%s. Automatically switch to "auto" instead.'
% self.__class__.__name__)
Gram = 'auto'
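        # One LARS path per cross-validation fold, computed in parallel; each
        # entry of cv_paths is an (alphas, active, coefs, residues) tuple.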
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
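        # Each fold stops at its own alpha breakpoints, so the squared
        # residuals are interpolated onto the common (subsampled) alpha grid
        # before being averaged over samples and, later, over folds.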
for index, (alphas, _, _, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha,
Xy=None, fit_path=True)
return self
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm.
See glossary entry for :term:`cross-validation estimator`.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount
max_iter : int, default=500
Maximum number of iterations to perform.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool or 'auto' , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. By default, ``np.finfo(np.float).eps`` is used.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Examples
--------
>>> from sklearn.linear_model import LassoLarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(noise=4.0, random_state=0)
>>> reg = LassoLarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9992...
>>> reg.alpha_
0.0484...
>>> reg.predict(X[:1,])
array([-77.8723...])
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.max_iter = max_iter
self.normalize = normalize
self.precompute = precompute
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
self.copy_X = copy_X
self.positive = positive
# XXX : we don't use super().__init__
# to avoid setting n_nonzero_coefs
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : {'bic' , 'aic'}, default='aic'
The type of criterion to use.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
By default, ``np.finfo(np.float).eps`` is used
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array-like of shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criterion is
chosen. This value is larger by a factor of ``n_samples`` compared to
Eqns. 2.15 and 2.16 in (Zou et al, 2007).
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLarsIC(criterion='bic')
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
LassoLarsIC(criterion='bic')
>>> print(reg.coef_)
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
@_deprecate_positional_args
def __init__(self, criterion='aic', *, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
self.fit_path = True
def _more_tags(self):
return {'multioutput': False}
def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
training data.
y : array-like of shape (n_samples,)
target values. Will be cast to X's dtype if necessary
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
if copy_X is None:
copy_X = self.copy_X
X, y = self._validate_data(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, copy_X)
max_iter = self.max_iter
Gram = self.precompute
alphas_, _, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
sigma2 = np.var(y)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
eps64 = np.finfo('float64').eps
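        # Information criterion of Zou et al. (2007), up to a factor
        # n_samples: n * MSE / sigma^2 + K * df, with K = 2 for AIC and
        # K = log(n) for BIC; the alpha minimizing it is selected below.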
self.criterion_ = (n_samples * mean_squared_error / (sigma2 + eps64) +
K * df) # Eqns. 2.15--16 in (Zou et al, 2007)
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| 38.943417 | 80 | 0.587094 |
8ac454af2babda9488bcb6870a79ab3b2192d03e | 1,121 | py | Python | plant/models/models.py | gyzi/devops | fe342ab1b25fca6005b1e4537e43488b544cffbb | [
"MIT"
] | null | null | null | plant/models/models.py | gyzi/devops | fe342ab1b25fca6005b1e4537e43488b544cffbb | [
"MIT"
] | 1 | 2020-06-18T15:22:46.000Z | 2020-06-18T15:22:46.000Z | plant/models/models.py | gyzi/devops | fe342ab1b25fca6005b1e4537e43488b544cffbb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import models, fields, api
# class custom/plant(models.Model):
# _name = 'custom/plant.custom/plant'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100
class Plants(models.Model):
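    # A nursery plant: display name, unit price and the customer (company)
    # it is sold to.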
    _name = 'nurse.plant'
    name = fields.Char("Plant Name", required=True)
    price = fields.Float("Price")
    customer = fields.Many2one('res.company')
    email = fields.Char("Customer Email")
# result=fields.Float(compute="_calc_sum",store=True)
# @api.depends('price')
# def _calc_sum(self):
# self.result= self.price * 100
class Customer(models.Model):
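    # A nursery customer: name, newsletter e-mail and the related company.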
    _name = 'nurse.customer'
    name = fields.Char("Customer Name", required=True)
    email = fields.Char(help="To receive newsletter")
    company = fields.Many2one('res.company', string="company clients")
# class VendorsClass(models.Model):
#     _name = 'vendor.class'
#     _inherit = 'res.company'
#     vendor_x = fields.Char()
| 26.069767 | 68 | 0.661909 |
9772e62d54359e6d53dfc45e87800765c95ea4ed | 5,068 | py | Python | docs/source/conf.py | jmilhone/fabry_perot | cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce | [
"MIT"
] | 1 | 2020-03-29T20:39:31.000Z | 2020-03-29T20:39:31.000Z | docs/source/conf.py | jmilhone/fabry_perot | cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce | [
"MIT"
] | null | null | null | docs/source/conf.py | jmilhone/fabry_perot | cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce | [
"MIT"
] | 2 | 2020-04-16T15:05:23.000Z | 2020-12-05T18:19:10.000Z | # -*- coding: utf-8 -*-
#
# Fabry Perot documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 31 09:58:29 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.append(os.path.abspath('_themes'))
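# The project root (two levels up) is put on sys.path so sphinx.ext.autodoc
# can import the package; the local _themes directory is appended so custom
# HTML themes can be found.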
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.imgmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fabry Perot'
copyright = u'2018, Jason Milhone'
author = u'Jason Milhone'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
#html_theme = 'pydoctheme'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_path = ['_themes']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'FabryPerotdoc'
#html_theme_options = {'collapsiblesidebar': True,
# 'sidebarbtncolor': '#eeeeee',
# }
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FabryPerot.tex', u'Fabry Perot Documentation',
u'Jason Milhone', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fabryperot', u'Fabry Perot Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FabryPerot', u'Fabry Perot Documentation',
author, 'FabryPerot', 'One line description of project.',
'Miscellaneous'),
]
| 30.715152 | 79 | 0.677782 |
f7749bb45c49d48f9492334a3797e7526d6554b5 | 208 | py | Python | python/testData/codeInsight/controlflow/While.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/codeInsight/controlflow/While.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/codeInsight/controlflow/While.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | while c1:
x = 1
else:
y = 1
while c2:
a = 1
break
b = 1
else:
c = 1
while c3:
m = 1
if m:
break
n = 1
else:
o = 1
while c4:
m = 1
if m:
break | 8.666667 | 13 | 0.379808 |
c25b23c52e33c173f6d1774eadf2c435397996c5 | 783 | py | Python | hackerrank/python/ordered_dict.py | wesinalves/codeskill | 270a55ef0f9cef5d4e947e71d0d648761c21a065 | [
"Apache-2.0"
] | null | null | null | hackerrank/python/ordered_dict.py | wesinalves/codeskill | 270a55ef0f9cef5d4e947e71d0d648761c21a065 | [
"Apache-2.0"
] | null | null | null | hackerrank/python/ordered_dict.py | wesinalves/codeskill | 270a55ef0f9cef5d4e947e71d0d648761c21a065 | [
"Apache-2.0"
] | null | null | null | '''
Code for HackerRank
Python Language Proficiency
Wesin Ribeiro
###################################
An OrderedDict is a dictionary that remembers the order of the keys that were inserted first.
If a new entry overwrites an existing entry, the original insertion position is left unchanged.
'''
from collections import OrderedDict
def main():
'''Main Function'''
N = int(input())
ordered_items = OrderedDict()
for _ in range(N):
line = input().split(' ')
item_name = ' '.join(line[0:-1])
price = int(line[-1])
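        # accumulate the net price per item; the OrderedDict keeps the
        # first-seen order for the final report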
        if item_name not in ordered_items:
            ordered_items[item_name] = price
        else:
            ordered_items[item_name] += price
for key, value in ordered_items.items():
print(key, value)
main() | 27.964286 | 95 | 0.623244 |
92287bafd95e5d55b8e1d66351ecff14ae23e054 | 3,512 | py | Python | models/shufflenet.py | camilaodsouza/Masters | bbf043222ff77fce760f2be42321307f308e5451 | [
"BSD-2-Clause"
] | null | null | null | models/shufflenet.py | camilaodsouza/Masters | bbf043222ff77fce760f2be42321307f308e5451 | [
"BSD-2-Clause"
] | null | null | null | models/shufflenet.py | camilaodsouza/Masters | bbf043222ff77fce760f2be42321307f308e5451 | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['ShuffleNet', 'shufflenetg2', 'shufflenetg3']
class ShuffleBlock(nn.Module):
def __init__(self, groups):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
"""Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]"""
N, C, H, W = x.size()
g = self.groups
return x.view(N, g, C/g, H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)
class Bottleneck(nn.Module):
def __init__(self, in_planes, out_planes, stride, groups):
super(Bottleneck, self).__init__()
self.stride = stride
# mid_planes = out_planes/4
mid_planes = out_planes // 4
g = 1 if in_planes == 24 else groups
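        # The first stage only has 24 input channels, too few to split into
        # groups, so its 1x1 convolution falls back to a single group (as in
        # the ShuffleNet paper).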
self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes)
self.shuffle1 = ShuffleBlock(groups=g)
self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride,
padding=1, groups=mid_planes, bias=False)
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 2:
self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.shuffle1(out)
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
res = self.shortcut(x)
out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out+res)
return out
class ShuffleNet(nn.Module):
def __init__(self, cfg, num_classes=10):
super(ShuffleNet, self).__init__()
out_planes = cfg['out_planes']
num_blocks = cfg['num_blocks']
groups = cfg['groups']
self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.in_planes = 24
self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
self.linear = nn.Linear(out_planes[2], num_classes)
def _make_layer(self, out_planes, num_blocks, groups):
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
cat_planes = self.in_planes if i == 0 else 0
layers.append(Bottleneck(self.in_planes, out_planes-cat_planes, stride=stride, groups=groups))
self.in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
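        # with 32x32 (CIFAR-style) inputs, the three stride-2 stages leave a
        # 4x4 feature map, which the 4x4 average pooling reduces to 1x1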
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def shufflenetg2(**kwargs):
cfg = {
'out_planes': [200, 400, 800],
'num_blocks': [4, 8, 4],
'groups': 2
}
return ShuffleNet(cfg, **kwargs)
def shufflenetg3(**kwargs):
cfg = {
'out_planes': [240, 480, 960],
'num_blocks': [4, 8, 4],
'groups': 3
}
return ShuffleNet(cfg, **kwargs)
| 34.431373 | 106 | 0.594248 |
dea3d9539044ba01c374ac9fd60ce59fd88d97f2 | 2,874 | py | Python | portfolio/migrations/0001_initial.py | GoWebyCMS/portfolio | 1ed5c20f6fe280388ff0876ca6a5b5129cf6b3f2 | [
"MIT"
] | 1 | 2016-12-07T15:31:22.000Z | 2016-12-07T15:31:22.000Z | portfolio/migrations/0001_initial.py | GoWebyCMS/portfolio | 1ed5c20f6fe280388ff0876ca6a5b5129cf6b3f2 | [
"MIT"
] | 26 | 2016-12-12T10:05:49.000Z | 2017-01-31T15:25:10.000Z | portfolio/migrations/0001_initial.py | GoWebyCMS/portfolio | 1ed5c20f6fe280388ff0876ca6a5b5129cf6b3f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-17 10:34
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(unique=True)),
('url', models.URLField(blank=True, null=True)),
('short_description', models.TextField(blank=True, null=True)),
('description', ckeditor.fields.RichTextField()),
('job', models.CharField(blank=True, max_length=200, null=True)),
('end_date', models.DateField(blank=True, null=True)),
('client', models.CharField(blank=True, max_length=200, null=True)),
('featured_image', models.ImageField(blank=True, null=True, upload_to='portfolio')),
('status', models.CharField(choices=[('draft', 'Draft'), ('completed', 'Completed')], default='draft', max_length=10)),
],
options={
'ordering': ['end_date'],
},
),
migrations.CreateModel(
name='ProjectCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, unique=True)),
('slug', models.SlugField(unique=True)),
],
options={
'verbose_name_plural': 'categories',
'verbose_name': 'category',
},
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, unique=True)),
('slug', models.SlugField(unique=True)),
],
options={
'verbose_name_plural': 'Skills',
'verbose_name': 'Skill',
},
),
migrations.AddField(
model_name='project',
name='categories',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='project_categories', to='portfolio.ProjectCategory'),
),
migrations.AddField(
model_name='project',
name='skills',
field=models.ManyToManyField(related_name='project_skills', to='portfolio.Skill'),
),
]
| 39.916667 | 171 | 0.559151 |