code (stringlengths 10–805k) | def_use_chains (sequencelengths 0–667)
---|---
# Import the Twython class
from twython import Twython
import json
import os
import pandas as pd
from tqdm import tqdm
try:
os.remove('twitter_dataset.csv')
except OSError:
pass
def main():
old_df = pd.read_csv('data/twitter_dataset_2.csv', lineterminator='\n')
#first load the dictionary with the most-used English words
with open('improved_dict.txt') as d:
word_list = d.read()
words = word_list.split('\n')
# Dictionary structure with the fields we are interested in acquiring from the tweets
dict_ = {'user': [],
'text': [],
'hashtags': [],
'mentions': []
}
# Instantiate an object
python_tweets = Twython('9Tz9FnZ1PR9AcEvudwC7hqOod', #API Key
'Z7upFmGJZE3oAfcb2ZUmRdEeBJJkkYTQ86PuB3iKgWqXFdMFNo') #API Secret
#each query has a target word
queries = []
for w in words:
query = {'q': w, #the query word
'result_type': 'recent',
'count': 100, #100 tweets, which is the maximum limit admitted by Twitter
'lang': 'en', #we are interested only in english tweets
}
queries.append(query)
#perform the queries to get the tweets and map the JSON into our dictionary
for q in tqdm(queries[:50]):
for status in python_tweets.search(**q)['statuses']:
dict_['user'].append(status['user']['screen_name']) #username
dict_['text'].append(status['text']) #content of the tweet
#this is necessary because the hashtags may be missing or there can be more than one
#a list comprehension over the tweet entities handles both cases
ht = [d['text'] for d in status['entities']['hashtags'] if 'text' in d] #list of hashtags
dict_['hashtags'].append(ht)
#same thing for the mentions
ment = [d['screen_name'] for d in status['entities']['user_mentions'] if 'screen_name' in d] #list of mentions
dict_['mentions'].append(ment)
# Structure data in a pandas DataFrame for easier manipulation
df = pd.DataFrame(dict_)
df = df.append(old_df)
df.to_csv('data/twitter_dataset_2.csv', index=False, encoding='utf-8')
if __name__ == '__main__':
main()
from time import sleep
while True:
sleep(1200)
main()
| [
[
[
47,
54
],
[
709,
716
]
],
[
[
62,
66
]
],
[
[
74,
76
],
[
129,
131
]
],
[
[
84,
96
],
[
213,
215
],
[
2123,
2125
]
],
[
[
114,
118
],
[
1304,
1308
]
],
[
[
192,
196
],
[
2278,
2282
],
[
2356,
2360
]
],
[
[
2306,
2311
],
[
2336,
2341
]
]
] |
"""
grdfilter - Filter a grid in the space (or time) domain.
"""
from pygmt.clib import Session
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
from pygmt.io import load_dataarray
@fmt_docstring
@use_alias(
D="distance",
F="filter",
G="outgrid",
I="spacing",
N="nans",
R="region",
T="toggle",
V="verbose",
f="coltypes",
r="registration",
)
@kwargs_to_strings(I="sequence", R="sequence")
def grdfilter(grid, **kwargs):
r"""
Filter a grid in the space (or time) domain.
Filter a grid file in the time domain using one of the selected convolution
or non-convolution isotropic or rectangular filters and compute distances
using Cartesian or Spherical geometries. The output grid file can
optionally be generated as a sub-region of the input (via ``region``)
and/or with a new increment (via ``spacing``) or registration
(via ``toggle``). In this way, one may have "extra space" in the input
data so that the edges will not be used and the output can be within one
half-width of the input edges. If the filter is low-pass, then the output
may be less frequently sampled than the input.
Full option list at :gmt-docs:`grdfilter.html`
{aliases}
Parameters
----------
grid : str or xarray.DataArray
The file name of the input grid or the grid loaded as a DataArray.
outgrid : str or None
The name of the output netCDF file with extension .nc to store the grid
in.
filter : str
**b**\|\ **c**\|\ **g**\|\ **o**\|\ **m**\|\ **p**\|\ **h**\ *xwidth*\
[/*width2*\][*modifiers*].
Name of the filter type you wish to apply, followed by the width:
b: Box Car
c: Cosine Arch
g: Gaussian
o: Operator
m: Median
p: Maximum Likelihood probability
h: histogram
distance : str
Distance *flag* tells how grid (x,y) relates to filter width as
follows:
p: grid (px,py) with *width* an odd number of pixels; Cartesian
distances.
0: grid (x,y) same units as *width*, Cartesian distances.
1: grid (x,y) in degrees, *width* in kilometers, Cartesian distances.
2: grid (x,y) in degrees, *width* in km, dx scaled by cos(middle y),
Cartesian distances.
The above options are fastest because they allow weight matrix to be
computed only once. The next three options are slower because they
recompute weights for each latitude.
3: grid (x,y) in degrees, *width* in km, dx scaled by cosine(y),
Cartesian distance calculation.
4: grid (x,y) in degrees, *width* in km, Spherical distance
calculation.
5: grid (x,y) in Mercator ``projection='m1'`` img units, *width* in km,
Spherical distance calculation.
{I}
nans : str or float
**i**\|\ **p**\|\ **r**.
Determine how NaN-values in the input grid affect the filtered output.
{R}
toggle : bool
Toggle the node registration for the output grid so as to become the
opposite of the input grid. [Default gives the same registration as the
input grid].
{V}
{f}
{r}
Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the ``outgrid`` parameter is set:
- :class:`xarray.DataArray` if ``outgrid`` is not set
- None if ``outgrid`` is set (grid output will be stored in file set by
``outgrid``)
Example
-------
>>> import os
>>> import pygmt
>>> # Apply a filter of 600km (full width) to the @earth_relief_30m file
>>> # and return a filtered field (saved as netcdf)
>>> pygmt.grdfilter(
... grid="@earth_relief_30m",
... filter="m600",
... distance="4",
... region=[150, 250, 10, 40],
... spacing=0.5,
... outgrid="filtered_pacific.nc",
... )
>>> os.remove("filtered_pacific.nc") # cleanup file
>>> # Apply a Gaussian smoothing filter of 600 km to the input data array,
>>> # and return a filtered data array with the smoothed field.
>>> grid = pygmt.datasets.load_earth_relief()
>>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4")
"""
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with file_context as infile:
if (outgrid := kwargs.get("G")) is None:
kwargs["G"] = outgrid = tmpfile.name # output to tmpfile
lib.call_module("grdfilter", build_arg_string(kwargs, infile=infile))
return load_dataarray(outgrid) if outgrid == tmpfile.name else None
| [
[
[
89,
96
],
[
4425,
4432
]
],
[
[
129,
140
],
[
4374,
4385
]
],
[
[
146,
162
],
[
4749,
4765
]
],
[
[
168,
181
],
[
262,
275
]
],
[
[
187,
204
],
[
462,
479
]
],
[
[
210,
219
],
[
277,
286
]
],
[
[
244,
258
],
[
4806,
4820
]
],
[
[
512,
521
]
]
] |
"""Data structure of RSS and useful functions.
"""
#
# Copyright (c) 2005-2020 shinGETsu Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import html
import re
import cgi
from .template import Template
class Item:
"""One item."""
title = ""
link = ""
description = ""
date = 0 # Seconds from 1970-01-01T00:00
def __init__(self, link="", title="", date=0, creator='', subject=None,
description="", content=""):
"""Constructor."""
del_eos = re.compile(r'[\r\n]*')
self.link = link
self.date = date
self.creator = creator
if subject:
self.subject = subject
else:
self.subject = []
self.title = del_eos.sub('', title)
self.description = del_eos.sub('', description)
self.content = content
class RSS(dict):
"""RSS.
It is a dictionary whose keys are URIs.
"""
encode = "utf-8"
lang = "en"
title = ""
parent = "" # Place where is documents or RSS
link = "" # URI of main page
uri = "" # URI of RSS
description = ""
def __init__(self, encode="utf-8", lang="en", title="",
parent="", link="", uri="", description="", xsl=""):
"""Constructor."""
self.encode = encode
self.lang = lang
self.title = title
self.description = description
self.parent = parent
self.xsl = xsl
if parent and parent[-1] != "/":
parent += "/"
self.parent += "/"
if link != "":
self.link = link
else:
self.link = parent
if uri != "":
self.uri = uri
else:
self.uri = parent + "rss.xml"
def append(self, link,
title = "",
date = 0,
creator = '',
subject = None,
description = "",
content = "",
abs = False):
"""Add an item."""
if not abs:
link = self.parent + link
item = Item(link,
title = title,
date = date,
creator = creator,
subject = subject,
description = description,
content = content)
self[link] = item
def keys(self):
"""List of links sorted by date."""
links = list(dict.keys(self))
links.sort(key=lambda x: self[x].date, reverse=True)
return links
def __iter__(self):
return iter(list(self.keys()))
def make_rss1(rss):
'''Generate RSS 1.0.
'''
def w3cdate(date):
from time import strftime, gmtime
return strftime('%Y-%m-%dT%H:%M:%SZ', gmtime(date))
var = {
'rss': rss,
'feed': [rss[uri] for uri in rss],
'w3cdate': w3cdate,
'escape': html.escape,
}
return Template().display('rss1', var)
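# Minimal usage sketch (not part of the original module). It assumes the
# shinGETsu Template environment can resolve the 'rss1' template; the feed
# values below are purely illustrative.
def _example_feed():
    feed = RSS(title='Sample board', parent='http://example.invalid/',
               description='Demo feed')
    feed.append('thread/1234', title='First post', date=0,
                description='Hello world')
    for uri in feed:  # iteration follows RSS.keys(): newest items first
        print(uri, feed[uri].title)
    return make_rss1(feed)  # renders the RSS 1.0 document via Template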
| [
[
[
1399,
1403
],
[
4189,
4193
]
],
[
[
1411,
1413
],
[
1767,
1769
]
],
[
[
1421,
1424
]
],
[
[
1448,
1456
],
[
4219,
4227
]
],
[
[
1465,
1469
],
[
3370,
3374
]
],
[
[
2110,
2113
]
],
[
[
3894,
3903
]
]
] |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class Static(Kern):
def __init__(self, input_dim, variance, active_dims, name):
super(Static, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', variance, Logexp())
self.link_parameters(self.variance)
def _to_dict(self):
input_dict = super(Static, self)._to_dict()
input_dict["variance"] = self.variance.values.tolist()
return input_dict
def Kdiag(self, X):
ret = np.empty((X.shape[0],), dtype=np.float64)
ret[:] = self.variance
return ret
def gradients_X(self, dL_dK, X, X2=None):
return np.zeros(X.shape)
def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def gradients_XX(self, dL_dK, X, X2=None):
if X2 is None:
X2 = X
return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)
def gradients_XX_diag(self, dL_dKdiag, X, cov=False):
return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(Z.shape)
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)
def psi0(self, Z, variational_posterior):
return self.Kdiag(variational_posterior.mean)
def psi1(self, Z, variational_posterior):
return self.K(variational_posterior.mean, Z)
def psi2(self, Z, variational_posterior):
K = self.K(variational_posterior.mean, Z)
return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inheriting classes
def input_sensitivity(self, summarize=True):
if summarize:
return super(Static, self).input_sensitivity(summarize=summarize)
else:
return np.ones(self.input_dim) * self.variance
class White(Static):
def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
super(White, self).__init__(input_dim, variance, active_dims, name)
def K(self, X, X2=None):
if X2 is None:
return np.eye(X.shape[0])*self.variance
else:
return np.zeros((X.shape[0], X2.shape[0]))
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.trace(dL_dK)
else:
self.variance.gradient = 0.
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag.sum()
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum()
class WhiteHeteroscedastic(Static):
def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'):
"""
A heteroscedastic White kernel (nugget/noise).
It defines one variance (nugget) per input sample.
Prediction excludes any noise learnt by this Kernel, so be careful using this kernel.
You can plot the errors learnt by this kernel with something similar to:
plt.errorbar(m.X, m.Y, yerr=2*np.sqrt(m.kern.white.variance))
"""
super(Static, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', np.ones(num_data) * variance, Logexp())
self.link_parameters(self.variance)
def Kdiag(self, X):
if X.shape[0] == self.variance.shape[0]:
# If the input has the same number of samples as
# the number of variances, we return the variances
return self.variance
return 0.
def K(self, X, X2=None):
if X2 is None and X.shape[0] == self.variance.shape[0]:
return np.eye(X.shape[0]) * self.variance
else:
return 0.
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.diagonal(dL_dK)
else:
self.variance.gradient = 0.
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0
class Bias(Static):
def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
super(Bias, self).__init__(input_dim, variance, active_dims, name)
def to_dict(self):
input_dict = super(Bias, self)._to_dict()
input_dict["class"] = "GPy.kern.Bias"
return input_dict
@staticmethod
def _from_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Bias(**input_dict)
def K(self, X, X2=None):
shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
return np.full(shape, self.variance, dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
self.variance.gradient = dL_dK.sum()
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag.sum()
def psi2(self, Z, variational_posterior):
return np.full((Z.shape[0], Z.shape[0]), self.variance*self.variance*variational_posterior.shape[0], dtype=np.float64)
def psi2n(self, Z, variational_posterior):
ret = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
ret[:] = self.variance*self.variance
return ret
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
if dL_dpsi2.ndim == 2:
self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
+ 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0])
else:
self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
+ 2.*self.variance*dL_dpsi2.sum())
class Fixed(Static):
def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
"""
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
super(Fixed, self).__init__(input_dim, variance, active_dims, name)
self.fixed_K = covariance_matrix
def K(self, X, X2):
if X2 is None:
return self.variance * self.fixed_K
else:
return np.zeros((X.shape[0], X2.shape[0]))
def Kdiag(self, X):
return self.variance * self.fixed_K.diagonal()
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)
else:
self.variance.gradient = 0
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K))
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum()
class Precomputed(Fixed):
def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'):
"""
Class for precomputed kernels, indexed by columns in X
Usage example:
import numpy as np
from GPy.models import GPClassification
from GPy.kern import Precomputed
from sklearn.cross_validation import LeaveOneOut
n = 10
d = 100
X = np.arange(n).reshape((n,1)) # column vector of indices
y = 2*np.random.binomial(1,0.5,(n,1))-1
X0 = np.random.randn(n,d)
k = np.dot(X0,X0.T)
kern = Precomputed(1,k) # k is a n x n covariance matrix
cv = LeaveOneOut(n)
ypred = y.copy()
for train, test in cv:
m = GPClassification(X[train], y[train], kernel=kern)
m.optimize()
ypred[test] = 2*(m.predict(X[test])[0]>0.5)-1
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
assert input_dim==1, "Precomputed only implemented in one dimension. Use multiple Precomputed kernels to have more dimensions by making use of active_dims"
super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name)
@Cache_this(limit=2)
def _index(self, X, X2):
if X2 is None:
i1 = i2 = X.astype('int').flat
else:
i1, i2 = X.astype('int').flat, X2.astype('int').flat
return self.fixed_K[i1,:][:,i2]
def K(self, X, X2=None):
return self.variance * self._index(X, X2)
def Kdiag(self, X):
return self.variance * self._index(X,None).diagonal()
def update_gradients_full(self, dL_dK, X, X2=None):
self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2))
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))
| [
[
[
133,
137
],
[
294,
298
]
],
[
[
145,
156
],
[
744,
746
],
[
774,
776
],
[
898,
900
],
[
978,
980
],
[
1101,
1103
],
[
1167,
1169
],
[
1253,
1255
],
[
1306,
1308
],
[
1430,
1432
],
[
1561,
1563
],
[
1600,
1602
],
[
1951,
1953
],
[
2246,
2248
],
[
2536,
2538
],
[
2602,
2604
],
[
2700,
2702
],
[
2741,
2743
],
[
2816,
2818
],
[
2860,
2862
],
[
2989,
2991
],
[
3936,
3938
],
[
4382,
4384
],
[
4515,
4517
],
[
4556,
4558
],
[
4631,
4633
],
[
4675,
4677
],
[
4804,
4806
],
[
5701,
5703
],
[
5737,
5739
],
[
6014,
6016
],
[
6114,
6116
],
[
6188,
6190
],
[
6266,
6268
],
[
7360,
7362
],
[
7593,
7595
],
[
7771,
7773
],
[
7799,
7801
],
[
7888,
7890
],
[
7929,
7931
],
[
8004,
8006
],
[
8048,
8050
],
[
10090,
10092
],
[
10221,
10223
]
],
[
[
194,
199
],
[
456,
461
],
[
3918,
3923
]
],
[
[
235,
241
],
[
484,
490
],
[
3966,
3972
]
],
[
[
269,
279
],
[
9599,
9609
]
],
[
[
287,
293
],
[
2299,
2305
],
[
3338,
3344
],
[
5128,
5134
],
[
6815,
6821
],
[
379,
385
],
[
590,
596
],
[
2160,
2166
],
[
3841,
3847
]
],
[
[
2293,
2298
],
[
2402,
2407
]
],
[
[
3317,
3337
]
],
[
[
5123,
5127
],
[
5230,
5234
],
[
5342,
5346
],
[
5565,
5569
]
],
[
[
6809,
6814
],
[
8229,
8234
],
[
7129,
7134
]
],
[
[
8217,
8228
],
[
9506,
9517
]
]
] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="slacksdk",
version="0.0.1a",
author="Thanakrit Juthamongkhon",
author_email="thanakrit.ju.work@gmail.com",
description="A minimal slack sdk",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/thanakritju/python-slack-events-sdk",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | [
[
[
7,
17
],
[
88,
98
],
[
443,
453
]
],
[
[
50,
52
],
[
77,
79
]
],
[
[
58,
74
],
[
295,
311
]
]
] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Grzegorz Jacenków.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import tensorflow as tf
from tensorflow.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
"""Write final logs to a CSV file."""
w = csv.writer(open(os.path.join(
config.EXPERIMENT_FOLDER, "results.csv"), "w"))
for key, val in logs.items():
w.writerow([key, val])
class Pipeline:
def __init__(self):
# Model.
self.model = setup_model()
# Comet.ml experiment.
self.comet_ml = setup_comet_ml()
# Testing metrics.
self.test_dice = DiceScore(name="testing_dice")
self.test_loss = Mean(name="testing_loss")
# Training metrics.
self.training_dice = DiceScore(name="training_dice")
self.training_loss = Mean(name="training_loss")
# Callbacks.
self.cl, self.es, self.mc, self.pp = setup_callbacks()
self.cl.model, self.es.model, self.mc.model = \
self.model, self.model, self.model
self.pp.model = self.model
self.pp.comet_ml = self.comet_ml
def fit(self):
"""Train the model."""
# Toy dataset.
loader = CLEVR()
train_ds, valid_ds, test_ds = loader.load()
with self.comet_ml.train():
self.cl.on_train_begin()
self.es.on_train_begin()
self.mc.on_train_begin()
self.pp.on_train_begin()
for epoch in range(config.EXPERIMENT_EPOCHS):
self.comet_ml.set_epoch(epoch)
for images, labels in train_ds:
self.train_step(images, labels)
for batch, (images, labels) in enumerate(valid_ds):
self.test_step(images, labels)
if not batch: # Log only first mini-batch from an epoch.
self.pp.on_epoch_end(epoch, images, labels)
# Get results.
logs = {
"dice": self.training_dice.result().numpy(),
"loss": self.training_loss.result().numpy(),
"validation_dice": self.test_dice.result().numpy(),
"validation_loss": self.test_loss.result().numpy(),
}
template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
"Validation Loss: {}. Validation Dice: {}.")
print(template.format(epoch + 1,
logs['loss'],
logs['dice'],
logs['validation_loss'],
logs['validation_dice']))
# Log metrics.
self.comet_ml.log_metrics(logs, epoch=epoch)
self.cl.on_epoch_end(epoch, logs)
self.es.on_epoch_end(epoch, logs)
self.mc.on_epoch_end(epoch, logs)
# Reset the metrics for the next epoch.
self.training_dice.reset_states()
self.training_loss.reset_states()
self.test_dice.reset_states()
self.test_loss.reset_states()
# Early stopping criterion.
if self.es.model.stop_training:
self.cl.on_train_end()
self.es.on_train_end()
self.mc.on_train_end()
break
with self.comet_ml.test():
for batch, (images, labels) in enumerate(test_ds):
self.test_step(images, labels)
if not batch:
self.pp.on_test_end(images, labels)
# Get results.
logs = {
"dice": self.test_dice.result().numpy(),
"loss": self.test_loss.result().numpy(),
}
print("Test Loss: {}. Test Dice: {}.".format(
logs['loss'], logs['dice']))
# Log metrics.
self.comet_ml.log_metrics(logs)
_write_results(logs)
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
predictions = self.model.inference(images)
loss = self.model.loss(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimiser.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.training_loss(loss)
self.training_dice(labels, predictions)
@tf.function
def test_step(self, images, labels):
predictions = self.model.inference(images)
t_loss = self.model.loss(labels, predictions)
self.test_loss(t_loss)
self.test_dice(labels, predictions)
| [
[
[
671,
674
],
[
1031,
1034
]
],
[
[
682,
684
],
[
1047,
1049
]
],
[
[
693,
709
],
[
4853,
4855
],
[
5325,
5327
],
[
4920,
4922
]
],
[
[
747,
751
],
[
1458,
1462
],
[
1603,
1607
]
],
[
[
772,
778
],
[
1069,
1075
],
[
2263,
2269
]
],
[
[
808,
823
],
[
1697,
1712
]
],
[
[
855,
869
],
[
1332,
1346
]
],
[
[
871,
882
],
[
1262,
1273
]
],
[
[
910,
915
],
[
1986,
1991
]
],
[
[
943,
952
],
[
1402,
1411
],
[
1542,
1551
]
],
[
[
959,
973
],
[
4826,
4840
]
],
[
[
1190,
1198
]
]
] |
from openmmtools import testsystems
from simtk.openmm.app import *
import simtk.unit as unit
import logging
import numpy as np
from openmmtools.constants import kB
from openmmtools import respa, utils
logger = logging.getLogger(__name__)
# Energy unit used by OpenMM unit system
from openmmtools import states, integrators
import time
import numpy as np
import sys
import os
def get_rotation_matrix():
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
Nx3 array, original point clouds
Return:
Nx3 array, rotated point clouds
"""
angles = np.random.uniform(-1.0, 1.0, size=(3,)) * np.pi
print(f'Using angle: {angles}')
Rx = np.array([[1., 0, 0],
[0, np.cos(angles[0]), -np.sin(angles[0])],
[0, np.sin(angles[0]), np.cos(angles[0])]], dtype=np.float32)
Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],
[0, 1, 0],
[-np.sin(angles[1]), 0, np.cos(angles[1])]], dtype=np.float32)
Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
[np.sin(angles[2]), np.cos(angles[2]), 0],
[0, 0, 1]], dtype=np.float32)
rotation_matrix = np.matmul(Rz, np.matmul(Ry, Rx))
return rotation_matrix
def center_positions(pos):
offset = np.mean(pos, axis=0)
return pos - offset, offset
BOX_SCALE = 2
DT = 2
for seed in range(10):
print(f'Running seed: {seed}')
waterbox = testsystems.WaterBox(
box_edge=2 * unit.nanometers,
model='tip4pew')
[topology, system, positions] = [waterbox.topology, waterbox.system, waterbox.positions]
R = get_rotation_matrix()
positions = positions.value_in_unit(unit.angstrom)
positions, off = center_positions(positions)
positions = np.matmul(positions, R)
positions += off
positions += np.random.randn(positions.shape[0], positions.shape[1]) * 0.005
positions *= unit.angstrom
p_num = positions.shape[0] // 3
timestep = DT * unit.femtoseconds
temperature = 300 * unit.kelvin
chain_length = 10
friction = 1. / unit.picosecond
num_mts = 5
num_yoshidasuzuki = 5
integrator = integrators.NoseHooverChainVelocityVerletIntegrator(system,
temperature,
friction,
timestep, chain_length, num_mts, num_yoshidasuzuki)
simulation = Simulation(topology, system, integrator)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
simulation.minimizeEnergy(tolerance=1*unit.kilojoule/unit.mole)
simulation.step(1)
os.makedirs(f'./water_data_tip4p/', exist_ok=True)
dataReporter_gt = StateDataReporter(f'./log_nvt_tip4p_{seed}.txt', 50, totalSteps=50000,
step=True, time=True, speed=True, progress=True, elapsedTime=True, remainingTime=True,
potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True,
separator='\t')
simulation.reporters.append(dataReporter_gt)
for t in range(1000):
if (t+1)%100 == 0:
print(f'Finished {(t+1)*50} steps')
state = simulation.context.getState(getPositions=True,
getVelocities=True,
getForces=True,
enforcePeriodicBox=True)
pos = state.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
vel = state.getVelocities(asNumpy=True).value_in_unit(unit.meter / unit.second)
force = state.getForces(asNumpy=True).value_in_unit(unit.kilojoules_per_mole/unit.nanometer)
np.savez(f'./water_data_tip4p/data_{seed}_{t}.npz',
pos=pos,
vel=vel,
forces=force)
simulation.step(50)
| [
[
[
24,
35
],
[
1563,
1574
]
],
[
[
65,
66
],
[
2641,
2651
],
[
2962,
2979
]
],
[
[
74,
92
],
[
1606,
1610
],
[
1812,
1816
],
[
2035,
2039
],
[
2106,
2110
],
[
2148,
2152
],
[
2202,
2206
],
[
2835,
2839
],
[
2850,
2854
],
[
3737,
3741
],
[
3814,
3818
],
[
3827,
3831
],
[
3900,
3904
],
[
3925,
3929
]
],
[
[
101,
108
],
[
214,
221
]
],
[
[
117,
128
]
],
[
[
164,
166
]
],
[
[
191,
196
]
],
[
[
198,
203
]
],
[
[
205,
211
]
],
[
[
308,
314
]
],
[
[
316,
327
],
[
2278,
2289
]
],
[
[
335,
339
]
],
[
[
347,
358
],
[
1892,
1894
],
[
1954,
1956
],
[
3950,
3952
],
[
666,
668
],
[
708,
710
],
[
759,
761
],
[
808,
810
],
[
828,
830
],
[
875,
877
],
[
894,
896
],
[
921,
923
],
[
942,
944
],
[
953,
955
],
[
975,
977
],
[
1054,
1056
],
[
1076,
1078
],
[
1103,
1105
],
[
1124,
1126
],
[
1135,
1137
],
[
1155,
1157
],
[
1198,
1200
],
[
1217,
1219
],
[
1277,
1279
],
[
1311,
1313
],
[
1325,
1327
],
[
1413,
1415
]
],
[
[
366,
369
]
],
[
[
377,
379
],
[
2889,
2891
]
],
[
[
386,
405
],
[
1750,
1769
]
],
[
[
1377,
1393
],
[
1848,
1864
]
],
[
[
1468,
1477
]
],
[
[
1482,
1484
],
[
2101,
2103
]
],
[
[
1493,
1497
],
[
1539,
1543
],
[
2999,
3003
],
[
3986,
3990
]
],
[
[
1552,
1560
],
[
1685,
1693
],
[
1704,
1712
],
[
1721,
1729
]
],
[
[
1653,
1661
],
[
2652,
2660
]
],
[
[
1663,
1669
],
[
2330,
2336
],
[
2662,
2668
]
],
[
[
1671,
1680
],
[
1788,
1797
]
],
[
[
1746,
1747
],
[
1913,
1914
]
],
[
[
1776,
1785
],
[
1865,
1874
]
],
[
[
1831,
1840
],
[
1902,
1911
]
],
[
[
1842,
1845
],
[
1933,
1936
]
],
[
[
1880,
1889
],
[
1920,
1929
]
],
[
[
2054,
2059
]
],
[
[
2090,
2098
],
[
2571,
2579
]
],
[
[
2128,
2139
],
[
2408,
2419
],
[
2779,
2790
]
],
[
[
2164,
2176
],
[
2581,
2593
]
],
[
[
2186,
2194
],
[
2491,
2499
]
],
[
[
2222,
2229
],
[
2595,
2602
]
],
[
[
2238,
2255
],
[
2604,
2621
]
],
[
[
2265,
2275
],
[
2670,
2680
]
],
[
[
2628,
2638
],
[
2686,
2696
],
[
2733,
2743
],
[
2797,
2807
],
[
2865,
2875
],
[
3271,
3281
],
[
3433,
3443
],
[
4093,
4103
]
],
[
[
2944,
2959
],
[
3299,
3314
]
],
[
[
3324,
3325
],
[
3354,
3355
],
[
3400,
3401
],
[
3993,
3994
]
],
[
[
3425,
3430
],
[
3690,
3695
],
[
3766,
3771
],
[
3856,
3861
]
],
[
[
3684,
3687
],
[
4023,
4026
]
],
[
[
3760,
3763
],
[
4049,
4052
]
],
[
[
3848,
3853
],
[
4078,
4083
]
]
] |
import os
import hashlib
def _update_sha256(filename, sha256):
"""
Updates a SHA-256 hash object with the contents of a file and then with its filename.
"""
block_size = 64 * 1024 # 64 KB
with open(filename, 'rb') as input_file:
while True:
data = input_file.read(block_size)
if not data:
break
sha256.update(data)
sha256.update(filename.encode("utf-8"))
return sha256
def hash_tree(root):
"""
Returns a cryptographically secure hash for a whole directory tree taking into account the names and the content of
the files.
"""
file_list = []
for root_directory, directories, files in os.walk(root):
for file in files:
file_list.append(os.path.join(root_directory, file))
sorted_file_list = sorted(file_list)
sha256 = hashlib.sha256()
for file in sorted_file_list:
_update_sha256(file, sha256)
return sha256.hexdigest()
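# Minimal usage sketch (not part of the original file); the directory path is
# purely illustrative -- any readable directory works.
if __name__ == '__main__':
    digest = hash_tree('.')  # hash every file name and file body under '.'
    print(digest)            # 64-character hexadecimal SHA-256 digest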
| [
[
[
7,
9
],
[
687,
689
],
[
758,
760
]
],
[
[
17,
24
],
[
848,
855
]
],
[
[
31,
45
],
[
907,
921
]
],
[
[
454,
463
]
]
] |
def destructure(obj, *params):
import operator
return operator.itemgetter(*params)(obj)
def greet(**kwargs):
year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle')
print('Advent of Code')
print(f'-> {year}-{day}-{puzzle}')
print('--------------')
def load_data(filename):
with filename.open('r') as handle:
return handle.read()
def start(fn):
import pathlib
base_path = pathlib.Path(__file__).parent.parent / 'data'
def wrapped(*args, **kwargs):
greet(**kwargs)
data = load_data(base_path / f'{kwargs["year"]}.{kwargs["day"]}.txt')
return fn(data, *args, **kwargs)
return wrapped
def flatten_json(nested_json):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
for i, a in enumerate(x):
flatten(a, name + str(i) + '_')
else:
out[name[:-1]] = x
flatten(nested_json)
return out
def sparse_matrix():
from collections import defaultdict
return defaultdict(lambda: 0)
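# Usage sketch (not part of the original helpers). Keying by (x, y) tuples is an
# assumed pattern; any hashable key works and missing keys read as 0.
def _sparse_matrix_example():
    grid = sparse_matrix()
    grid[(0, 0)] = 1
    return grid[(10, 42)]  # -> 0, no KeyError for unset cells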
| [
[
[
4,
15
],
[
142,
153
]
],
[
[
101,
106
],
[
515,
520
]
],
[
[
287,
296
],
[
546,
555
]
],
[
[
381,
386
]
],
[
[
674,
686
]
],
[
[
1048,
1061
]
]
] |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# external
from mixbox import fields
import cybox.common
from cybox.common.tools import ToolInformationList
# internal
import stix
import stix.bindings.stix_common as stix_common_binding
# relative
from .vocabs import VocabField
from .references import References
from .identity import Identity, IdentityFactory
from .structured_text import StructuredTextList
class InformationSource(stix.Entity):
_binding = stix_common_binding
_binding_class = stix_common_binding.InformationSourceType
_namespace = 'http://stix.mitre.org/common-1'
identity = fields.TypedField("Identity", type_=Identity, factory=IdentityFactory)
descriptions = fields.TypedField("Description", StructuredTextList)
contributing_sources = fields.TypedField("Contributing_Sources", type_="stix.common.information_source.ContributingSources")
time = fields.TypedField("Time", cybox.common.Time)
roles = VocabField("Role", multiple=True, key_name="roles")
tools = fields.TypedField("Tools", ToolInformationList)
references = fields.TypedField("References", References)
def __init__(self, description=None, identity=None, time=None, tools=None, contributing_sources=None, references=None):
super(InformationSource, self).__init__()
self.identity = identity
self.descriptions = StructuredTextList(description)
self.contributing_sources = contributing_sources
self.time = time
self.tools = tools
self.references = references
#self.roles = None
def add_contributing_source(self, value):
self.contributing_sources.append(value)
def add_reference(self, value):
if not value:
return
# TODO: Check if it's a valid URI?
self.references.append(value)
@property
def description(self):
"""A single description about the contents or purpose of this object.
Default Value: ``None``
Note:
If this object has more than one description set, this will return
the description with the lowest ordinality value.
Returns:
An instance of :class:`.StructuredText`
"""
return next(iter(self.descriptions), None)
@description.setter
def description(self, value):
from stix.common.structured_text import StructuredTextList
self.descriptions = StructuredTextList(value)
def add_description(self, description):
"""Adds a description to the ``descriptions`` collection.
This is the same as calling "foo.descriptions.add(bar)".
"""
self.descriptions.add(description)
def add_role(self, value):
self.roles.append(value)
class ContributingSources(stix.EntityList):
_namespace = "http://stix.mitre.org/common-1"
_binding = stix_common_binding
_binding_class = stix_common_binding.ContributingSourcesType
source = fields.TypedField("Source", InformationSource, multiple=True, key_name="sources")
@classmethod
def _dict_as_list(cls):
return False
| [
[
[
135,
141
],
[
671,
677
],
[
761,
767
],
[
841,
847
],
[
954,
960
],
[
1075,
1081
],
[
1140,
1146
],
[
3017,
3023
]
],
[
[
149,
161
],
[
980,
985
]
],
[
[
193,
212
],
[
1102,
1121
]
],
[
[
232,
236
],
[
493,
497
],
[
2835,
2839
]
],
[
[
244,
292
],
[
522,
541
],
[
563,
582
],
[
2918,
2937
],
[
2959,
2978
]
],
[
[
325,
335
],
[
1011,
1021
]
],
[
[
360,
370
],
[
1172,
1182
]
],
[
[
393,
401
],
[
707,
715
]
],
[
[
403,
418
],
[
725,
740
]
],
[
[
448,
466
],
[
794,
812
],
[
1421,
1439
]
],
[
[
475,
492
],
[
3045,
3062
],
[
1323,
1340
]
],
[
[
2815,
2834
]
]
] |
import copy
from types import GeneratorType
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
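# Usage sketch (not part of the original module): lookups walk the wrapped
# dictionaries in order, so the first mapping that defines a key wins.
def _merge_dict_example():
    defaults = {'colour': 'blue', 'size': 'M'}
    overrides = {'colour': 'red'}
    merged = MergeDict(overrides, defaults)
    return merged['colour'], merged['size']  # -> ('red', 'M')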
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
# second time to setup keyOrder correctly
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.iteritems()])
def __copy__(self):
# The Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
"""
Returns the list of values for the passed key. If key doesn't exist,
then a default value is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return default
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
return default_list
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key).append(value)
def dict(self):
"""
Returns current object as a dict with singular values.
"""
return dict((key, self[key]) for key in self)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
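# Usage sketch (not part of the original module): keys carrying the prefix have
# their values passed through the wrapping function; other keys come back raw.
def _dict_wrapper_example():
    wrapped = DictWrapper({'name': 'users'}, lambda v: '"%s"' % v, 'quoted_')
    return wrapped['quoted_name'], wrapped['name']  # -> ('"users"', 'users')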
| [
[
[
7,
11
],
[
3531,
3535
],
[
8297,
8301
],
[
8352,
8356
],
[
11227,
11231
]
],
[
[
30,
43
],
[
2858,
2871
]
],
[
[
51,
60
]
],
[
[
2468,
2478
],
[
2643,
2653
],
[
3129,
3139
],
[
3977,
3987
],
[
4066,
4076
],
[
4248,
4258
],
[
4517,
4527
],
[
5269,
5279
],
[
5805,
5815
],
[
6304,
6314
]
],
[
[
6364,
6386
],
[
7680,
7702
]
],
[
[
6414,
6428
],
[
7185,
7199
],
[
7351,
7365
],
[
7598,
7612
],
[
7888,
7902
],
[
9336,
9350
],
[
9535,
9549
],
[
10703,
10717
],
[
10820,
10834
],
[
11609,
11623
]
],
[
[
12317,
12332
]
],
[
[
13713,
13726
]
],
[
[
14970,
14981
],
[
15386,
15397
],
[
15911,
15922
]
]
] |
import os
import sys
import dlib
import glob
import csv
import pickle as pp
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn import preprocessing
# from sklearn.model_selection import train_test_split
import webbrowser
from timeit import Timer
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
from time import time
import time
import multiprocessing
from flask import Flask, render_template, request
from PIL import Image
from elasticsearch import Elasticsearch
from tensorflow.python.keras._impl.keras.preprocessing.image import img_to_array
from twilio.rest import Client
from flask import Flask, render_template, request, url_for
app = Flask(__name__, template_folder='templates')
App_root=os.path.dirname("maintype")
@app.route("/knn")
def classify(try_vector): # CLASSIFIER OPTION A: using KNN
start_time = time.time()
print("in classifier======================================================")
p_1=pp.load(open('model.p','rb'))
p_2=pp.load(open('model_1.p','rb'))
pred = p_1.predict([try_vector])
v = p_2.inverse_transform(pred)
print(p_2.inverse_transform(pred))
print("My program took", time.time() - start_time, "to run")
return v
def vector(destination, option): # CONVERT AN IMAGE INTO A 128-D VECTOR USING DLIB
predictor_path = "shape_predictor_5_face_landmarks.dat"
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
faces_folder_path ="/home/sethiamayank14/PycharmProjects/project2/src/"+destination
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
img = dlib.load_rgb_image(faces_folder_path)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = sp(img, d)
face_descriptor = facerec.compute_face_descriptor(img, shape)
try_vector=face_descriptor
#print("======================================",try_vector)
if option == "KNN":
d = classify(try_vector) #knn
print(d)
# if(d=="Akash Bhaiya"):
#
# account_sid = 'AC48a2b57630cde3ad7acc662ea91cf5fd'
# auth_token = '101da4d773c821ed0c60d7f7dd17cb98'
# client = Client(account_sid, auth_token)
#
# message = client.messages \
# .create(
# body="Employee Akash entered",
# from_='+15052786996',
# to='+918826748151'
# )
#
# print(message.sid)
# else:
# account_sid = 'AC48a2b57630cde3ad7acc662ea91cf5fd'
# auth_token = '101da4d773c821ed0c60d7f7dd17cb98'
# client = Client(account_sid, auth_token)
#
# message = client.messages \
# .create(
# body="intruder detected",
# from_='+15052786996',
# to='+918826748151'
# )
#
# print(message.sid)
return d
@app.route("/") # this runs first
def index():
print("index working==================================")
return render_template("upload1.html")
@app.route("/upload", methods = ['POST'])
def upload():
# print("heyy========================")
target = os.path.join(App_root, "images/")
# print("hello")
if not os.path.isdir(target):
print("In here")
os.mkdir(target)
print("-----------------------",request.files.getlist("file"))
for file in request.files.getlist("file"):
filename = file.filename
destination ="".join([target, filename])
print(destination)
file.save(destination)
option = request.form['classifier']
print(option)
if( option == "KNN"):
name1 = vector(destination,option)
name1 = str(name1[0])
print(name1, type(name1))
f = open('helloworld.html', 'w')
# name = "Akash Bhaiya"
name = name1 + '.jpg'
print(name)
name2 = "/home/sethiamayank14/PycharmProjects/project2/src/images/"+ name
print(name2)
message = """<html>
<head></head>
<body>
<p>Your input image: </p>
<br>
<img src = "/home/sethiamayank14/PycharmProjects/project2/src/""" + destination + """"/>
<br>
<p>Standard Image:</p>
<br>
<img src = "/home/sethiamayank14/PycharmProjects/project2/src/images/""" + name + """"/>
<p> """ + name1 + """</p>
</body>
</html>"""
print(message)
f.write(message)
f.close()
# Change path to reflect file location
filename = 'helloworld.html'
webbrowser.open_new_tab(filename)
return name
# return name
if __name__== "__main__":
app.run(debug=True,port=5001,host='127.0.0.1')
| [
[
[
7,
9
],
[
788,
790
],
[
3590,
3592
],
[
3656,
3658
],
[
3712,
3714
]
],
[
[
17,
20
]
],
[
[
28,
32
],
[
1678,
1682
],
[
1720,
1724
],
[
1771,
1775
],
[
1833,
1837
]
],
[
[
40,
44
]
],
[
[
52,
55
]
],
[
[
63,
75
],
[
1057,
1059
],
[
1095,
1097
]
],
[
[
106,
126
]
],
[
[
134,
146
]
],
[
[
167,
180
]
],
[
[
243,
253
],
[
5117,
5127
]
],
[
[
273,
278
]
],
[
[
317,
329
]
],
[
[
355,
365
]
],
[
[
373,
384
]
],
[
[
402,
406
]
],
[
[
414,
418
],
[
956,
960
],
[
1269,
1273
]
],
[
[
426,
441
]
],
[
[
460,
465
]
],
[
[
467,
482
]
],
[
[
484,
491
]
],
[
[
508,
513
]
],
[
[
540,
553
]
],
[
[
622,
634
]
],
[
[
659,
665
]
],
[
[
684,
689
],
[
733,
738
]
],
[
[
691,
706
],
[
3444,
3459
]
],
[
[
708,
715
],
[
3765,
3772
],
[
3812,
3819
],
[
4000,
4007
]
],
[
[
717,
724
]
],
[
[
727,
730
],
[
817,
820
],
[
3300,
3303
],
[
3478,
3481
],
[
5242,
5245
]
],
[
[
779,
787
],
[
3603,
3611
]
],
[
[
839,
847
],
[
2177,
2185
]
],
[
[
1326,
1332
],
[
4099,
4105
]
],
[
[
3363,
3368
]
],
[
[
3523,
3529
]
]
] |
from dyn2sel.dcs_techniques import DCSTechnique
import numpy as np
from scipy.stats import mode
class DESDDSel(DCSTechnique):
def predict(self, ensemble, instances, real_labels=None):
return ensemble[ensemble.get_max_accuracy()].predict(instances)
| [
[
[
35,
47
],
[
113,
125
]
],
[
[
55,
66
]
],
[
[
91,
95
]
],
[
[
104,
112
]
]
] |
"""
This module provides the cases to test coexistence between a TDX guest and
non-TD guests. There are two types of non-TD guest:
1. Boot with legacy BIOS: the default loader, used when neither the "-loader"
nor the "-bios" option is passed
2. Boot with OVMF UEFI BIOS: boots with "-loader" => OVMFD.fd compiled from
the latest edk2 project.
"""
import logging
import pytest
from pycloudstack.vmparam import VM_TYPE_LEGACY, VM_TYPE_EFI, VM_TYPE_TD
__author__ = 'cpio'
LOG = logging.getLogger(__name__)
# pylint: disable=invalid-name
pytestmark = [
pytest.mark.vm_image("latest-guest-image"),
pytest.mark.vm_kernel("latest-guest-kernel"),
]
def test_tdguest_with_legacy_base(vm_factory):
"""
Test the different type VM run parallel
Test Steps
----------
1. Launch a TD guest
2. Launch a legacy guest
3. Launch an OVMF guest
"""
LOG.info("Create a TD guest")
td_inst = vm_factory.new_vm(VM_TYPE_TD, auto_start=True)
LOG.info("Create a legacy guest")
legacy_inst = vm_factory.new_vm(VM_TYPE_LEGACY, auto_start=True)
LOG.info("Create an OVMF guest")
efi_inst = vm_factory.new_vm(VM_TYPE_EFI, auto_start=True)
assert td_inst.wait_for_ssh_ready(), "Could not reach TD VM"
assert legacy_inst.wait_for_ssh_ready(), "Could not reach legacy VM"
assert efi_inst.wait_for_ssh_ready(), "Could not reach EFI VM"
| [
[
[
352,
359
],
[
481,
488
]
],
[
[
368,
374
],
[
566,
572
],
[
615,
621
]
],
[
[
409,
423
],
[
1073,
1087
]
],
[
[
425,
436
],
[
1180,
1191
]
],
[
[
438,
448
],
[
966,
976
]
],
[
[
452,
462
]
],
[
[
475,
478
],
[
903,
906
],
[
1002,
1005
],
[
1113,
1116
]
],
[
[
546,
556
]
],
[
[
673,
702
]
]
] |
import sys
import pytest
from pre_commit_hooks.loaderon_hooks.tests.util.test_helpers import perform_test_on_file_expecting_result
from pre_commit_hooks.loaderon_hooks.general_hooks.check_location import main
@pytest.fixture(autouse=True)
def clean_sys_argv():
sys.argv = []
# Each line is a directory that allows certain types of files.
sys.argv.append('--directories')
sys.argv.append(r'.*\/xml')
sys.argv.append('--directories')
sys.argv.append(r'.*\/javascript')
# Each line specifies what types of files can be located inside the directory.
sys.argv.append('--files')
sys.argv.append(r'correct_xml.xml')
sys.argv.append('--files')
sys.argv.append(r'correct_js.js')
yield
def test_locations_ok_1():
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main)
def test_locations_ok_2():
perform_test_on_file_expecting_result('check_location_samples/javascript/correct_js.js', main)
def test_locations_error1():
perform_test_on_file_expecting_result('check_location_samples/xml/incorrect_js.js', main, expected_result=2)
def test_locations_error2():
perform_test_on_file_expecting_result('check_location_samples/not_enabled_directory/incorrect_xml.xml', main, expected_result=2)
def test_locations_arguments_size_mismatch_error():
sys.argv = []
sys.argv.append('--directories')
sys.argv.append(r'.*\/xml')
# Lacking files for this directory
sys.argv.append('--directories')
sys.argv.append(r'.*\/javascript')
sys.argv.append('--files')
sys.argv.append(r'correct_xml.xml')
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main, expected_result=2)
def test_locations_no_arguments_error():
sys.argv = []
with pytest.raises(TypeError) as error:
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main)
assert "'NoneType' object is not iterable" in str(error.value)
| [
[
[
7,
10
],
[
269,
272
],
[
355,
358
],
[
392,
395
],
[
424,
427
],
[
461,
464
],
[
584,
587
],
[
615,
618
],
[
655,
658
],
[
686,
689
],
[
1347,
1350
],
[
1366,
1369
],
[
1403,
1406
],
[
1474,
1477
],
[
1511,
1514
],
[
1551,
1554
],
[
1582,
1585
],
[
1779,
1782
]
],
[
[
19,
25
],
[
214,
220
],
[
1802,
1808
]
],
[
[
95,
132
],
[
763,
800
],
[
886,
923
],
[
1016,
1053
],
[
1160,
1197
],
[
1623,
1660
],
[
1845,
1882
]
],
[
[
206,
210
],
[
847,
851
],
[
975,
979
],
[
1100,
1104
],
[
1264,
1268
],
[
1707,
1711
],
[
1929,
1933
]
],
[
[
247,
261
]
],
[
[
736,
755
]
],
[
[
859,
878
]
],
[
[
987,
1008
]
],
[
[
1131,
1152
]
],
[
[
1295,
1339
]
],
[
[
1738,
1771
]
]
] |
"""
1) "a" + "bc" -> abc
2) 3 * "bc" -> bcbcbc
3) "3" * "bc" -> error as we can't use the * operator on two strings
4) abcd"[2] -> c (Just takes the character at index 2 in the string. a has index 0 and b index 1)
5) "abcd"[0:2] -> ab (Returns the substring from index 0 all the way to index n -1 in this case b)
6) "abcd"[:2] -> ab (Not giving a starting value to slice the string we start at 0)
7) "abcd"[2:] -> cd (When we don't give an end value it goes all the way to the end of the string)
""" | [] |
exp_name = 'basicvsr_vimeo90k_bd'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRNet',
mid_channels=64,
num_blocks=30,
spynet_pretrained='pretrained_models/spynet.pth'),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR'], crop_border=0, convert_to='y')
# dataset settings
train_dataset_type = 'SRVimeo90KMultipleGTDataset'
val_dataset_type = 'SRTestMultipleGTDataset'
test_dataset_type = 'SRVimeo90KDataset'
train_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='MirrorSequence', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
val_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
test_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='MirrorSequence', keys=['lq']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=4, drop_last=True), # 2 gpus
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_train_GT.txt',
pipeline=train_pipeline,
scale=4,
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/Vid4/BDx4',
gt_folder='data/Vid4/GT',
pipeline=val_pipeline,
scale=4,
test_mode=True),
# test
test=dict(
type=test_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_test_GT.txt',
pipeline=test_pipeline,
scale=4,
num_input_frames=7,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=2e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.125)})))
# learning policy
total_iters = 300000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[300000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
[
[
0,
8
],
[
4435,
4443
]
],
[
[
52,
57
]
],
[
[
360,
369
]
],
[
[
392,
400
]
],
[
[
477,
495
],
[
2832,
2850
]
],
[
[
528,
544
],
[
3134,
3150
]
],
[
[
573,
590
],
[
3334,
3351
]
],
[
[
614,
628
],
[
3029,
3043
]
],
[
[
1427,
1439
],
[
3239,
3251
]
],
[
[
1969,
1982
],
[
3513,
3526
]
],
[
[
2498,
2502
]
],
[
[
3613,
3623
]
],
[
[
3810,
3821
]
],
[
[
3831,
3840
]
],
[
[
3962,
3979
]
],
[
[
4090,
4100
]
],
[
[
4159,
4169
]
],
[
[
4314,
4327
]
],
[
[
4355,
4366
]
],
[
[
4390,
4399
]
],
[
[
4409,
4417
]
],
[
[
4446,
4455
]
],
[
[
4463,
4474
]
],
[
[
4482,
4490
]
]
] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='evgflip',
version='0.1.0',
license='Apache License, Version 2.0',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
author='David Bradford',
author_email='david.bradford@mongodb.com',
url='https://github.com/dbradf/evgflip',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=[
'boltons==19.1.0',
'Click==7.0',
'evergreen.py==0.5.0',
'PyYAML==5.4',
'structlog==19.1.0',
],
entry_points='''
[console_scripts]
evg-flip=evgflip.cli:main
''',
)
| [
[
[
71,
86
]
],
[
[
110,
124
]
],
[
[
143,
147
],
[
786,
790
]
],
[
[
168,
176
],
[
755,
763
]
],
[
[
197,
205
],
[
746,
754
]
],
[
[
230,
243
],
[
679,
692
]
],
[
[
267,
272
],
[
344,
349
]
],
[
[
306,
308
],
[
333,
335
]
],
[
[
314,
330
],
[
476,
492
]
]
] |
# This is an example of firing up PyMOL inside of a subordinate
# process via an "import pymol"
#
# NOTE: for this to work, PyMOL must be installed in a
# Python-dependent fashion (e.g. pymol-0_98-bin-win32-py23) etc.
#
# WARNING: stability issues have been known to occur with this
# approach, so anticipate problems...take-down is messy.
#
# WARNING: Right now, there is no way for the main process to know
# when PyMOL is actually initialized and ready to go, so we simply
# sleep a second after importing.
import string
import __main__
# note that passing in a "-z" option would keep the window hidden
# until you called pymol.cmd.window("show").
__main__.pymol_argv= string.split("pymol -qxiF -X 300 -Y 100 -H 400 -W 400")
import pymol
# give PyMOL enough time to initialize (we need to find a safe and
# robust alternative to this stupid delay especially since the
# pymol.finish_launching() method now seems to be broken)
import time
time.sleep(1)
# put up some content
if 1:
pymol.cmd.set("sweep_mode",3)
pymol.cmd.rock()
pymol.cmd.turn("x",180)
pymol.cmd.load("$TUT/1hpv.pdb")
pymol.preset.pretty("1hpv")
pymol.cmd.orient()
pymol.cmd.turn("y",85)
pymol.cmd.zoom("all",20)
pymol.cmd.orient("organic & e. N+O",animate=10)
pymol.cmd.show("sticks","organic")
# play peek-a-boo with the window
if 1:
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
# now quit
print("Quitting...")
time.sleep(1)
print("3...")
time.sleep(1)
print("2...")
time.sleep(1)
print("1...")
time.sleep(1)
print("Die!")
# note, we cannot let the main thread terminate without first calling
# pymol.cmd.quit() which will take-down PyMOL
pymol.cmd.quit()
| [
[
[
520,
526
],
[
677,
683
]
],
[
[
534,
542
],
[
656,
664
]
],
[
[
741,
746
],
[
997,
1002
],
[
1030,
1035
],
[
1050,
1055
],
[
1077,
1082
],
[
1112,
1117
],
[
1143,
1148
],
[
1165,
1170
],
[
1191,
1196
],
[
1219,
1224
],
[
1270,
1275
],
[
1367,
1372
],
[
1436,
1441
],
[
1481,
1486
],
[
1550,
1555
],
[
1595,
1600
],
[
1664,
1669
],
[
1984,
1989
]
],
[
[
944,
948
],
[
949,
953
],
[
1350,
1354
],
[
1419,
1423
],
[
1464,
1468
],
[
1533,
1537
],
[
1578,
1582
],
[
1647,
1651
],
[
1730,
1734
],
[
1764,
1768
],
[
1798,
1802
],
[
1832,
1836
]
]
] |
import unittest
def linear_sum(S, n):
"""Return the sum of the first n numbers of sequence S."""
if n == 0:
return 0
else:
return linear_sum(S, n - 1) + S[n - 1]
class TestLinearSum(unittest.TestCase):
def test_linear_sum(self):
S = [4, 3, 6, 2, 8]
self.assertEqual(23, linear_sum(S, 5))
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
15
],
[
212,
220
],
[
371,
379
]
],
[
[
21,
31
],
[
159,
169
],
[
321,
331
]
],
[
[
198,
211
]
]
] |
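A worked expansion of the recursion for the test case above: linear_sum(S, 5) = linear_sum(S, 4) + 8 = (linear_sum(S, 3) + 2) + 8 = ((linear_sum(S, 2) + 6) + 2) + 8 = (((linear_sum(S, 1) + 3) + 6) + 2) + 8 = ((((0 + 4) + 3) + 6) + 2) + 8 = 23.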
from django.urls import path
from . import views
# Here are our app-level URL connections (these connections only affect this app, not the entire project).
# Each connection routes the request to a view.
# These connections need to be included from the root URLconf, because that's where the requests come from.
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/result/', views.ResultView.as_view(), name='result'),
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
[
[
24,
28
],
[
334,
338
],
[
389,
393
],
[
455,
459
],
[
528,
532
]
],
[
[
44,
49
],
[
343,
348
],
[
407,
412
],
[
480,
485
],
[
560,
565
]
],
[
[
295,
303
]
],
[
[
314,
325
]
]
] |
"""
Menu handling file
- Every menu is of the Menu class
- Menus are initialized with an array of options
- What a menu option does is determined by the following table:
- "set_state_map": s.set_state('map')
- "exit": exit()
"""
from config import *
import sys
class Menu:
def __init__(self, options, sel_index, results):
self.options = options # Array of strings
self.results = results # Array of strings
self._sel_index = sel_index
self.first_print = True
@property
def sel_index(self):
return self._sel_index
@sel_index.setter
def sel_index(self, value):
length = len(self.options)
if value > length:
self._sel_index = 1
elif value < 1:
self._sel_index = length
else:
self._sel_index = value
@sel_index.deleter
def sel_index(self):
del self._sel_index
def print_menu_center(self):
if not self.first_print:
print(t.move_up(len(self.options) + 1))
for _ in range(len(self.options) + 1):
print(t.clear_eol)
print(t.move_up(len(self.options) + 2))
count = 1
for option in self.options:
if self.sel_index == count:
print(t.center("> " + str(count) + ". " + option))
else:
print(t.center(str(count) + ". " + option))
count += 1
self.first_print = False
    # Prints a menu at the cursor, where x and y are the top left of the menu
# Specifically meant for use in the 'battle' state
def battle_menu(self):
output = []
count = 1
for option in self.options:
if self.sel_index == count:
output.append("> " + str(count) + ". " + option)
else:
output.append(str(count) + ". " + option)
count += 1
return output
def decision(self):
choice = self.results[(self.sel_index-1)]
if choice == "set_state_map":
s.set_state('map')
elif choice == "exit":
sys.exit()
| [
[
[
252,
253
],
[
996,
997
],
[
1103,
1104
],
[
1134,
1135
],
[
1285,
1286
],
[
1370,
1371
],
[
2047,
2048
]
],
[
[
261,
264
],
[
2109,
2112
]
],
[
[
273,
277
]
]
] |
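A minimal usage sketch for the Menu class above, exercising the wrap-around behaviour of the sel_index setter and battle_menu; the option strings are illustrative and the sketch assumes the module's config import succeeds:
menu = Menu(["Attack", "Run"], 1, ["set_state_map", "exit"])
menu.sel_index += 1           # 2: selection moves to the second option
menu.sel_index += 1           # 3 > len(options), so the setter wraps back to 1
print(menu.battle_menu())     # ['> 1. Attack', '2. Run']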
from enum import Enum
class GameScenes(Enum):
FIRST_LEVEL = 1
SECOND_LEVEL = 2
THIRD_LEVEL = 3
FOURTH_LEVEL = 4
FIFTH_LEVEL = 5
| [
[
[
17,
21
],
[
41,
45
]
],
[
[
30,
40
]
]
] |
"""
login app
"""
from zoom.apps import App
class MyApp(App):
pass
app = MyApp()
| [
[
[
45,
48
],
[
63,
66
]
],
[
[
57,
62
],
[
86,
91
]
],
[
[
80,
83
]
]
] |
from cohere import Diffractometer
class Diffractometer_34idc(Diffractometer):
"""
Subclass of Diffractometer. Encapsulates "34idc" diffractometer.
"""
name = "34idc"
sampleaxes = ('y+', 'z-', 'y+') # in xrayutilities notation
detectoraxes = ('y+', 'x-')
incidentaxis = (0, 0, 1)
sampleaxes_name = ('th', 'chi', 'phi') # using the spec mnemonics for scan id.
detectoraxes_name = ('delta', 'gamma')
def __init__(self):
super(Diffractometer_34idc, self).__init__('34idc')
def create_diffractometer(diff_name):
if diff_name == '34idc':
return Diffractometer_34idc()
else:
print ('diffractometer ' + diff_name + ' not defined.')
def verify_diffractometer(diff_name):
if diff_name == '34idc':
return True
else:
return False
| [
[
[
19,
33
],
[
63,
77
]
],
[
[
42,
62
],
[
476,
496
],
[
606,
626
]
],
[
[
528,
549
]
],
[
[
709,
730
]
]
] |
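A small usage sketch for the factory and verifier above (assuming the cohere dependency resolves; the '28id' name is just an illustrative unknown diffractometer):
diff = create_diffractometer('34idc')
print(diff.name)                        # 34idc
print(verify_diffractometer('34idc'))   # True
print(verify_diffractometer('28id'))    # False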
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CheckDomainSunriseClaimRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'CheckDomainSunriseClaim')
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | [
[
[
826,
836
],
[
874,
884
],
[
911,
921
]
],
[
[
843,
873
]
]
] |
# -*- coding: utf-8 -*-
"""
Statement pre-processors.
"""
def clean_whitespace(chatbot, statement):
"""
Remove any consecutive whitespace characters from the statement text.
"""
import re
# Replace linebreaks and tabs with spaces
statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    # Remove any leading or trailing whitespace
statement.text = statement.text.strip()
# Remove consecutive spaces
statement.text = re.sub(' +', ' ', statement.text)
return statement
def unescape_html(chatbot, statement):
"""
Convert escaped html characters into unescaped html characters.
For example: "<b>" becomes "<b>".
"""
import sys
# Replace HTML escape characters
if sys.version_info[0] < 3:
from HTMLParser import HTMLParser
html = HTMLParser()
else:
import html
statement.text = html.unescape(statement.text)
return statement
def convert_to_ascii(chatbot, statement):
"""
Converts unicode characters to ASCII character equivalents.
For example: "på fédéral" becomes "pa federal".
"""
import unicodedata
import sys
# Normalize unicode characters
if sys.version_info[0] < 3:
statement.text = unicode(statement.text) # NOQA
text = unicodedata.normalize('NFKD', statement.text)
text = text.encode('ascii', 'ignore').decode('utf-8')
statement.text = str(text)
return statement
| [
[
[
64,
80
]
],
[
[
555,
568
]
],
[
[
983,
999
]
]
] |
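A minimal sketch of the unescape_html preprocessor above, assuming Python 3; the FakeStatement stand-in is hypothetical (just enough object to carry a .text attribute), not the real chatterbot class:
class FakeStatement:
    def __init__(self, text):
        self.text = text  # the only attribute the preprocessors read and write

stmt = unescape_html(None, FakeStatement("&lt;b&gt;bold&lt;/b&gt;"))  # the chatbot argument is unused by the preprocessor
print(stmt.text)   # <b>bold</b>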
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class NaluWind(CMakePackage):
"""Nalu-Wind: Wind energy focused variant of Nalu."""
homepage = "https://github.com/exawind/nalu-wind"
git = "https://github.com/exawind/nalu-wind.git"
maintainers = ['jrood-nrel']
tags = ['ecp', 'ecp-apps']
version('master', branch='master')
# Options
variant('shared', default=(sys.platform != 'darwin'),
description='Build dependencies as shared libraries')
variant('pic', default=True,
description='Position independent code')
# Third party libraries
variant('cuda', default=False,
description='Compile with CUDA support')
variant('openfast', default=False,
description='Compile with OpenFAST support')
variant('tioga', default=False,
description='Compile with Tioga support')
variant('hypre', default=False,
description='Compile with Hypre support')
variant('catalyst', default=False,
description='Compile with Catalyst support')
variant('fftw', default=False,
description='Compile with FFTW support')
# Required dependencies
depends_on('mpi')
depends_on('yaml-cpp@0.5.3:', when='+shared')
depends_on('yaml-cpp~shared@0.5.3:', when='~shared')
# Cannot build Trilinos as a shared library with STK on Darwin
# which is why we have a 'shared' variant for Nalu-Wind
# https://github.com/trilinos/Trilinos/issues/2994
depends_on('trilinos+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+shared')
depends_on('trilinos~shared+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='~shared')
depends_on('trilinos~shared+cuda+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+cuda')
# Optional dependencies
depends_on('openfast+cxx', when='+openfast+shared')
depends_on('openfast+cxx~shared', when='+openfast~shared')
depends_on('tioga', when='+tioga+shared')
depends_on('tioga~shared', when='+tioga~shared')
depends_on('hypre+mpi+int64', when='+hypre+shared')
depends_on('hypre+mpi+int64~shared', when='+hypre~shared')
depends_on('trilinos-catalyst-ioss-adapter', when='+catalyst')
# FFTW doesn't have a 'shared' variant at this moment
depends_on('fftw+mpi', when='+fftw')
depends_on('cuda', when='+cuda')
def setup_environment(self, spack_env, run_env):
if '+cuda' in self.spec:
spack_env.set('NVCC_WRAPPER_DEFAULT_COMPILER', spack_cxx)
def cmake_args(self):
spec = self.spec
options = []
options.extend([
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
])
if '+cuda' in self.spec:
options.extend([
'-DCMAKE_CXX_COMPILER=%s' % join_path(self.spec['trilinos'].prefix, 'bin', 'nvcc_wrapper'),
])
else:
options.extend([
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
])
options.extend([
'-DTrilinos_DIR:PATH=%s' % spec['trilinos'].prefix,
'-DYAML_DIR:PATH=%s' % spec['yaml-cpp'].prefix,
'-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=%s' % (
'ON' if '+pic' in spec else 'OFF'),
])
if '+openfast' in spec:
options.extend([
'-DENABLE_OPENFAST:BOOL=ON',
'-DOpenFAST_DIR:PATH=%s' % spec['openfast'].prefix
])
else:
options.append('-DENABLE_OPENFAST:BOOL=OFF')
if '+tioga' in spec:
options.extend([
'-DENABLE_TIOGA:BOOL=ON',
'-DTIOGA_DIR:PATH=%s' % spec['tioga'].prefix
])
else:
options.append('-DENABLE_TIOGA:BOOL=OFF')
if '+hypre' in spec:
options.extend([
'-DENABLE_HYPRE:BOOL=ON',
'-DHYPRE_DIR:PATH=%s' % spec['hypre'].prefix
])
else:
options.append('-DENABLE_HYPRE:BOOL=OFF')
if '+catalyst' in spec:
options.extend([
'-DENABLE_PARAVIEW_CATALYST:BOOL=ON',
'-DPARAVIEW_CATALYST_INSTALL_PATH:PATH=%s' %
spec['trilinos-catalyst-ioss-adapter'].prefix
])
else:
options.append('-DENABLE_PARAVIEW_CATALYST:BOOL=OFF')
if '+fftw' in spec:
options.extend([
'-DENABLE_FFTW:BOOL=ON',
'-DFFTW_DIR:PATH=%s' % spec['fftw'].prefix
])
else:
options.append('-DENABLE_FFTW:BOOL=OFF')
if '+cuda' in spec:
options.extend([
'-DENABLE_CUDA:BOOL=ON',
])
if 'darwin' in spec.architecture:
options.append('-DCMAKE_MACOSX_RPATH:BOOL=ON')
return options
| [
[
[
216,
217
],
[
246,
258
],
[
503,
510
],
[
557,
564
],
[
682,
689
],
[
796,
803
],
[
884,
891
],
[
980,
987
],
[
1070,
1077
],
[
1160,
1167
],
[
1256,
1263
],
[
1373,
1383
],
[
1395,
1405
],
[
1445,
1455
],
[
1684,
1694
],
[
1853,
1863
],
[
2029,
2039
],
[
2236,
2246
],
[
2292,
2302
],
[
2355,
2365
],
[
2401,
2411
],
[
2454,
2464
],
[
2510,
2520
],
[
2573,
2583
],
[
2698,
2708
],
[
2739,
2749
],
[
2918,
2927
],
[
3266,
3275
]
],
[
[
225,
228
],
[
584,
587
]
],
[
[
237,
245
]
]
] |
sentence = input().split()
ae = 0
for word in sentence:
if 'ae' in word:
ae += 1
if ae/len(sentence) >= 0.4:
print("dae ae ju traeligt va")
else:
print("haer talar vi rikssvenska") | [
[
[
0,
8
],
[
47,
55
],
[
105,
113
]
],
[
[
28,
30
],
[
86,
88
],
[
98,
100
]
],
[
[
39,
43
],
[
72,
76
]
]
] |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass, viewkeys, PY3
import types
try:
from collections import OrderedDict
except ImportError:
from pip._vendor.ordereddict import OrderedDict
from . import _inputstream
from . import _tokenizer
from . import treebuilders
from .treebuilders.base import Marker
from . import _utils
from .constants import (
spaceCharacters, asciiUpper2Lower,
specialElements, headingElements, cdataElements, rcdataElements,
tokenTypes, tagTokenTypes,
namespaces,
htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
adjustForeignAttributes as adjustForeignAttributesMap,
adjustMathMLAttributes, adjustSVGAttributes,
E,
ReparseException
)
def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, **kwargs)
def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, **kwargs)
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.scripting = scripting
self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
self.reset()
try:
self.mainLoop()
except ReparseException:
self.reset()
self.mainLoop()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False # pylint:disable=redefined-variable-type
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0].name
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
prev_token = None
new_token = token
while new_token is not None:
prev_token = new_token
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
type == StartTagToken and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and prev_token["selfClosing"] and
not prev_token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": prev_token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, *args, **kwargs):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
scripting - treat noscript elements as if javascript was turned on
"""
self._parse(stream, False, None, *args, **kwargs)
return self.tree.getDocument()
def parseFragment(self, stream, *args, **kwargs):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
scripting - treat noscript elements as if javascript was turned on
"""
self._parse(stream, True, *args, **kwargs)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars=None):
# XXX The idea is to make errorcode mandatory.
if datavars is None:
datavars = {}
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError(E[errorcode] % datavars)
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
raw = token["data"]
token["data"] = OrderedDict(raw)
if len(raw) > len(token["data"]):
# we had some duplicated attribute, fix so first wins
token["data"].update(raw[::-1])
return token
def adjustMathMLAttributes(self, token):
adjust_attributes(token, adjustMathMLAttributes)
def adjustSVGAttributes(self, token):
adjust_attributes(token, adjustSVGAttributes)
def adjustForeignAttributes(self, token):
adjust_attributes(token, adjustForeignAttributesMap)
def reparseTokenNormal(self, token):
# pylint:disable=unused-argument
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
@_utils.memoize
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
# pylint:disable=unused-argument
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html" or
publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//")) or
publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html") or
publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None or
systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//")) or
publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noframes", "style"), self.startTagNoFramesStyle),
("noscript", self.startTagNoscript),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = _inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagNoscript(self, token):
if self.parser.scripting:
self.parser.parseRCDataRawtext(token, "RAWTEXT")
else:
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inHeadNoscript"]
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
class InHeadNoscriptPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
(("head", "noscript"), self.startTagHeadNoscript),
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("noscript", self.endTagNoscript),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.parseError("eof-in-head-noscript")
self.anythingElse()
return True
def processComment(self, token):
return self.parser.phases["inHead"].processComment(token)
def processCharacters(self, token):
self.parser.parseError("char-in-head-noscript")
self.anythingElse()
return token
def processSpaceCharacters(self, token):
return self.parser.phases["inHead"].processSpaceCharacters(token)
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBaseLinkCommand(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagHeadNoscript(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
self.anythingElse()
return token
def endTagNoscript(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "noscript", "Expected noscript got %s" % node.name
self.parser.phase = self.parser.phases["inHead"]
def endTagBr(self, token):
self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
# Caller must raise parse error first!
self.endTagNoscript(impliedTagToken("noscript"))
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Set this to the default handler
self.processSpaceCharacters = self.processSpaceCharactersNonPre
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
("noscript", self.startTagNoscript),
(("noembed", "noframes"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
return (node1.name == node2.name and
node1.namespace == node2.namespace and
node1.attributes == node2.attributes)
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharactersNonPre(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1 or
self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagNoscript(self, token):
if self.parser.scripting:
self.startTagRawtext(token)
else:
self.startTagOther(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"gotName": "body", "expectedName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
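            # Roughly: this repairs mis-nested formatting markup such as
            # "<b>1<p>2</b>3</p>" by cloning the open formatting element and
            # reparenting the misplaced content under the clone, following the
            # numbered steps below.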
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name.translate(asciiUpper2Lower) != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
# If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
# pylint:enable=unused-argument
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
"inHeadNoscript": InHeadNoscriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def adjust_attributes(token, replacements):
if PY3 or _utils.PY27:
needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
else:
needs_adjustment = frozenset(token['data']) & frozenset(replacements)
if needs_adjustment:
token['data'] = OrderedDict((replacements.get(k, k), v)
for k, v in token['data'].items())
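# adjust_attributes is the helper behind the parser's adjustMathMLAttributes,
# adjustSVGAttributes and adjustForeignAttributes hooks: it renames token
# attribute keys according to the given replacements map (e.g. "xlink:href"
# to its namespaced tuple form).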
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| [
[
[
23,
38
]
],
[
[
40,
48
]
],
[
[
50,
66
]
],
[
[
95,
109
],
[
14483,
14497
]
],
[
[
111,
119
],
[
116535,
116543
],
[
116561,
116569
]
],
[
[
121,
124
],
[
116488,
116491
]
],
[
[
133,
138
],
[
1586,
1591
]
],
[
[
173,
184
],
[
10545,
10556
],
[
116721,
116732
]
],
[
[
245,
256
],
[
10545,
10556
],
[
116721,
116732
]
],
[
[
272,
284
],
[
27882,
27894
],
[
27977,
27989
]
],
[
[
299,
309
],
[
2914,
2924
]
],
[
[
325,
337
],
[
929,
941
],
[
1183,
1195
],
[
2447,
2459
]
],
[
[
369,
375
],
[
39196,
39202
],
[
47469,
47475
],
[
73459,
73465
],
[
90508,
90514
]
],
[
[
391,
397
],
[
13155,
13161
],
[
116495,
116501
],
[
23889,
23895
],
[
24115,
24121
],
[
25517,
25523
],
[
26123,
26129
],
[
29786,
29792
],
[
30145,
30151
],
[
32292,
32298
],
[
32780,
32786
],
[
35158,
35164
],
[
37685,
37691
],
[
69323,
69329
],
[
69446,
69452
],
[
70776,
70782
],
[
71441,
71447
],
[
78818,
78824
],
[
79139,
79145
],
[
81928,
81934
],
[
82152,
82158
],
[
84314,
84320
],
[
84711,
84717
],
[
88663,
88669
],
[
89026,
89032
],
[
92407,
92413
],
[
92726,
92732
],
[
95585,
95591
],
[
96037,
96043
],
[
99729,
99735
],
[
99987,
99993
],
[
107355,
107361
],
[
107536,
107542
],
[
109272,
109278
],
[
109606,
109612
],
[
111579,
111585
],
[
111813,
111819
],
[
112879,
112885
],
[
114291,
114297
]
],
[
[
427,
442
],
[
41143,
41158
],
[
77377,
77392
],
[
104406,
104421
]
],
[
[
444,
460
],
[
4961,
4977
],
[
16860,
16876
],
[
48728,
48744
],
[
74843,
74859
],
[
106175,
106191
],
[
106361,
106377
]
],
[
[
466,
481
],
[
44313,
44328
],
[
63352,
63367
],
[
69029,
69044
]
],
[
[
483,
498
],
[
35839,
35854
],
[
38292,
38307
],
[
45126,
45141
],
[
59378,
59393
],
[
59702,
59717
],
[
59869,
59884
]
],
[
[
500,
513
],
[
3486,
3499
]
],
[
[
515,
529
],
[
3602,
3616
]
],
[
[
535,
545
],
[
5333,
5343
],
[
5389,
5399
],
[
5443,
5453
],
[
5488,
5498
],
[
5532,
5542
],
[
5577,
5587
],
[
5625,
5635
],
[
10461,
10471
],
[
117003,
117013
],
[
13365,
13375
],
[
50710,
50720
],
[
77446,
77456
]
],
[
[
547,
560
],
[
13706,
13719
]
],
[
[
566,
576
],
[
4795,
4805
],
[
6807,
6817
],
[
53767,
53777
],
[
54347,
54357
],
[
105424,
105434
],
[
105554,
105564
]
],
[
[
582,
610
],
[
5112,
5140
]
],
[
[
612,
646
],
[
5247,
5281
]
],
[
[
652,
705
],
[
11028,
11054
]
],
[
[
711,
733
],
[
10827,
10849
]
],
[
[
735,
754
],
[
10927,
10946
]
],
[
[
760,
761
],
[
10303,
10304
]
],
[
[
767,
783
],
[
3048,
3064
]
],
[
[
792,
797
]
],
[
[
1078,
1091
]
],
[
[
1361,
1387
],
[
14348,
14374
]
],
[
[
1809,
1819
],
[
978,
988
],
[
1232,
1242
]
],
[
[
13174,
13183
],
[
2660,
2669
]
],
[
[
116441,
116458
],
[
10802,
10819
],
[
10902,
10919
],
[
11003,
11020
]
],
[
[
116838,
116853
],
[
22712,
22727
],
[
24346,
24361
],
[
24549,
24564
],
[
25000,
25015
],
[
25135,
25150
],
[
29597,
29612
],
[
32104,
32119
],
[
34607,
34622
],
[
42948,
42963
],
[
43152,
43167
],
[
43619,
43634
],
[
44211,
44226
],
[
44560,
44575
],
[
44774,
44789
],
[
45054,
45069
],
[
45651,
45666
],
[
46539,
46554
],
[
47025,
47040
],
[
47652,
47667
],
[
48037,
48052
],
[
49188,
49203
],
[
49640,
49655
],
[
50178,
50193
],
[
50320,
50335
],
[
50388,
50403
],
[
51035,
51050
],
[
51259,
51274
],
[
51318,
51333
],
[
51384,
51399
],
[
52333,
52348
],
[
55434,
55449
],
[
55573,
55588
],
[
57174,
57189
],
[
68388,
68403
],
[
73846,
73861
],
[
74189,
74204
],
[
74491,
74506
],
[
80051,
80066
],
[
81373,
81388
],
[
82714,
82729
],
[
82944,
82959
],
[
83338,
83353
],
[
83992,
84007
],
[
86307,
86322
],
[
86810,
86825
],
[
87945,
87960
],
[
90637,
90652
],
[
91407,
91422
],
[
91793,
91808
],
[
93191,
93206
],
[
93317,
93332
],
[
97418,
97433
],
[
97654,
97669
],
[
100599,
100614
],
[
101025,
101040
]
],
[
[
117103,
117113
],
[
10292,
10302
]
]
] |
from docutils.parsers.rst import roles
from docutils import nodes
from docutils.parsers.rst.states import Inliner
import docutils.parsers.rst.roles
def strike_role(role, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
"""
USAGE: :del:`your context`
    :param role: del
    :param rawtext: :del:`your context`
:param text: your context
:param lineno:
:param inliner:
:param options:
:param content:
:return:
"""
# roles.set_classes(options)
# options.setdefault('classes', []).append("mys")
node = nodes.inline(rawtext, text, **dict(classes=['strike']))
return [node], []
def setup(app):
roles.register_canonical_role('del', strike_role)
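# Minimal usage sketch (assuming this module is loaded as a docutils/Sphinx
# extension and a CSS rule for the "strike" class exists): once setup(app) has
# run, reStructuredText such as :del:`old wording` renders as an inline node
# carrying class="strike".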
| [
[
[
33,
38
],
[
682,
687
]
],
[
[
60,
65
],
[
582,
587
]
],
[
[
106,
113
],
[
204,
211
]
],
[
[
121,
147
]
],
[
[
154,
165
],
[
719,
730
]
],
[
[
666,
671
]
]
] |
print("Hello Github!")
| [] |
# Author: Khalid - naam toh suna hi hoga ("you must have heard the name")
# Steps to run ->
# :~$ python yoyo.py
from flask import Flask
from flask import request
from flask import render_template
import stringComparison
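# Note: stringComparison is a local helper module not shown in this snippet; it
# is assumed to expose extremelySimplePlagiarismChecker(text1, text2) returning
# a 0-100 similarity percentage, compared against a 50% threshold below.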
app = Flask(__name__)
@app.route('/')
def my_form():
return render_template("my-form.html")
@app.route('/', methods=['POST'])
def my_form_post():
text1 = request.form['text1']
text2 = request.form['text2']
plagiarismPercent = stringComparison.extremelySimplePlagiarismChecker(text1,text2)
    if plagiarismPercent > 50:
        return "<h1>Plagiarism Detected !</h1>"
    else:
        return "<h1>No Plagiarism Detected !</h1>"
if __name__ == '__main__':
app.run()
| [
[
[
98,
103
],
[
195,
200
]
],
[
[
122,
129
],
[
354,
361
],
[
388,
395
]
],
[
[
148,
163
],
[
254,
269
]
],
[
[
171,
187
],
[
434,
450
]
],
[
[
189,
192
],
[
213,
216
],
[
288,
291
],
[
671,
674
]
],
[
[
232,
239
]
],
[
[
325,
337
]
]
] |
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import types
import sys
from pysnooper.utils import truncate
from python_toolbox import sys_tools, temp_file_tools
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry)
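# assert_output (from .utils) checks a captured snoop trace line by line against
# the ordered sequence of expected entry matchers passed to it.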
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123",args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi",args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert len(main_thread) == len(calls[1])
assert len(main_thread) == len(calls[2])
main_thread_call_str = main_thread.find("call")
assert main_thread_call_str == calls[1].find("call")
assert main_thread_call_str == calls[2].find("call")
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_callable():
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_watch():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
))
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_watch_explode():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_variables_classes():
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), # doesn't have attributes
pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_single_watch_no_comma():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo')
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_long_variable():
@pysnooper.snoop()
def my_function():
foo = list(range(1000))
return foo
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_repr_exception():
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop()
def my_function():
bad = Bad()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_method_and_prefix():
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ')
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
),
prefix='ZZZ'
)
def test_file_output():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_confusing_decorator_lines():
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
)
)
def test_lambda():
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
)
)
def test_unavailable_source():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder, \
sys_tools.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
)
)
def test_no_overwrite_by_default():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_overwrite():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_error_in_overwrite_argument():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
def test_with_block():
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop()
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in first call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
),
)
def test_with_block_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_cellvars():
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_var_order():
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
)
)
def test_custom_repr():
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')))
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
)
)
| [
[
[
108,
110
],
[
576,
578
],
[
5350,
5352
],
[
12850,
12852
],
[
16064,
16066
],
[
17144,
17146
],
[
25694,
25696
],
[
26825,
26827
],
[
28218,
28220
],
[
29876,
29878
],
[
31958,
31960
]
],
[
[
118,
126
],
[
17922,
17930
]
],
[
[
134,
143
],
[
2304,
2313
],
[
2421,
2430
]
],
[
[
151,
156
]
],
[
[
164,
167
],
[
29912,
29915
],
[
29965,
29968
]
],
[
[
197,
205
],
[
29370,
29378
]
],
[
[
233,
242
],
[
1362,
1371
],
[
2157,
2166
],
[
6415,
6424
],
[
7823,
7832
],
[
9505,
9514
],
[
10715,
10724
],
[
11538,
11547
],
[
12304,
12313
],
[
14311,
14320
],
[
17763,
17772
],
[
18246,
18255
],
[
23050,
23059
]
],
[
[
244,
259
],
[
15158,
15173
],
[
17683,
17698
],
[
18842,
18857
],
[
19897,
19912
],
[
21367,
21382
]
],
[
[
267,
273
],
[
21446,
21452
],
[
30613,
30619
]
],
[
[
282,
291
],
[
596,
605
],
[
1243,
1252
],
[
2038,
2047
],
[
5420,
5429
],
[
6185,
6194
],
[
7607,
7616
],
[
9150,
9159
],
[
9186,
9195
],
[
9233,
9242
],
[
9295,
9304
],
[
9330,
9339
],
[
10581,
10590
],
[
11436,
11445
],
[
12233,
12242
],
[
13065,
13074
],
[
14140,
14149
],
[
15268,
15277
],
[
16165,
16174
],
[
17176,
17185
],
[
19034,
19043
],
[
20089,
20098
],
[
21524,
21533
],
[
22409,
22418
],
[
28373,
28382
],
[
30007,
30016
],
[
32283,
32292
],
[
25948,
25957
],
[
27057,
27066
]
],
[
[
324,
341
],
[
21710,
21727
],
[
21748,
21765
],
[
21788,
21805
],
[
21830,
21847
],
[
21875,
21892
],
[
21922,
21939
],
[
21979,
21996
],
[
22022,
22039
],
[
22067,
22084
],
[
22109,
22126
],
[
22159,
22176
],
[
22197,
22214
],
[
22235,
22252
],
[
22275,
22292
]
],
[
[
362,
375
],
[
794,
807
],
[
1583,
1596
],
[
2974,
2987
],
[
5614,
5627
],
[
6632,
6645
],
[
8040,
8053
],
[
9722,
9735
],
[
10932,
10945
],
[
11824,
11837
],
[
12521,
12534
],
[
13248,
13261
],
[
14551,
14564
],
[
15537,
15550
],
[
16497,
16510
],
[
17311,
17324
],
[
18476,
18489
],
[
19385,
19398
],
[
20450,
20463
],
[
23257,
23270
],
[
26120,
26133
],
[
27228,
27241
],
[
28480,
28493
],
[
30763,
30776
],
[
32604,
32617
]
],
[
[
377,
397
],
[
29671,
29691
],
[
29709,
29729
],
[
29804,
29824
]
],
[
[
399,
412
],
[
847,
860
],
[
985,
998
],
[
1054,
1067
],
[
1636,
1649
],
[
1774,
1787
],
[
1843,
1856
],
[
3027,
3040
],
[
3389,
3402
],
[
3570,
3583
],
[
3835,
3848
],
[
4191,
4204
],
[
4369,
4382
],
[
4631,
4644
],
[
4927,
4940
],
[
5075,
5088
],
[
5667,
5680
],
[
5805,
5818
],
[
5874,
5887
],
[
6685,
6698
],
[
6719,
6732
],
[
6852,
6865
],
[
6886,
6899
],
[
6927,
6940
],
[
7018,
7031
],
[
7080,
7093
],
[
7121,
7134
],
[
7213,
7226
],
[
7275,
7288
],
[
7317,
7330
],
[
8093,
8106
],
[
8197,
8210
],
[
8230,
8243
],
[
8273,
8286
],
[
8316,
8329
],
[
8391,
8404
],
[
8428,
8441
],
[
8472,
8485
],
[
8541,
8554
],
[
8575,
8588
],
[
8624,
8637
],
[
8673,
8686
],
[
8722,
8735
],
[
8786,
8799
],
[
8820,
8833
],
[
8870,
8883
],
[
9775,
9788
],
[
9885,
9898
],
[
9918,
9931
],
[
9961,
9974
],
[
10029,
10042
],
[
10062,
10075
],
[
10102,
10115
],
[
10167,
10180
],
[
10202,
10215
],
[
10249,
10262
],
[
10296,
10309
],
[
10985,
10998
],
[
11102,
11115
],
[
11161,
11174
],
[
11248,
11261
],
[
11972,
11985
],
[
12574,
12587
],
[
12691,
12704
],
[
13301,
13314
],
[
13330,
13343
],
[
13423,
13436
],
[
13452,
13465
],
[
13545,
13558
],
[
13574,
13587
],
[
13667,
13680
],
[
13785,
13798
],
[
13903,
13916
],
[
14604,
14617
],
[
14653,
14666
],
[
14815,
14828
],
[
14921,
14934
],
[
15602,
15615
],
[
15754,
15767
],
[
15831,
15844
],
[
16550,
16563
],
[
16681,
16694
],
[
16735,
16748
],
[
16817,
16830
],
[
17364,
17377
],
[
18541,
18554
],
[
19460,
19473
],
[
19610,
19623
],
[
19687,
19700
],
[
20515,
20528
],
[
20665,
20678
],
[
20742,
20755
],
[
20912,
20925
],
[
21062,
21075
],
[
21139,
21152
],
[
23338,
23351
],
[
23375,
23388
],
[
23410,
23423
],
[
23445,
23458
],
[
23480,
23493
],
[
23514,
23527
],
[
23548,
23561
],
[
23662,
23675
],
[
23699,
23712
],
[
23734,
23747
],
[
23769,
23782
],
[
23804,
23817
],
[
23838,
23851
],
[
23872,
23885
],
[
24000,
24013
],
[
24038,
24051
],
[
24336,
24349
],
[
24374,
24387
],
[
24669,
24682
],
[
24707,
24720
],
[
25097,
25110
],
[
25135,
25148
],
[
25426,
25439
],
[
25464,
25477
],
[
26173,
26186
],
[
26202,
26215
],
[
26231,
26244
],
[
26304,
26317
],
[
26333,
26346
],
[
26426,
26439
],
[
26455,
26468
],
[
26548,
26561
],
[
26666,
26679
],
[
27281,
27294
],
[
27310,
27323
],
[
27339,
27352
],
[
27411,
27424
],
[
27502,
27515
],
[
27557,
27570
],
[
27651,
27664
],
[
27708,
27721
],
[
27765,
27778
],
[
27820,
27833
],
[
27849,
27862
],
[
27943,
27956
],
[
28533,
28546
],
[
28562,
28575
],
[
28641,
28654
],
[
28680,
28693
],
[
28719,
28732
],
[
28760,
28773
],
[
28882,
28895
],
[
28942,
28955
],
[
29001,
29014
],
[
29062,
29075
],
[
29102,
29115
],
[
29141,
29154
],
[
30816,
30829
],
[
30854,
30867
],
[
30933,
30946
],
[
30962,
30975
],
[
31112,
31125
],
[
31150,
31163
],
[
31179,
31192
],
[
31208,
31221
],
[
31262,
31275
],
[
31357,
31370
],
[
31545,
31558
],
[
31583,
31596
],
[
31612,
31625
],
[
31641,
31654
],
[
31670,
31683
],
[
31699,
31712
],
[
31753,
31766
],
[
32657,
32670
],
[
32748,
32761
],
[
32825,
32838
]
],
[
[
434,
443
],
[
905,
914
],
[
1694,
1703
],
[
3085,
3094
],
[
3893,
3902
],
[
4689,
4698
],
[
5725,
5734
],
[
6769,
6778
],
[
8127,
8136
],
[
9815,
9824
],
[
11019,
11028
],
[
11877,
11886
],
[
12608,
12617
],
[
13359,
13368
],
[
13481,
13490
],
[
13603,
13612
],
[
14709,
14718
],
[
15665,
15674
],
[
16608,
16617
],
[
16875,
16884
],
[
17401,
17410
],
[
18590,
18599
],
[
19522,
19531
],
[
20577,
20586
],
[
20974,
20983
],
[
24072,
24081
],
[
24408,
24417
],
[
24741,
24750
],
[
25169,
25178
],
[
25498,
25507
],
[
26362,
26371
],
[
26484,
26493
],
[
27440,
27449
],
[
27589,
27598
],
[
27881,
27890
],
[
28801,
28810
],
[
30883,
30892
],
[
31237,
31246
],
[
31728,
31737
],
[
32698,
32707
]
],
[
[
445,
454
],
[
953,
962
],
[
1022,
1031
],
[
1091,
1100
],
[
1742,
1751
],
[
1811,
1820
],
[
1880,
1889
],
[
3245,
3254
],
[
3426,
3435
],
[
3607,
3616
],
[
4050,
4059
],
[
4228,
4237
],
[
4406,
4415
],
[
4816,
4825
],
[
4964,
4973
],
[
5112,
5121
],
[
5773,
5782
],
[
5842,
5851
],
[
5911,
5920
],
[
6814,
6823
],
[
6993,
7002
],
[
7055,
7064
],
[
7188,
7197
],
[
7250,
7259
],
[
7384,
7393
],
[
8172,
8181
],
[
8366,
8375
],
[
8516,
8525
],
[
8761,
8770
],
[
9860,
9869
],
[
10004,
10013
],
[
10142,
10151
],
[
11064,
11073
],
[
11136,
11145
],
[
11198,
11207
],
[
11223,
11232
],
[
11285,
11294
],
[
11310,
11319
],
[
11922,
11931
],
[
12025,
12034
],
[
12653,
12662
],
[
13397,
13406
],
[
13519,
13528
],
[
13641,
13650
],
[
13696,
13705
],
[
13814,
13823
],
[
13932,
13941
],
[
14767,
14776
],
[
14868,
14877
],
[
14977,
14986
],
[
15718,
15727
],
[
15795,
15804
],
[
15872,
15881
],
[
16656,
16665
],
[
16710,
16719
],
[
16764,
16773
],
[
16919,
16928
],
[
17467,
17476
],
[
18642,
18651
],
[
19574,
19583
],
[
19651,
19660
],
[
19728,
19737
],
[
20629,
20638
],
[
20706,
20715
],
[
20783,
20792
],
[
21026,
21035
],
[
21103,
21112
],
[
21180,
21189
],
[
23584,
23593
],
[
23908,
23917
],
[
24112,
24121
],
[
24257,
24266
],
[
24448,
24457
],
[
24593,
24602
],
[
24781,
24790
],
[
25018,
25027
],
[
25209,
25218
],
[
25350,
25359
],
[
25538,
25547
],
[
26260,
26269
],
[
26400,
26409
],
[
26522,
26531
],
[
26577,
26586
],
[
26695,
26704
],
[
27368,
27377
],
[
27477,
27486
],
[
27531,
27540
],
[
27626,
27635
],
[
27683,
27692
],
[
27740,
27749
],
[
27795,
27804
],
[
27918,
27927
],
[
27972,
27981
],
[
28592,
28601
],
[
28857,
28866
],
[
28917,
28926
],
[
28976,
28985
],
[
29037,
29046
],
[
30908,
30917
],
[
30991,
31000
],
[
31307,
31316
],
[
31332,
31341
],
[
31399,
31408
],
[
31424,
31433
],
[
31798,
31807
],
[
31823,
31832
],
[
32723,
32732
],
[
32800,
32809
],
[
32882,
32891
]
],
[
[
456,
467
],
[
1130,
1141
],
[
1919,
1930
],
[
3758,
3769
],
[
4554,
4565
],
[
5230,
5241
],
[
5950,
5961
],
[
7409,
7420
],
[
8909,
8920
],
[
10343,
10354
],
[
11335,
11346
],
[
12050,
12061
],
[
12746,
12757
],
[
13721,
13732
],
[
13839,
13850
],
[
13957,
13968
],
[
15014,
15025
],
[
15915,
15926
],
[
16944,
16955
],
[
17040,
17051
],
[
17533,
17544
],
[
18694,
18705
],
[
19771,
19782
],
[
20826,
20837
],
[
21223,
21234
],
[
24144,
24155
],
[
24480,
24491
],
[
24813,
24824
],
[
25241,
25252
],
[
25570,
25581
],
[
26602,
26613
],
[
26720,
26731
],
[
27998,
28009
],
[
28057,
28068
],
[
28116,
28127
],
[
29182,
29193
],
[
31016,
31027
],
[
31449,
31460
],
[
31848,
31859
],
[
32907,
32918
]
],
[
[
469,
480
]
],
[
[
502,
518
],
[
1171,
1187
],
[
1960,
1976
],
[
3799,
3815
],
[
4595,
4611
],
[
5271,
5287
],
[
5991,
6007
],
[
7436,
7452
],
[
8936,
8952
],
[
10370,
10386
],
[
11362,
11378
],
[
12077,
12093
],
[
12773,
12789
],
[
13748,
13764
],
[
13866,
13882
],
[
13984,
14000
],
[
15053,
15069
],
[
15960,
15976
],
[
16971,
16987
],
[
17067,
17083
],
[
17601,
17617
],
[
18748,
18764
],
[
19816,
19832
],
[
20871,
20887
],
[
21268,
21284
],
[
24178,
24194
],
[
24514,
24530
],
[
24847,
24863
],
[
25275,
25291
],
[
25604,
25620
],
[
26629,
26645
],
[
26747,
26763
],
[
28025,
28041
],
[
28084,
28100
],
[
28143,
28159
],
[
29209,
29225
],
[
31043,
31059
],
[
31476,
31492
],
[
31875,
31891
],
[
32934,
32950
]
],
[
[
520,
534
]
],
[
[
542,
556
]
],
[
[
1217,
1233
]
],
[
[
2006,
2028
]
],
[
[
5317,
5330
]
],
[
[
6038,
6048
]
],
[
[
7483,
7501
]
],
[
[
8983,
9005
]
],
[
[
10418,
10444
]
],
[
[
11409,
11427
]
],
[
[
12135,
12154
]
],
[
[
12820,
12830
]
],
[
[
14030,
14052
]
],
[
[
15129,
15145
]
],
[
[
16014,
16044
]
],
[
[
17113,
17124
]
],
[
[
17647,
17670
]
],
[
[
18801,
18829
]
],
[
[
19870,
19884
]
],
[
[
21322,
21354
]
],
[
[
21669,
21691
]
],
[
[
22316,
22331
]
],
[
[
25653,
25674
]
],
[
[
26792,
26805
]
],
[
[
28184,
28198
]
],
[
[
29252,
29265
]
],
[
[
29599,
29615
]
],
[
[
29747,
29761
]
],
[
[
29842,
29856
]
],
[
[
31922,
31938
]
]
] |
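The suite above exercises pysnooper through decorators, with-blocks, custom writers, watch expressions and depth control; the minimal pattern the tests build on is the bare decorator, sketched here (trace output goes to stderr by default; the function and arguments are illustrative only):

import pysnooper

@pysnooper.snoop()  # variants used in the tests: snoop(string_io), snoop(depth=3), snoop(watch=('foo.x',))
def demo(number):
    doubled = number * 2
    return doubled + 1

demo(3)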
import logging
import pathlib
from unittest import mock
from cabinetry import templates
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.builder._Builder")
def test_build(mock_builder, mock_apply):
config = {"General": {"HistogramFolder": "path/", "InputPath": "file.root"}}
method = "uproot"
# no router
templates.build(config, method=method)
assert mock_builder.call_args_list == [
((pathlib.Path("path/"), "file.root", method), {})
]
assert mock_apply.call_count == 1
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {"match_func": None}
# including a router
mock_router = mock.MagicMock()
templates.build(config, method=method, router=mock_router)
# verify wrapper was set
assert (
mock_router.template_builder_wrapper._extract_mock_name()
== "_Builder()._wrap_custom_template_builder"
)
assert mock_apply.call_count == 2 # 1 from before
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {
"match_func": mock_router._find_template_builder_match
}
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.collector._collector", return_value="func")
def test_collect(mock_collector, mock_apply, caplog):
caplog.set_level(logging.DEBUG)
config = {
"General": {
"HistogramFolder": "path/",
"InputPath": "f.root:{VariationPath}",
"VariationPath": "nominal",
}
}
method = "uproot"
templates.collect(config, method=method)
assert mock_collector.call_args_list == [
((pathlib.Path("path/"), "f.root:{VariationPath}", "nominal", method), {})
]
assert mock_apply.call_args_list == [((config, "func"), {})]
caplog.clear()
# no VariationPath in general settings
config = {
"General": {"HistogramFolder": "path/", "InputPath": "f.root:{VariationPath}"}
}
templates.collect(config, method=method)
assert 'no VariationPath specified in general settings, defaulting to ""' in [
rec.message for rec in caplog.records
]
assert mock_collector.call_args == (
(pathlib.Path("path/"), "f.root:{VariationPath}", "", method),
{},
)
caplog.set_level(logging.DEBUG)
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.postprocessor._postprocessor", return_value="func")
def test_run(mock_postprocessor, mock_apply):
config = {"General": {"HistogramFolder": "path/"}}
templates.postprocess(config)
assert mock_postprocessor.call_args_list == [((pathlib.Path("path/"),), {})]
assert mock_apply.call_args_list == [((config, "func"), {})]
| [
[
[
7,
14
],
[
1598,
1605
],
[
2567,
2574
]
],
[
[
22,
29
],
[
456,
463
],
[
1920,
1927
],
[
2466,
2473
],
[
2910,
2917
]
],
[
[
51,
55
],
[
92,
96
],
[
146,
150
],
[
1393,
1397
],
[
1447,
1451
],
[
2585,
2589
],
[
2639,
2643
],
[
814,
818
]
],
[
[
79,
88
],
[
363,
372
],
[
835,
844
],
[
1823,
1832
],
[
2240,
2249
],
[
2829,
2838
]
],
[
[
201,
211
]
],
[
[
1527,
1539
]
],
[
[
2727,
2735
]
]
] |
#!/usr/bin/env python
import socket
import re
class RobotFeedback:
"""Class for the Mecademic Robot allowing for live positional
feedback of the Mecademic Robot.
Attributes
----------
address : string
The IP address associated to the Mecademic robot.
socket : socket
Socket connecting to physical Mecademic Robot.
    robot_status : tuple of boolean
        Status bits describing the state of the robot.
    gripper_status : tuple of boolean
        Status bits describing the state of the gripper.
joints : tuple of floats
Joint angle in degrees of each joint starting from
joint 1 going all way to joint 6.
cartesian : tuple of floats
The cartesian values in mm and degrees of the TRF.
joints_vel : floats
Velocity of joints.
torque : tuple of floats
Torque of joints.
accelerometer : tuple of floats
Acceleration of joints.
last_msg_chunk : string
Buffer of received messages.
version : string
Firmware version of the Mecademic Robot.
version_regex : list of int
        Major, minor and patch numbers parsed from the firmware version.
"""
def __init__(self, address, firmware_version):
"""Constructor for an instance of the class Mecademic robot.
Parameters
----------
address : string
The IP address associated to the Mecademic robot.
firmware_version : string
Firmware version of the Mecademic Robot.
"""
self.address = address
self.socket = None
self.robot_status = ()
self.gripper_status = ()
self.joints = () #Joint Angles, angles in degrees | [theta_1, theta_2, ... theta_n]
self.cartesian = () #Cartesian coordinates, distances in mm, angles in degrees | [x,y,z,alpha,beta,gamma]
        self.joints_vel = ()
        self.torque = ()
        self.accelerometer = ()
self.last_msg_chunk = ''
a = re.search(r'(\d+)\.(\d+)\.(\d+)', firmware_version)
self.version = a.group(0)
self.version_regex = [int(a.group(1)), int(a.group(2)), int(a.group(3))]
def connect(self):
"""Connects Mecademic Robot object communication to the physical Mecademic Robot.
Returns
-------
status : boolean
Return whether the connection is established.
"""
try:
self.socket = socket.socket()
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
self.socket.settimeout(1) #1s
try:
self.socket.connect((self.address, 10001)) #connect to the robot's address
except socket.timeout: #catch if the robot is not connected to in time
#raise TimeoutError
raise RuntimeError
# Receive confirmation of connection
if self.socket is None: #check that socket is not connected to nothing
raise RuntimeError
self.socket.settimeout(1) #1s
try:
if(self.version_regex[0] <= 7):
self.get_data()
elif(self.version_regex[0] > 7): #RobotStatus and GripperStatus are sent on 10001 upon connecting from 8.x firmware
msg = self.socket.recv(256).decode('ascii') #read message from robot
self._get_robot_status(msg)
self._get_gripper_status(msg)
return True
except socket.timeout:
raise RuntimeError
#except TimeoutError:
#return False
# OTHER USER !!!
except RuntimeError:
return False
def disconnect(self):
"""Disconnects Mecademic Robot object from physical Mecademic Robot.
"""
if self.socket is not None:
self.socket.close()
self.socket = None
def get_data(self, delay=0.1):
"""Receives message from the Mecademic Robot and
saves the values in appropriate variables.
Parameters
----------
delay: int or float
Time to set for timeout of the socket.
"""
if self.socket is None: #check that the connection is established
return #if no connection, nothing to receive
self.socket.settimeout(delay) #set read timeout to desired delay
try:
raw_msg = self.socket.recv(256).decode('ascii') #read message from robot
raw_response = raw_msg.split('\x00') # Split the data at \x00 to manage fragmented data
raw_response[0] = self.last_msg_chunk + raw_response[0] # Merge the first data with last fragment from previous data stream
self.last_msg_chunk = raw_response[-1]
for response in raw_response[:-1]:
if(self.version_regex[0] <= 7):
self._get_joints(response)
self._get_cartesian(response)
elif(self.version_regex[0] > 7):
self._get_joints(response)
self._get_cartesian(response)
self._get_joints_vel(response)
self._get_torque_ratio(response)
self._get_accelerometer(response)
#except TimeoutError:
except RuntimeError:
pass
def _get_robot_status(self, response):
"""Gets the values of RobotStatus bits from the message sent by
the Robot upon connecting.
        Values saved to attribute robot_status of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('RobotStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.robot_status = self._decode_msg(response, resp_code)
def _get_gripper_status(self, response):
"""Gets the values of GripperStatus bits from the message sent by
the Robot upon connecting.
        Values saved to attribute gripper_status of the object.
Parameters
----------
response : string
Message received from the robot.
"""
code = None
code = self._get_response_code('GripperStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.gripper_status = self._decode_msg(response,resp_code)
def _get_joints(self, response):
"""Gets the joint values of the variables from the message sent by the Robot.
Values saved to attribute joints of the object.
Parameters
----------
response: string
Message received from the Robot.
"""
code = None
code = self._get_response_code('JointsPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints = self._decode_msg(response, resp_code)
def _get_cartesian(self, response):
"""Gets the cartesian values of the variables from the message sent by the Robot.
Values saved to attribute cartesian of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('CartesianPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.cartesian = self._decode_msg(response,resp_code)
def _get_joints_vel(self, response):
"""Gets the velocity values of the Joints from the message sent by the Robot.
        Values saved to attribute joints_vel of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('JointsVel')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints_vel = self._decode_msg(response,resp_code)
def _get_torque_ratio(self, response):
"""Gets the torque ratio values of the Joints from the message sent by the Robot.
Values saved to attribute torque of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('TorqueRatio')
for resp_code in code:
if response.find(resp_code) != -1:
self.torque = self._decode_msg(response,resp_code)
def _get_accelerometer(self,response):
"""Gets the accelerometers values from the message sent by the Robot.
Values saved to attribute accelerometer of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('AccelerometerData')
for resp_code in code:
if response.find(resp_code) != -1:
self.accelerometer = self._decode_msg(response,resp_code)
def _get_response_code(self, param):
"""Retreives the response code for the parameters being streamed on port 100001.
Parameters
----------
param : string
            Parameter that needs to be extracted from the raw data stream from the Mecademic Robot.
1. Robot Status {sent only once upon connecting on 10001}.
2. Gripper Status {sent only once upon connecting on 10001}.
3. Joints Pose feedback.
4. Cartesian Pose feedback.
5. Joints Velocity feedback.
6. Torque Ratio.
7. Accelerometer data.
Returns
--------
answer_list : list of strings
List of response codes to search for in the raw data stream.
"""
if param.find('RobotStatus') != -1:
return ['[2007]']
elif param.find('GripperStatus')!= -1:
return ['[2079]']
elif param.find('JointsPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2102]']
elif(self.version_regex[0] > 7):
return ['[2026]','[2210]']
elif param.find('CartesianPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2103]']
elif(self.version_regex[0] > 7):
return ['[2027]','[2211]']
elif param.find('JointsVel') != -1:
return ['[2212]']
elif param.find('TorqueRatio') != -1:
return ['[2213]']
elif param.find('AccelerometerData') != -1:
return ['[2220]']
else:
return ['Invalid']
def _decode_msg(self, response, resp_code):
"""
Parameters
----------
response : string
Message received from the Robot.
resp_code : string
Message to decode
Returns
--------
        params : tuple of floats
Message decoded.
"""
response = response.replace(resp_code+'[','').replace(']','')
params = ()
if response != '':
param_str = response.split(',')
if len(param_str) == 6:
params = tuple((float(x) for x in param_str))
elif len(param_str) == 7:
params = tuple((float(x) for x in param_str[1:])) # remove timestamp
else:
params =()
return params
| [
[
[
29,
35
],
[
2360,
2366
],
[
2411,
2417
],
[
2431,
2437
],
[
2622,
2628
],
[
3433,
3439
]
],
[
[
43,
45
],
[
1911,
1913
]
],
[
[
54,
67
]
]
] |
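A short usage sketch for the feedback class above, assuming a reachable robot (the IP address and firmware string below are placeholders) and relying only on the methods the class defines:

# hypothetical polling loop built on RobotFeedback
feedback = RobotFeedback('192.168.0.100', '8.1.9')
if feedback.connect():
    try:
        for _ in range(10):
            feedback.get_data(delay=0.1)  # refreshes joints, cartesian, velocity and torque buffers
            print(feedback.joints, feedback.cartesian)
    finally:
        feedback.disconnect()
else:
    print('could not connect to the robot')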
"""
Regression tests for Model inheritance behavior.
"""
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
        # Regression tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
with self.assertRaises(Place.DoesNotExist):
Place.objects.get(pk=ident)
with self.assertRaises(ItalianRestaurant.DoesNotExist):
ItalianRestaurant.objects.get(pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c3.get_next_by_pub_date()
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c1.get_previous_by_pub_date()
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
Regression tests for #10406
If there's a one-to-one link between a child model and the parent and
no explicit pk declared, we can use the one-to-one link as the pk on
the child.
"""
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
# However, the connector from child to parent need not be the pk on
# the child at all.
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# the child->parent link
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
# Check that many-to-many relations defined on an abstract base class
# are correctly inherited (and created) on the child class.
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees.set([p1, p3])
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees.set([p2, p4])
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# Check that a subclass of a subclass of an abstract model doesn't get
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees.set([p4])
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
"""
Test that a model which has different primary key for the parent model
passes unique field checking correctly. Refs #17615.
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and top contains direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, 'John Doe')
self.assertEqual(senator.title, 'X')
self.assertEqual(senator.state, 'Y')
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
def test_related_filtering_query_efficiency_ticket_15844(self):
r = Restaurant.objects.create(
name="Guido's House of Pasta",
address='944 W. Fullerton',
serves_hot_dogs=True,
serves_pizza=False,
)
s = Supplier.objects.create(restaurant=r)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
Supplier.objects.filter(restaurant=r),
[s], lambda x: x,
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
r.supplier_set.all(),
[s], lambda x: x,
)
| [
[
[
80,
96
]
],
[
[
105,
113
],
[
5034,
5042
],
[
5183,
5191
],
[
8199,
8207
],
[
8382,
8390
],
[
8566,
8574
],
[
10349,
10357
],
[
12554,
12562
]
],
[
[
135,
145
],
[
14930,
14940
],
[
15099,
15109
],
[
18596,
18606
]
],
[
[
167,
182
],
[
16889,
16904
]
],
[
[
203,
208
],
[
16113,
16118
]
],
[
[
233,
241
],
[
673,
681
]
],
[
[
269,
286
],
[
8084,
8101
],
[
8267,
8284
],
[
8451,
8468
],
[
8759,
8776
],
[
8981,
8998
],
[
9749,
9766
],
[
10232,
10249
],
[
10406,
10423
],
[
10547,
10564
]
],
[
[
288,
301
],
[
13195,
13208
]
],
[
[
303,
316
],
[
13059,
13072
]
],
[
[
318,
328
],
[
15523,
15533
],
[
15765,
15775
],
[
15792,
15802
]
],
[
[
330,
335
],
[
4966,
4971
],
[
5101,
5106
]
],
[
[
341,
349
],
[
10692,
10700
],
[
10982,
10990
]
],
[
[
351,
377
],
[
14581,
14607
]
],
[
[
379,
396
],
[
1518,
1535
],
[
2295,
2312
],
[
3719,
3736
],
[
4518,
4535
],
[
5801,
5818
],
[
5956,
5973
],
[
6482,
6499
],
[
6527,
6544
],
[
16679,
16696
],
[
16781,
16798
],
[
17197,
17214
]
],
[
[
398,
406
],
[
9295,
9303
]
],
[
[
412,
430
],
[
13829,
13847
]
],
[
[
432,
442
],
[
1836,
1846
],
[
2575,
2585
],
[
4009,
4019
]
],
[
[
444,
455
],
[
11368,
11379
]
],
[
[
457,
468
],
[
11535,
11546
],
[
11648,
11659
]
],
[
[
470,
482
],
[
11812,
11824
],
[
11858,
11870
],
[
12037,
12049
]
],
[
[
488,
500
],
[
11991,
12003
]
],
[
[
502,
508
],
[
12859,
12865
],
[
12908,
12914
],
[
12955,
12961
],
[
13004,
13010
]
],
[
[
510,
515
],
[
1196,
1201
],
[
1741,
1746
],
[
1996,
2001
],
[
3278,
3283
],
[
5479,
5484
],
[
6019,
6024
],
[
6390,
6395
],
[
6423,
6428
],
[
11684,
11689
],
[
17868,
17873
]
],
[
[
517,
524
],
[
16179,
16186
],
[
16285,
16292
]
],
[
[
526,
540
],
[
12459,
12473
]
],
[
[
542,
552
],
[
1342,
1352
],
[
2090,
2100
],
[
3504,
3514
],
[
5625,
5635
],
[
6064,
6074
],
[
6324,
6334
],
[
6669,
6679
],
[
6822,
6832
],
[
7273,
7283
],
[
17716,
17726
],
[
18080,
18090
],
[
18196,
18206
],
[
18844,
18854
]
],
[
[
558,
570
],
[
7837,
7849
],
[
14728,
14740
],
[
15003,
15015
]
],
[
[
572,
585
],
[
14833,
14846
]
],
[
[
587,
594
],
[
17380,
17387
],
[
17483,
17490
]
],
[
[
596,
604
],
[
7225,
7233
],
[
18308,
18316
],
[
18368,
18376
],
[
18467,
18475
],
[
18641,
18649
],
[
19042,
19050
],
[
19173,
19181
]
],
[
[
606,
618
],
[
15663,
15675
],
[
15826,
15838
],
[
15855,
15867
]
],
[
[
620,
624
],
[
16231,
16235
]
],
[
[
630,
640
],
[
7505,
7515
]
],
[
[
652,
672
]
]
] |
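Each group in the def_use_chains column appears to pair a name's definition with its later uses, expressed as [start, end) character offsets into that row's code string (a hedged reading; the preview may not preserve the original whitespace exactly). A minimal sketch of decoding one group:
# Illustrative only: `code` is one row's raw source text and `chain` is one
# group from its def_use_chains column; offsets are assumed to be [start, end).
def decode_chain(code, chain):
    # The first pair is taken as the definition, the rest as its uses.
    definition, *uses = [code[start:end] for start, end in chain]
    return definition, uses
# For the next row, the group [[7, 9]] would slice out the name "os"
# from its first line, "import os".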
import os
# import torch
import argparse
import base64
import sys
import io
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def fullmodel2base64(model):
buffer = io.BytesIO()
torch.save(model, buffer)
bg = buffer.getvalue()
return base64.b64encode(bg).decode()
def base642fullmodel(modbase64):
inputrpc = bytes(modbase64.encode())
inputrpc_ = base64.b64decode(inputrpc)
loadmodel = torch.load(io.BytesIO(inputrpc_))
return loadmodel
model_list = []
f = open(sys.argv[1], "r")
models = f.read().split(",")
f.close()
print(models)
for m in models:
model_list.append(base642fullmodel(m))
new_model_state = model_list[0].state_dict()
# sum the weights of all models
for m in model_list[1:]:
state_m = m.state_dict()
for key in state_m:
new_model_state[key] += state_m[key]
# average the model weights
for key in new_model_state:
new_model_state[key] /= len(model_list)
new_model = model_list[0]
new_model.load_state_dict(new_model_state)
output = fullmodel2base64(new_model)
print(output)
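A minimal sketch of round-tripping a model through the two helpers above (illustrative only; the small nn.Linear stands in for whatever model the script actually exchanges):
# Not part of the original script: quick sanity check of the base64 round trip.
# (Newer torch releases may require weights_only=False inside torch.load.)
m = nn.Linear(4, 2)
encoded = fullmodel2base64(m)           # model -> base64 string
restored = base642fullmodel(encoded)    # base64 string -> model
assert torch.equal(m.weight, restored.weight)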
| [
[
[
7,
9
]
],
[
[
32,
40
]
],
[
[
48,
54
],
[
369,
375
],
[
490,
496
]
],
[
[
62,
65
],
[
616,
619
]
],
[
[
73,
75
],
[
288,
290
],
[
544,
546
]
],
[
[
84,
89
],
[
305,
310
],
[
533,
538
]
],
[
[
97,
111
]
],
[
[
136,
146
]
],
[
[
176,
186
]
],
[
[
224,
243
]
],
[
[
250,
266
],
[
1130,
1146
]
],
[
[
404,
420
],
[
730,
746
]
],
[
[
590,
600
],
[
712,
722
],
[
770,
780
],
[
836,
846
],
[
1037,
1047
],
[
1063,
1073
]
],
[
[
607,
608
],
[
644,
645
],
[
665,
666
]
],
[
[
635,
641
],
[
682,
688
],
[
700,
706
]
],
[
[
695,
696
],
[
747,
748
]
],
[
[
752,
767
],
[
913,
928
],
[
988,
1003
],
[
1009,
1024
],
[
1103,
1118
]
],
[
[
831,
832
],
[
866,
867
]
],
[
[
856,
863
],
[
896,
903
],
[
937,
944
]
],
[
[
889,
892
],
[
945,
948
],
[
929,
932
]
],
[
[
981,
984
],
[
1025,
1028
]
],
[
[
1051,
1060
],
[
1077,
1086
],
[
1147,
1156
]
],
[
[
1121,
1127
],
[
1165,
1171
]
]
] |
# Question No 6
# Sea level rise each year for the next 25 years
year = 1
millimeter = 1.6
while year <= 25:
    years = year * millimeter
    print("Cumulative sea level rise after year", year, "is", years, "mm")
year+=1 | [
[
[
52,
56
],
[
85,
89
],
[
106,
110
],
[
185,
189
]
],
[
[
62,
72
],
[
113,
123
]
],
[
[
99,
104
],
[
174,
179
]
]
] |
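A slightly tidier sketch of the same calculation (illustrative only); after 25 years at 1.6 mm per year the cumulative rise works out to 25 * 1.6 = 40 mm:
# Equivalent loop, assuming the same constant rise of 1.6 mm per year.
rise_per_year_mm = 1.6
for year in range(1, 26):
    print(f"Cumulative rise after year {year}: {year * rise_per_year_mm:.1f} mm")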
from fastapi import FastAPI
import uvicorn
from src.routes import (
user,
employee,
car,
inventory,
product,
service,
dealership,
department,
)
from fastapi.middleware.cors import CORSMiddleware
from src.settings.envvariables import Settings
Settings().check_variables()
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Include/define our routes
app.include_router(user.app, prefix="/users", tags=["Users"])
app.include_router(employee.app, prefix="/employees", tags=["Employees"])
app.include_router(car.app, prefix="/cars", tags=["Cars"])
app.include_router(inventory.app, prefix="/inventory", tags=["Inventory"])
app.include_router(product.app, prefix="/products", tags=["Product"])
app.include_router(service.app, prefix="/services/requests", tags=["Service"])
app.include_router(dealership.app, prefix="/dealerships", tags=["Dealership"])
app.include_router(department.app, prefix="/departments", tags=["Department"])
# Launch the app with uvicorn and handle environment
# if Settings().ENV == "prod":
# if __name__ == "__main__":
# print("Launching Production Environment")
# uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=False, workers=3)
# else:
# if __name__ == "__main__":
# print("Launching Development Environment")
# uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=True, workers=1)
| [
[
[
20,
27
],
[
311,
318
]
],
[
[
35,
42
]
],
[
[
72,
76
],
[
515,
519
]
],
[
[
82,
90
],
[
577,
585
]
],
[
[
96,
99
],
[
651,
654
]
],
[
[
105,
114
],
[
710,
719
]
],
[
[
120,
127
],
[
785,
792
]
],
[
[
133,
140
],
[
855,
862
]
],
[
[
146,
156
],
[
934,
944
]
],
[
[
162,
172
],
[
1013,
1023
]
],
[
[
212,
226
],
[
346,
360
]
],
[
[
265,
273
],
[
275,
283
]
],
[
[
305,
308
],
[
322,
325
],
[
496,
499
],
[
558,
561
],
[
632,
635
],
[
691,
694
],
[
766,
769
],
[
836,
839
],
[
915,
918
],
[
994,
997
]
]
] |
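A minimal sketch of launching the assembled app, mirroring the commented-out block above (Settings().PORT is assumed to exist, as that block implies):
# Illustrative launcher based on the commented-out block in the row above.
if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=False)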
from django.db.models import Q
from apps.configattribute.models import ConfigAttribute
from apps.property.models import GenericProperty
from apps.utils.data_helpers.manager import DataManager
from apps.utils.iotile.variable import SYSTEM_VID
from apps.utils.timezone_utils import display_formatted_ts
class TripInfo(object):
block = None
data = {}
slug = None
last_update = None
def __init__(self, block):
self.block = block
self.slug = block.slug
self.data = {
'summary': {},
'properties': {}
}
self.last_update = None
def add_property(self, key, value):
self.data['properties'][key] = value
def add_summary_event(self, event):
if 'summary' in self.data:
if self.last_update and self.last_update > event.timestamp:
return
self.data['summary'] = event.extra_data
# Trip Summary should win over Trip Update
self.last_update = event.timestamp
def to_representation(self):
data = {
'slug': self.slug,
'label': self.block.title,
'summary_date': display_formatted_ts(self.last_update) if self.last_update else '',
'data': self.data
}
return data
class TripOrgQualityReport(object):
org = None
results = {}
config = {}
def __init__(self, org):
self.org = org
self.results = {}
self.config = self._get_config_attributes()
def _get_config_attributes(self):
config_name = ':report:trip_quality:config'
attribute = ConfigAttribute.objects.get_attribute_by_priority(name=config_name, target_slug=self.org.obj_target_slug)
if attribute:
return attribute.data
# Return empty if it does not exist
return {
'summary_keys': [
"Device",
"START (UTC)",
"END (UTC)",
"Duration (Days)",
"Event Count",
"First event at (UTC)",
"Last event at (UTC)",
"Max Humidity (% RH)",
"Min Humidity (% RH)",
"Median Humidity (% RH)",
"Max Pressure (Mbar)",
"Min Pressure (Mbar)",
"Median Pressure (Mbar)",
"Max Temp (C)",
"Min Temp (C)",
"Median Temp (C)",
"Above 30C",
"Below 17C",
"Max Peak (G)",
"TimeStamp(MaxPeak) (UTC)",
"DeltaV at Max Peak (in/s)",
"MaxDeltaV (in/s)",
"TimeStamp(MaxDeltaV) (UTC)",
"Peak at MaxDeltaV (G)"
],
'property_keys': []
}
def analyze(self):
"""
Get all archives for an organization and fill a TripInfo object for each with the following
- Selected trip properties (based on project's configAttribute)
- Last Update Event, if any
- Last Trip Summary Event, if any
:return: Nothing
"""
blocks = self.org.data_blocks.all()
for block in blocks:
self.results[block.slug] = TripInfo(block)
block_slugs = [block.slug for block in blocks]
if self.config and 'property_keys' in self.config:
for property_item in self.config['property_keys']:
properties = GenericProperty.objects.filter(target__in=block_slugs, name=property_item)
for p in properties:
self.results[p.target].add_property(property_item, p.value)
# Not great, but we seem to have blocks with project as None and blocks as p--0000
q = Q(project_slug='') | Q(project_slug='p--0000-0000')
q = q & Q(device_slug__in=block_slugs, variable_slug__icontains=SYSTEM_VID['TRIP_SUMMARY'])
events = DataManager.filter_qs_using_q(
'event',
q=q
)
for event in events:
self.results[event.device_slug].add_summary_event(event)
# Cleanup reports that don't look complete (No Summary or Properties)
to_delete = []
for slug, trip in self.results.items():
if trip.data['summary'] == {}:
# Delete Archive that does not represent a real trip
to_delete.append(slug)
for slug in to_delete:
del(self.results[slug])
| [
[
[
29,
30
],
[
3751,
3752
],
[
3772,
3773
],
[
3819,
3820
]
],
[
[
72,
87
],
[
1613,
1628
]
],
[
[
121,
136
],
[
3455,
3470
]
],
[
[
181,
192
],
[
3920,
3931
]
],
[
[
232,
242
],
[
3875,
3885
]
],
[
[
281,
301
],
[
1157,
1177
]
],
[
[
310,
318
],
[
3232,
3240
]
],
[
[
1293,
1313
]
]
] |
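A hedged sketch of how the report class above might be driven (org stands for whatever Org object owns the data blocks; none of this is from the original file):
# Illustrative only: build a quality report for one org and inspect it.
report = TripOrgQualityReport(org)
report.analyze()
for slug, trip in report.results.items():
    print(slug, trip.to_representation()['summary_date'])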
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_mail
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.api import mail
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
(
auth_models, exp_models, feedback_models, question_models, skill_models,
story_models, suggestion_models, topic_models,) = (
models.Registry.import_models([
models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.story,
models.NAMES.suggestion, models.NAMES.topic]))
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()
# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '
# List of model classes that don't have Wipeout- or Takeout-related class
# methods defined because they're not used directly but only as
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
'BaseCommitLogEntryModel',
'BaseHumanMaintainedModel',
'BaseMapReduceBatchResultsModel',
'BaseModel',
'BaseSnapshotContentModel',
'BaseSnapshotMetadataModel',
'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
"""Returns filepath using the filename. Different files are present in
different subdirectories in the rootdir. So, we walk through the rootdir and
    match all the filenames with the given filename. When a match is found
the function returns the complete path of the filename by using
os.path.join(root, filename).
For example signup-page.mainpage.html is present in
core/templates/pages/signup-page and error-page.mainpage.html is present in
core/templates/pages/error-pages. So we walk through core/templates/pages
and a match for signup-page.component.html is found in signup-page
subdirectory and a match for error-page.directive.html is found in
error-pages subdirectory.
Args:
filename: str. The name of the file.
rootdir: str. The directory to search the file in.
Returns:
str | None. The path of the file if file is found otherwise
None.
"""
# This is required since error files are served according to error status
# code. The file served is error-page.mainpage.html but it is compiled and
# stored as error-page-{status_code}.mainpage.html. So, we need to swap the
# name here to obtain the correct filepath.
if filename.startswith('error-page'):
filename = 'error-page.mainpage.html'
matches = list(itertools.chain.from_iterable(
(os.path.join(subdir, f) for f in filenames if f == filename)
for subdir, _, filenames in os.walk(rootdir)))
if len(matches) > 1:
raise Exception('Multiple files found with name: %s' % filename)
return matches[0] if matches else None
def mock_load_template(filename):
"""Mock for load_template function. This mock is required for backend tests
since we do not have webpack compilation before backend tests. The folder to
search templates is webpack_bundles which is generated after webpack
compilation. Since this folder will be missing, load_template function will
return an error. So, we use a mock for load_template which returns the html
file from the source directory instead.
Args:
filename: str. The name of the file for which template is to be
returned.
Returns:
str. The contents of the given file.
"""
filepath = get_filepath_from_filename(
filename, os.path.join('core', 'templates', 'pages'))
with python_utils.open_file(filepath, 'r') as f:
return f.read()
def check_image_png_or_webp(image_string):
"""Checks if the image is in png or webp format only.
Args:
image_string: str. Image url in base64 format.
Returns:
bool. Returns true if image is in WebP format.
"""
return image_string.startswith(('data:image/png', 'data:image/webp'))
def get_storage_model_module_names():
"""Get all module names in storage."""
# As models.NAMES is an enum, it cannot be iterated over. So we use the
# __dict__ property which can be iterated over.
for name in models.NAMES.__dict__:
if '__' not in name:
yield name
def get_storage_model_classes():
"""Get all model classes in storage."""
for module_name in get_storage_model_module_names():
(module,) = models.Registry.import_models([module_name])
for member_name, member_obj in inspect.getmembers(module):
if inspect.isclass(member_obj):
clazz = getattr(module, member_name)
all_base_classes = [
base_class.__name__ for base_class in inspect.getmro(
clazz)]
if 'Model' in all_base_classes:
yield clazz
class ElasticSearchStub(python_utils.OBJECT):
"""This stub class mocks the functionality of ES in
elastic_search_services.py.
IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
be exact implementations of elasticsearch functionality. If the results of
this mock and the local dev elasticsearch instance differ, the mock
functions should be updated so that their behaviour matches what a local
dev instance would return. (For example, this mock always has a 'version'
of 1 in the return dict and an arbitrary '_seq_no', although the version
number increments with every PUT in the elasticsearch Python client
library and the '_seq_no' increments with every operation.)
"""
_DB = {}
def reset(self):
"""Helper method that clears the mock database."""
self._DB.clear()
def _generate_index_not_found_error(self, index_name):
"""Helper method that generates an elasticsearch 'index not found' 404
error.
Args:
index_name: str. The index that was not found.
Returns:
elasticsearch.NotFoundError. A manually-constructed error
indicating that the index was not found.
"""
raise elasticsearch.NotFoundError(
404, 'index_not_found_exception', {
'status': 404,
'error': {
'reason': 'no such index [%s]' % index_name,
'root_cause': [{
'reason': 'no such index [%s]' % index_name,
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}],
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}
}
)
def mock_create_index(self, index_name):
"""Creates an index with the given name.
Args:
index_name: str. The name of the index to create.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name in self._DB:
raise elasticsearch.RequestError(
400, 'resource_already_exists_exception',
'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
self._DB[index_name] = []
return {
'index': index_name,
'acknowledged': True,
'shards_acknowledged': True
}
def mock_index(self, index_name, document, id=None): # pylint: disable=redefined-builtin
"""Adds a document with the given ID to the index.
Note that, unfortunately, we have to keep the name of "id" for the
last kwarg, although it conflicts with a Python builtin. This is
because the name is an existing part of the API defined at
https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html
Args:
index_name: str. The name of the index to create.
document: dict. The document to store.
id: str. The unique identifier of the document.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
            elasticsearch.NotFoundError. The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
self._DB[index_name] = [
d for d in self._DB[index_name] if d['id'] != id]
self._DB[index_name].append(document)
return {
'_index': index_name,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0,
},
'_seq_no': 96,
'_primary_term': 1,
'result': 'created',
'_id': id,
'_version': 1,
'_type': '_doc',
}
def mock_exists(self, index_name, doc_id):
"""Checks whether a document with the given ID exists in the mock
database.
Args:
index_name: str. The name of the index to check.
doc_id: str. The document id to check.
Returns:
bool. Whether the document exists in the index.
Raises:
elasticsearch.NotFoundError: The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
return any([d['id'] == doc_id for d in self._DB[index_name]])
def mock_delete(self, index_name, doc_id):
"""Deletes a document from an index in the mock database. Does nothing
if the document is not in the index.
Args:
index_name: str. The name of the index to delete the document from.
doc_id: str. The document id to be deleted from the index.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
Exception. The document does not exist in the index.
elasticsearch.NotFoundError. The given index name was not found, or
the given doc_id was not found in the given index.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
if len(self._DB[index_name]) != len(docs):
self._DB[index_name] = docs
return {
'_type': '_doc',
'_seq_no': 99,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'result': 'deleted',
'_primary_term': 1,
'_index': index_name,
'_version': 4,
'_id': '0'
}
raise elasticsearch.NotFoundError(
404, {
'_index': index_name,
'_type': '_doc',
'_id': doc_id,
'_version': 1,
'result': 'not_found',
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'_seq_no': 103,
'_primary_term': 1
})
def mock_delete_by_query(self, index_name, query):
"""Deletes documents from an index based on the given query.
        Note that this mock only supports a specific form for the query, i.e. the
one which clears the entire index. It asserts that all calls to this
function use that query format.
Args:
index_name: str. The name of the index to delete the documents from.
query: dict. The query that defines which documents to delete.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The query is not in the correct form.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert query.keys() == ['query']
assert query['query'] == {
'match_all': {}
}
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
index_size = len(self._DB[index_name])
del self._DB[index_name][:]
return {
'took': 72,
'version_conflicts': 0,
'noops': 0,
'throttled_until_millis': 0,
'failures': [],
'throttled_millis': 0,
'total': index_size,
'batches': 1,
'requests_per_second': -1.0,
'retries': {u'search': 0, u'bulk': 0},
'timed_out': False,
'deleted': index_size
}
def mock_search(self, body=None, index=None, params=None):
"""Searches and returns documents that match the given query.
Args:
body: dict. A dictionary search definition that uses Query DSL.
index: str. The name of the index to search.
params: dict. A dict with two keys: `size` and `from`. The
corresponding values are ints which represent the number of
results to fetch, and the offset from which to fetch them,
respectively.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The given arguments are not supported by this mock.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert body is not None
# "_all" and "" are special index names that are used to search across
# all indexes. We do not allow their use.
assert index not in ['_all', '', None]
assert sorted(params.keys()) == ['from', 'size']
if index not in self._DB:
raise self._generate_index_not_found_error(index)
result_docs = []
result_doc_ids = set([])
for doc in self._DB[index]:
if not doc['id'] in result_doc_ids:
result_docs.append(doc)
result_doc_ids.add(doc['id'])
filters = body['query']['bool']['filter']
terms = body['query']['bool']['must']
for f in filters:
for k, v in f['match'].items():
result_docs = [doc for doc in result_docs if doc[k] in v]
if terms:
filtered_docs = []
for term in terms:
for _, v in term.items():
values = v['query'].split(' ')
for doc in result_docs:
strs = [val for val in doc.values() if isinstance(
val, python_utils.BASESTRING)]
words = []
for s in strs:
words += s.split(' ')
if all([value in words for value in values]):
filtered_docs.append(doc)
result_docs = filtered_docs
formatted_result_docs = [{
'_id': doc['id'],
'_score': 0.0,
'_type': '_doc',
'_index': index,
'_source': doc
} for doc in result_docs[
params['from']: params['from'] + params['size']
]]
return {
'timed_out': False,
'_shards': {
'failed': 0,
'total': 1,
'successful': 1,
'skipped': 0
},
'took': 4,
'hits': {
'hits': formatted_result_docs
},
'total': {
'value': len(formatted_result_docs),
'relation': 'eq'
},
'max_score': max(
[0.0] + [d['_score'] for d in formatted_result_docs]),
}
class AuthServicesStub(python_utils.OBJECT):
"""Test-only implementation of the public API in core.platform.auth."""
def __init__(self):
"""Initializes a new instance that emulates an empty auth server."""
self._user_id_by_auth_id = {}
self._external_user_id_associations = set()
@classmethod
def install_stub(cls, test):
"""Installs a new instance of the stub onto the given test instance.
Args:
test: GenericTestBase. The test instance to install the stub on.
Returns:
callable. A function that will uninstall the stub when called.
"""
with contextlib2.ExitStack() as stack:
stub = cls()
stack.enter_context(test.swap(
platform_auth_services, 'establish_auth_session',
stub.establish_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'destroy_auth_session',
stub.destroy_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_claims_from_request',
stub.get_auth_claims_from_request))
stack.enter_context(test.swap(
platform_auth_services, 'mark_user_for_deletion',
stub.mark_user_for_deletion))
stack.enter_context(test.swap(
platform_auth_services, 'delete_external_auth_associations',
stub.delete_external_auth_associations))
stack.enter_context(test.swap(
platform_auth_services,
'verify_external_auth_associations_are_deleted',
stub.verify_external_auth_associations_are_deleted))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_id_from_user_id',
stub.get_auth_id_from_user_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_user_id_from_auth_id',
stub.get_user_id_from_auth_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_user_ids_from_auth_ids',
stub.get_multi_user_ids_from_auth_ids))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_auth_ids_from_user_ids',
stub.get_multi_auth_ids_from_user_ids))
stack.enter_context(test.swap(
platform_auth_services, 'associate_auth_id_with_user_id',
stub.associate_auth_id_with_user_id))
stack.enter_context(test.swap(
platform_auth_services,
'associate_multi_auth_ids_with_user_ids',
stub.associate_multi_auth_ids_with_user_ids))
# Standard usage of ExitStack: enter a bunch of context managers
# from the safety of an ExitStack's context. Once they've all been
# opened, pop_all() of them off of the original context so they can
# *stay* open. Calling the function returned will exit all of them
# in reverse order.
# https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
return stack.pop_all().close
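        # Illustrative caller pattern (not part of the original file): the
        # returned callable undoes every swap installed above.
        #     uninstall = AuthServicesStub.install_stub(self)
        #     ...  # exercise code that relies on the stubbed auth services
        #     uninstall()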
@classmethod
def establish_auth_session(cls, unused_request, unused_response):
"""Sets login cookies to maintain a user's sign-in session.
Args:
unused_request: webapp2.Request. Unused because os.environ handles
sessions.
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def destroy_auth_session(cls, unused_response):
"""Clears login cookies from the given response headers.
Args:
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def get_auth_claims_from_request(cls, unused_request):
"""Authenticates the request and returns claims about its authorizer.
This stub obtains authorization information from os.environ. To make the
operation more authentic, this method also creates a new "external"
association for the user to simulate a genuine "provided" value.
Args:
unused_request: webapp2.Request. The HTTP request to authenticate.
Unused because auth-details are extracted from environment
variables.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no
user is signed in, then returns None.
"""
auth_id = os.environ.get('USER_ID', '')
email = os.environ.get('USER_EMAIL', '')
role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None
def mark_user_for_deletion(self, user_id):
"""Marks the user, and all of their auth associations, as deleted.
Since the stub does not use models, this operation actually deletes the
user's association. The "external" associations, however, are not
deleted yet.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._user_id_by_auth_id = {
a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
}
def delete_external_auth_associations(self, user_id):
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._external_user_id_associations.discard(user_id)
def verify_external_auth_associations_are_deleted(self, user_id):
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
return user_id not in self._external_user_id_associations
def get_auth_id_from_user_id(self, user_id):
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
Returns:
str|None. The auth ID associated with the given user ID, or None if
no association exists.
"""
return python_utils.NEXT(
(a for a, u in self._user_id_by_auth_id.items() if u == user_id),
None)
def get_user_id_from_auth_id(self, auth_id):
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
Returns:
str|None. The user ID associated with the given auth ID, or None if
no association exists.
"""
return self._user_id_by_auth_id.get(auth_id, None)
def get_multi_user_ids_from_auth_ids(self, auth_ids):
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth
IDs, or None for associations which don't exist.
"""
return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]
def get_multi_auth_ids_from_user_ids(self, user_ids):
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user
IDs, or None for associations which don't exist.
"""
auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
return [auth_id_by_user_id.get(u, None) for u in user_ids]
def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
"""Commits the association between auth ID and user ID.
This method also adds the user to the "external" set of associations.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
to commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
if auth_id in self._user_id_by_auth_id:
raise Exception(
'auth_id=%r is already associated with user_id=%r' % (
auth_id, self._user_id_by_auth_id[auth_id]))
auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id).put()
self._external_user_id_associations.add(user_id)
self._user_id_by_auth_id[auth_id] = user_id
def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
"""Commits the associations between auth IDs and user IDs.
This method also adds the users to the "external" set of associations.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
if collisions:
raise Exception('already associated: %s' % collisions)
datastore_services.put_multi(
[auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id in auth_id_user_id_pairs])
self._external_user_id_associations.add(
u for _, u in auth_id_user_id_pairs)
self._user_id_by_auth_id.update(auth_id_user_id_pairs)
class TaskqueueServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.taskqueue taskqueue services API.
"""
def __init__(self, test_base):
"""Initializes a taskqueue services stub that replaces the API
functionality of core.platform.taskqueue.
Args:
test_base: GenericTestBase. The current test base.
"""
self._test_base = test_base
self._client = cloud_tasks_emulator.Emulator(
task_handler=self._task_handler, automatic_task_handling=False)
def _task_handler(self, url, payload, queue_name, task_name=None):
"""Makes a POST request to the task URL in the test app.
Args:
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults
to None if no payload is required.
queue_name: str. The name of the queue to add the task to.
task_name: str|None. Optional. The name of the task.
"""
headers = {
'X-Appengine-QueueName': python_utils.convert_to_bytes(queue_name),
'X-Appengine-TaskName': (
# Maps empty strings to None so the output can become 'None'.
python_utils.convert_to_bytes(task_name or None)),
'X-AppEngine-Fake-Is-Admin': python_utils.convert_to_bytes(1),
}
csrf_token = self._test_base.get_new_csrf_token()
self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)
def create_http_task(
self, queue_name, url, payload=None, scheduled_for=None,
task_name=None):
"""Creates a Task in the corresponding queue that will be executed when
the 'scheduled_for' countdown expires using the cloud tasks emulator.
Args:
queue_name: str. The name of the queue to add the task to.
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults to
None if no payload is required.
scheduled_for: datetime|None. The naive datetime object for the time
to execute the task. Ignored by this stub.
task_name: str|None. Optional. The name of the task.
"""
# Causes the task to execute immediately by setting the scheduled_for
# time to 0. If we allow scheduled_for to be non-zero, then tests that
# rely on the actions made by the task will become unreliable.
scheduled_for = 0
self._client.create_task(
queue_name, url, payload, scheduled_for=scheduled_for,
task_name=task_name)
def count_jobs_in_taskqueue(self, queue_name=None):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_number_of_tasks(queue_name=queue_name)
def process_and_flush_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._client.process_and_flush_tasks(queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_tasks(queue_name=queue_name)
class MemoryCacheServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.cache cache services API.
"""
_CACHE_DICT = {}
def get_memory_cache_stats(self):
"""Returns a mock profile of the cache dictionary. This mock does not
have the functionality to test for peak memory usage and total memory
usage so the values for those attributes will be 0.
Returns:
MemoryCacheStats. MemoryCacheStats object containing the total
number of keys in the cache dictionary.
"""
return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))
def flush_cache(self):
"""Wipes the cache dictionary clean."""
self._CACHE_DICT.clear()
def get_multi(self, keys):
"""Looks up a list of keys in cache dictionary.
Args:
keys: list(str). A list of keys (strings) to look up.
Returns:
list(str). A list of values in the cache dictionary corresponding to
the keys that are passed in.
"""
assert isinstance(keys, list)
return [self._CACHE_DICT.get(key, None) for key in keys]
def set_multi(self, key_value_mapping):
"""Sets multiple keys' values at once in the cache dictionary.
Args:
key_value_mapping: dict(str, str). Both the key and value are
strings. The value can either be a primitive binary-safe string
or the JSON-encoded string version of the object.
Returns:
bool. Whether the set action succeeded.
"""
assert isinstance(key_value_mapping, dict)
self._CACHE_DICT.update(key_value_mapping)
return True
def delete_multi(self, keys):
"""Deletes multiple keys in the cache dictionary.
Args:
keys: list(str). The keys to delete.
Returns:
int. Number of successfully deleted keys.
"""
assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
keys_to_delete = [key for key in keys if key in self._CACHE_DICT]
for key in keys_to_delete:
del self._CACHE_DICT[key]
return len(keys_to_delete)
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
# A test unicode string.
UNICODE_TEST_STRING = 'unicode ¡马!'
def _get_unicode_test_string(self, suffix):
"""Returns a string that contains unicode characters and ends with the
given suffix. This is used to test that functions behave correctly when
handling strings with unicode characters.
Args:
suffix: str. The suffix to append to the UNICODE_TEST_STRING.
Returns:
str. A string that contains unicode characters and ends with the
given suffix.
"""
return '%s%s' % (self.UNICODE_TEST_STRING, suffix)
def _assert_validation_error(self, item, error_substring):
"""Checks that the given item passes default validation."""
with self.assertRaisesRegexp(utils.ValidationError, error_substring):
item.validate()
def log_line(self, line):
"""Print the line with a prefix that can be identified by the script
that calls the test.
"""
# We are using the b' prefix as all the stdouts are in bytes.
python_utils.PRINT(
b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_updated_param_dict(
self, param_dict, param_changes, exp_param_specs):
"""Updates a param dict using the given list of param_changes.
Note that the list of parameter changes is ordered. Parameter changes
later in the list may depend on parameter changes that have been set
earlier in the same list.
"""
new_param_dict = copy.deepcopy(param_dict)
for param_change in param_changes:
try:
obj_type = exp_param_specs[param_change.name].obj_type
except:
raise Exception('Parameter %s not found' % param_change.name)
new_param_dict[param_change.name] = (
param_change.get_normalized_value(obj_type, new_param_dict))
return new_param_dict
def get_static_asset_filepath(self):
"""Returns filepath to the static files on disk ('' or 'build/')."""
return '' if constants.DEV_MODE else os.path.join('build')
def get_static_asset_url(self, asset_suffix):
"""Returns the relative path for the asset, appending it to the
corresponding cache slug. asset_suffix should have a leading slash.
"""
return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)
@contextlib.contextmanager
def capture_logging(self, min_level=logging.NOTSET):
"""Context manager that captures logs into a list.
Strips whitespace from messages for convenience.
https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
Args:
min_level: int. The minimum logging level captured by the context
manager. By default, all logging levels are captured. Values
should be one of the following values from the logging module:
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
Yields:
list(str). A live-feed of the logging messages captured so-far.
"""
captured_logs = []
class ListStream(python_utils.OBJECT):
"""Stream-like object that appends writes to the captured logs."""
def write(self, msg):
"""Appends stripped messages to captured logs."""
captured_logs.append(msg.strip())
def flush(self):
"""Does nothing."""
pass
list_stream_handler = logging.StreamHandler(stream=ListStream())
logger = logging.getLogger()
old_level = logger.level
logger.addHandler(list_stream_handler)
logger.setLevel(min_level)
try:
yield captured_logs
finally:
logger.setLevel(old_level)
logger.removeHandler(list_stream_handler)
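    # Typical usage of capture_logging (illustrative, not from the original
    # file):
    #     with self.capture_logging(min_level=logging.INFO) as logs:
    #         logging.info('something happened')
    #     self.assertEqual(logs, ['something happened'])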
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Example usage:
import math
with self.swap(math, 'sqrt', lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
To mock class methods, pass the function to the classmethod decorator
first, for example:
import types
with self.swap(
SomePythonClass, 'some_classmethod',
classmethod(new_classmethod)):
NOTE: self.swap and other context managers that are created using
contextlib.contextmanager use generators that yield exactly once. This
means that you can only use them once after construction, otherwise,
the generator will immediately raise StopIteration, and contextlib will
raise a RuntimeError.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
@contextlib.contextmanager
def swap_to_always_return(self, obj, attr, value=None):
"""Swap obj.attr with a function that always returns the given value."""
def function_that_always_returns(*unused_args, **unused_kwargs):
"""Returns the input value."""
return value
with self.swap(obj, attr, function_that_always_returns):
yield
@contextlib.contextmanager
def swap_to_always_raise(self, obj, attr, error=Exception):
"""Swap obj.attr with a function that always raises the given error."""
def function_that_always_raises(*unused_args, **unused_kwargs):
"""Raises the input exception."""
raise error
with self.swap(obj, attr, function_that_always_raises):
yield
@contextlib.contextmanager
def swap_with_checks(
self, obj, attr, new_value, expected_args=None,
expected_kwargs=None, called=True):
"""Swap an object's function value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Examples:
If you want to check subprocess.Popen is invoked twice like
`subprocess.Popen(['python'], shell=True)` and
            `subprocess.Popen(['python2'], shell=False)`, you can first define the
mock function, then the swap, and just run the target function in
context, as follows:
def mock_popen(command, shell):
return
popen_swap = self.swap_with_checks(
subprocess, 'Popen', mock_popen,
expected_args=[(['python'],), (['python2'],)],
expected_kwargs=[{'shell': True}, {'shell': False}])
with popen_swap:
function_that_invokes_popen()
Args:
obj: *. The Python object whose attribute you want to swap.
attr: str. The name of the function to be swapped.
new_value: function. The new function you want to use.
expected_args: None|list(tuple). The expected args that you want
this function to be invoked with. When its value is None, args
will not be checked. If the value type is list, the function
will check whether the called args is the first element in the
list. If matched, this tuple will be removed from the list.
expected_kwargs: None|list(dict). The expected keyword args you want
this function to be invoked with. Similar to expected_args.
called: bool. Whether the function is expected to be invoked. This
will always be checked.
Yields:
context. The context with function replaced.
"""
original = getattr(obj, attr)
        # The actual error message will also include the detailed assert error message
# via the `self.longMessage` below.
msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
obj.__name__, attr)
def wrapper(*args, **kwargs):
"""Wrapper function for the new value. This function will do the
check before the wrapped function is invoked. After the function
finished, the wrapper will update how many times this function is
invoked.
Args:
*args: list(*). The args passed into `attr` function.
**kwargs: dict. The key word args passed into `attr` function.
Returns:
*. Result of `new_value`.
"""
wrapper.called = True
if expected_args is not None:
self.assertEqual(args, expected_args[0], msg=msg)
expected_args.pop(0)
if expected_kwargs is not None:
self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
expected_kwargs.pop(0)
result = new_value(*args, **kwargs)
return result
wrapper.called = False
setattr(obj, attr, wrapper)
error_occurred = False
try:
# This will show the detailed assert message.
self.longMessage = True
yield
except Exception:
error_occurred = True
# Raise issues thrown by the called function or assert error.
raise
finally:
setattr(obj, attr, original)
if not error_occurred:
self.assertEqual(wrapper.called, called, msg=msg)
self.assertFalse(expected_args, msg=msg)
self.assertFalse(expected_kwargs, msg=msg)
self.longMessage = False
def assertRaises(self, *args, **kwargs):
raise NotImplementedError(
'self.assertRaises should not be used in these tests. Please use '
'self.assertRaisesRegexp instead.')
def assertRaisesRegexp( # pylint: disable=keyword-arg-before-vararg
self, expected_exception, expected_regexp, callable_obj=None,
*args, **kwargs):
if not expected_regexp:
raise Exception(
'Please provide a sufficiently strong regexp string to '
'validate that the correct error is being raised.')
return super(TestBase, self).assertRaisesRegexp(
expected_exception, expected_regexp,
callable_obj=callable_obj, *args, **kwargs)
def assert_matches_regexps(self, items, regexps, full_match=False):
"""Asserts that each item matches the corresponding regexp.
If there are any missing or extra items that do not correspond to a
regexp element, then the assertion fails.
Args:
items: list(str). The string elements being matched.
regexps: list(str|RegexObject). The patterns that each item is
expected to match.
full_match: bool. Whether to require items to match exactly with the
corresponding pattern.
Raises:
AssertionError. At least one item does not match its corresponding
pattern, or the number of items does not match the number of
regexp patterns.
"""
get_match = re.match if full_match else re.search
differences = [
'~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
if get_match(regexp, item, re.DOTALL) is None
]
if len(items) < len(regexps):
extra_regexps = regexps[len(items):]
differences.extend(
'- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
for i, regexp in enumerate(extra_regexps, start=len(items)))
if len(regexps) < len(items):
extra_items = items[len(regexps):]
differences.extend(
'+ [i=%d]:\textra item %r' % (i, item)
for i, item in enumerate(extra_items, start=len(regexps)))
if differences:
error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
raise AssertionError(error_message)
class AppEngineTestBase(TestBase):
"""Minimal base class for tests that need Google App Engine functionality.
This class is primarily designed for unit tests in core.platform, where we
write adapters around Oppia's third-party dependencies. Generally, our unit
tests depend on stub implementations of these adapters to protect them from
platform-specific behavior. Such stubs are installed in the
GenericTestBase.run() method.
Most of the unit tests in our code base do, and should, inherit from
`GenericTestBase` to stay platform-agnostic. The platform layer itself,
however, can _not_ mock out platform-specific behavior. Those unit tests
need to interact with a real implementation. This base class provides the
bare-minimum functionality and stubs necessary to do so.
"""
# Environment values that our tests depend on.
AUTH_DOMAIN = 'example.com'
HTTP_HOST = 'localhost'
SERVER_NAME = 'localhost'
SERVER_PORT = '8080'
DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)
def __init__(self, *args, **kwargs):
super(AppEngineTestBase, self).__init__(*args, **kwargs)
# Defined outside of setUp() because we access it from methods, but can
# only install it during the run() method. Defining it in __init__
# satisfies pylint's attribute-defined-outside-init warning.
self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)
def setUp(self):
super(AppEngineTestBase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)
# Google App Engine service stubs.
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_memcache_stub()
self.testbed.init_search_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
policy = (
datastore_services.make_instantaneous_global_consistency_policy())
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
# The root path tells the testbed where to find the queue.yaml file.
self.testbed.init_taskqueue_stub(root_path=os.getcwd())
self._testbed_taskqueue_stub = (
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
# Set up apps for testing.
self.testapp = webtest.TestApp(main.app)
self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
self.mail_testapp = webtest.TestApp(main_mail.app)
def tearDown(self):
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
AppEngineTestBase's override of run() wraps super().run() in "swap"
contexts which stub out the platform taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
platform_taskqueue_services_swap = self.swap(
platform_taskqueue_services, 'create_http_task',
self._platform_taskqueue_services_stub.create_http_task)
with platform_taskqueue_services_swap:
super(AppEngineTestBase, self).run(result=result)
def _get_all_queue_names(self):
"""Returns a list of all queue names."""
return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]
def count_jobs_in_taskqueue(self, queue_name):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
queue_name=queue_name)
def process_and_flush_pending_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._platform_taskqueue_services_stub.process_and_flush_tasks(
queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.get_pending_tasks(
queue_name=queue_name)
def count_jobs_in_mapreduce_taskqueue(self, queue_name):
"""Counts the jobs in the given MapReduce taskqueue."""
return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))
def get_pending_mapreduce_tasks(self, queue_name=None):
"""Returns the jobs in the given MapReduce taskqueue. If queue_name is
None, defaults to returning the jobs in all available queues.
"""
queue_names = None if queue_name is None else [queue_name]
return self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
def _execute_mapreduce_tasks(self, tasks):
"""Execute MapReduce queued tasks.
Args:
tasks: list(google.appengine.api.taskqueue.taskqueue.Task). The
queued tasks.
"""
for task in tasks:
if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks will be for MapReduce or taskqueue.
params = task.payload or ''
headers = {
'Content-Length': python_utils.convert_to_bytes(len(params))
}
headers.update(
(key, python_utils.convert_to_bytes(val))
for key, val in task.headers.items())
app = (
self.taskqueue_testapp if task.url.startswith('/task') else
self.testapp)
response = app.post(
task.url, params=params, headers=headers,
expect_errors=True)
if response.status_code != 200:
raise RuntimeError('MapReduce task failed: %r' % task)
def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
"""Runs and flushes pending MapReduce tasks. If queue_name is None, does
so for all queues; otherwise, this only runs and flushes tasks for the
specified queue.
For more information on taskqueue_stub, see:
https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/api/taskqueue/taskqueue_stub.py
"""
queue_names = (
self._get_all_queue_names() if queue_name is None else [queue_name])
get_enqueued_tasks = lambda: list(
self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names))
# Loops until get_enqueued_tasks() returns an empty list.
for tasks in iter(get_enqueued_tasks, []):
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
def run_but_do_not_flush_pending_mapreduce_tasks(self):
""""Runs, but does not flush, the pending MapReduce tasks."""
queue_names = self._get_all_queue_names()
tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
"""Base test class with common/generic helper methods.
Unless a class is testing for "platform"-specific behavior (e.g., testing
third-party library code or database model implementations), always inherit
from this base class. Otherwise, inherit from unittest.TestCase (preferred)
or AppEngineTestBase if Google App Engine services/behavior is needed.
TODO(#12135): Split this enormous test base into smaller, focused pieces.
"""
# NOTE: For tests that do not/can not use the default super-admin, authors
# can override the following class-level constant.
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True
# This is the value that gets returned by default when
# app_identity.get_application_id() is called during tests.
EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'
SUPER_ADMIN_EMAIL = 'tmpsuperadmin@example.com'
SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'
# Dummy strings representing user attributes. Note that it is up to the
# individual test to actually register these users as editors, admins, etc.
ADMIN_EMAIL = 'admin@example.com'
# Usernames containing the string 'admin' are reserved, so we use 'adm'
# instead.
ADMIN_USERNAME = 'adm'
MODERATOR_EMAIL = 'moderator@example.com'
MODERATOR_USERNAME = 'moderator'
OWNER_EMAIL = 'owner@example.com'
OWNER_USERNAME = 'owner'
EDITOR_EMAIL = 'editor@example.com'
EDITOR_USERNAME = 'editor'
TOPIC_MANAGER_EMAIL = 'topicmanager@example.com'
TOPIC_MANAGER_USERNAME = 'topicmanager'
VOICE_ARTIST_EMAIL = 'voiceartist@example.com'
VOICE_ARTIST_USERNAME = 'voiceartist'
VIEWER_EMAIL = 'viewer@example.com'
VIEWER_USERNAME = 'viewer'
NEW_USER_EMAIL = 'new.user@example.com'
NEW_USER_USERNAME = 'newuser'
DEFAULT_END_STATE_NAME = 'End'
PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_21_STATE_DICT = {
'END': {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': 'Congratulations, you have finished!',
},
'content_ids_to_audio_translations': {'content': {}},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {
'recommendedExplorationIds': {'value': []},
},
'default_outcome': None,
'hints': [],
'id': 'EndExploration',
'solution': None,
},
'param_changes': [],
},
'Introduction': {
'classifier_model_id': None,
'content': {'content_id': 'content', 'html': ''},
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'feedback_1': {},
},
'interaction': {
'answer_groups': [{
'outcome': {
'dest': 'END',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Correct!</p>',
},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {'x': 'InputString'},
'rule_type': 'Equals',
}],
'tagged_misconception_id': None,
'training_data': ['answer1', 'answer2', 'answer3'],
}],
'confirmed_unclassified_answers': [],
'customization_args': {
'placeholder': {'value': ''},
'rows': {'value': 1},
},
'default_outcome': {
'dest': 'Introduction',
'feedback': {'content_id': 'default_outcome', 'html': ''},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': 'TextInput',
'solution': None,
},
'param_changes': [],
},
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with YAML generation tests. The
    # indentation is also important, since it is used to define nesting (just
    # like in Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
GenericTestBase's override of run() wraps super().run() in swap
contexts to mock out the cache and taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
memory_cache_services_stub = MemoryCacheServicesStub()
memory_cache_services_stub.flush_cache()
es_stub = ElasticSearchStub()
es_stub.reset()
with contextlib2.ExitStack() as stack:
stack.callback(AuthServicesStub.install_stub(self))
stack.enter_context(self.swap(
elastic_search_services.ES.indices, 'create',
es_stub.mock_create_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'index',
es_stub.mock_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'exists',
es_stub.mock_exists))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete',
es_stub.mock_delete))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete_by_query',
es_stub.mock_delete_by_query))
stack.enter_context(self.swap(
elastic_search_services.ES, 'search',
es_stub.mock_search))
stack.enter_context(self.swap(
memory_cache_services, 'flush_cache',
memory_cache_services_stub.flush_cache))
stack.enter_context(self.swap(
memory_cache_services, 'get_multi',
memory_cache_services_stub.get_multi))
stack.enter_context(self.swap(
memory_cache_services, 'set_multi',
memory_cache_services_stub.set_multi))
stack.enter_context(self.swap(
memory_cache_services, 'get_memory_cache_stats',
memory_cache_services_stub.get_memory_cache_stats))
stack.enter_context(self.swap(
memory_cache_services, 'delete_multi',
memory_cache_services_stub.delete_multi))
super(GenericTestBase, self).run(result=result)
def setUp(self):
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def tearDown(self):
datastore_services.delete_multi(
datastore_services.query_everything().iter(keys_only=True))
super(GenericTestBase, self).tearDown()
def login(self, email, is_super_admin=False):
"""Sets the environment variables to simulate a login.
Args:
email: str. The email of the user who is to be logged in.
is_super_admin: bool. Whether the user is a super admin.
"""
self.testbed.setup_env(
overwrite=True,
user_email=email, user_id=self.get_auth_id_from_email(email),
user_is_admin=('1' if is_super_admin else '0'))
def logout(self):
"""Simulates a logout by resetting the environment variables."""
self.testbed.setup_env(
overwrite=True, user_email='', user_id='', user_is_admin='0')
@contextlib.contextmanager
def mock_datetime_utcnow(self, mocked_datetime):
"""Mocks response from datetime.datetime.utcnow method.
Example usage:
import datetime
mocked_datetime_utcnow = (
datetime.datetime.utcnow() - datetime.timedelta(days=1))
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
                print(datetime.datetime.utcnow())  # Prints time reduced by 1 day.
                print(datetime.datetime.utcnow())  # Prints the current time.
Args:
mocked_datetime: datetime.datetime. The datetime which will be used
instead of the current UTC datetime.
Yields:
None. Empty yield statement.
"""
with datastore_services.mock_datetime_for_datastore(mocked_datetime):
yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
"""Log in with the given email under the context of a 'with' statement.
Args:
email: str. An email associated with a user account.
is_super_admin: bool. Whether the user is a super admin.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
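        Example usage (illustrative; the handler URL is a placeholder):
            with self.login_context(self.OWNER_EMAIL) as owner_id:
                response = self.get_json('/some_handler_url')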
"""
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
@contextlib.contextmanager
def super_admin_context(self):
"""Log in as a global admin under the context of a 'with' statement.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
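        Example usage (illustrative; the payload is a placeholder):
            with self.super_admin_context():
                self.post_json(
                    '/adminhandler', payload,
                    csrf_token=self.get_new_csrf_token())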
"""
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
def signup(self, email, username):
"""Complete the signup process for the user with the given username.
Args:
email: str. Email of the given user.
username: str. Username of the given user.
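        Example usage (illustrative):
            self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
            owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)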
"""
user_services.create_new_user(self.get_auth_id_from_email(email), email)
with self.login_context(email), requests_mock.Mocker() as m:
# We mock out all HTTP requests while trying to signup to avoid
# calling out to real backend services.
m.request(requests_mock.ANY, requests_mock.ANY)
response = self.get_html_response(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
'csrf_token': self.get_new_csrf_token(),
'payload': json.dumps(
{'username': username, 'agreed_to_terms': True}),
})
self.assertEqual(response.status_int, 200)
def signup_superadmin_user(self):
"""Signs up a superadmin user. Must be called at the end of setUp()."""
self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
"""Sets a given configuration object's value to the new value specified
using a POST request.
"""
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
def set_user_role(self, username, user_role):
"""Sets the given role for this user.
Args:
username: str. Username of the given user.
user_role: str. Role of the given user.
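        Example usage (illustrative):
            self.set_user_role(
                self.MODERATOR_USERNAME, feconf.ROLE_ID_MODERATOR)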
"""
with self.super_admin_context():
self.post_json('/adminrolehandler', {
'username': username,
'role': user_role,
}, csrf_token=self.get_new_csrf_token())
def set_admins(self, admin_usernames):
"""Sets role of given users as ADMIN.
Args:
admin_usernames: list(str). List of usernames.
"""
for name in admin_usernames:
self.set_user_role(name, feconf.ROLE_ID_ADMIN)
def set_topic_managers(self, topic_manager_usernames):
"""Sets role of given users as TOPIC_MANAGER.
Args:
topic_manager_usernames: list(str). List of usernames.
"""
for name in topic_manager_usernames:
self.set_user_role(name, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
"""Sets role of given users as MODERATOR.
Args:
moderator_usernames: list(str). List of usernames.
"""
for name in moderator_usernames:
self.set_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
"""Sets role of given users as BANNED_USER.
Args:
banned_usernames: list(str). List of usernames.
"""
for name in banned_usernames:
self.set_user_role(name, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
"""Sets role of given users as COLLECTION_EDITOR.
Args:
collection_editor_usernames: list(str). List of usernames.
"""
for name in collection_editor_usernames:
self.set_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
def get_user_id_from_email(self, email):
"""Gets the user ID corresponding to the given email.
Args:
email: str. A valid email stored in the App Engine database.
Returns:
str|None. ID of the user possessing the given email, or None if
the user does not exist.
"""
user_settings = user_services.get_user_settings_by_auth_id(
self.get_auth_id_from_email(email))
return user_settings and user_settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
"""Returns a mock auth ID corresponding to the given email.
This method can use any algorithm to produce results as long as, during
the runtime of each test case/method, it is:
1. Pure (same input always returns the same output).
2. One-to-one (no two distinct inputs return the same output).
3. An integer byte-string (integers are always valid in auth IDs).
Args:
email: str. The email address of the user.
Returns:
bytes. The mock auth ID of a user possessing the given email.
"""
# Although the hash function doesn't guarantee a one-to-one mapping, in
# practice it is sufficient for our tests. We make it a positive integer
# because those are always valid auth IDs.
return python_utils.convert_to_bytes(abs(hash(email)))
def _get_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
if expect_errors:
self.assertTrue(response.status_int >= 400)
else:
self.assertTrue(200 <= response.status_int < 400)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(response.status_int, expected_status_int)
self.assertEqual(response.content_type, expected_content_type)
return response
def get_html_response(self, url, params=None, expected_status_int=200):
"""Get a HTML response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will
be 200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response other than HTML or JSON as a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
def get_response_without_checking_for_errors(
self, url, expected_status_int_list, params=None):
"""Get a response, transformed to a Python object and checks for a list
of status codes.
Args:
url: str. The URL to fetch the response.
expected_status_int_list: list(int). A list of integer status code
to expect.
params: dict. A dictionary that will be encoded into a query string.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(url, params=params, expect_errors=True)
self.assertIn(response.status_int, expected_status_int_list)
return response
def _parse_json_response(self, json_response, expect_errors):
"""Convert a JSON server response to an object (such as a dict)."""
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url, params=None, expected_status_int=200):
"""Get a JSON response, transformed to a Python object."""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def post_json(
self, url, payload, csrf_token=None, expected_status_int=200,
upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self._send_post_request(
self.testapp, url, data, expect_errors,
expected_status_int=expected_status_int, upload_files=upload_files)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def delete_json(self, url, params='', expected_status_int=200):
"""Delete object on the server using a JSON call."""
if params:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
expect_errors = expected_status_int >= 400
json_response = self.testapp.delete(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
"""Sends a post request with the data provided to the url specified.
Args:
app: TestApp. The WSGI application which receives the request and
produces response.
url: str. The URL to send the POST request to.
            data: *. To be put in the body of the request. If data is an
                iterator, it will be urlencoded. If it is a string, it will not
be encoded, but placed in the body directly. Can be a
collections.OrderedDict with webtest.forms.Upload fields
included.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code.
upload_files: list(tuple). List of
(fieldname, filename, file_content) tuples. Can also provide
                just (fieldname, filename) to have the file contents
                read from disk.
headers: dict(str, *). Extra headers to send.
Returns:
webtest.TestResponse. The response of the POST request.
"""
# Convert the files to bytes.
if upload_files is not None:
upload_files = tuple(
tuple(python_utils.convert_to_bytes(f) for f in upload_file)
for upload_file in upload_files)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
def post_email(
self, recipient_email, sender_email, subject, body, html_body=None,
expect_errors=False, expected_status_int=200):
"""Post an email from the sender to the recipient.
Args:
recipient_email: str. The email of the recipient.
sender_email: str. The email of the sender.
subject: str. The subject of the email.
body: str. The body of the email.
html_body: str. The HTML body of the email.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code of the JSON
response.
Returns:
json. A JSON response generated by _send_post_request function.
"""
email = mail.EmailMessage(
sender=sender_email, to=recipient_email, subject=subject, body=body)
if html_body is not None:
email.html = html_body
mime_email = email.to_mime_message()
headers = {
'Content-Type': mime_email.get_content_type(),
}
data = mime_email.as_string()
incoming_email_url = '/_ah/mail/%s' % recipient_email
return self._send_post_request(
self.mail_testapp, incoming_email_url, data, expect_errors,
headers=headers, expected_status_int=expected_status_int)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Posts an object to the server by JSON with the specific headers
specified; return the received object.
"""
if csrf_token:
payload['csrf_token'] = csrf_token
return self.taskqueue_testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
"""PUT an object to the server with JSON and return the response."""
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
"""Generates CSRF token for test."""
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def set_interaction_for_state(self, state, interaction_id):
"""Sets the interaction_id, sets the fully populated default interaction
customization arguments, and increments next_content_id_index as needed.
Args:
state: State. The state domain object to set the interaction for.
interaction_id: str. The interaction id to set. Also sets the
default customization args for the given interaction id.
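        Example usage (illustrative; mirrors how this helper is called by the
        exploration-saving methods below):
            init_state = exploration.states[exploration.init_state_name]
            self.set_interaction_for_state(init_state, 'TextInput')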
"""
# We wrap next_content_id_index in a dict so that modifying it in the
# inner function modifies the value.
next_content_id_index_dict = {'value': state.next_content_id_index}
def traverse_schema_and_assign_content_ids(value, schema, contentId):
"""Generates content_id from recursively traversing the schema, and
assigning to the current value.
Args:
value: *. The current traversed value in customization
arguments.
schema: dict. The current traversed schema.
contentId: str. The content_id generated so far.
"""
is_subtitled_html_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
if is_subtitled_html_spec or is_subtitled_unicode_spec:
value['content_id'] = '%s_%i' % (
contentId, next_content_id_index_dict['value'])
next_content_id_index_dict['value'] += 1
elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
for x in value:
traverse_schema_and_assign_content_ids(
x, schema['items'], contentId)
elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
for schema_property in schema['properties']:
traverse_schema_and_assign_content_ids(
                        value[schema_property['name']],
                        schema_property['schema'],
                        '%s_%s' % (contentId, schema_property['name']))
interaction = (
interaction_registry.Registry.get_interaction_by_id(interaction_id))
ca_specs = interaction.customization_arg_specs
customization_args = {}
for ca_spec in ca_specs:
ca_name = ca_spec.name
ca_value = ca_spec.default_value
traverse_schema_and_assign_content_ids(
ca_value, ca_spec.schema, 'ca_%s' % ca_name)
customization_args[ca_name] = {'value': ca_value}
state.update_interaction_id(interaction_id)
state.update_interaction_customization_args(customization_args)
state.update_next_content_id_index(next_content_id_index_dict['value'])
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
interaction_id='TextInput', correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
end_state_name: str. The name of the end state for the exploration.
interaction_id: str. The id of the interaction.
correctness_feedback_enabled: bool. Whether correctness feedback is
enabled for the exploration.
Returns:
Exploration. The exploration domain object.
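        Example usage (illustrative; the exploration id is a placeholder):
            exploration = self.save_new_valid_exploration(
                'exp_id_0', owner_id, end_state_name='End')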
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category=category,
language_code=language_code)
self.set_interaction_for_state(
exploration.states[exploration.init_state_name], interaction_id)
exploration.objective = objective
exploration.correctness_feedback_enabled = correctness_feedback_enabled
# If an end state name is provided, add terminal node with that name.
if end_state_name is not None:
exploration.add_states([end_state_name])
end_state = exploration.states[end_state_name]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
# Link first state to ending state (to maintain validity).
init_state = exploration.states[exploration.init_state_name]
init_interaction = init_state.interaction
init_interaction.default_outcome.dest = end_state_name
if correctness_feedback_enabled:
init_interaction.default_outcome.labelled_as_correct = True
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_linear_exp_with_state_names_and_interactions(
self, exploration_id, owner_id, state_names, interaction_ids,
title='A title', category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new strictly-validated exploration with a sequence of states.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
state_names: list(str). The names of states to be linked
sequentially in the exploration. Must be a non-empty list and
contain no duplicates.
interaction_ids: list(str). The names of the interaction ids to be
assigned to each state. Values will be cycled, so it doesn't
need to be the same size as state_names, but it must be
non-empty.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
Returns:
Exploration. The exploration domain object.
"""
if not state_names:
raise ValueError('must provide at least one state name')
if not interaction_ids:
raise ValueError('must provide at least one interaction type')
interaction_ids = itertools.cycle(interaction_ids)
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, init_state_name=state_names[0],
category=category, objective=objective, language_code=language_code)
exploration.add_states(state_names[1:])
for from_state_name, dest_state_name in (
python_utils.ZIP(state_names[:-1], state_names[1:])):
from_state = exploration.states[from_state_name]
self.set_interaction_for_state(
from_state, python_utils.NEXT(interaction_ids))
from_state.interaction.default_outcome.dest = dest_state_name
end_state = exploration.states[state_names[-1]]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_exp_with_states_schema_v0(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 0 states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=0,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_0_STATES_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
# Create an ExplorationIssues model to match the behavior of creating
# new explorations.
stats_services.create_exp_issues_for_new_exploration(exp_id, 1)
def save_new_exp_with_custom_states_schema_version(
self, exp_id, user_id, states_dict, version):
"""Saves a new default exploration with the given version of state dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
states_dict: dict. The dict representation of all the states.
version: int. Custom states schema version.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title='title',
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=version,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'title\'.'
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title='title', category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def save_new_exp_with_states_schema_v21(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 21 states
dictionary. Version 21 is where training data of exploration is stored
with the states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=21,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_21_STATE_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
"""Publish the exploration with the given exploration_id.
Args:
owner_id: str. The user_id of the owner of the exploration.
exploration_id: str. The ID of the new exploration.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default collection written by owner_id.
Args:
collection_id: str. The id of the new default collection.
owner_id: str. The user_id of the creator of the collection.
title: str. The title of the collection.
category: str. The category this collection belongs to.
objective: str. The objective of this collection.
language_code: str. The language_code of this collection.
Returns:
Collection. The collection domain object.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
def save_new_valid_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
exploration_id='an_exploration_id',
end_state_name=DEFAULT_END_STATE_NAME):
"""Creates an Oppia collection and adds a node saving the exploration
details.
Args:
collection_id: str. ID for the collection to be created.
owner_id: str. The user_id of the creator of the collection.
title: str. Title for the collection.
category: str. The category of the exploration.
objective: str. Objective for the exploration.
language_code: str. The language code for the exploration.
exploration_id: str. The exploration_id for the Oppia exploration.
end_state_name: str. The name of the end state for the exploration.
Returns:
Collection. A newly-created collection containing the corresponding
exploration details.
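        Example usage (illustrative; the ids are placeholders):
            collection = self.save_new_valid_collection(
                'col_id_0', owner_id, exploration_id='exp_id_0')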
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
# Check whether exploration with given exploration_id exists or not.
exploration = (
exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
if exploration is None:
exploration = self.save_new_valid_exploration(
exploration_id, owner_id, title=title, category=category,
objective=objective, end_state_name=end_state_name)
collection.add_node(exploration.id)
collection_services.save_new_collection(owner_id, collection)
return collection
def publish_collection(self, owner_id, collection_id):
"""Publish the collection with the given collection_id.
Args:
owner_id: str. The user_id of the owner of the collection.
collection_id: str. ID of the collection to be published.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
"""Creates an Oppia Story and saves it.
NOTE: Callers are responsible for ensuring that the
'corresponding_topic_id' provided is valid, unless a test explicitly
requires it to be invalid.
Args:
story_id: str. ID for the story to be created.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
            notes: str. A set of notes that describe the characters,
main storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The url fragment of the story.
meta_tag_content: str. The meta tag content of the story.
Returns:
Story. A newly-created story.
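        Example usage (illustrative; the ids are placeholders, and the topic
        is assumed to exist already):
            story = self.save_new_story('story_id_0', owner_id, 'topic_id_0')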
"""
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
def save_new_story_with_story_contents_schema_v1(
self, story_id, thumbnail_filename, thumbnail_bg_color,
owner_id, title, description, notes, corresponding_topic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='story-frag',
meta_tag_content='story meta tag content'):
"""Saves a new story with a default version 1 story contents data dict.
This function should only be used for creating stories in tests
involving migration of datastore stories that use an old story contents
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating stories. This is because
the latter approach would result in a story with the *current* story
contents schema version.
Args:
story_id: str. ID for the story to be created.
thumbnail_filename: str|None. Thumbnail filename for the story.
thumbnail_bg_color: str|None. Thumbnail background color for the
story.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
            notes: str. A set of notes that describe the characters, main
storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The URL fragment for the story.
meta_tag_content: str. The meta tag content of the story.
"""
story_model = story_models.StoryModel(
id=story_id, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color, description=description,
title=title, language_code=language_code,
story_contents_schema_version=1, notes=notes,
corresponding_topic_id=corresponding_topic_id,
story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
url_fragment=url_fragment, meta_tag_content=meta_tag_content)
commit_message = 'New story created with title \'%s\'.' % title
story_model.commit(
owner_id, commit_message,
[{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
"""Creates an Oppia subtopic and saves it.
Args:
subtopic_id: str. ID for the subtopic to be created.
owner_id: str. The user_id of the creator of the topic.
topic_id: str. ID for the topic that the subtopic belongs to.
Returns:
SubtopicPage. A newly-created subtopic.
"""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
subtopic_id, topic_id))
subtopic_changes = [
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': topic_id,
'subtopic_id': subtopic_id,
})
]
subtopic_page_services.save_subtopic_page(
owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
return subtopic_page
def save_new_topic(
self, topic_id, owner_id, name='topic', abbreviated_name='topic',
url_fragment='topic',
thumbnail_filename='topic.svg',
thumbnail_bg_color=(
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
description='description', canonical_story_ids=None,
additional_story_ids=None, uncategorized_skill_ids=None,
subtopics=None, next_subtopic_id=0,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Creates an Oppia Topic and saves it.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
thumbnail_filename: str|None. The thumbnail filename of the topic.
thumbnail_bg_color: str|None. The thumbnail background color of the
topic.
description: str. The description of the topic.
canonical_story_ids: list(str). The list of ids of canonical stories
that are part of the topic.
additional_story_ids: list(str). The list of ids of additional
stories that are part of the topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
subtopics: list(Subtopic). The different subtopics that are part of
this topic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
Returns:
Topic. A newly-created topic.
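        Example usage (illustrative; the id and names are placeholders):
            topic = self.save_new_topic(
                'topic_id_0', owner_id, name='A Topic',
                abbreviated_name='a-topic', url_fragment='a-topic')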
"""
canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (canonical_story_ids or [])
]
additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (additional_story_ids or [])
]
uncategorized_skill_ids = uncategorized_skill_ids or []
subtopics = subtopics or []
topic = topic_domain.Topic(
topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
thumbnail_bg_color, description, canonical_story_references,
additional_story_references, uncategorized_skill_ids, subtopics,
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
meta_tag_content, practice_tab_is_displayed,
page_title_fragment_for_web)
topic_services.save_new_topic(owner_id, topic)
return topic
def save_new_topic_with_subtopic_schema_v1(
self, topic_id, owner_id, name, abbreviated_name, url_fragment,
canonical_name, description, thumbnail_filename, thumbnail_bg_color,
canonical_story_references, additional_story_references,
uncategorized_skill_ids, next_subtopic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Saves a new topic with a default version 1 subtopic data dict.
This function should only be used for creating topics in tests involving
migration of datastore topics that use an old subtopic schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating topics. This is because
the latter approach would result in a topic with the *current* subtopic
schema version.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
canonical_name: str. The canonical name (lowercase) of the topic.
description: str. The description of the topic.
thumbnail_filename: str. The thumbnail file name of the topic.
thumbnail_bg_color: str. The thumbnail background color of the
topic.
canonical_story_references: list(StoryReference). A set of story
reference objects representing the canonical stories that are
part of this topic.
additional_story_references: list(StoryReference). A set of story
                reference objects representing the additional stories that are
part of this topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
"""
topic_rights_model = topic_models.TopicRightsModel(
id=topic_id, manager_ids=[], topic_is_published=True)
topic_model = topic_models.TopicModel(
id=topic_id, name=name, abbreviated_name=abbreviated_name,
url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color,
canonical_name=canonical_name, description=description,
language_code=language_code,
canonical_story_references=canonical_story_references,
additional_story_references=additional_story_references,
uncategorized_skill_ids=uncategorized_skill_ids,
subtopic_schema_version=1,
story_reference_schema_version=(
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
next_subtopic_id=next_subtopic_id,
subtopics=[self.VERSION_1_SUBTOPIC_DICT],
meta_tag_content=meta_tag_content,
practice_tab_is_displayed=practice_tab_is_displayed,
page_title_fragment_for_web=page_title_fragment_for_web)
commit_message = 'New topic created with name \'%s\'.' % name
topic_rights_model.commit(
committer_id=owner_id,
commit_message='Created new topic rights',
commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
topic_model.commit(
owner_id, commit_message,
[{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Creates an Oppia Question and saves it.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
question_state_data: State. The state data for the question.
linked_skill_ids: list(str). List of skill IDs linked to the
question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
Returns:
Question. A newly-created question.
"""
        # This needs to be done because default arguments cannot be of list
        # type.
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
def save_new_question_with_state_data_schema_v27(
self, question_id, owner_id, linked_skill_ids,
inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default question with a default version 27 state data
dict.
This function should only be used for creating questions in tests
involving migration of datastore questions that use an old state data
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
        the latter approach would result in a question with the *current* state
data schema version.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
linked_skill_ids: list(str). The skill IDs linked to the question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
"""
# This needs to be done because default arguments can not be of list
# type.
question_model = question_models.QuestionModel(
id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
language_code=language_code, version=1,
question_state_data_schema_version=27,
linked_skill_ids=linked_skill_ids,
inapplicable_skill_misconception_ids=(
inapplicable_skill_misconception_ids or []))
question_model.commit(
owner_id, 'New question created',
[{'cmd': question_domain.CMD_CREATE_NEW}])
def save_new_question_suggestion_with_state_data_schema_v27(
self, author_id, skill_id, suggestion_id=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new question suggestion with a default version 27 state data
dict.
This function should only be used for creating question suggestion in
tests involving migration of datastore question suggestions that use an
old state data schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
        the latter approach would result in a question with the *current* state
data schema version.
"""
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': language_code,
'linked_skill_ids': [skill_id],
'inapplicable_skill_misconception_ids': []
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
if suggestion_id is None:
suggestion_id = (
feedback_models.GeneralFeedbackThreadModel.
generate_new_thread_id(
feconf.ENTITY_TYPE_SKILL, skill_id))
suggestion_models.GeneralSuggestionModel.create(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, skill_id, 1,
suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
score_category, suggestion_id, language_code)
return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
"""Creates an Oppia Skill and saves it.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
misconceptions: list(Misconception)|None. A list of Misconception
objects that contains the various misconceptions of the skill.
rubrics: list(Rubric)|None. A list of Rubric objects that contain
the rubric for each difficulty of the skill.
skill_contents: SkillContents|None. A SkillContents object
containing the explanation and examples of the skill.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
for the skill.
Returns:
Skill. A newly-created skill.
"""
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
def save_new_skill_with_defined_schema_versions(
self, skill_id, owner_id, description, next_misconception_id,
misconceptions=None, rubrics=None, skill_contents=None,
misconceptions_schema_version=1, rubric_schema_version=1,
skill_contents_schema_version=1,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default skill with the given versions for misconceptions
and skill contents.
This function should only be used for creating skills in tests involving
migration of datastore skills that use an old schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating skills. This is because
the latter approach would result in a skill with the *current* schema
version.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
next_misconception_id: int. The misconception id to be used by the
next misconception added.
misconceptions: list(Misconception.to_dict()). The list of
misconception dicts associated with the skill.
rubrics: list(Rubric.to_dict()). The list of rubric dicts associated
with the skill.
skill_contents: SkillContents.to_dict(). A SkillContents dict
containing the explanation and examples of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the rubric
object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
"""
skill_model = skill_models.SkillModel(
id=skill_id, description=description, language_code=language_code,
misconceptions=misconceptions, rubrics=rubrics,
skill_contents=skill_contents,
next_misconception_id=next_misconception_id,
misconceptions_schema_version=misconceptions_schema_version,
rubric_schema_version=rubric_schema_version,
skill_contents_schema_version=skill_contents_schema_version,
superseding_skill_id=None, all_questions_merged=False)
skill_model.commit(
owner_id, 'New skill created.',
[{'cmd': skill_domain.CMD_CREATE_NEW}])
def _create_valid_question_data(self, default_dest_state_name):
"""Creates a valid question_data dict.
Args:
default_dest_state_name: str. The default destination state.
Returns:
dict. The default question_data dict.
"""
state = state_domain.State.create_default_state(
default_dest_state_name, is_initial_state=True)
state.update_interaction_id('TextInput')
solution_dict = {
'answer_is_exclusive': False,
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>',
},
}
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
]
solution = state_domain.Solution.from_dict(
state.interaction.id, solution_dict)
state.update_interaction_solution(solution)
state.update_interaction_hints(hints_list)
state.update_interaction_customization_args({
'placeholder': {
'value': {
'content_id': 'ca_placeholder',
'unicode_str': 'Enter text here',
},
},
'rows': {'value': 1},
})
state.update_next_content_id_index(2)
state.interaction.default_outcome.labelled_as_correct = True
state.interaction.default_outcome.dest = None
return state
class LinterTestBase(GenericTestBase):
"""Base class for linter tests."""
def setUp(self):
super(LinterTestBase, self).setUp()
self.linter_stdout = []
def mock_print(*args):
"""Mock for python_utils.PRINT. Append the values to print to
linter_stdout list.
Args:
*args: list(*). Variable length argument list of values to print
in the same line of output.
"""
self.linter_stdout.append(
' '.join(python_utils.UNICODE(arg) for arg in args))
self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
def assert_same_list_elements(self, phrases, stdout):
"""Checks to see if all of the phrases appear in at least one of the
stdout outputs.
Args:
phrases: list(str). A list of phrases we are trying to find in one
of the stdout outputs. For example, python linting outputs a
success string that includes data we don't have easy access to,
like how long the test took, so we may want to search for a
substring of that success string in stdout.
stdout: list(str). A list of the output results from the method's
execution.
"""
self.assertTrue(
any(all(p in output for p in phrases) for output in stdout))
def assert_failed_messages_count(self, stdout, expected_failed_count):
"""Assert number of expected failed checks to actual number of failed
checks.
Args:
stdout: list(str). A list of linter output messages.
expected_failed_count: int. Expected number of failed messages.
"""
failed_count = sum(msg.startswith('FAILED') for msg in stdout)
self.assertEqual(failed_count, expected_failed_count)
class AuditJobsTestBase(GenericTestBase):
"""Base class for audit jobs tests."""
def run_job_and_check_output(
self, expected_output, sort=False, literal_eval=False):
"""Helper function to run job and compare output.
Args:
expected_output: list(*). The expected result of the job.
sort: bool. Whether to sort the outputs before comparison.
literal_eval: bool. Whether to use ast.literal_eval before
comparison.
"""
self.process_and_flush_pending_tasks()
job_id = self.job_class.create_new()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
self.job_class.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.process_and_flush_pending_tasks()
actual_output = self.job_class.get_output(job_id)
if literal_eval:
actual_output_dict = {}
expected_output_dict = {}
for item in (ast.literal_eval(value) for value in actual_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
actual_output_dict[item[0]] = value
for item in (ast.literal_eval(value) for value in expected_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
expected_output_dict[item[0]] = value
self.assertItemsEqual(actual_output_dict, expected_output_dict)
for key in actual_output_dict:
self.assertEqual(
actual_output_dict[key], expected_output_dict[key])
elif sort:
self.assertEqual(sorted(actual_output), sorted(expected_output))
else:
self.assertEqual(actual_output, expected_output)
class EmailMessageMock(python_utils.OBJECT):
"""Mock for core.platform.models email services messages."""
def __init__(
self, sender_email, recipient_email, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Inits a mock email message with all the necessary data.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_email: str. The email address of the recipient. Must be
utf-8.
            subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Emails
must be utf-8.
            reply_to: str|None. Optional argument. Reply address formatted like
                'reply+<reply_id>@<incoming_email_domain_name>', where reply_id
                is the unique id of the sender.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'bob@example.com': {'first': 'Bob', 'id': 1},
'alice@example.com': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
"""
self.sender = sender_email
self.to = recipient_email
self.subject = subject
self.body = plaintext_body
self.html = html_body
self.bcc = bcc
self.reply_to = reply_to
self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
"""Base class for tests requiring email services."""
emails_dict = collections.defaultdict(list)
def run(self, result=None):
"""Adds a context swap on top of the test_utils.run() method so that
test classes extending GenericEmailTestBase will automatically have a
mailgun api key, mailgun domain name and mocked version of
send_email_to_recipients().
"""
with self.swap(
email_services, 'send_email_to_recipients',
self._send_email_to_recipients):
super(EmailTestBase, self).run(result=result)
def setUp(self):
super(GenericEmailTestBase, self).setUp()
self._wipe_emails_dict()
def _wipe_emails_dict(self):
"""Reset email dictionary for a new test."""
self.emails_dict = collections.defaultdict(list)
def _send_email_to_recipients(
self, sender_email, recipient_emails, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Mocks sending an email to each email in recipient_emails.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Must be utf-8.
            subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Must be
utf-8.
            reply_to: str|None. Optional argument. Reply address formatted like
                'reply+<reply_id>@<incoming_email_domain_name>', where reply_id
                is the unique id of the sender.
            recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'bob@example.com': {'first': 'Bob', 'id': 1},
'alice@example.com': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are sent successfully.
"""
bcc_emails = None
if bcc:
bcc_emails = bcc[0] if len(bcc) == 1 else bcc
new_email = EmailMessageMock(
sender_email, recipient_emails, subject, plaintext_body, html_body,
bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
recipient_variables=(
recipient_variables if recipient_variables else None))
for recipient_email in recipient_emails:
self.emails_dict[recipient_email].append(new_email)
return True
def _get_sent_email_messages(self, to):
"""Gets messages to a single recipient email.
Args:
to: str. The recipient email address.
Returns:
list(EmailMessageMock). The list of email messages corresponding to
that recipient email.
"""
return self.emails_dict[to] if to in self.emails_dict else []
def _get_all_sent_email_messages(self):
"""Gets the entire messages dictionary.
Returns:
dict(str, list(EmailMessageMock)). The dict keyed by recipient
email. Each value contains a list of EmailMessageMock objects
corresponding to that recipient email; in other words, all
individual emails sent to that specific recipient email.
"""
return self.emails_dict
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
"""Base class for classifier test classes that need common functions
for related to reading classifier data and mocking the flow of the
storing the trained models through post request.
This class is derived from GenericEmailTestBase because the
TrainedClassifierHandlerTests test suite requires email services test
functions in addition to the classifier functions defined below.
"""
def post_blob(self, url, payload, expected_status_int=200):
"""Post a BLOB object to the server; return the received object.
        Note that this method should only be used for the
        classifier.TrainedClassifierHandler handler and for nothing else. The
        reason is that we don't have any general mechanism for securely
        transferring binary data. TrainedClassifierHandler implements a
        specific mechanism which is restricted to the handler.
Args:
url: str. The URL to which BLOB object in payload should be sent
through a post request.
payload: bytes. Binary data which needs to be sent.
expected_status_int: int. The status expected as a response of post
request.
Returns:
dict. Parsed JSON response received upon invoking the post request.
"""
data = payload
expect_errors = False
if expected_status_int >= 400:
expect_errors = True
response = self._send_post_request(
self.testapp, url, data,
expect_errors, expected_status_int=expected_status_int,
headers={b'content-type': b'application/octet-stream'})
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
# Reference URL:
# https://github.com/Pylons/webtest/blob/
# bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
self.assertEqual(response.status_int, expected_status_int)
return self._parse_json_response(response, expect_errors)
def _get_classifier_data_from_classifier_training_job(
self, classifier_training_job):
"""Retrieves classifier training job from GCS using metadata stored in
classifier_training_job.
Args:
classifier_training_job: ClassifierTrainingJob. Domain object
containing metadata of the training job which is used to
retrieve the trained model.
Returns:
FrozenModel. Protobuf object containing classifier data.
"""
filename = classifier_training_job.classifier_data_filename
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
classifier_data = utils.decompress_from_zlib(fs.get(filename))
classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
classifier_data_proto.ParseFromString(classifier_data)
return classifier_data_proto
class FunctionWrapper(python_utils.OBJECT):
"""A utility for making function wrappers. Create a subclass and override
    either or both of the pre_call_hook and post_call_hook methods. See these
methods for more info.
"""
def __init__(self, func):
"""Creates a new FunctionWrapper instance.
Args:
func: a callable, or data descriptor. If it's a descriptor, then
__get__ should return a bound method. For example, func can be
a function, a method, a static or class method, but not a
@property.
"""
self._func = func
self._instance = None
def __call__(self, *args, **kwargs):
"""Overrides the call method for the function to call pre_call_hook
method which would be called before the function is executed and
post_call_hook which would be called after the function is executed.
"""
if self._instance is not None:
args = [self._instance] + list(args)
args_dict = inspect.getcallargs(self._func, *args, **kwargs)
self.pre_call_hook(args_dict)
result = self._func(*args, **kwargs)
self.post_call_hook(args_dict, result)
return result
def __get__(self, instance, owner):
# We have to implement __get__ because otherwise, we don't have a chance
# to bind to the instance self._func was bound to. See the following SO
# answer: https://stackoverflow.com/a/22555978/675311
self._instance = instance
return self
def pre_call_hook(self, args):
"""Override this to do tasks that should be executed before the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
"""
pass
def post_call_hook(self, args, result):
"""Override this to do tasks that should be executed after the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
result: *. Result returned from the function.
"""
pass
class CallCounter(FunctionWrapper):
"""A function wrapper that keeps track of how often the function is called.
Note that the counter is incremented before each call, so it is also
increased when the function raises an exception.
"""
def __init__(self, f):
"""Counts the number of times the given function has been called. See
FunctionWrapper for arguments.
"""
super(CallCounter, self).__init__(f)
self._times_called = 0
@property
def times_called(self):
"""Property that returns the number of times the wrapped function has
been called.
Returns:
int. The number of times the wrapped function has been called.
"""
return self._times_called
def pre_call_hook(self, args):
"""Method that is called before each function call to increment the
counter tracking the number of times a function is called. This will
also be called even when the function raises an exception.
Args:
args: list(*). Set of arguments that the function accepts.
"""
self._times_called += 1
class FailingFunction(FunctionWrapper):
"""A function wrapper that makes a function fail, raising a given exception.
It can be set to succeed after a given number of calls.
"""
INFINITY = 'infinity'
def __init__(self, f, exception, num_tries_before_success):
"""Create a new Failing function.
Args:
f: func. See FunctionWrapper.
exception: Exception. The exception to be raised.
            num_tries_before_success: int. The number of times to raise an
                exception before a call succeeds. If this is 0, all calls will
                succeed; if it is FailingFunction.INFINITY, all calls will
                fail.
"""
super(FailingFunction, self).__init__(f)
self._exception = exception
self._num_tries_before_success = num_tries_before_success
self._always_fail = (
self._num_tries_before_success == FailingFunction.INFINITY)
self._times_called = 0
if not (self._num_tries_before_success >= 0 or self._always_fail):
raise ValueError(
'num_tries_before_success should either be an '
'integer greater than or equal to 0, '
'or FailingFunction.INFINITY')
def pre_call_hook(self, args):
"""Method that is called each time before the actual function call to
check if the exception is to be raised based on the number of tries
before success.
Args:
args: list(*). Set of arguments this function accepts.
"""
self._times_called += 1
call_should_fail = (
self._num_tries_before_success >= self._times_called)
if call_should_fail or self._always_fail:
raise self._exception
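# Hypothetical usage sketch (not part of the original file) illustrating the
# wrappers defined above; the wrapped callable `fetch` and the exception used
# here are assumptions made purely for illustration:
#
#     counted_fetch = CallCounter(fetch)
#     counted_fetch()
#     counted_fetch()
#     assert counted_fetch.times_called == 2
#
#     flaky_fetch = FailingFunction(fetch, IOError('down'), 2)
#     flaky_fetch()   # raises IOError (1st call)
#     flaky_fetch()   # raises IOError (2nd call)
#     flaky_fetch()   # succeeds from the 3rd call onwards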
| [
[
[
688,
703
]
],
[
[
766,
782
]
],
[
[
830,
833
],
[
141536,
141539
],
[
141785,
141788
]
],
[
[
841,
852
],
[
144834,
144845
],
[
145569,
145580
]
],
[
[
860,
870
],
[
38510,
38520
],
[
40016,
40026
],
[
41283,
41293
],
[
41680,
41690
],
[
42080,
42090
],
[
74861,
74871
],
[
75704,
75714
],
[
76323,
76333
]
],
[
[
878,
882
],
[
37619,
37623
]
],
[
[
890,
897
],
[
7285,
7292
],
[
7328,
7335
],
[
7505,
7512
],
[
153331,
153338
]
],
[
[
905,
914
],
[
5298,
5307
],
[
102550,
102559
]
],
[
[
922,
926
],
[
77599,
77603
],
[
86973,
86977
],
[
88219,
88223
],
[
93562,
93566
],
[
93901,
93905
]
],
[
[
934,
941
],
[
38576,
38583
],
[
39659,
39666
],
[
39720,
39727
]
],
[
[
949,
951
],
[
5435,
5437
],
[
5338,
5340
],
[
6302,
6304
],
[
23745,
23747
],
[
23791,
23793
],
[
23854,
23856
],
[
38195,
38197
],
[
51091,
51093
]
],
[
[
959,
961
],
[
47629,
47631
],
[
47657,
47659
],
[
47878,
47880
]
],
[
[
969,
977
],
[
35801,
35809
]
],
[
[
1001,
1010
],
[
98800,
98809
],
[
101258,
101267
],
[
110736,
110745
],
[
111805,
111814
],
[
114086,
114095
],
[
115880,
115889
],
[
119350,
119359
],
[
119610,
119619
],
[
122842,
122851
],
[
126828,
126837
],
[
128179,
128188
],
[
130027,
130036
],
[
132043,
132052
],
[
134491,
134500
],
[
38171,
38180
],
[
133721,
133730
],
[
133831,
133840
],
[
133941,
133950
]
],
[
[
1040,
1044
],
[
82865,
82869
],
[
86278,
86282
]
],
[
[
1069,
1080
],
[
23937,
23948
]
],
[
[
1105,
1119
],
[
34135,
34149
]
],
[
[
1144,
1161
],
[
111349,
111366
],
[
112753,
112770
]
],
[
[
1186,
1205
],
[
111534,
111553
],
[
113396,
113415
]
],
[
[
1230,
1240
],
[
69777,
69787
],
[
71104,
71114
],
[
95345,
95355
],
[
99779,
99789
],
[
102606,
102616
]
],
[
[
1265,
1277
],
[
113044,
113056
]
],
[
[
1302,
1314
],
[
95465,
95477
],
[
100932,
100944
],
[
103415,
103427
]
],
[
[
1339,
1348
],
[
151910,
151919
]
],
[
[
1373,
1384
],
[
151854,
151865
]
],
[
[
1409,
1429
],
[
97965,
97985
]
],
[
[
1454,
1469
],
[
127688,
127703
],
[
129842,
129857
],
[
130839,
130854
]
],
[
[
1494,
1511
],
[
127909,
127926
]
],
[
[
1536,
1550
],
[
104675,
104689
],
[
107043,
107057
],
[
109226,
109240
],
[
110489,
110503
],
[
113844,
113858
]
],
[
[
1575,
1587
],
[
133127,
133139
],
[
133680,
133692
],
[
133790,
133802
],
[
133900,
133912
],
[
136850,
136862
]
],
[
[
1612,
1626
],
[
134086,
134100
]
],
[
[
1651,
1663
],
[
137181,
137193
],
[
137626,
137638
],
[
137661,
137673
],
[
137755,
137767
]
],
[
[
1688,
1702
],
[
105694,
105708
]
],
[
[
1727,
1739
],
[
115229,
115241
],
[
118101,
118113
]
],
[
[
1764,
1778
],
[
115590,
115604
]
],
[
[
1803,
1823
],
[
118608,
118628
],
[
118753,
118773
],
[
118818,
118838
]
],
[
[
1848,
1870
],
[
118970,
118992
]
],
[
[
1895,
1913
],
[
141019,
141037
],
[
141200,
141218
]
],
[
[
1938,
1950
],
[
121476,
121488
],
[
121663,
121675
],
[
121915,
121927
],
[
126476,
126488
],
[
126594,
126606
]
],
[
[
1975,
1989
],
[
122418,
122432
]
],
[
[
2014,
2027
],
[
76988,
77001
],
[
80779,
80792
],
[
110441,
110454
],
[
113796,
113809
]
],
[
[
2054,
2060
],
[
2673,
2679
],
[
2717,
2723
],
[
2736,
2742
],
[
2762,
2768
],
[
2797,
2803
],
[
2820,
2826
],
[
2840,
2846
],
[
2872,
2878
],
[
2897,
2903
],
[
2944,
2950
],
[
3012,
3018
],
[
3073,
3079
],
[
3137,
3143
],
[
3202,
3208
],
[
3271,
3277
],
[
6970,
6976
],
[
7201,
7207
]
],
[
[
2094,
2117
],
[
72203,
72226
],
[
72352,
72375
],
[
72485,
72508
],
[
72620,
72643
],
[
72755,
72778
],
[
72908,
72931
]
],
[
[
2154,
2174
],
[
29707,
29727
]
],
[
[
2182,
2188
],
[
59353,
59359
],
[
69741,
69747
],
[
69832,
69838
],
[
69864,
69870
],
[
69900,
69906
],
[
71068,
71074
],
[
71161,
71167
],
[
71193,
71199
],
[
71229,
71235
],
[
77366,
77372
],
[
77482,
77488
],
[
79141,
79147
],
[
79453,
79459
],
[
79753,
79759
],
[
80044,
80050
],
[
80382,
80388
],
[
86936,
86942
],
[
87007,
87013
],
[
104554,
104560
],
[
105254,
105260
],
[
105316,
105322
],
[
106937,
106943
],
[
107619,
107625
],
[
107681,
107687
],
[
109105,
109111
],
[
109805,
109811
],
[
109867,
109873
],
[
122177,
122183
],
[
122265,
122271
],
[
125918,
125924
],
[
127772,
127778
],
[
131504,
131510
],
[
131610,
131616
],
[
131659,
131665
],
[
151970,
151976
]
],
[
[
2196,
2200
],
[
51287,
51291
]
],
[
[
2208,
2217
],
[
51410,
51419
]
],
[
[
2225,
2239
],
[
51346,
51360
]
],
[
[
2258,
2277
],
[
152138,
152157
]
],
[
[
2285,
2297
],
[
7659,
7671
],
[
19053,
19065
],
[
29227,
29239
],
[
33520,
33532
],
[
142435,
142447
],
[
152310,
152322
],
[
6355,
6367
],
[
17881,
17893
],
[
25779,
25791
],
[
30350,
30362
],
[
30525,
30537
],
[
30617,
30629
],
[
35560,
35572
],
[
36951,
36963
],
[
37011,
37023
],
[
39289,
39301
],
[
47805,
47817
],
[
55390,
55402
],
[
55509,
55521
],
[
81801,
81813
],
[
91502,
91514
],
[
102926,
102938
],
[
103113,
103125
],
[
139050,
139062
],
[
138969,
138981
]
],
[
[
2305,
2317
],
[
96766,
96778
],
[
96856,
96868
],
[
96977,
96989
],
[
97067,
97079
],
[
97394,
97406
],
[
97607,
97619
]
],
[
[
2325,
2330
],
[
36655,
36660
],
[
38460,
38465
],
[
152061,
152066
]
],
[
[
2339,
2350
],
[
19681,
19692
],
[
72046,
72057
]
],
[
[
2358,
2371
],
[
8889,
8902
],
[
10206,
10219
],
[
13985,
13998
]
],
[
[
2405,
2409
],
[
92566,
92570
]
],
[
[
2443,
2451
],
[
55165,
55173
]
],
[
[
2485,
2492
],
[
50139,
50146
],
[
51179,
51186
]
],
[
[
2500,
2513
],
[
77102,
77115
],
[
77281,
77294
],
[
77300,
77313
]
],
[
[
2521,
2528
],
[
51271,
51278
],
[
51330,
51337
],
[
51394,
51401
]
],
[
[
2536,
2547
],
[
27909,
27920
],
[
28887,
28898
]
],
[
[
2549,
2559
],
[
104308,
104318
],
[
105001,
105011
],
[
105081,
105091
],
[
106683,
106693
],
[
107364,
107374
],
[
107444,
107454
],
[
108858,
108868
],
[
109552,
109562
],
[
109632,
109642
]
],
[
[
2561,
2576
],
[
131400,
131415
]
],
[
[
2578,
2593
],
[
129375,
129390
]
],
[
[
2595,
2607
],
[
136223,
136235
]
],
[
[
2613,
2625
],
[
117469,
117481
]
],
[
[
2627,
2644
],
[
130676,
130693
],
[
130728,
130745
],
[
131549,
131566
],
[
131710,
131727
]
],
[
[
2646,
2658
],
[
125168,
125180
],
[
125287,
125299
]
],
[
[
2920,
2941
]
],
[
[
2991,
3009
],
[
28844,
28862
],
[
50824,
50842
],
[
74026,
74044
],
[
74071,
74089
],
[
75615,
75633
]
],
[
[
3056,
3070
],
[
145203,
145217
]
],
[
[
3113,
3134
],
[
73043,
73064
],
[
73197,
73218
],
[
73347,
73368
],
[
73497,
73518
],
[
73673,
73694
]
],
[
[
3177,
3199
],
[
19800,
19822
],
[
19955,
19977
],
[
20106,
20128
],
[
20273,
20295
],
[
20428,
20450
],
[
20605,
20627
],
[
20822,
20844
],
[
20981,
21003
],
[
21140,
21162
],
[
21315,
21337
],
[
21490,
21512
],
[
21661,
21683
]
],
[
[
3241,
3268
],
[
52198,
52225
]
],
[
[
3443,
3458
],
[
36994,
37009
]
],
[
[
3656,
3696
]
],
[
[
3914,
3940
],
[
6256,
6282
]
],
[
[
5601,
5619
],
[
82888,
82906
],
[
86301,
86319
]
],
[
[
6429,
6452
]
],
[
[
6749,
6779
],
[
7147,
7177
]
],
[
[
7051,
7076
]
],
[
[
7641,
7658
],
[
71988,
72005
]
],
[
[
19036,
19052
],
[
72107,
72123
]
],
[
[
29205,
29226
],
[
50019,
50040
]
],
[
[
33496,
33519
],
[
71895,
71918
]
],
[
[
35792,
35800
],
[
48604,
48612
],
[
46672,
46680
]
],
[
[
48586,
48603
],
[
57424,
57441
],
[
49695,
49712
],
[
50083,
50100
],
[
51498,
51515
],
[
52381,
52398
]
],
[
[
57408,
57423
],
[
138448,
138463
],
[
140344,
140359
],
[
144740,
144755
],
[
73789,
73804
],
[
73867,
73882
],
[
74145,
74160
]
],
[
[
138433,
138447
],
[
138541,
138555
]
],
[
[
140326,
140343
]
],
[
[
142418,
142434
],
[
147734,
147750
]
],
[
[
144719,
144739
],
[
148981,
149001
],
[
149029,
149049
],
[
145386,
145406
]
],
[
[
148965,
148978
],
[
145310,
145323
]
],
[
[
149010,
149028
]
],
[
[
152294,
152309
],
[
154438,
154453
],
[
155594,
155609
]
],
[
[
154426,
154437
],
[
154841,
154852
]
],
[
[
155578,
155593
],
[
156293,
156308
],
[
156506,
156521
]
]
] |
"""
:author: Thomas Delaet <thomas@delaet.org>
"""
from velbus.modules.vmb4ry import VMB4RYModule
from velbus.modules.vmbin import VMB6INModule
from velbus.modules.vmbin import VMB7INModule
| [
[
[
86,
98
]
],
[
[
132,
144
]
],
[
[
178,
190
]
]
] |
from unittest.mock import MagicMock
from django.urls import reverse
from hijack.contrib.admin import HijackUserAdminMixin
from hijack.tests.test_app.models import Post
class TestHijackUserAdminMixin:
def test_user_admin(self, admin_client):
url = reverse("admin:test_app_customuser_changelist")
response = admin_client.get(url)
assert response.status_code == 200
assert (
b'<button type="submit" class="button">HIJACK</button>' in response.content
)
def test_related_user(self, admin_client, admin_user):
url = reverse("admin:test_app_post_changelist")
Post.objects.create(author=admin_user)
response = admin_client.get(url)
assert response.status_code == 200
assert b"Hijack admin" in response.content
def test_get_hijack_success_url__obj_absolute_url(self, rf):
obj = Post()
obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
admin = HijackUserAdminMixin()
assert admin.get_hijack_success_url(None, obj) == "/path/to/obj/"
def test_get_hijack_success_url__obj_no_absolute_url(self, rf):
obj = Post()
admin = HijackUserAdminMixin()
assert admin.get_hijack_success_url(None, obj) == "/accounts/profile/"
def test_get_hijack_success_url__hijack_success_url(self, rf):
obj = Post()
obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
admin = HijackUserAdminMixin()
admin.hijack_success_url = "/custom/success/path/"
assert admin.get_hijack_success_url(None, obj) == "/custom/success/path/"
| [
[
[
26,
35
],
[
926,
935
],
[
1407,
1416
]
],
[
[
61,
68
],
[
263,
270
],
[
584,
591
]
],
[
[
103,
123
],
[
982,
1002
],
[
1185,
1205
],
[
1463,
1483
]
],
[
[
165,
169
],
[
634,
638
],
[
888,
892
],
[
1162,
1166
],
[
1369,
1373
]
],
[
[
178,
202
]
]
] |
"""
Django settings for modelpractice project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ligk%x$+)qey=q+&d_nca7%s-_@zn4%g=kg_4+p!ga7n)-4nb@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelpractice.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'modelpractice.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
[
[
321,
323
],
[
408,
410
],
[
424,
426
],
[
440,
442
],
[
2236,
2238
]
],
[
[
397,
405
],
[
2249,
2257
]
],
[
[
673,
683
]
],
[
[
806,
811
]
],
[
[
820,
833
]
],
[
[
867,
881
]
],
[
[
1075,
1085
]
],
[
[
1489,
1501
]
],
[
[
1526,
1535
]
],
[
[
2011,
2027
]
],
[
[
2141,
2150
]
],
[
[
2386,
2410
]
],
[
[
2889,
2902
]
],
[
[
2914,
2923
]
],
[
[
2933,
2941
]
],
[
[
2950,
2958
]
],
[
[
2967,
2973
]
],
[
[
3085,
3095
]
]
] |
# -*- coding: utf-8 -*-
""" Command line configuration parser """
import sys
import os.path
import argparse
import configparser
def parse():
""" Parse command line options """
parser = argparse.ArgumentParser(
description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB')
parser.add_argument(
'-c', '--config',
help='Read configuration from a configuration file')
parser.add_argument(
'--dry-run',
action='store_true',
help='Run without making any changes to your DynamoDB table')
parser.add_argument(
'--run-once',
action='store_true',
help='Run once and then exit Dynamic DynamoDB, instead of looping')
parser.add_argument(
'--show-config',
action='store_true',
help='Parse config files, print parsed data and then exit Dynamic DynamoDB')
parser.add_argument(
'--check-interval',
type=int,
help="""How many seconds should we wait between
the checks (default: 300)""")
parser.add_argument(
'--log-file',
help='Send output to the given log file')
parser.add_argument(
'--log-level',
choices=['debug', 'info', 'warning', 'error'],
help='Log level to use (default: info)')
parser.add_argument(
'--log-config-file',
help=(
'Use a custom Python logging configuration file. Overrides both '
'--log-level and --log-file.'
))
parser.add_argument(
'--version',
action='store_true',
help='Print current version number')
parser.add_argument(
'--aws-access-key-id',
help="Override Boto configuration with the following AWS access key")
parser.add_argument(
'--aws-secret-access-key',
help="Override Boto configuration with the following AWS secret key")
daemon_ag = parser.add_argument_group('Daemon options')
daemon_ag.add_argument(
'--daemon',
help=(
'Run Dynamic DynamoDB in daemon mode. Valid modes are '
'[start|stop|restart|foreground]'))
daemon_ag.add_argument(
'--instance',
default='default',
help=(
'Name of the Dynamic DynamoDB instance. '
'Used to run multiple instances of Dynamic DynamoDB. '
'Give each instance a unique name and control them separately '
'with the --daemon flag. (default: default)'))
daemon_ag.add_argument(
'--pid-file-dir',
default='/tmp',
help='Directory where pid file is located in. Defaults to /tmp')
dynamodb_ag = parser.add_argument_group('DynamoDB options')
dynamodb_ag.add_argument(
'-r', '--region',
        help='AWS region to operate in (default: us-east-1)')
dynamodb_ag.add_argument(
'-t', '--table-name',
help=(
'Table(s) to target. '
'The name is treated as a regular expression. '
'E.g. "^my_table.*$" or "my_table"'))
r_scaling_ag = parser.add_argument_group('Read units scaling properties')
r_scaling_ag.add_argument(
'--reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the currently consumed read units reaches this many
percent (default: 90)""")
r_scaling_ag.add_argument(
'--throttled-reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the count of throttled read events exceeds this
count (default: 0)""")
r_scaling_ag.add_argument(
'--reads-lower-threshold',
type=int,
help="""Scale down the reads with --decrease-reads-with if the
currently consumed read units is as low as this
percentage (default: 30)""")
r_scaling_ag.add_argument(
'--increase-reads-with',
type=int,
help="""How much should we increase the read units with?
(default: 50, max: 100 if --increase-reads-unit = percent)""")
r_scaling_ag.add_argument(
'--decrease-reads-with',
type=int,
help="""How much should we decrease the read units with?
(default: 50)""")
r_scaling_ag.add_argument(
'--increase-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--decrease-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--min-provisioned-reads',
type=int,
help="""Minimum number of provisioned reads""")
r_scaling_ag.add_argument(
'--max-provisioned-reads',
type=int,
help="""Maximum number of provisioned reads""")
r_scaling_ag.add_argument(
'--num-read-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
r_scaling_ag.add_argument(
'--num-read-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_read_checks_before
scale_down var to reset back to 0""")
w_scaling_ag = parser.add_argument_group('Write units scaling properties')
w_scaling_ag.add_argument(
'--writes-upper-threshold',
type=int,
help="""Scale up the writes with --increase-writes-with
if the currently consumed write units reaches this
many percent (default: 90)""")
w_scaling_ag.add_argument(
'--throttled-writes-upper-threshold',
type=int,
help="""Scale up the reads with --increase-writes-with if
the count of throttled write events exceeds this
count (default: 0)""")
w_scaling_ag.add_argument(
'--writes-lower-threshold',
type=int,
help="""Scale down the writes with --decrease-writes-with
if the currently consumed write units is as low as this
percentage (default: 30)""")
w_scaling_ag.add_argument(
'--increase-writes-with',
type=int,
help="""How much should we increase the write units with?
(default: 50,
max: 100 if --increase-writes-unit = 'percent')""")
w_scaling_ag.add_argument(
'--decrease-writes-with',
type=int,
help="""How much should we decrease the write units with?
(default: 50)""")
w_scaling_ag.add_argument(
'--increase-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--decrease-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--min-provisioned-writes',
type=int,
help="""Minimum number of provisioned writes""")
w_scaling_ag.add_argument(
'--max-provisioned-writes',
type=int,
help="""Maximum number of provisioned writes""")
w_scaling_ag.add_argument(
'--num-write-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
w_scaling_ag.add_argument(
'--num-write-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_write_checks_before
scale_down var to reset back to 0""")
args = parser.parse_args()
# Print the version and quit
if args.version:
# Read the dynamic-dynamodb.conf configuration file
internal_config_file = configparser.RawConfigParser()
internal_config_file.optionxform = lambda option: option
internal_config_file.read(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), '../dynamic-dynamodb.conf')))
print('Dynamic DynamoDB version: {0}'.format(
internal_config_file.get('general', 'version')))
sys.exit(0)
# Replace any new values in the configuration
configuration = {}
for arg in args.__dict__:
if args.__dict__.get(arg) is not None:
configuration[arg] = args.__dict__.get(arg)
return configuration
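# Hypothetical usage sketch (not part of the original module): parse() returns a
# plain dict containing every parsed option whose value is not None, so boolean
# flags and options with defaults are always present. For example, an invocation
# along the lines of
#
#     dynamic-dynamodb --config dynamic-dynamodb.conf --table-name "^my_table.*$"
#
# would yield a configuration dict containing, among other keys,
# {'config': 'dynamic-dynamodb.conf', 'table_name': '^my_table.*$', 'dry_run': False}.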
| [
[
[
73,
76
],
[
8231,
8234
]
],
[
[
84,
91
],
[
7983,
7985
],
[
8016,
8018
],
[
8050,
8052
]
],
[
[
99,
107
],
[
195,
203
]
],
[
[
115,
127
],
[
7840,
7852
]
],
[
[
134,
139
]
]
] |
# -*- coding: utf-8 -*-
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
import json
from time import sleep, time
from pprint import pprint
from itertools import cycle
from .storage import nodes, api_total
#from .proxy import Proxy
class Http():
http = Session()
proxies = None
class RpcClient(Http):
    RPS_DELAY = 0.10  # minimum delay between requests, i.e. at most ~10 requests per second
last_request = 0.0
""" Simple Steem JSON-RPC API
This class serves as an abstraction layer for easy use of the Steem API.
rpc = RpcClient(nodes=nodes) or rpc = RpcClient()
Args:
nodes (list): A list of Steem HTTP RPC nodes to connect to.
any call available to that port can be issued using the instance
rpc.call('command', *parameters)
"""
headers = {'User-Agent': 'thallid', 'content-type': 'application/json'}
def __init__(self, report=False, **kwargs):
self.api_total = api_total
self.report = report
self.PROXY = kwargs.get("PROXY", False)
if self.PROXY: self.proxies = Proxy()
        self.nodes = cycle(kwargs.get("nodes", nodes))  # Cycle through the available nodes
self.url = next(self.nodes)
        self.num_retries = kwargs.get("num_retries", 3)  # Number of attempts to connect to a node
adapter = HTTPAdapter(max_retries=self.num_retries)
for node in nodes:
self.http.mount(node, adapter)
def get_response(self, payload):
data = json.dumps(payload, ensure_ascii=False).encode('utf8')
while True:
n = 1
proxies = self.proxies.get_http() if self.PROXY else None
while n < self.num_retries:
try:
                    # Limit the number of requests per second
delay = self.RPS_DELAY - (time() - self.last_request)
if delay > 0: sleep(delay)
#response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, auth=auth)
response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, timeout=30)
self.last_request = time()
if response.status_code == 503:
proxies = self.proxies.new_http() if self.PROXY else None # next proxy
print('new proxy', proxies)
else:
return response
#except ConnectionError as ce:
except:
#print('ce', ce)
sleeptime = (n - 1) * 2
if self.report:
print("Lost connection to node during rpcconnect(): %s (%d/%d) " % (self.url, n, self.num_retries))
print("Retrying in %d seconds" % sleeptime)
sleep(sleeptime)
n += 1
self.url = next(self.nodes) # next node
print("Trying to connect to node %s" % self.url, 'error in get_response rpc_client', proxies)
return False
def call(self, name, *params, **kwargs):
        # Determine which api to use for this name
api = self.api_total[name]
#method = kwargs.get('method', 'condenser_api.') #steem
method = kwargs.get('method', 'call')
parameters = kwargs.get('params', [api, name, params])
#payload = {"method": method + name, "params": parameters, "id": 1, "jsonrpc": '2.0'} #steem
payload = {"method": method, "params": parameters, "id": 1, "jsonrpc": '2.0'}
result = None
n = 1
while n < self.num_retries:
response = self.get_response(payload)
if response:
if response.status_code == 200:
try:
res = response.json()
if 'error' in res:
if self.report:
#pprint(res["error"]["message"])
print('ERROR IN RES', res["error"]["message"])
else:
result = res["result"]
break
except:
print('ERROR JSON', response)
#elif response.status_code == 503:
# proxies = self.proxies.new_http() if self.PROXY else None # next proxy
# print('new proxy', proxies)
else:
if self.report:
print(n, 'ERROR status_code', response.status_code, response.text)
else:
print('not connection to node', self.url)
print('response', response)
n += 1
self.url = next(self.nodes) # next node
sleep(n * 2)
print("Trying to connect to node %s" % self.url, 'for method', name)
return result
#----- main -----
if __name__ == '__main__':
pass | [
[
[
46,
53
],
[
329,
336
]
],
[
[
84,
95
],
[
1248,
1259
]
],
[
[
128,
143
]
],
[
[
152,
156
],
[
1397,
1401
]
],
[
[
174,
179
],
[
1707,
1712
],
[
2436,
2441
],
[
3920,
3925
]
],
[
[
181,
185
],
[
1660,
1664
],
[
1956,
1960
]
],
[
[
205,
211
]
],
[
[
234,
239
],
[
1063,
1068
]
],
[
[
262,
267
],
[
1089,
1094
],
[
1304,
1309
]
],
[
[
269,
278
],
[
929,
938
]
],
[
[
312,
316
],
[
373,
377
]
],
[
[
363,
372
]
]
] |
import json
import pathlib
import sys
import time
from typing import Sequence
import bentoml
from bentoml.adapters import (
DataframeInput,
FileInput,
ImageInput,
JsonInput,
MultiImageInput,
)
from bentoml.frameworks.sklearn import SklearnModelArtifact
from bentoml.handlers import DataframeHandler # deprecated
from bentoml.service.artifacts.pickle import PickleArtifact
from bentoml.types import InferenceResult, InferenceTask
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([PickleArtifact("model"), SklearnModelArtifact('sk_model')])
class ExampleService(bentoml.BentoService):
"""
Example BentoService class made for testing purpose
"""
@bentoml.api(
input=DataframeInput(dtype={"col1": "int"}),
mb_max_latency=1000,
mb_max_batch_size=2000,
batch=True,
)
def predict_dataframe(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(DataframeHandler, dtype={"col1": "int"}, batch=True) # deprecated
def predict_dataframe_v1(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(
input=MultiImageInput(input_names=('original', 'compared')), batch=True
)
def predict_multi_images(self, originals, compareds):
return self.artifacts.model.predict_multi_images(originals, compareds)
@bentoml.api(input=ImageInput(), batch=True)
def predict_image(self, images):
return self.artifacts.model.predict_image(images)
@bentoml.api(
input=JsonInput(), mb_max_latency=1000, mb_max_batch_size=2000, batch=True,
)
def predict_with_sklearn(self, jsons):
return self.artifacts.sk_model.predict(jsons)
@bentoml.api(input=FileInput(), batch=True)
def predict_file(self, files):
return self.artifacts.model.predict_file(files)
@bentoml.api(input=JsonInput(), batch=True)
def predict_json(self, input_datas):
return self.artifacts.model.predict_json(input_datas)
@bentoml.api(input=JsonInput(), batch=True)
def predict_strict_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
return self.artifacts.model.predict_json(filtered_jsons)
@bentoml.api(input=JsonInput(), batch=True)
def predict_direct_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
rets = self.artifacts.model.predict_json(filtered_jsons)
return [
InferenceResult(http_status=200, data=json.dumps(result)) for result in rets
]
@bentoml.api(input=JsonInput(), mb_max_latency=10000 * 1000, batch=True)
def echo_with_delay(self, input_datas):
data = input_datas[0]
time.sleep(data['b'] + data['a'] * len(input_datas))
return input_datas
if __name__ == "__main__":
artifacts_path = sys.argv[1]
bento_dist_path = sys.argv[2]
service = ExampleService()
service.artifacts.load_all(artifacts_path)
pathlib.Path(bento_dist_path).mkdir(parents=True, exist_ok=True)
service.save_to_dir(bento_dist_path)
| [
[
[
7,
11
],
[
3026,
3030
]
],
[
[
19,
26
],
[
3494,
3501
]
],
[
[
34,
37
],
[
3365,
3368
],
[
3399,
3402
]
],
[
[
45,
49
],
[
3235,
3239
]
],
[
[
69,
77
],
[
2112,
2120
],
[
2587,
2595
]
],
[
[
86,
93
],
[
592,
599
],
[
454,
461
],
[
492,
499
],
[
693,
700
],
[
947,
954
],
[
1130,
1137
],
[
1372,
1379
],
[
1517,
1524
],
[
1723,
1730
],
[
1863,
1870
],
[
2015,
2022
],
[
2490,
2497
],
[
3081,
3088
]
],
[
[
129,
143
],
[
720,
734
]
],
[
[
149,
158
],
[
1741,
1750
]
],
[
[
164,
174
],
[
1390,
1400
]
],
[
[
180,
189
],
[
1544,
1553
],
[
1881,
1890
],
[
2033,
2042
],
[
2508,
2517
],
[
3099,
3108
]
],
[
[
195,
210
],
[
1157,
1172
]
],
[
[
253,
273
],
[
536,
556
]
],
[
[
303,
319
],
[
959,
975
]
],
[
[
379,
393
],
[
511,
525
]
],
[
[
420,
435
],
[
2988,
3003
]
],
[
[
437,
450
],
[
2121,
2134
],
[
2596,
2609
]
],
[
[
577,
591
],
[
3425,
3439
]
],
[
[
3348,
3362
],
[
3473,
3487
]
],
[
[
3381,
3396
],
[
3507,
3522
],
[
3583,
3598
]
],
[
[
3415,
3422
],
[
3446,
3453
],
[
3563,
3570
]
]
] |
from PIL import Image
images = []
for i in range(9):
images.append(Image.open(f"../examples/lf/results/render_0{i}_{i}.0.png"))
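# Save as an animated GIF: the remaining frames are appended to the first one,
# each shown for 100 ms, looping forever (loop=0).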
images[0].save("../examples/lf/out.gif", save_all=True, append_images=images[1:], duration=100, loop=0) | [
[
[
16,
21
],
[
72,
77
]
],
[
[
23,
29
],
[
58,
64
],
[
133,
139
],
[
203,
209
]
],
[
[
39,
40
],
[
117,
118
],
[
121,
122
]
]
] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import FieldTracker
from waldur_core.core import models as core_models
from waldur_core.structure import models as structure_models
class VirtualMachineMixin(models.Model):
class Meta:
abstract = True
guest_os = models.CharField(
max_length=50,
help_text=_(
'Defines the valid guest operating system '
'types used for configuring a virtual machine'
),
)
cores = models.PositiveSmallIntegerField(
default=0, help_text=_('Number of cores in a VM')
)
cores_per_socket = models.PositiveSmallIntegerField(
default=1, help_text=_('Number of cores per socket in a VM')
)
ram = models.PositiveIntegerField(
default=0, help_text=_('Memory size in MiB'), verbose_name=_('RAM')
)
disk = models.PositiveIntegerField(default=0, help_text=_('Disk size in MiB'))
class VirtualMachine(
VirtualMachineMixin, core_models.RuntimeStateMixin, structure_models.BaseResource
):
class RuntimeStates:
POWERED_OFF = 'POWERED_OFF'
POWERED_ON = 'POWERED_ON'
SUSPENDED = 'SUSPENDED'
CHOICES = (
(POWERED_OFF, 'Powered off'),
(POWERED_ON, 'Powered on'),
(SUSPENDED, 'Suspended'),
)
class GuestPowerStates:
RUNNING = 'RUNNING'
SHUTTING_DOWN = 'SHUTTING_DOWN'
RESETTING = 'RESETTING'
STANDBY = 'STANDBY'
NOT_RUNNING = 'NOT_RUNNING'
UNAVAILABLE = 'UNAVAILABLE'
CHOICES = (
(RUNNING, 'Running'),
(SHUTTING_DOWN, 'Shutting down'),
(RESETTING, 'Resetting'),
(STANDBY, 'Standby'),
(NOT_RUNNING, 'Not running'),
(UNAVAILABLE, 'Unavailable'),
)
class ToolsStates:
STARTING = 'STARTING'
RUNNING = 'RUNNING'
NOT_RUNNING = 'NOT_RUNNING'
CHOICES = (
(STARTING, 'Starting'),
(RUNNING, 'Running'),
(NOT_RUNNING, 'Not running'),
)
template = models.ForeignKey('Template', null=True, on_delete=models.SET_NULL)
cluster = models.ForeignKey('Cluster', null=True, on_delete=models.SET_NULL)
datastore = models.ForeignKey('Datastore', null=True, on_delete=models.SET_NULL)
folder = models.ForeignKey('Folder', null=True, on_delete=models.SET_NULL)
networks = models.ManyToManyField('Network', blank=True)
guest_power_enabled = models.BooleanField(
default=False,
help_text='Flag indicating if the virtual machine is ready to process soft power operations.',
)
guest_power_state = models.CharField(
'The power state of the guest operating system.',
max_length=150,
blank=True,
choices=GuestPowerStates.CHOICES,
)
tools_installed = models.BooleanField(default=False)
tools_state = models.CharField(
'Current running status of VMware Tools running in the guest operating system.',
max_length=50,
blank=True,
choices=ToolsStates.CHOICES,
)
tracker = FieldTracker()
@classmethod
def get_backend_fields(cls):
return super(VirtualMachine, cls).get_backend_fields() + (
'runtime_state',
'cores',
'cores_per_socket',
'ram',
'disk',
'tools_installed',
'tools_state',
)
@classmethod
def get_url_name(cls):
return 'vmware-virtual-machine'
@property
def total_disk(self):
return self.disks.aggregate(models.Sum('size'))['size__sum'] or 0
def __str__(self):
return self.name
class Port(core_models.RuntimeStateMixin, structure_models.BaseResource):
vm = models.ForeignKey(on_delete=models.CASCADE, to=VirtualMachine)
network = models.ForeignKey(on_delete=models.CASCADE, to='Network')
mac_address = models.CharField(
max_length=32, blank=True, verbose_name=_('MAC address')
)
@classmethod
def get_backend_fields(cls):
return super(Port, cls).get_backend_fields() + ('name', 'mac_address')
@classmethod
def get_url_name(cls):
return 'vmware-port'
def __str__(self):
return self.name
class Disk(structure_models.BaseResource):
size = models.PositiveIntegerField(help_text=_('Size in MiB'))
vm = models.ForeignKey(
on_delete=models.CASCADE, to=VirtualMachine, related_name='disks'
)
@classmethod
def get_url_name(cls):
return 'vmware-disk'
def __str__(self):
return self.name
@classmethod
def get_backend_fields(cls):
return super(Disk, cls).get_backend_fields() + ('name', 'size')
class Template(
VirtualMachineMixin, core_models.DescribableMixin, structure_models.ServiceProperty
):
created = models.DateTimeField()
modified = models.DateTimeField()
@classmethod
def get_url_name(cls):
return 'vmware-template'
def __str__(self):
return self.name
class Cluster(structure_models.ServiceProperty):
@classmethod
def get_url_name(cls):
return 'vmware-cluster'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerCluster(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
cluster = models.ForeignKey('Cluster', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.cluster)
class Meta:
unique_together = ('customer', 'cluster')
class Network(structure_models.ServiceProperty):
type = models.CharField(max_length=255)
@classmethod
def get_url_name(cls):
return 'vmware-network'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerNetwork(models.Model):
# This model allows to specify allowed networks for VM provision
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
network = models.ForeignKey('Network', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.network)
class Meta:
unique_together = ('customer', 'network')
class CustomerNetworkPair(models.Model):
# This model allows to specify allowed networks for existing VM NIC provision
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
network = models.ForeignKey('Network', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.network)
class Meta:
unique_together = ('customer', 'network')
class Datastore(structure_models.ServiceProperty):
type = models.CharField(max_length=255)
capacity = models.PositiveIntegerField(
help_text="Capacity, in MB.", null=True, blank=True
)
free_space = models.PositiveIntegerField(
help_text="Available space, in MB.", null=True, blank=True
)
@classmethod
def get_url_name(cls):
return 'vmware-datastore'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerDatastore(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
datastore = models.ForeignKey('Datastore', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.datastore)
class Meta:
unique_together = ('customer', 'datastore')
class Folder(structure_models.ServiceProperty):
def __str__(self):
return '%s / %s' % (self.settings, self.name)
@classmethod
def get_url_name(cls):
return 'vmware-folder'
class CustomerFolder(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
folder = models.ForeignKey('Folder', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.folder)
class Meta:
unique_together = ('customer', 'folder')
| [
[
[
22,
28
],
[
263,
269
],
[
334,
340
],
[
540,
546
],
[
661,
667
],
[
780,
786
],
[
902,
908
],
[
2138,
2144
],
[
2189,
2195
],
[
2220,
2226
],
[
2270,
2276
],
[
2303,
2309
],
[
2355,
2361
],
[
2385,
2391
],
[
2434,
2440
],
[
2466,
2472
],
[
2538,
2544
],
[
2715,
2721
],
[
2905,
2911
],
[
2958,
2964
],
[
3821,
3827
],
[
3849,
3855
],
[
3898,
3904
],
[
3926,
3932
],
[
3974,
3980
],
[
4372,
4378
],
[
4437,
4443
],
[
4474,
4480
],
[
4905,
4911
],
[
4943,
4949
],
[
5322,
5328
],
[
5352,
5358
],
[
5407,
5413
],
[
5437,
5443
],
[
5476,
5482
],
[
5702,
5708
],
[
5914,
5920
],
[
6013,
6019
],
[
6068,
6074
],
[
6098,
6104
],
[
6137,
6143
],
[
6329,
6335
],
[
6441,
6447
],
[
6496,
6502
],
[
6526,
6532
],
[
6565,
6571
],
[
6793,
6799
],
[
6841,
6847
],
[
6953,
6959
],
[
7238,
7244
],
[
7268,
7274
],
[
7323,
7329
],
[
7355,
7361
],
[
7396,
7402
],
[
7790,
7796
],
[
7820,
7826
],
[
7875,
7881
],
[
7904,
7910
],
[
7942,
7948
],
[
3649,
3655
]
],
[
[
66,
84
],
[
393,
394
],
[
603,
604
],
[
724,
725
],
[
838,
839
],
[
876,
877
],
[
951,
952
],
[
4040,
4041
],
[
4410,
4411
]
],
[
[
109,
121
],
[
3165,
3177
]
],
[
[
152,
173
],
[
1023,
1034
],
[
3749,
3760
],
[
4825,
4836
]
],
[
[
208,
234
],
[
1054,
1070
],
[
3780,
3796
],
[
4329,
4345
],
[
4855,
4871
],
[
5109,
5125
],
[
5370,
5386
],
[
5656,
5672
],
[
6031,
6047
],
[
6459,
6475
],
[
6747,
6763
],
[
7286,
7302
],
[
7579,
7595
],
[
7838,
7854
]
],
[
[
243,
262
],
[
1002,
1021
],
[
4804,
4823
]
],
[
[
982,
996
],
[
3868,
3882
],
[
4493,
4507
],
[
3252,
3266
]
],
[
[
3744,
3748
],
[
4135,
4139
]
],
[
[
4324,
4328
],
[
4731,
4735
]
],
[
[
4790,
4798
]
],
[
[
5101,
5108
]
],
[
[
5306,
5321
]
],
[
[
5648,
5655
]
],
[
[
5898,
5913
]
],
[
[
6309,
6328
]
],
[
[
6737,
6746
]
],
[
[
7220,
7237
]
],
[
[
7572,
7578
]
],
[
[
7775,
7789
]
]
] |
# -*- coding: utf-8 -*-
# Meta
__version__ = "0.0.4"
__author__ = 'Rhys Elsmore'
__email__ = 'me@rhys.io'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Rhys Elsmore'
# Module Namespace
from .core import MetricsLogger, GroupMetricsLogger
from .api import timer, increment, sample, measure, unique, group
| [
[
[
33,
44
]
],
[
[
55,
65
]
],
[
[
83,
92
]
],
[
[
108,
119
]
],
[
[
135,
148
]
],
[
[
220,
233
]
],
[
[
235,
253
]
],
[
[
271,
276
]
],
[
[
278,
287
]
],
[
[
289,
295
]
],
[
[
297,
304
]
],
[
[
306,
312
]
],
[
[
314,
319
]
]
] |
## Copyright 2019 The Rules Protobuf Authors. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
load("//proto:repositories.bzl", "rules_proto_dependencies")
load("//proto:repositories.bzl", "rules_proto_toolchains")
_DEPRECATED_REPOSITORY_RULE_MESSAGE = " ".join([
"{old_rule}() is deprecated.",
"Please import @build_bazel_rules_proto//proto:repositories.bzl and use {new_rule}().",
"See https://github.com/Yannic/rules_proto/issues/6",
])
def proto_import_dependencies():
print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
old_rule = "proto_import_dependencies",
new_rule = "rules_proto_dependencies",
))
rules_proto_dependencies()
def proto_register_toolchains():
print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
old_rule = "proto_register_toolchains",
new_rule = "rules_proto_toolchains",
))
rules_proto_toolchains()
| [
[
[
746,
781
],
[
1027,
1062
],
[
1249,
1284
]
],
[
[
988,
1013
]
],
[
[
1210,
1235
]
]
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
import math
import unittest
import random
import numpy as np
from singa import tensor
from singa import singa_wrap as singa_api
from singa import autograd
from cuda_helper import gpu_dev, cpu_dev
class TestTensorMethods(unittest.TestCase):
def setUp(self):
self.shape = (2, 3)
self.t = tensor.Tensor(self.shape)
self.s = tensor.Tensor(self.shape)
self.t.set_value(0)
self.s.set_value(0)
def test_tensor_fields(self):
t = self.t
shape = self.shape
self.assertTupleEqual(t.shape, shape)
self.assertEqual(t.shape[0], shape[0])
self.assertEqual(t.shape[1], shape[1])
self.assertEqual(tensor.product(shape), 2 * 3)
self.assertEqual(t.ndim(), 2)
self.assertEqual(t.size(), 2 * 3)
self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(tensor.float32))
self.assertFalse(t.is_transpose())
def test_unary_operators(self):
t = self.t
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
t -= 0.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)
t *= 2.5
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)
t /= 2
self.assertAlmostEqual(
tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)
def test_binary_operators(self):
t = self.t
t += 3.2
s = self.s
s += 2.1
a = t + s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)
a = t - s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)
a = t * s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)
''' not implemented yet
a = t / s
self.assertAlmostEqual(tensor.to_numpy(a)[0,0], 3.2/2.1, 5)
'''
def test_comparison_operators(self):
t = self.t
t += 3.45
a = t < 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t <= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t > 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t >= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t == 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.lt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.le(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.gt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.ge(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.eq(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
def test_tensor_copy(self):
t = tensor.Tensor((2, 3))
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tc = t.copy()
tdc = t.deepcopy()
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
def test_copy_data(self):
t = self.t
t += 1.23
s = self.s
s += 5.43
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tensor.copy_data_to_from(t, s, 2)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
def test_global_method(self):
t = self.t
t += 12.34
a = tensor.log(t)
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))
def test_random(self):
x = tensor.Tensor((1000,))
x.gaussian(1, 0.01)
self.assertAlmostEqual(tensor.average(x), 1, 3)
def test_radd(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 + x
self.assertEqual(tensor.average(y), 2.)
def test_rsub(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 - x
self.assertEqual(tensor.average(y), 0.)
def test_rmul(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 * x
self.assertEqual(tensor.average(y), 2.)
def test_rdiv(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 / x
self.assertEqual(tensor.average(y), 2.)
def matmul_high_dim_helper(self, dev):
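        # Verify autograd.matmul against numpy.matmul for a few higher-dimensional shapes.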
configs = [
[(1, 12, 7, 64), (1, 12, 64, 7)],
[(1, 7, 768), (768, 768)],
]
print()
for config in configs:
X = np.random.random(config[0]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random(config[1]).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
y_t = np.matmul(X, W)
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)
def test_matmul_high_dim_cpu(self):
self.matmul_high_dim_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_high_dim_gpu(self):
self.matmul_high_dim_helper(gpu_dev)
def test_tensor_inplace_api(self):
""" tensor inplace methods alter internal state and also return self
"""
x = tensor.Tensor((3,))
y = x.set_value(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.uniform(1, 2)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.bernoulli(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.gaussian(1, 2)
self.assertTrue(y is x)
def test_numpy_convert(self):
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0)
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0.)
def test_transpose(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
A1 = np.transpose(a)
tA1 = tensor.transpose(ta)
TA1 = tensor.to_numpy(tA1)
A2 = np.transpose(a, [0, 2, 1])
tA2 = tensor.transpose(ta, [0, 2, 1])
TA2 = tensor.to_numpy(tA2)
np.testing.assert_array_almost_equal(TA1, A1)
np.testing.assert_array_almost_equal(TA2, A2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_gpu_6d_transpose(self, dev=gpu_dev):
        s0 = (2, 3, 4, 5, 6, 7)
        axes1 = [5, 4, 3, 2, 1, 0]
        s1 = (2, 7, 6, 5, 4, 3)
        s2 = (2, 4, 3, 5, 7, 6)
        a = np.random.random(s1)
        ta = tensor.from_numpy(a)
        ta.to_device(dev)
        ta = tensor.reshape(ta, s1)
        ta = tensor.transpose(ta, axes1)
        ta = tensor.reshape(ta, s2)
        a = np.reshape(a, s1)
        a = np.transpose(a, axes1)
        a = np.reshape(a, s2)
np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)
def test_einsum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.einsum('kij,kij->kij', a, a)
tres1 = tensor.einsum('kij,kij->kij', ta, ta)
Tres1 = tensor.to_numpy(tres1)
res2 = np.einsum('kij,kih->kjh', a, a)
tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
Tres2 = tensor.to_numpy(tres2)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
def test_repeat(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
ta_repeat1 = tensor.repeat(ta, 2, axis=None)
a_repeat1 = np.repeat(a, 2, axis=None)
Ta_repeat1 = tensor.to_numpy(ta_repeat1)
ta_repeat2 = tensor.repeat(ta, 4, axis=1)
a_repeat2 = np.repeat(a, 4, axis=1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
def test_sum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
a_sum0 = np.sum(a)
ta_sum0 = tensor.sum(ta)
Ta_sum0 = tensor.to_numpy(ta_sum0)
a_sum1 = np.sum(a, axis=1)
ta_sum1 = tensor.sum(ta, axis=1)
Ta_sum1 = tensor.to_numpy(ta_sum1)
a_sum2 = np.sum(a, axis=2)
ta_sum2 = tensor.sum(ta, axis=2)
Ta_sum2 = tensor.to_numpy(ta_sum2)
self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
def test_tensordot(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.tensordot(a, a, axes=1)
tres1 = tensor.tensordot(ta, ta, axes=1)
Tres1 = tensor.to_numpy(tres1)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
np.testing.assert_array_almost_equal(Tres1, res1)
res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))
tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))
np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)
def test_reshape(self):
a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],
[[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])
ta = tensor.from_numpy(a)
tb = tensor.reshape(ta, [2, 6])
self.assertAlmostEqual(tb.shape[0], 2., places=3)
self.assertAlmostEqual(tb.shape[1], 6., places=3)
np.testing.assert_array_almost_equal(tensor.to_numpy(tb),
a.reshape((2, 6)))
def test_transpose_then_reshape(self):
a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],
[[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])
TRANSPOSE_AXES = (2, 0, 1)
RESHAPE_DIMS = (2, 6)
ta = tensor.from_numpy(a)
ta = ta.transpose(TRANSPOSE_AXES)
ta = ta.reshape(RESHAPE_DIMS)
np.testing.assert_array_almost_equal(
tensor.to_numpy(ta),
np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))
def _concatenate_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
t3 = tensor.concatenate((t1, t2), 3)
np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
def test_concatenate_cpu(self):
self._concatenate_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concatenate_gpu(self):
self._concatenate_helper(gpu_dev)
def _subscription_helper(self, dev):
np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)
sg_tensor = tensor.Tensor(device=dev, data=np1)
sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]
np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),
np1[1:3, :, 1:, :-1])
def test_subscription_cpu(self):
self._subscription_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_subscription_gpu(self):
self._subscription_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.ceil(t1)
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _astype_helper(self, dev):
shape1 = [2, 3]
shape2 = [3, 2]
np_flt = np.random.random(shape1).astype(np.float32)
np_flt = np_flt * 10 - 5
np_int = np_flt.astype(np.int32)
np_flt2 = np_int.astype(np.float32)
t2 = tensor.Tensor(device=dev, data=np_flt)
t2 = t2.as_type('int')
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)
t1 = t2.reshape(shape2)
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_int.reshape(shape2))
t1 = t1.as_type('float')
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_flt2.reshape(shape2))
def test_astype_cpu(self):
self._astype_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_astype_gpu(self):
self._astype_helper(gpu_dev)
def _3d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 3).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_3d_matmul_cpu(self):
self._3d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_3d_matmul_gpu(self):
self._3d_matmul_helper(gpu_dev)
def _4d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 1024).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_4d_matmul_cpu(self):
self._4d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_4d_matmul_gpu(self):
self._4d_matmul_helper(gpu_dev)
def _matmul_transpose_helper(self, dev):
X = np.random.random((1, 256, 12, 64)).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random((1, 256, 12, 64)).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
X = np.transpose(X, (0, 2, 1, 3))
W = np.transpose(W, (0, 2, 1, 3))
W = np.transpose(W, (0, 1, 3, 2))
Y = np.matmul(X, W)
x = autograd.transpose(x, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 1, 3, 2))
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)
np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)
def test_matmul_transpose_cpu(self):
self._matmul_transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_transpose_gpu(self):
self._matmul_transpose_helper(gpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gaussian_gpu(self, dev=gpu_dev):
x = tensor.Tensor((3, 5, 3, 5), device=dev)
x.gaussian(0, 1)
x = tensor.Tensor((4, 5, 3, 2), device=dev)
x.gaussian(0, 1)
def _kfloat32_int(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.random((2, 3)).astype(np.float32) * 10
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = np.random.random((1,))[0] * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kfloat32_int_gpu(self):
self._kfloat32_int(gpu_dev)
def test_kfloat32_int_cpu(self):
self._kfloat32_int(cpu_dev)
def _kint_float(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.randint(0, 10, (2, 3))
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = random.random() * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar, 5)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_float_gpu(self):
self._kint_float(gpu_dev)
def test_kint_float_cpu(self):
self._kint_float(cpu_dev)
def _kint_kint(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
[[-11, 9, 4, -15, 14], [18, 11, -1, -10, 10],
[-4, 12, 2, 9, 3], [7, 0, 17, 1, 4]],
[[18, -13, -12, 9, -11], [19, -4, -7, 19, 14],
[18, 9, -8, 19, -2], [8, 9, -1, 6, 9]]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_cpu(self, dev=cpu_dev):
self._kint_kint(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_gpu(self, dev=gpu_dev):
self._kint_kint(gpu_dev)
def _kint_kint_bc(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_bc_cpu(self, dev=cpu_dev):
self._kint_kint_bc(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_bc_gpu(self, dev=gpu_dev):
self._kint_kint_bc(gpu_dev)
if __name__ == '__main__':
unittest.main()
| [
[
[
888,
896
]
],
[
[
905,
909
],
[
4788,
4792
]
],
[
[
917,
925
],
[
1122,
1130
],
[
6247,
6255
],
[
7805,
7813
],
[
12528,
12536
],
[
13115,
13123
],
[
13630,
13638
],
[
14593,
14601
],
[
15633,
15641
],
[
16708,
16716
],
[
17775,
17783
],
[
17932,
17940
],
[
18600,
18608
],
[
19190,
19198
],
[
20645,
20653
],
[
21774,
21782
],
[
21956,
21964
]
],
[
[
933,
939
],
[
19005,
19011
]
],
[
[
947,
958
],
[
5762,
5764
],
[
5797,
5799
],
[
5892,
5894
],
[
5927,
5929
],
[
6024,
6026
],
[
6090,
6092
],
[
6941,
6943
],
[
6982,
6984
],
[
7079,
7081
],
[
7110,
7112
],
[
7151,
7153
],
[
7252,
7254
],
[
7314,
7316
],
[
7410,
7412
],
[
7483,
7485
],
[
7582,
7584
],
[
7699,
7701
],
[
7753,
7755
],
[
8038,
8040
],
[
8244,
8246
],
[
8273,
8275
],
[
8307,
8309
],
[
8333,
8335
],
[
8435,
8437
],
[
8531,
8533
],
[
8606,
8608
],
[
8746,
8748
],
[
8903,
8905
],
[
8970,
8972
],
[
9047,
9049
],
[
9143,
9145
],
[
9276,
9278
],
[
9422,
9424
],
[
9527,
9529
],
[
9604,
9606
],
[
9687,
9689
],
[
9783,
9785
],
[
9860,
9862
],
[
9963,
9965
],
[
10082,
10084
],
[
10216,
10218
],
[
10287,
10289
],
[
10358,
10360
],
[
10441,
10443
],
[
10537,
10539
],
[
10613,
10615
],
[
10759,
10761
],
[
10803,
10805
],
[
10869,
10871
],
[
10983,
10985
],
[
11091,
11093
],
[
11394,
11396
],
[
11572,
11574
],
[
11870,
11872
],
[
11953,
11955
],
[
12063,
12065
],
[
12101,
12103
],
[
12127,
12129
],
[
12165,
12167
],
[
12191,
12193
],
[
12380,
12382
],
[
12725,
12727
],
[
12763,
12765
],
[
12890,
12892
],
[
13307,
13309
],
[
13345,
13347
],
[
13394,
13396
],
[
13496,
13498
],
[
13859,
13861
],
[
13891,
13893
],
[
13968,
13970
],
[
14010,
14012
],
[
14114,
14116
],
[
14221,
14223
],
[
14390,
14392
],
[
14779,
14781
],
[
14811,
14813
],
[
14839,
14841
],
[
14871,
14873
],
[
15062,
15064
],
[
15094,
15096
],
[
15174,
15176
],
[
15206,
15208
],
[
15234,
15236
],
[
15266,
15268
],
[
15457,
15459
],
[
15489,
15491
],
[
15825,
15827
],
[
15864,
15866
],
[
15892,
15894
],
[
15931,
15933
],
[
16122,
16124
],
[
16154,
16156
],
[
16234,
16236
],
[
16273,
16275
],
[
16301,
16303
],
[
16341,
16343
],
[
16532,
16534
],
[
16564,
16566
],
[
16904,
16906
],
[
16946,
16948
],
[
17029,
17031
],
[
17071,
17073
],
[
17154,
17156
],
[
17196,
17198
],
[
17238,
17240
],
[
17280,
17282
],
[
17484,
17486
],
[
17552,
17554
],
[
17620,
17622
],
[
18246,
18248
],
[
18280,
18282
],
[
18312,
18314
],
[
18408,
18410
],
[
18521,
18523
],
[
18859,
18861
],
[
18893,
18895
],
[
19108,
19110
],
[
19447,
19449
],
[
19873,
19875
],
[
19898,
19900
],
[
20324,
20326
],
[
20488,
20490
],
[
20846,
20848
],
[
21272,
21274
],
[
21297,
21299
],
[
21447,
21449
],
[
21611,
21613
]
],
[
[
978,
984
],
[
1209,
1215
],
[
1252,
1258
],
[
1580,
1586
],
[
1736,
1742
],
[
1750,
1756
],
[
1897,
1903
],
[
1977,
1983
],
[
2058,
2064
],
[
2145,
2151
],
[
2251,
2257
],
[
2461,
2467
],
[
2550,
2556
],
[
2639,
2645
],
[
2934,
2940
],
[
3010,
3016
],
[
3085,
3091
],
[
3161,
3167
],
[
3237,
3243
],
[
3278,
3284
],
[
3322,
3328
],
[
3363,
3369
],
[
3407,
3413
],
[
3448,
3454
],
[
3492,
3498
],
[
3533,
3539
],
[
3577,
3583
],
[
3618,
3624
],
[
3662,
3668
],
[
3736,
3742
],
[
3807,
3813
],
[
3919,
3925
],
[
3983,
3989
],
[
4066,
4072
],
[
4129,
4135
],
[
4193,
4199
],
[
4363,
4369
],
[
4403,
4409
],
[
4468,
4474
],
[
4534,
4540
],
[
4600,
4606
],
[
4717,
4723
],
[
4762,
4768
],
[
4845,
4851
],
[
4927,
4933
],
[
4990,
4996
],
[
5076,
5082
],
[
5137,
5143
],
[
5223,
5229
],
[
5284,
5290
],
[
5370,
5376
],
[
5431,
5437
],
[
5517,
5523
],
[
5825,
5831
],
[
5955,
5961
],
[
6127,
6133
],
[
6536,
6542
],
[
6628,
6634
],
[
6721,
6727
],
[
6813,
6819
],
[
7002,
7008
],
[
7035,
7041
],
[
7175,
7181
],
[
7208,
7214
],
[
7448,
7454
],
[
7513,
7519
],
[
7548,
7554
],
[
7623,
7629
],
[
7669,
7675
],
[
8073,
8079
],
[
8134,
8140
],
[
8169,
8175
],
[
8209,
8215
],
[
8370,
8376
],
[
8569,
8575
],
[
8654,
8660
],
[
8708,
8714
],
[
8794,
8800
],
[
8848,
8854
],
[
9181,
9187
],
[
9224,
9230
],
[
9324,
9330
],
[
9373,
9379
],
[
9467,
9473
],
[
9821,
9827
],
[
9888,
9894
],
[
9921,
9927
],
[
9999,
10005
],
[
10040,
10046
],
[
10118,
10124
],
[
10159,
10165
],
[
10576,
10582
],
[
10656,
10662
],
[
10705,
10711
],
[
10927,
10933
],
[
11020,
11026
],
[
11209,
11215
],
[
11243,
11249
],
[
11431,
11437
],
[
11760,
11766
],
[
11920,
11926
],
[
12240,
12246
],
[
12289,
12295
],
[
12339,
12345
],
[
12417,
12423
],
[
12795,
12801
],
[
12928,
12934
],
[
13421,
13427
],
[
13471,
13477
],
[
13533,
13539
],
[
14036,
14042
],
[
14151,
14157
],
[
14258,
14264
],
[
14427,
14433
],
[
14896,
14902
],
[
14960,
14966
],
[
15131,
15137
],
[
15291,
15297
],
[
15355,
15361
],
[
15526,
15532
],
[
15956,
15962
],
[
16020,
16026
],
[
16191,
16197
],
[
16366,
16372
],
[
16430,
16436
],
[
16601,
16607
],
[
16970,
16976
],
[
17095,
17101
],
[
17521,
17527
],
[
17589,
17595
],
[
17657,
17663
],
[
18053,
18059
],
[
18130,
18136
],
[
18341,
18347
],
[
18497,
18503
],
[
18558,
18564
],
[
18938,
18944
],
[
19084,
19090
],
[
19145,
19151
],
[
20347,
20353
],
[
20384,
20390
],
[
20525,
20531
],
[
21470,
21476
],
[
21507,
21513
],
[
21648,
21654
]
],
[
[
1003,
1026
],
[
6267,
6276
],
[
7825,
7834
],
[
12548,
12557
],
[
13135,
13144
],
[
13650,
13659
],
[
14613,
14622
],
[
15653,
15662
],
[
16728,
16737
],
[
17795,
17804
],
[
17952,
17961
],
[
18620,
18629
],
[
19210,
19219
],
[
20665,
20674
],
[
21794,
21803
]
],
[
[
1045,
1053
],
[
6056,
6064
],
[
15023,
15031
],
[
15418,
15426
],
[
16083,
16091
],
[
16493,
16501
],
[
17309,
17317
],
[
17357,
17365
],
[
17405,
17413
],
[
17453,
17461
]
],
[
[
1079,
1086
],
[
7907,
7914
],
[
18031,
18038
],
[
18228,
18235
],
[
18841,
18848
],
[
19422,
19429
],
[
20745,
20752
],
[
20821,
20828
],
[
21877,
21884
],
[
6386,
6393
],
[
12660,
12667
],
[
13249,
13256
],
[
13748,
13755
],
[
14715,
14722
],
[
15761,
15768
],
[
16836,
16843
],
[
17917,
17924
],
[
18727,
18734
],
[
19313,
19320
],
[
20779,
20786
],
[
21914,
21921
]
],
[
[
1088,
1095
],
[
20596,
20603
],
[
21722,
21729
],
[
6232,
6239
],
[
12513,
12520
],
[
13100,
13107
],
[
13615,
13622
],
[
14578,
14585
],
[
15618,
15625
],
[
16693,
16700
],
[
17760,
17767
],
[
18801,
18808
],
[
19383,
19390
],
[
20630,
20637
],
[
21759,
21766
]
],
[
[
1104,
1121
]
]
] |
import json
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth import get_user_model
from authors.apps.articles.models import Articles
from authors.apps.profiles.models import Profile
class TestGetEndpoint(APITestCase):
def setUp(self):
""" Prepares table for tests """
self.token = self.get_user_token()
self.slug = "life_love_death"
self.title = "Life Love and Death"
self.description = "What is life?"
self.body = "This is the real life body."
self.tagList = "life,love,death"
self.author = 'TestAuthor'
self.article = Articles(
slug=self.slug,
title=self.title,
description=self.description,
body=self.body,
tagList=self.tagList,
author=Profile.objects.get(username=self.author))
self.article.save()
def test_get_all_articles(self):
"""
This tests getting all articles successfully
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_successfully_not_getting_articles_if_token_not_used(self):
"""
Unauthorized error returned if no token is passed in
"""
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_article_id(self):
"""
        Tests that the pk of the article is correct
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertIn(b"1", response.content)
def test_articles_are_paginated(self):
"""
This tests if the returned articles are paginated
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# this checks the number of articles in the database
self.assertIn(b"1", response.content)
# next is null since there is only one article posted
self.assertIn(b"null", response.content)
# previous is null since only one article has been posted
# the page_size holds ten articles per page
self.assertIn(b"null", response.content) # previous
def test_get_specific_article(self):
"""
This gets a specific article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articleSpecific', kwargs={'slug': 'life_love_death'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_and_checking_articles_content(self):
"""
This checks if the right content of an article is returned
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# checks if the body passed during posting is the one returned
self.assertIn(b"This is the real life body.", response.content)
# checks if id returned is 1
self.assertIn(b"1", response.content)
def test_wrong_request(self):
"""
        Checks the request for a non-existent article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse(
'articleSpecific', kwargs={
'slug': 'life_love_death_live'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response.render()
self.assertIn(b"Article does not exist", response.content)
def get_user_token(self):
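        """Registers a test user, activates it, and returns its auth token."""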
user = {
"user": {
"username": "TestAuthor",
"email": "test_user@email.com",
"password": "test123user#Password"
}
}
response = self.client.post(
reverse('register'), data=user, format='json')
user = get_user_model()
user = user.objects.get(username="TestAuthor")
user.is_active = True
user.save()
response.render()
data = response.content
token = json.loads(data.decode('utf-8'))['user']['token']
return token
| [
[
[
7,
11
],
[
4509,
4513
]
],
[
[
45,
56
],
[
294,
305
]
],
[
[
81,
88
],
[
1154,
1161
],
[
1453,
1460
],
[
1782,
1789
],
[
2102,
2109
],
[
2759,
2766
],
[
3167,
3174
],
[
3660,
3667
],
[
4251,
4258
]
],
[
[
116,
122
],
[
1261,
1267
],
[
1560,
1566
],
[
2909,
2915
],
[
3845,
3851
]
],
[
[
155,
169
],
[
4313,
4327
]
],
[
[
212,
220
],
[
689,
697
]
],
[
[
262,
269
],
[
880,
887
]
],
[
[
278,
293
]
]
] |
from abc import ABCMeta, abstractmethod
from frozendict import frozendict
class ResourceManager(metaclass=ABCMeta):
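    """Abstract base for resources that are parsed and written to disk once, then read back on demand."""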
def __init__(self):
self.wv_filename = ""
self.parsed_filename = ""
@abstractmethod
def write(self):
"""
parse the raw file/files and write the data to disk
:return:
"""
pass
@abstractmethod
def read(self):
"""
read the parsed file from disk
:return:
"""
pass
def read_hashable(self):
return frozendict(self.read())
| [
[
[
16,
23
],
[
109,
116
]
],
[
[
25,
39
],
[
213,
227
],
[
369,
383
]
],
[
[
64,
74
],
[
542,
552
]
],
[
[
83,
98
]
]
] |
import torch.utils.data as data
import os
import os.path
from numpy.random import randint
from ops.io import load_proposal_file
from transforms import *
from ops.utils import temporal_iou
class SSNInstance:
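    """A single temporal proposal (a start/end frame span) within a video."""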
def __init__(
self,
start_frame,
end_frame,
video_frame_count,
fps=1,
label=None,
best_iou=None,
overlap_self=None,
):
self.start_frame = start_frame
self.end_frame = min(end_frame, video_frame_count)
self._label = label
self.fps = fps
self.coverage = (end_frame - start_frame) / video_frame_count
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
def compute_regression_targets(self, gt_list, fg_thresh):
if self.best_iou < fg_thresh:
# background proposals do not need this
return
# find the groundtruth instance with the highest IOU
ious = [
temporal_iou(
(self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)
)
for gt in gt_list
]
best_gt_id = np.argmax(ious)
best_gt = gt_list[best_gt_id]
prop_center = (self.start_frame + self.end_frame) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame) / 2
prop_size = self.end_frame - self.start_frame + 1
gt_size = best_gt.end_frame - best_gt.start_frame + 1
        # get regression targets:
        # (1) center shift, proportional to the proposal duration
        # (2) logarithm of the ground-truth duration over the proposal duration
self.loc_reg = (gt_center - prop_center) / prop_size
try:
self.size_reg = math.log(gt_size / prop_size)
except:
print((gt_size, prop_size, self.start_frame, self.end_frame))
raise
@property
def start_time(self):
return self.start_frame / self.fps
@property
def end_time(self):
return self.end_frame / self.fps
@property
def label(self):
return self._label if self._label is not None else -1
@property
def regression_targets(self):
return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]
class SSNVideoRecord:
def __init__(self, prop_record):
self._data = prop_record
frame_count = int(self._data[1])
# build instance record
self.gt = [
SSNInstance(
int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0
)
for x in self._data[2]
if int(x[2]) > int(x[1])
]
self.gt = list([x for x in self.gt if x.start_frame < frame_count])
self.proposals = [
SSNInstance(
int(x[3]),
int(x[4]),
frame_count,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]),
)
for x in self._data[3]
if int(x[4]) > int(x[3])
]
self.proposals = list(
[x for x in self.proposals if x.start_frame < frame_count]
)
@property
def id(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
def get_fg(self, fg_thresh, with_gt=True):
fg = [p for p in self.proposals if p.best_iou > fg_thresh]
if with_gt:
fg.extend(self.gt)
for x in fg:
x.compute_regression_targets(self.gt, fg_thresh)
return fg
def get_negatives(
self,
incomplete_iou_thresh,
bg_iou_thresh,
bg_coverage_thresh=0.01,
incomplete_overlap_thresh=0.7,
):
tag = [0] * len(self.proposals)
incomplete_props = []
background_props = []
for i in range(len(tag)):
if (
self.proposals[i].best_iou < incomplete_iou_thresh
and self.proposals[i].overlap_self > incomplete_overlap_thresh
):
tag[i] = 1 # incomplete
incomplete_props.append(self.proposals[i])
for i in range(len(tag)):
if (
tag[i] == 0
and self.proposals[i].best_iou < bg_iou_thresh
and self.proposals[i].coverage > bg_coverage_thresh
):
background_props.append(self.proposals[i])
return incomplete_props, background_props
class SSNDataSet(data.Dataset):
def __init__(
self,
root_path,
prop_file=None,
body_seg=5,
aug_seg=2,
video_centric=True,
new_length=1,
modality="RGB",
image_tmpl="img_{:05d}.jpg",
transform=None,
random_shift=True,
test_mode=False,
prop_per_video=8,
fg_ratio=1,
bg_ratio=1,
incomplete_ratio=6,
fg_iou_thresh=0.7,
bg_iou_thresh=0.01,
incomplete_iou_thresh=0.3,
bg_coverage_thresh=0.02,
incomplete_overlap_thresh=0.7,
gt_as_fg=True,
reg_stats=None,
test_interval=6,
verbose=True,
exclude_empty=True,
epoch_multiplier=1,
):
self.root_path = root_path
self.prop_file = prop_file
self.verbose = verbose
self.body_seg = body_seg
self.aug_seg = aug_seg
self.video_centric = video_centric
self.exclude_empty = exclude_empty
self.epoch_multiplier = epoch_multiplier
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.test_interval = test_interval
self.fg_iou_thresh = fg_iou_thresh
self.incomplete_iou_thresh = incomplete_iou_thresh
self.bg_iou_thresh = bg_iou_thresh
self.bg_coverage_thresh = bg_coverage_thresh
self.incomplete_overlap_thresh = incomplete_overlap_thresh
self.starting_ratio = 0.5
self.ending_ratio = 0.5
self.gt_as_fg = gt_as_fg
denum = fg_ratio + bg_ratio + incomplete_ratio
self.fg_per_video = int(prop_per_video * (fg_ratio / denum))
self.bg_per_video = int(prop_per_video * (bg_ratio / denum))
self.incomplete_per_video = (
prop_per_video - self.fg_per_video - self.bg_per_video
)
self._parse_prop_file(stats=reg_stats)
def _load_image(self, directory, idx):
if self.modality == "RGB" or self.modality == "RGBDiff":
return [
Image.open(
os.path.join(directory, self.image_tmpl.format(idx))
).convert("RGB")
]
elif self.modality == "Flow":
x_img = Image.open(
os.path.join(directory, self.image_tmpl.format("x", idx))
).convert("L")
y_img = Image.open(
os.path.join(directory, self.image_tmpl.format("y", idx))
).convert("L")
return [x_img, y_img]
def _parse_prop_file(self, stats=None):
prop_info = load_proposal_file(self.prop_file)
self.video_list = [SSNVideoRecord(p) for p in prop_info]
if self.exclude_empty:
self.video_list = list([x for x in self.video_list if len(x.gt) > 0])
self.video_dict = {v.id: v for v in self.video_list}
# construct three pools:
# 1. Foreground
# 2. Background
# 3. Incomplete
self.fg_pool = []
self.bg_pool = []
self.incomp_pool = []
for v in self.video_list:
self.fg_pool.extend(
[(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)]
)
incomp, bg = v.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
self.incomp_pool.extend([(v.id, prop) for prop in incomp])
self.bg_pool.extend([(v.id, prop) for prop in bg])
if stats is None:
self._compute_regresssion_stats()
else:
self.stats = stats
if self.verbose:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
There are {pnum} usable proposals from {vnum} videos.
{fnum} foreground proposals
{inum} incomplete_proposals
{bnum} background_proposals
Sampling config:
FG/BG/INC: {fr}/{br}/{ir}
Video Centric: {vc}
Epoch size multiplier: {em}
Regression Stats:
Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}
Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}
""".format(
prop_file=self.prop_file,
pnum=len(self.fg_pool)
+ len(self.bg_pool)
+ len(self.incomp_pool),
fnum=len(self.fg_pool),
inum=len(self.incomp_pool),
bnum=len(self.bg_pool),
fr=self.fg_per_video,
br=self.bg_per_video,
ir=self.incomplete_per_video,
vnum=len(self.video_dict),
vc=self.video_centric,
stats=self.stats,
em=self.epoch_multiplier,
)
)
)
else:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
""".format(
prop_file=self.prop_file
)
)
)
def _video_centric_sampling(self, video):
fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)
incomp, bg = video.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
def sample_video_proposals(
proposal_type, video_id, video_pool, requested_num, dataset_pool
):
if len(video_pool) == 0:
# if there is nothing in the video pool, go fetch from the dataset pool
return [
(dataset_pool[x], proposal_type)
for x in np.random.choice(
len(dataset_pool), requested_num, replace=False
)
]
else:
replicate = len(video_pool) < requested_num
idx = np.random.choice(
len(video_pool), requested_num, replace=replicate
)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_props = []
out_props.extend(
sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)
) # sample foreground
out_props.extend(
sample_video_proposals(
1, video.id, incomp, self.incomplete_per_video, self.incomp_pool
)
) # sample incomp.
out_props.extend(
sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)
) # sample background
return out_props
def _random_sampling(self):
out_props = []
out_props.extend(
[
(x, 0)
for x in np.random.choice(
self.fg_pool, self.fg_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 1)
for x in np.random.choice(
self.incomp_pool, self.incomplete_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 2)
for x in np.random.choice(
self.bg_pool, self.bg_per_video, replace=False
)
]
)
return out_props
def _sample_indices(self, valid_length, num_seg):
"""
:param record: VideoRecord
:return: list
"""
average_duration = (valid_length + 1) // num_seg
if average_duration > 0:
# normal cases
offsets = np.multiply(list(range(num_seg)), average_duration) + randint(
average_duration, size=num_seg
)
elif valid_length > num_seg:
offsets = np.sort(randint(valid_length, size=num_seg))
else:
offsets = np.zeros((num_seg,))
return offsets
def _get_val_indices(self, valid_length, num_seg):
if valid_length > num_seg:
tick = valid_length / float(num_seg)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])
else:
offsets = np.zeros((num_seg,))
return offsets
def _sample_ssn_indices(self, prop, frame_cnt):
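        # Sample frame indices for the three stages of a proposal: augmented
        # segments before the start, course segments inside the proposal, and
        # augmented segments after the end.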
start_frame = prop.start_frame + 1
end_frame = prop.end_frame
duration = end_frame - start_frame + 1
assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)
valid_length = duration - self.new_length
valid_starting = max(1, start_frame - int(duration * self.starting_ratio))
valid_ending = min(
frame_cnt - self.new_length + 1,
end_frame + int(duration * self.ending_ratio),
)
valid_starting_length = start_frame - valid_starting - self.new_length + 1
valid_ending_length = valid_ending - end_frame - self.new_length + 1
starting_scale = (valid_starting_length + self.new_length - 1) / (
duration * self.starting_ratio
)
ending_scale = (valid_ending_length + self.new_length - 1) / (
duration * self.ending_ratio
)
# get starting
starting_offsets = (
self._sample_indices(valid_starting_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_starting_length, self.aug_seg)
) + valid_starting
course_offsets = (
self._sample_indices(valid_length, self.body_seg)
if self.random_shift
else self._get_val_indices(valid_length, self.body_seg)
) + start_frame
ending_offsets = (
self._sample_indices(valid_ending_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_ending_length, self.aug_seg)
) + end_frame
offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))
stage_split = [
self.aug_seg,
self.aug_seg + self.body_seg,
self.aug_seg * 2 + self.body_seg,
]
return offsets, starting_scale, ending_scale, stage_split
def _load_prop_data(self, prop):
# read frame count
frame_cnt = self.video_dict[prop[0][0]].num_frames
# sample segment indices
prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(
prop[0][1], frame_cnt
)
# turn prop into standard format
# get label
if prop[1] == 0:
label = prop[0][1].label
elif prop[1] == 1:
label = prop[0][1].label # incomplete
elif prop[1] == 2:
label = 0 # background
else:
raise ValueError()
frames = []
for idx, seg_ind in enumerate(prop_indices):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(prop[0][0], min(frame_cnt, p + x)))
# get regression target
if prop[1] == 0:
reg_targets = prop[0][1].regression_targets
reg_targets = (
(reg_targets[0] - self.stats[0][0]) / self.stats[1][0],
(reg_targets[1] - self.stats[0][1]) / self.stats[1][1],
)
else:
reg_targets = (0.0, 0.0)
return (
frames,
label,
reg_targets,
starting_scale,
ending_scale,
stage_split,
prop[1],
)
def _compute_regresssion_stats(self):
if self.verbose:
print("computing regression target normalizing constants")
targets = []
for video in self.video_list:
fg = video.get_fg(self.fg_iou_thresh, False)
for p in fg:
targets.append(list(p.regression_targets))
self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def get_test_data(self, video, test_interval, gen_batchsize=4):
props = video.proposals
video_id = video.id
frame_cnt = video.num_frames
frame_ticks = (
np.arange(0, frame_cnt - self.new_length, test_interval, dtype=np.int) + 1
)
num_sampled_frames = len(frame_ticks)
# avoid empty proposal list
if len(props) == 0:
props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))
# process proposals to subsampled sequences
rel_prop_list = []
proposal_tick_list = []
scaling_list = []
for proposal in props:
rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt
rel_duration = rel_prop[1] - rel_prop[0]
rel_starting_duration = rel_duration * self.starting_ratio
rel_ending_duration = rel_duration * self.ending_ratio
rel_starting = rel_prop[0] - rel_starting_duration
rel_ending = rel_prop[1] + rel_ending_duration
real_rel_starting = max(0.0, rel_starting)
real_rel_ending = min(1.0, rel_ending)
starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration
ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration
proposal_ticks = (
int(real_rel_starting * num_sampled_frames),
int(rel_prop[0] * num_sampled_frames),
int(rel_prop[1] * num_sampled_frames),
int(real_rel_ending * num_sampled_frames),
)
rel_prop_list.append(rel_prop)
proposal_tick_list.append(proposal_ticks)
scaling_list.append((starting_scaling, ending_scaling))
# load frames
        # Since there are many frames for each video during testing, instead of returning the read frames,
        # we return a generator which yields the frames in small batches; this lowers the memory burden
        # and runtime overhead. Usually setting batchsize=4 fits most cases.
def frame_gen(batchsize):
frames = []
cnt = 0
for idx, seg_ind in enumerate(frame_ticks):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(video_id, min(frame_cnt, p + x)))
cnt += 1
if cnt % batchsize == 0:
frames = self.transform(frames)
yield frames
frames = []
if len(frames):
frames = self.transform(frames)
yield frames
return (
frame_gen(gen_batchsize),
len(frame_ticks),
torch.from_numpy(np.array(rel_prop_list)),
torch.from_numpy(np.array(proposal_tick_list)),
torch.from_numpy(np.array(scaling_list)),
)
def get_training_data(self, index):
if self.video_centric:
video = self.video_list[index]
props = self._video_centric_sampling(video)
else:
props = self._random_sampling()
out_frames = []
out_prop_len = []
out_prop_scaling = []
out_prop_type = []
out_prop_labels = []
out_prop_reg_targets = []
out_stage_split = []
for idx, p in enumerate(props):
prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(
p
)
processed_frames = self.transform(prop_frames)
out_frames.append(processed_frames)
out_prop_len.append(self.body_seg + 2 * self.aug_seg)
out_prop_scaling.append([starting_scale, ending_scale])
out_prop_labels.append(prop_label)
out_prop_reg_targets.append(reg_targets)
out_prop_type.append(prop_type)
out_stage_split.append(stage_split)
out_prop_len = torch.from_numpy(np.array(out_prop_len))
out_prop_scaling = torch.from_numpy(
np.array(out_prop_scaling, dtype=np.float32)
)
out_prop_labels = torch.from_numpy(np.array(out_prop_labels))
out_prop_reg_targets = torch.from_numpy(
np.array(out_prop_reg_targets, dtype=np.float32)
)
out_prop_type = torch.from_numpy(np.array(out_prop_type))
out_stage_split = torch.from_numpy(np.array(out_stage_split))
out_frames = torch.cat(out_frames)
return (
out_frames,
out_prop_len,
out_prop_scaling,
out_prop_type,
out_prop_labels,
out_prop_reg_targets,
out_stage_split,
)
def get_all_gt(self):
gt_list = []
for video in self.video_list:
vid = video.id
gt_list.extend(
[
[
vid,
x.label - 1,
x.start_frame / video.num_frames,
x.end_frame / video.num_frames,
]
for x in video.gt
]
)
return gt_list
def __getitem__(self, index):
real_index = index % len(self.video_list)
if self.test_mode:
return self.get_test_data(self.video_list[real_index], self.test_interval)
else:
return self.get_training_data(real_index)
def __len__(self):
return len(self.video_list) * self.epoch_multiplier
| [
[
[
7,
31
],
[
4576,
4580
]
],
[
[
40,
42
]
],
[
[
50,
57
],
[
6773,
6775
],
[
6959,
6961
],
[
7092,
7094
]
],
[
[
83,
90
],
[
12799,
12806
],
[
12936,
12943
]
],
[
[
110,
128
],
[
7277,
7295
]
],
[
[
152,
153
],
[
1189,
1191
],
[
1772,
1776
],
[
6741,
6746
],
[
6931,
6936
],
[
7064,
7069
],
[
11873,
11875
],
[
12088,
12090
],
[
12315,
12317
],
[
12745,
12747
],
[
12928,
12930
],
[
13009,
13011
],
[
13217,
13219
],
[
13316,
13318
],
[
15024,
15026
],
[
17045,
17047
],
[
17055,
17057
],
[
17081,
17083
],
[
17309,
17311
],
[
17372,
17374
],
[
19868,
19873
],
[
19885,
19887
],
[
19923,
19928
],
[
19940,
19942
],
[
19983,
19988
],
[
20000,
20002
],
[
21121,
21126
],
[
21138,
21140
],
[
21189,
21194
],
[
21219,
21221
],
[
21252,
21254
],
[
21300,
21305
],
[
21317,
21319
],
[
21375,
21380
],
[
21405,
21407
],
[
21442,
21444
],
[
21488,
21493
],
[
21505,
21507
],
[
21556,
21561
],
[
21573,
21575
],
[
21621,
21626
],
[
10792,
10794
],
[
11022,
11024
]
],
[
[
176,
188
],
[
1017,
1029
]
],
[
[
197,
208
],
[
2507,
2518
],
[
2814,
2825
],
[
17531,
17542
]
],
[
[
2314,
2328
],
[
7340,
7354
]
],
[
[
4565,
4575
]
]
] |
from .routes import app as websockets_routes
| [
[
[
20,
44
]
]
] |
import json
from server import db
from sqlalchemy.ext import mutable
class JsonEncodedDict(db.TypeDecorator):
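    """Stores a Python dict as JSON-encoded text in the database."""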
impl = db.Text
def process_bind_param(self, value, dialect):
if value is None:
return '{}'
else:
return json.dumps(value)
def process_result_value(self, value, dialect):
if value is None:
return {}
else:
return json.loads(value)
mutable.MutableDict.associate_with(JsonEncodedDict)
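# Association table for the many-to-many relationship between users and locations.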
user_location_table = db.Table('user_location_table',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
db.Column('location_id',db.Integer, db.ForeignKey('location.id'), nullable=False),
) | [
[
[
7,
11
],
[
263,
267
],
[
415,
419
]
],
[
[
31,
33
],
[
92,
94
],
[
122,
124
],
[
509,
511
],
[
570,
572
],
[
591,
593
],
[
603,
605
],
[
674,
676
],
[
698,
700
],
[
710,
712
]
],
[
[
61,
68
],
[
434,
441
]
],
[
[
76,
91
],
[
469,
484
]
],
[
[
487,
506
]
]
] |
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from cgatReport.Tracker import *
from cpgReport import *
##########################################################################
class replicatedIntervalSummary(cpgTracker):
"""Summary stats of intervals called by the peak finder. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getRow(
"SELECT COUNT(*) as Intervals, round(AVG(length),0) as Mean_length, round(AVG(nprobes),0) as Mean_reads FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalLengths(cpgTracker):
"""Distribution of interval length. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT length FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalPeakValues(cpgTracker):
"""Distribution of maximum interval coverage (the number of reads at peak). """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT peakval FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalAverageValues(cpgTracker):
"""Distribution of average coverage (the average number of reads within the interval) """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT avgval FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalFoldChange(cpgTracker):
"""return fold changes for all intervals. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT fold FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalPeakLocation(cpgTracker):
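    """Distribution of peak positions relative to the interval center. """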
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT (PeakCenter - start) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT (end - PeakCenter) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
##########################################################################
class replicatedIntervalPeakDistance(cpgTracker):
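    """Distribution of distances between the peak and the interval boundaries. """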
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT PeakCenter - start FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT end - PeakCenter FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalCpGDensity(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalCpGObsExp1(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalCpGObsExp2(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalGCContent(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
| [
[
[
7,
9
]
],
[
[
17,
20
]
],
[
[
28,
30
]
],
[
[
38,
43
]
],
[
[
51,
60
]
],
[
[
68,
92
]
],
[
[
100,
105
]
],
[
[
113,
124
]
],
[
[
132,
140
]
],
[
[
148,
153
]
],
[
[
161,
170
]
],
[
[
203,
204
]
],
[
[
227,
228
],
[
339,
349
],
[
826,
836
],
[
1206,
1216
],
[
1630,
1640
],
[
2060,
2070
],
[
2596,
2606
],
[
3155,
3165
],
[
3796,
3806
],
[
4627,
4637
],
[
5486,
5496
],
[
6340,
6350
],
[
4372,
4377
],
[
5231,
5236
],
[
6086,
6091
],
[
6912,
6917
]
],
[
[
313,
338
]
],
[
[
800,
825
]
],
[
[
1177,
1205
]
],
[
[
1598,
1629
]
],
[
[
2031,
2059
]
],
[
[
2565,
2595
]
],
[
[
3124,
3154
]
],
[
[
3767,
3795
]
],
[
[
4598,
4626
]
],
[
[
5457,
5485
]
],
[
[
6312,
6339
]
]
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-04 00:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('library', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='module',
name='professions',
),
migrations.AddField(
model_name='module',
name='profession',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='library.Profession'),
preserve_default=False,
),
]
| [
[
[
96,
112
]
],
[
[
136,
146
],
[
206,
216
],
[
322,
332
],
[
430,
440
]
],
[
[
148,
154
],
[
533,
539
]
],
[
[
162,
187
],
[
572,
578
]
],
[
[
196,
205
]
]
] |
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
ROOT_URLCONF = 'django_autoconfig.autourlconf'
INSTALLED_APPS = [
'django.contrib.auth',
'nuit',
]
STATIC_URL = '/static/'
STATIC_ROOT = '.static'
from django_autoconfig.autoconfig import configure_settings
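# configure_settings() mutates this module's globals; presumably it merges in the extra
# settings each installed app declares (django-autoconfig's autoconfiguration step).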
configure_settings(globals())
| [
[
[
0,
9
]
],
[
[
88,
100
]
],
[
[
135,
149
]
],
[
[
195,
205
]
],
[
[
219,
230
]
],
[
[
284,
302
],
[
303,
321
]
]
] |
"""
Django settings for my_blog project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@7+q1q@_=iniipvuc%nfs)5qauaax2g0cnc1fxzos52t-9ml=m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sarah.1024z@gmail.com'
EMAIL_HOST_PASSWORD = 'rzan2015'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
[
[
315,
317
],
[
402,
404
],
[
418,
420
],
[
434,
436
],
[
2434,
2436
]
],
[
[
391,
399
],
[
2447,
2455
]
],
[
[
667,
677
]
],
[
[
800,
805
]
],
[
[
813,
823
]
],
[
[
843,
858
]
],
[
[
885,
904
]
],
[
[
918,
928
]
],
[
[
935,
948
]
],
[
[
956,
969
]
],
[
[
1018,
1031
]
],
[
[
1065,
1079
]
],
[
[
1285,
1295
]
],
[
[
1699,
1711
]
],
[
[
1730,
1739
]
],
[
[
2215,
2231
]
],
[
[
2339,
2348
]
],
[
[
2584,
2608
]
],
[
[
3087,
3100
]
],
[
[
3112,
3121
]
],
[
[
3131,
3139
]
],
[
[
3148,
3156
]
],
[
[
3165,
3171
]
],
[
[
3283,
3293
]
]
] |
"""
Logic for uploading to s3 based on supplied template file and s3 bucket
"""
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import os
import boto3
import click
import docker
from botocore.config import Config
from samcli.commands.package.exceptions import PackageFailedError
from samcli.lib.package.artifact_exporter import Template
from samcli.lib.package.ecr_uploader import ECRUploader
from samcli.lib.package.code_signer import CodeSigner
from samcli.lib.package.s3_uploader import S3Uploader
from samcli.lib.utils.botoconfig import get_boto_config_with_user_agent
from samcli.yamlhelper import yaml_dump
LOG = logging.getLogger(__name__)
class PackageContext:
MSG_PACKAGED_TEMPLATE_WRITTEN = (
"\nSuccessfully packaged artifacts and wrote output template "
"to file {output_file_name}."
"\n"
"Execute the following command to deploy the packaged template"
"\n"
"sam deploy --template-file {output_file_path} "
"--stack-name <YOUR STACK NAME>"
"\n"
)
def __init__(
self,
template_file,
s3_bucket,
image_repository,
s3_prefix,
kms_key_id,
output_template_file,
use_json,
force_upload,
no_progressbar,
metadata,
region,
profile,
on_deploy=False,
signing_profiles=None,
):
self.template_file = template_file
self.s3_bucket = s3_bucket
self.image_repository = image_repository
self.s3_prefix = s3_prefix
self.kms_key_id = kms_key_id
self.output_template_file = output_template_file
self.use_json = use_json
self.force_upload = force_upload
self.no_progressbar = no_progressbar
self.metadata = metadata
self.region = region
self.profile = profile
self.on_deploy = on_deploy
self.s3_uploader = None
self.code_signer = None
self.signing_profiles = signing_profiles
self.ecr_uploader = None
self.uploader = {}
def __enter__(self):
return self
def __exit__(self, *args):
pass
def run(self):
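        # Orchestration: build the S3 and ECR uploaders plus the code signer, export the
        # template (uploading referenced artifacts), then write the packaged template out.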
region_name = self.region if self.region else None
s3_client = boto3.client(
"s3",
config=get_boto_config_with_user_agent(signature_version="s3v4", region_name=region_name),
)
ecr_client = boto3.client("ecr", config=get_boto_config_with_user_agent(region_name=region_name))
docker_client = docker.from_env()
self.s3_uploader = S3Uploader(
s3_client, self.s3_bucket, self.s3_prefix, self.kms_key_id, self.force_upload, self.no_progressbar
)
# attach the given metadata to the artifacts to be uploaded
self.s3_uploader.artifact_metadata = self.metadata
self.ecr_uploader = ECRUploader(docker_client, ecr_client, self.image_repository)
code_signer_client = boto3.client("signer")
self.code_signer = CodeSigner(code_signer_client, self.signing_profiles)
# NOTE(srirammv): move this to its own class.
self.uploader = {"s3": self.s3_uploader, "ecr": self.ecr_uploader}
try:
exported_str = self._export(self.template_file, self.use_json)
self.write_output(self.output_template_file, exported_str)
if self.output_template_file and not self.on_deploy:
msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
output_file_name=self.output_template_file,
output_file_path=os.path.abspath(self.output_template_file),
)
click.echo(msg)
except OSError as ex:
raise PackageFailedError(template_file=self.template_file, ex=str(ex)) from ex
def _export(self, template_path, use_json):
template = Template(template_path, os.getcwd(), self.uploader, self.code_signer)
exported_template = template.export()
if use_json:
exported_str = json.dumps(exported_template, indent=4, ensure_ascii=False)
else:
exported_str = yaml_dump(exported_template)
return exported_str
def write_output(self, output_file_name, data):
if output_file_name is None:
click.echo(data)
return
with open(output_file_name, "w") as fp:
fp.write(data)
| [
[
[
655,
659
],
[
4576,
4580
]
],
[
[
667,
674
],
[
1169,
1176
]
],
[
[
682,
684
],
[
4128,
4130
],
[
4435,
4437
]
],
[
[
693,
698
],
[
2800,
2805
],
[
2966,
2971
],
[
3502,
3507
]
],
[
[
706,
711
],
[
4206,
4211
],
[
4837,
4842
]
],
[
[
719,
725
],
[
3076,
3082
]
],
[
[
754,
760
]
],
[
[
809,
827
],
[
4270,
4288
]
],
[
[
877,
885
],
[
4411,
4419
]
],
[
[
930,
941
],
[
3410,
3421
]
],
[
[
985,
995
],
[
3552,
3562
]
],
[
[
1039,
1049
],
[
3122,
3132
]
],
[
[
1090,
1121
],
[
2851,
2882
],
[
2993,
3024
]
],
[
[
1152,
1161
],
[
4677,
4686
]
],
[
[
1163,
1166
]
],
[
[
1205,
1219
]
]
] |
from model import common
import torch.nn as nn
import torch.nn.init as init
url = {
'r20f64': ''
}
def make_model(args, parent=False):
return VDSR(args)
class VDSR(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(VDSR, self).__init__()
n_resblocks = args.n_resblocks
n_feats = args.n_feats
kernel_size = 3
url_name = 'r{}f{}'.format(n_resblocks, n_feats)
if url_name in url:
self.url = url[url_name]
else:
self.url = None
self.sub_mean = common.MeanShift(args.rgb_range)
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
def basic_block(in_channels, out_channels, act):
return common.BasicBlock(
conv, in_channels, out_channels, kernel_size,
bias=True, bn=False, act=act
)
# define body module
m_body = []
m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True)))
for _ in range(n_resblocks - 2):
m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))
m_body.append(basic_block(n_feats, args.n_colors, None))
self.body = nn.Sequential(*m_body)
def forward(self, x):
x = self.sub_mean(x)
res = self.body(x)
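        # Global residual connection: the body predicts a residual image that is added back to the input.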
res += x
x = self.add_mean(res)
return x
# cd ..(src), export PYTHONPATH=`pwd`
# if __name__ == '__main__':
# import torch
# import utility
# from option import args
# torch.manual_seed(args.seed)
# checkpoint = utility.checkpoint(args)
# print(args)
# model = VDSR(args)
# print(model)
| [
[
[
18,
24
],
[
222,
228
],
[
565,
571
],
[
622,
628
],
[
740,
746
]
],
[
[
33,
47
],
[
176,
178
],
[
988,
990
],
[
1101,
1103
],
[
1203,
1205
]
],
[
[
55,
76
]
],
[
[
78,
81
],
[
456,
459
],
[
484,
487
]
],
[
[
110,
120
]
],
[
[
171,
175
],
[
153,
157
],
[
258,
262
]
]
] |
import PIL.Image
import PIL.ImageColor
import PIL.ImageEnhance
import zeit.cms.repository.folder
import zeit.connector.interfaces
import zeit.content.image.interfaces
import zope.app.appsetup.product
import zope.component
import zope.interface
import zope.security.proxy
class ImageTransform(object):
zope.interface.implements(zeit.content.image.interfaces.ITransform)
zope.component.adapts(zeit.content.image.interfaces.IImage)
MAXIMUM_IMAGE_SIZE = 5000
def __init__(self, context):
self.context = context
try:
self.image = PIL.Image.open(
zope.security.proxy.removeSecurityProxy(context.open()))
self.image.load()
except IOError:
raise zeit.content.image.interfaces.ImageProcessingError(
"Cannot transform image %s" % context.__name__)
def thumbnail(self, width, height, filter=PIL.Image.ANTIALIAS):
image = self.image.copy()
image.thumbnail((width, height), filter)
return self._construct_image(image)
def resize(self, width=None, height=None, filter=PIL.Image.ANTIALIAS):
if width is None and height is None:
raise TypeError('Need at least one of width and height.')
orig_width, orig_height = self.image.size
if width is None:
width = orig_width * height / orig_height
elif height is None:
height = orig_height * width / orig_width
image = self.image.resize((width, height), filter)
return self._construct_image(image)
def create_variant_image(
self, variant, size=None, fill_color=None, format=None):
"""Create variant image from source image.
Will crop the image according to the zoom, focus point and size. In
addition, the image is scaled down to size (if given) and image
enhancements, like brightness, are applied.
The default variant skips cropping, but still applies image
enhancements, so it can be used as a high quality preview of image
enhancements in the frontend.
"""
if not variant.is_default:
image = self._crop_variant_image(variant, size=size)
else:
# Alpha channel is usually activated when cropping,
# so we must do it by hand since we skipped cropping
image = self._enable_alpha_channel(self.image)
# Apply enhancements like brightness
if variant.brightness is not None:
image = PIL.ImageEnhance.Brightness(image).enhance(
variant.brightness)
if variant.contrast is not None:
image = PIL.ImageEnhance.Contrast(image).enhance(
variant.contrast)
if variant.saturation is not None:
image = PIL.ImageEnhance.Color(image).enhance(
variant.saturation)
if variant.sharpness is not None:
image = PIL.ImageEnhance.Sharpness(image).enhance(
variant.sharpness)
# Optionally fill the background of transparent images
if fill_color is not None and self._color_mode == 'RGBA':
fill_color = PIL.ImageColor.getrgb('#' + fill_color)
opaque = PIL.Image.new('RGB', image.size, fill_color)
opaque.paste(image, (0, 0), image)
image = opaque
return self._construct_image(image, format)
def _crop_variant_image(self, variant, size=None):
"""Crop variant image from source image.
Determines crop position using zoom, focus point and size constraint.
The result image will have the exact dimensions that are predefined by
the size argument, if provided. Otherwise it depends on the variant
ratio and zoom only, giving back the best image quality, i.e. will not
scale down.
"""
source_width, source_height = self.image.size
if (source_width == 0 or source_height == 0):
return self.image
zoomed_width = source_width
zoomed_height = source_height
if variant.zoom > 0:
zoomed_width = int(source_width * variant.zoom)
zoomed_height = int(source_height * variant.zoom)
target_ratio = variant.ratio
if target_ratio is None:
target_ratio = float(source_width) / float(source_height)
target_width, target_height = self._fit_ratio_to_image(
zoomed_width, zoomed_height, target_ratio)
if size:
w, h = size
override_ratio = float(w) / float(h)
target_width, target_height = self._fit_ratio_to_image(
target_width, target_height, override_ratio)
x, y = self._determine_crop_position(
variant, target_width, target_height)
image = self._crop(
self.image, x, y, x + target_width, y + target_height)
if size:
w, h = size
if w > self.MAXIMUM_IMAGE_SIZE:
w = self.MAXIMUM_IMAGE_SIZE
if h > self.MAXIMUM_IMAGE_SIZE:
h = self.MAXIMUM_IMAGE_SIZE
image = image.resize((w, h), PIL.Image.ANTIALIAS)
return image
def _fit_ratio_to_image(self, source_width, source_height, target_ratio):
"""Calculate the biggest (width, height) inside the source that adheres
to target ratio"""
original_ratio = float(source_width) / float(source_height)
if target_ratio > original_ratio:
width = source_width
height = int(source_width / target_ratio)
else:
width = int(source_height * target_ratio)
height = source_height
return width, height
def _determine_crop_position(self, variant, target_width, target_height):
width, height = self.image.size
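        # The crop origin slides across the surplus (source size minus target size) in proportion
        # to the focus point, e.g. focus_x = 0.5 centres the crop horizontally.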
x = int(width * variant.focus_x - target_width * variant.focus_x)
y = int(height * variant.focus_y - target_height * variant.focus_y)
return x, y
def _crop(self, pil_image, x1, y1, x2, y2):
pil_image = pil_image.crop((x1, y1, x2, y2))
pil_image = self._enable_alpha_channel(pil_image)
return pil_image
@property
def _color_mode(self):
# XXX This is a rather crude heuristic.
return 'RGBA' if self.context.format == 'PNG' else 'RGB'
def _enable_alpha_channel(self, pil_image):
"""Enable alpha channel for PNG images by converting to RGBA."""
if pil_image.mode != self._color_mode:
pil_image = pil_image.convert(self._color_mode)
return pil_image
def _construct_image(self, pil_image, format=None):
image = zeit.content.image.image.TemporaryImage()
if not format:
format = self.context.format
image.mimeType = self.context.mimeType
else:
image.mimeType = 'image/' + format.lower() # XXX crude heuristic.
# XXX Maybe encoder setting should be made configurable.
if format in ('JPG', 'JPEG'):
options = {'progressive': True, 'quality': 85, 'optimize': True}
elif format == 'PNG':
options = {'optimize': True}
elif format == 'WEBP':
options = {'quality': 85}
else:
options = {}
pil_image.save(image.open('w'), format, **options)
image.__parent__ = self.context
image_times = zope.dublincore.interfaces.IDCTimes(self.context, None)
if image_times and image_times.modified:
thumb_times = zope.dublincore.interfaces.IDCTimes(image)
thumb_times.modified = image_times.modified
return image
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IPersistentThumbnail)
def persistent_thumbnail_factory(context):
config = zope.app.appsetup.product.getProductConfiguration(
'zeit.content.image') or {}
method_name = config.get('thumbnail-method', 'thumbnail')
width = config.get('thumbnail-width', 50)
if width:
width = int(width)
else:
width = None
height = config.get('thumbnail-height', 50)
if height:
height = int(height)
else:
height = None
thumbnail_container = zeit.content.image.interfaces.IThumbnailFolder(
context)
image_name = context.__name__
if image_name not in thumbnail_container:
transform = zeit.content.image.interfaces.ITransform(context)
method = getattr(transform, method_name)
thumbnail = method(width, height)
thumbnail_properties = (
zeit.connector.interfaces.IWebDAVWriteProperties(thumbnail))
image_properties = zeit.connector.interfaces.IWebDAVReadProperties(
context)
for (name, namespace), value in image_properties.items():
if namespace != 'DAV:':
thumbnail_properties[(name, namespace)] = value
thumbnail_properties.pop(zeit.connector.interfaces.UUID_PROPERTY, None)
thumbnail_container[image_name] = thumbnail
return thumbnail_container[image_name]
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IThumbnailFolder)
def thumbnail_folder_factory(context):
name = u'thumbnails'
folder = context.__parent__
if name not in folder:
folder[name] = zeit.cms.repository.folder.Folder()
return folder[name]
| [
[
[
7,
16
]
],
[
[
24,
38
]
],
[
[
46,
62
],
[
898,
901
],
[
1101,
1104
],
[
574,
577
],
[
2514,
2517
],
[
2655,
2658
],
[
2794,
2797
],
[
2931,
2934
],
[
3164,
3167
],
[
3225,
3228
],
[
5142,
5145
]
],
[
[
70,
96
]
],
[
[
104,
129
]
],
[
[
137,
166
],
[
334,
338
],
[
402,
406
],
[
7663,
7667
],
[
7729,
7733
],
[
9134,
9138
],
[
9200,
9204
],
[
735,
739
],
[
6656,
6660
],
[
8255,
8259
],
[
8420,
8424
],
[
8607,
8611
],
[
8695,
8699
],
[
8964,
8968
],
[
9394,
9398
]
],
[
[
174,
199
]
],
[
[
207,
221
]
],
[
[
229,
243
]
],
[
[
251,
270
],
[
308,
312
],
[
380,
384
],
[
7640,
7644
],
[
7702,
7706
],
[
9111,
9115
],
[
9173,
9177
],
[
606,
610
],
[
7386,
7390
],
[
7517,
7521
],
[
7837,
7841
]
],
[
[
279,
293
]
],
[
[
7785,
7813
]
],
[
[
9252,
9276
]
]
] |
#
# Tests for current input functions
#
import pybamm
import numbers
import unittest
import numpy as np
class TestCurrentFunctions(unittest.TestCase):
def test_constant_current(self):
# test simplify
current = pybamm.electrical_parameters.current_with_time
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"Current function [A]": 2,
}
)
processed_current = parameter_values.process_symbol(current)
self.assertIsInstance(processed_current.simplify(), pybamm.Scalar)
def test_get_current_data(self):
# test process parameters
dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"Current function [A]": "[current data]car_current",
}
)
dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
def current(t):
return dimensional_current_eval.evaluate(t=t)
standard_tests = StandardCurrentFunctionTests([current], always_array=True)
standard_tests.test_all()
def test_user_current(self):
# create user-defined sin function
def my_fun(t, A, omega):
return A * pybamm.sin(2 * np.pi * omega * t)
# choose amplitude and frequency
A = pybamm.electrical_parameters.I_typ
omega = pybamm.Parameter("omega")
def current(t):
return my_fun(t, A, omega)
# set and process parameters
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"omega": 3,
"Current function [A]": current,
}
)
dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
def user_current(t):
return dimensional_current_eval.evaluate(t=t)
# check output types
standard_tests = StandardCurrentFunctionTests([user_current])
standard_tests.test_all()
# check output correct value
time = np.linspace(0, 3600, 600)
np.testing.assert_array_almost_equal(
user_current(time), 2 * np.sin(2 * np.pi * 3 * time)
)
class StandardCurrentFunctionTests(object):
def __init__(self, function_list, always_array=False):
self.function_list = function_list
self.always_array = always_array
def test_output_type(self):
for function in self.function_list:
if self.always_array is True:
assert isinstance(function(0), np.ndarray)
else:
assert isinstance(function(0), numbers.Number)
assert isinstance(function(np.zeros(3)), np.ndarray)
assert isinstance(function(np.zeros([3, 3])), np.ndarray)
def test_all(self):
self.test_output_type()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| [
[
[
47,
53
],
[
3364,
3370
],
[
232,
238
],
[
306,
312
],
[
626,
632
],
[
743,
749
],
[
829,
835
],
[
1557,
1563
],
[
1608,
1614
],
[
1763,
1769
],
[
2018,
2024
],
[
1469,
1475
]
],
[
[
61,
68
],
[
3019,
3026
]
],
[
[
76,
84
],
[
133,
141
],
[
3402,
3410
]
],
[
[
92,
103
],
[
2440,
2442
],
[
2474,
2476
],
[
2548,
2550
],
[
2559,
2561
],
[
2942,
2944
],
[
3074,
3076
],
[
3088,
3090
],
[
3139,
3141
],
[
3158,
3160
],
[
1484,
1486
]
],
[
[
112,
132
]
],
[
[
2595,
2623
],
[
1243,
1271
],
[
2308,
2336
]
],
[
[
3309,
3312
],
[
3329,
3332
]
],
[
[
3347,
3352
]
]
] |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class SubDomainExist(object):
def __init__(self, domain=None, isExist=None):
"""
        :param domain: (Optional) The subdomain name
        :param isExist: (Optional) Existence status of the subdomain: 1 = exists, 2 = does not exist, 3 = the zone does not exist
"""
self.domain = domain
self.isExist = isExist
| [
[
[
676,
690
]
]
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPublicIPAddressResult',
'AwaitableGetPublicIPAddressResult',
'get_public_ip_address',
]
@pulumi.output_type
class GetPublicIPAddressResult:
"""
Public IP address resource.
"""
def __init__(__self__, dns_settings=None, etag=None, id=None, idle_timeout_in_minutes=None, ip_address=None, ip_configuration=None, location=None, name=None, provisioning_state=None, public_ip_address_version=None, public_ip_allocation_method=None, resource_guid=None, tags=None, type=None):
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if ip_configuration and not isinstance(ip_configuration, dict):
raise TypeError("Expected argument 'ip_configuration' to be a dict")
pulumi.set(__self__, "ip_configuration", ip_configuration)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address_version and not isinstance(public_ip_address_version, str):
raise TypeError("Expected argument 'public_ip_address_version' to be a str")
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_allocation_method and not isinstance(public_ip_allocation_method, str):
raise TypeError("Expected argument 'public_ip_allocation_method' to be a str")
pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
"""
The FQDN of the DNS record associated with the public IP address.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The idle timeout of the public IP address.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
"""
IPConfiguration
"""
return pulumi.get(self, "ip_configuration")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[str]:
"""
The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
"""
return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAllocationMethod")
def public_ip_allocation_method(self) -> Optional[str]:
"""
The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "public_ip_allocation_method")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the public IP resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPublicIPAddressResult(GetPublicIPAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPublicIPAddressResult(
dns_settings=self.dns_settings,
etag=self.etag,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
ip_address=self.ip_address,
ip_configuration=self.ip_configuration,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
public_ip_address_version=self.public_ip_address_version,
public_ip_allocation_method=self.public_ip_allocation_method,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_public_ip_address(expand: Optional[str] = None,
public_ip_address_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublicIPAddressResult:
"""
Public IP address resource.
:param str expand: Expands referenced resources.
:param str public_ip_address_name: The name of the subnet.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['publicIpAddressName'] = public_ip_address_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20161201:getPublicIPAddress', __args__, opts=opts, typ=GetPublicIPAddressResult).value
return AwaitableGetPublicIPAddressResult(
dns_settings=__ret__.dns_settings,
etag=__ret__.etag,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
ip_address=__ret__.ip_address,
ip_configuration=__ret__.ip_configuration,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
public_ip_address_version=__ret__.public_ip_address_version,
public_ip_allocation_method=__ret__.public_ip_allocation_method,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
| [
[
[
176,
184
]
],
[
[
192,
198
]
],
[
[
206,
220
],
[
457,
463
],
[
3660,
3666
],
[
3946,
3952
],
[
4162,
4168
],
[
4314,
4320
],
[
4567,
4573
],
[
4708,
4714
],
[
4935,
4941
],
[
5105,
5111
],
[
5253,
5259
],
[
5558,
5564
],
[
5845,
5851
],
[
6145,
6151
],
[
6381,
6387
],
[
6553,
6559
],
[
1001,
1007
],
[
1175,
1181
],
[
1327,
1333
],
[
1538,
1544
],
[
1752,
1758
],
[
1960,
1966
],
[
2154,
2160
],
[
2320,
2326
],
[
2520,
2526
],
[
2769,
2775
],
[
3038,
3044
],
[
3269,
3275
],
[
3447,
3453
],
[
3605,
3611
],
[
3893,
3899
],
[
4117,
4123
],
[
4271,
4277
],
[
4503,
4509
],
[
4657,
4663
],
[
4878,
4884
],
[
5056,
5062
],
[
5208,
5214
],
[
5499,
5505
],
[
5779,
5785
],
[
6077,
6083
],
[
6327,
6333
],
[
6508,
6514
],
[
6656,
6662
],
[
7759,
7765
],
[
8268,
8274
],
[
8382,
8388
]
],
[
[
240,
243
]
],
[
[
245,
252
],
[
6426,
6433
]
],
[
[
254,
262
],
[
3724,
3732
],
[
3982,
3990
],
[
4196,
4204
],
[
4398,
4406
],
[
4627,
4635
],
[
4975,
4983
],
[
5329,
5337
],
[
5646,
5654
],
[
5937,
5945
],
[
6211,
6219
],
[
6417,
6425
],
[
7555,
7563
],
[
7627,
7635
],
[
7696,
7704
],
[
7750,
7758
]
],
[
[
264,
272
]
],
[
[
274,
279
]
],
[
[
296,
306
],
[
8343,
8353
]
],
[
[
308,
315
]
],
[
[
330,
337
]
],
[
[
339,
346
]
],
[
[
482,
506
],
[
6723,
6747
],
[
6873,
6897
],
[
8482,
8506
]
],
[
[
6689,
6722
],
[
7792,
7825
],
[
8526,
8559
]
],
[
[
7525,
7546
]
]
] |
from moler.cmd.unix.ps import Ps
from moler.observable_connection import ObservableConnection, get_connection
from moler.io.raw.terminal import ThreadedTerminal
# v.1 - combine all manually
# moler_conn = ObservableConnection()
# terminal = ThreadedTerminal(moler_connection=moler_conn)
# v.2 - let factory combine
terminal = get_connection(io_type='terminal', variant='threaded')
# v.3 - let factory select default variant
# terminal = get_connection(io_type='terminal')
with terminal.open():
ps_cmd = Ps(connection=terminal.moler_connection, options="-ef")
processes = ps_cmd()
for proc in processes:
if 'python' in proc['CMD']:
print("PID: {} CMD: {}".format(proc['PID'], proc['CMD']))
# result:
"""
PID: 1817 CMD: /usr/bin/python /usr/share/system-config-printer/applet.py
PID: 21825 CMD: /usr/bin/python /home/gl/moler/examples/command/unix_ps.py
"""
| [
[
[
30,
32
],
[
508,
510
]
],
[
[
73,
93
]
],
[
[
95,
109
],
[
327,
341
]
],
[
[
144,
160
]
],
[
[
316,
324
],
[
478,
486
],
[
522,
530
]
],
[
[
499,
505
],
[
581,
587
]
],
[
[
569,
578
],
[
606,
615
]
],
[
[
598,
602
],
[
640,
644
],
[
696,
700
],
[
709,
713
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0xde71c936
# Compiled with Coconut version 2.0.0-a_dev33 [How Not to Be Seen]
# Coconut Header: -------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))
_coconut_cached_module = _coconut_sys.modules.get(str("__coconut__"))
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir: # type: ignore
del _coconut_sys.modules[str("__coconut__")]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == str("__coconut__"):
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
_coconut_v_type = type(_coconut_v)
if getattr(_coconut_v_type, "__module__", None) == str("__coconut__"):
_coconut_v_type.__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import _coconut_tail_call, _coconut_tco, _coconut_call_set_names, _coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, _namedtuple_of, _coconut, _coconut_MatchError, _coconut_iter_getitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec, _coconut_comma_op, _coconut_multi_dim_arr
_coconut_sys.path.pop(0)
# Compiled Coconut: -----------------------------------------------------------
from argparse import ArgumentParser
from collections import namedtuple
if _coconut_sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
import hace
parser = ArgumentParser()
parser.add_argument("--host", type=str, default="localhost", help="Host address")
parser.add_argument("-p", "--port", type=int, default="6006", help="Server Port")
parser.add_argument("-e", "--env", type=str, default="op2", help="ACE Environment ID, see GACE doc for what's available")
parser.add_argument("-n", "--num", type=int, default=1, help="Number of Pooled Envs")
parser.add_argument("--pdk", type=str, default="xh035-3V3", help="ACE backend, see GACE doc for what's available")
@_coconut_tco
def isiterable(obj):
return _coconut_tail_call(isinstance, obj, Iterable)
def make_env(env_id, #type: str
backend, #type: str
num=1 #type: int
):
env = (hace.make_env(env_id, backend) if num == 1 else hace.make_same_env_pool(num, env_id, backend))
return env
def simulate_pool(envs, sizings #type: dict[int, dict[str, float]]
):
sizing = dict(((int(i)), (s)) for i, s in sizings.items())
perf = hace.evaluate_circuit_pool(envs, sizing)
return perf
def simulate_single(env, sizing #type: dict[str, float]
):
perf = hace.evaluate_circuit(env, sizing)
return perf
def simulate(env, sizing):
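    # Dispatch: pooled evaluation when env is an iterable pool of environments, single evaluation otherwise.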
perf = (simulate_pool(env, sizing) if isiterable(env) else simulate_single(env, sizing))
return perf
def performance(env):
perf = ((hace.current_performance_pool if isiterable(env) else hace.current_performance))(env)
return perf
def sizing(env):
size = ((hace.current_sizing_pool if isiterable(env) else hace.current_sizing))(env)
return size
def performance_parameters(env):
pps = {"params": ((hace.performance_identifiers_pool if isiterable(env) else hace.performance_identifiers))(env)}
return pps
def sizing_parameters(env):
sps = {"params": ((hace.sizing_identifiers_pool if isiterable(env) else hace.sizing_identifiers))(env)}
return sps
def initial_sizing(env):
init = ((hace.initial_sizing_pool if isiterable(env) else hace.initial_sizing))(env)
return init
def random_sizing(env):
rng = ((hace.random_sizing_pool if isiterable(env) else hace.random_sizing))(env)
return rng
| [
[
[
251,
265
]
],
[
[
267,
282
]
],
[
[
284,
300
]
],
[
[
302,
310
]
],
[
[
318,
337
],
[
463,
475
],
[
654,
666
],
[
695,
707
],
[
1678,
1690
],
[
2762,
2774
],
[
2943,
2955
]
],
[
[
339,
356
],
[
377,
388
],
[
402,
413
],
[
550,
561
],
[
765,
776
],
[
791,
802
],
[
989,
1000
]
],
[
[
357,
374
],
[
611,
628
],
[
723,
740
],
[
817,
834
],
[
1009,
1026
]
],
[
[
438,
460
],
[
511,
533
],
[
575,
597
]
],
[
[
742,
762
],
[
843,
863
],
[
868,
888
],
[
946,
966
],
[
1065,
1085
]
],
[
[
1033,
1058
],
[
1185,
1210
],
[
1400,
1425
],
[
1648,
1673
],
[
1699,
1724
]
],
[
[
1115,
1149
],
[
1154,
1173
],
[
1238,
1257
],
[
1728,
1747
]
],
[
[
1219,
1229
],
[
1288,
1298
],
[
1376,
1386
],
[
1500,
1510
]
],
[
[
1477,
1492
],
[
1539,
1554
],
[
1619,
1634
]
],
[
[
1772,
1773
]
],
[
[
1798,
1816
],
[
3637,
3655
]
],
[
[
1818,
1830
],
[
3592,
3604
]
],
[
[
1832,
1855
]
],
[
[
1857,
1883
]
],
[
[
1885,
1911
]
],
[
[
1913,
1927
]
],
[
[
1929,
1937
]
],
[
[
1939,
1958
]
],
[
[
1960,
1981
]
],
[
[
1983,
2004
]
],
[
[
2006,
2030
]
],
[
[
2032,
2053
]
],
[
[
2055,
2084
]
],
[
[
2086,
2112
]
],
[
[
2114,
2146
]
],
[
[
2148,
2177
]
],
[
[
2179,
2192
]
],
[
[
2194,
2212
]
],
[
[
2214,
2235
]
],
[
[
2237,
2255
]
],
[
[
2257,
2280
]
],
[
[
2282,
2308
]
],
[
[
2310,
2328
]
],
[
[
2330,
2353
]
],
[
[
2355,
2381
]
],
[
[
2383,
2400
]
],
[
[
2402,
2418
]
],
[
[
2420,
2442
]
],
[
[
2444,
2458
]
],
[
[
2460,
2472
]
],
[
[
2474,
2490
]
],
[
[
2492,
2525
]
],
[
[
2527,
2553
]
],
[
[
2555,
2574
]
],
[
[
2576,
2593
]
],
[
[
2595,
2610
]
],
[
[
2612,
2634
]
],
[
[
2636,
2655
]
],
[
[
2657,
2682
]
],
[
[
2684,
2703
]
],
[
[
2705,
2718
]
],
[
[
2720,
2737
]
],
[
[
2739,
2761
]
],
[
[
2890,
2904
],
[
3086,
3100
]
],
[
[
2929,
2939
]
],
[
[
3007,
3015
],
[
3673,
3681
]
],
[
[
3054,
3062
],
[
3673,
3681
]
],
[
[
3071,
3075
],
[
3785,
3789
],
[
3833,
3837
],
[
4046,
4050
],
[
4180,
4184
],
[
4406,
4410
],
[
4460,
4464
],
[
4540,
4544
],
[
4589,
4593
],
[
4690,
4694
],
[
4748,
4752
],
[
4853,
4857
],
[
4906,
4910
],
[
4993,
4997
],
[
5042,
5046
],
[
5123,
5127
],
[
5171,
5175
]
],
[
[
3077,
3083
],
[
3103,
3109
],
[
3185,
3191
],
[
3267,
3273
],
[
3389,
3395
],
[
3475,
3481
]
],
[
[
3609,
3619
],
[
4302,
4312
],
[
4439,
4449
],
[
4568,
4578
],
[
4727,
4737
],
[
4885,
4895
],
[
5021,
5031
],
[
5150,
5160
]
],
[
[
3689,
3697
]
],
[
[
3901,
3914
],
[
4272,
4285
]
],
[
[
4109,
4124
],
[
4323,
4338
]
],
[
[
4237,
4245
]
],
[
[
4375,
4386
]
],
[
[
4514,
4520
]
],
[
[
4638,
4660
]
],
[
[
4806,
4823
]
],
[
[
4959,
4973
]
],
[
[
5091,
5104
]
]
] |
import sys
import os
import copy
import json
import datetime
opt = dict()
opt['dataset'] = '../data/citeseer'
opt['hidden_dim'] = 16
opt['input_dropout'] = 0.5
opt['dropout'] = 0
opt['optimizer'] = 'adam'
opt['lr'] = 0.01
opt['decay'] = 5e-4
opt['self_link_weight'] = 1.0
opt['pre_epoch'] = 2000
opt['epoch'] = 100
opt['iter'] = 1
opt['use_gold'] = 1
opt['draw'] = 'smp'
opt['tau'] = 0.0
opt['save'] = 'exp_citeseer'
opt['mixup_alpha'] =1.0
opt['partition_num'] = 0
opt['task_ratio'] = 0
### ict hyperparameters ###
opt['ema_decay'] = 0.999
opt['consistency_type'] = "mse"
opt['consistency_rampup_starts'] = 500
opt['consistency_rampup_ends'] = 1000
opt['mixup_consistency'] = 10.0
def generate_command(opt):
cmd = 'python3 train.py'
for opt, val in opt.items():
cmd += ' --' + opt + ' ' + str(val)
return cmd
def run(opt):
opt_ = copy.deepcopy(opt)
os.system(generate_command(opt_))
os.system('rm record.txt')
os.system('echo -n -> record.txt')
os.system('rm record_val.txt')
os.system('echo -n -> record_val.txt')
partition_num_list = [8,9,10,11,12,13,14,15,16]
task_ratio_list = [0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
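# Grid search: for every (partition_num, task_ratio) pair run 10 seeds, then aggregate the
# per-seed records with result_cal.py and log the pair to record_val.txt.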
for p in partition_num_list:
for t in task_ratio_list:
os.system('rm record.txt')
os.system('echo -n -> record.txt')
opt['partition_num'] = p
opt['task_ratio'] = t
for k in range(10):
seed = k + 1
opt['seed'] = seed
run(opt)
os.system('python result_cal.py')
with open('record_val.txt', 'a') as f:
f.write(str(p) + ' ' + str(t) + '\n')
| [
[
[
7,
10
]
],
[
[
18,
20
],
[
922,
924
],
[
949,
951
],
[
984,
986
],
[
1015,
1017
],
[
1223,
1225
],
[
1258,
1260
],
[
1470,
1472
],
[
886,
888
]
],
[
[
28,
32
],
[
863,
867
]
],
[
[
40,
44
]
],
[
[
52,
60
]
],
[
[
62,
65
],
[
76,
79
],
[
112,
115
],
[
135,
138
],
[
162,
165
],
[
181,
184
],
[
207,
210
],
[
224,
227
],
[
244,
247
],
[
274,
277
],
[
298,
301
],
[
317,
320
],
[
333,
336
],
[
353,
356
],
[
373,
376
],
[
390,
393
],
[
419,
422
],
[
443,
446
],
[
468,
471
],
[
520,
523
],
[
545,
548
],
[
577,
580
],
[
616,
619
],
[
654,
657
],
[
1302,
1305
],
[
1335,
1338
],
[
1422,
1425
],
[
1457,
1460
]
],
[
[
693,
709
],
[
896,
912
]
],
[
[
842,
845
],
[
1453,
1456
]
],
[
[
1055,
1073
],
[
1165,
1183
]
],
[
[
1103,
1118
],
[
1198,
1213
]
],
[
[
1160,
1161
],
[
1325,
1326
],
[
1576,
1577
]
],
[
[
1193,
1194
],
[
1355,
1356
],
[
1591,
1592
]
],
[
[
1369,
1370
],
[
1404,
1405
]
],
[
[
1397,
1401
],
[
1436,
1440
]
],
[
[
1549,
1550
],
[
1564,
1565
]
]
] |
"""
Copyright (c) 2016, Jose Dolz. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import os
import numpy as np
from Modules.IO.sampling import getSamplesSubepoch
from Modules.General.Utils import dump_model_to_gzip_file
from Modules.General.Utils import getImagesSet
from Modules.General.Utils import load_model_from_gzip_file
from Modules.Parsers.parsersUtils import parserConfigIni
from startTesting import segmentVolume
def startTraining(networkModelName,configIniName):
print (" ************************************************ STARTING TRAINING **************************************************")
print (" ********************** Starting training model (Reading parameters) **********************")
myParserConfigIni = parserConfigIni()
myParserConfigIni.readConfigIniFile(configIniName,1)
# Image type (0: Nifti, 1: Matlab)
imageType = myParserConfigIni.imageTypesTrain
print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
print ("-------- Reading Images names used in training/validation -------------")
##-----##
# from sklearn.model_selection import KFold
# import numpy as np
# y1 = myParserConfigIni.indexesForTraining
# #x1 = myParserConfigIni.indexesForValidation
# kf = KFold(n_splits= 5)
#
# for train_index, test_index in kf.split(y1):
# print("TRAIN:", train_index, "TEST:", test_index)
# y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
# from sklearn.model_selection import LeavePOut
# lpo = LeavePOut(p=5)
# y1 = myParserConfigIni.indexesForTraining
# for train, test in lpo.split(y1):
# y, x = np.array(y1)[train], np.array(y1)[test]
##-----train##
from sklearn.cross_validation import LeaveOneOut
loo = LeaveOneOut(4)
y1 = myParserConfigIni.indexesForTraining
x1 = myParserConfigIni.indexesForValidation
for train_index, test_index in loo:
print("TRAIN:", train_index, "TEST:", test_index)
y, x = np.array(y1)[train_index], np.array(y1)[test_index]
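    # NOTE: the loop runs to completion, so y (training indices) and x (validation indices)
    # end up holding the last LeaveOneOut fold, which is what the code below uses.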
##------he
# from sklearn.model_selection import train_test_split
# X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
# -- Get list of images used for training -- #
(imageNames_Train, names_Train) = getImagesSet(myParserConfigIni.imagesFolder,y) # Images
(groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y) # Ground truth
(roiNames_Train, roi_names_Train) = getImagesSet(myParserConfigIni.ROIFolder,y) # ROI
# -- Get list of images used for validation -- #
(imageNames_Val, names_Val) = getImagesSet(myParserConfigIni.imagesFolder,x) # Images
(groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x) # Ground truth
(roiNames_Val, roi_names_Val) = getImagesSet(myParserConfigIni.ROIFolder,x) # ROI
# Print names
print (" ================== Images for training ================")
for i in range(0,len(names_Train)):
if len(roi_names_Train) > 0:
print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
else:
print(" Image({}): {} | GT: {} ".format(i,names_Train[i], gt_names_Train[i] ))
print (" ================== Images for validation ================")
for i in range(0,len(names_Val)):
if len(roi_names_Train) > 0:
print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
else:
print(" Image({}): {} | GT: {} ".format(i,names_Val[i], gt_names_Val[i]))
print (" ===============================================================")
# --------------- Load my LiviaNet3D object ---------------
print (" ... Loading model from {}".format(networkModelName))
myLiviaNet3D = load_model_from_gzip_file(networkModelName)
print (" ... Network architecture successfully loaded....")
    # Assign parameters to the loaded net
myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
myLiviaNet3D.numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch
myLiviaNet3D.firstEpochChangeLR = myParserConfigIni.firstEpochChangeLR
myLiviaNet3D.frequencyChangeLR = myParserConfigIni.frequencyChangeLR
numberOfEpochs = myLiviaNet3D.numberOfEpochs
numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
# --------------- -------------- ---------------
# --------------- Start TRAINING ---------------
# --------------- -------------- ---------------
# Get sample dimension values
receptiveField = myLiviaNet3D.receptiveField
sampleSize_Train = myLiviaNet3D.sampleSize_Train
trainingCost = []
if myParserConfigIni.applyPadding == 1:
applyPadding = True
else:
applyPadding = False
learningRateModifiedEpoch = 0
# Run over all the (remaining) epochs and subepochs
for e_i in xrange(numberOfEpochs):
# Recover last trained epoch
numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))
costsOfEpoch = []
for subE_i in xrange(numberOfSubEpochs):
epoch_nr = subE_i+1
print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))
# Get all the samples that will be used in this sub-epoch
[imagesSamplesAll,
gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
imageNames_Train,
groundTruthNames_Train,
roiNames_Train,
imageType,
sampleSize_Train,
receptiveField,
applyPadding
)
# Variable that will contain weights for the cost function
# --- In its current implementation, all the classes have the same weight
weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size
myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
costsOfBatches = []
evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
for b_i in xrange(numberBatches):
# TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
meanBatchCostError = costErrors[0]
costsOfBatches.append(meanBatchCostError)
myLiviaNet3D.updateLayersMatricesBatchNorm()
#======== Calculate and Report accuracy over subepoch
meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
# Release data
myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))
# Get mean cost epoch
costsOfEpoch.append(meanCostOfSubepoch)
meanCostOfEpoch = sum(costsOfEpoch) / float(numberOfSubEpochs)
# Include the epoch cost to the main training cost and update current mean
trainingCost.append(meanCostOfEpoch)
currentMeanCost = sum(trainingCost) / float(str( e_i + 1))
print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
print(" -------------------------------------------------------- " )
# ------------- Update Learning Rate if required ----------------#
if e_i >= myLiviaNet3D.firstEpochChangeLR :
if learningRateModifiedEpoch == 0:
currentLR = myLiviaNet3D.learning_rate.get_value()
newLR = currentLR / 2.0
myLiviaNet3D.learning_rate.set_value(newLR)
print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
learningRateModifiedEpoch = e_i
else:
if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
currentLR = myLiviaNet3D.learning_rate.get_value()
newLR = currentLR / 2.0
myLiviaNet3D.learning_rate.set_value(newLR)
print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
learningRateModifiedEpoch = e_i
# ---------------------- Start validation ---------------------- #
numberImagesToSegment = len(imageNames_Val)
print(" ********************** Starting validation **********************")
# Run over the images to segment
for i_d in xrange(numberImagesToSegment) :
print("------------- Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
segmentVolume(myLiviaNet3D,
i_d,
imageNames_Val, # Full path
names_Val, # Only image name
groundTruthNames_Val,
roiNames_Val,
imageType,
applyPadding,
receptiveField,
sampleSize_Train,
strideValues,
myLiviaNet3D.batch_Size,
0 # Validation (0) or testing (1)
)
print(" ********************** Validation DONE ********************** ")
# ------ In this point the training is done at Epoch n ---------#
# Increase number of epochs trained
myLiviaNet3D.numberOfEpochsTrained += 1
# --------------- Save the model ---------------
BASE_DIR = os.getcwd()
path_Temp = os.path.join(BASE_DIR,'outputFiles')
netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
netFolderName = os.path.join(netFolderName,'Networks')
modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
strFinal = " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
print (strFinal)
print("................ The whole Training is done.....")
print(" ************************************************************************************ ")
| [
[
[
1158,
1160
],
[
12171,
12173
],
[
12203,
12205
],
[
12264,
12266
],
[
12337,
12339
]
],
[
[
1169,
1180
],
[
3153,
3155
],
[
3180,
3182
],
[
7703,
7705
],
[
8101,
8103
],
[
8903,
8905
],
[
8993,
8995
]
],
[
[
1213,
1231
],
[
6965,
6983
]
],
[
[
1267,
1290
],
[
12510,
12533
]
],
[
[
1325,
1337
],
[
3466,
3478
],
[
3570,
3582
],
[
3684,
3696
],
[
3831,
3843
],
[
3931,
3943
],
[
4041,
4053
]
],
[
[
1372,
1397
],
[
5091,
5116
]
],
[
[
1439,
1454
],
[
1813,
1828
]
],
[
[
1480,
1493
],
[
11204,
11217
]
],
[
[
1500,
1513
]
]
] |
from numbers import Number
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import math as m
from scipy.stats import norm
"""
Minigolf task.
References
----------
- Penner, A. R. "The physics of putting." Canadian Journal of Physics 80.2 (2002): 83-96.
"""
class MiniGolf(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_pos = 0.0
self.max_pos = 20.0
self.min_action = 1e-5
self.max_action = 10.0
self.putter_length = 1.0 # [0.7:1.0]
self.friction = 0.131 # [0.065:0.196]
self.hole_size = 0.10 # [0.10:0.15]
self.sigma_noise = 0.3
self.ball_radius = 0.02135
self.min_variance = 1e-2 # Minimum variance for computing the densities
# gym attributes
self.viewer = None
low = np.array([self.min_pos])
high = np.array([self.max_pos])
self.action_space = spaces.Box(low=self.min_action,
high=self.max_action,
shape=(1,), dtype=float)
self.observation_space = spaces.Box(low=low, high=high, dtype=float)
# initialize state
self.seed()
self.reset()
def setParams(self, env_param):
self.putter_length = env_param[0]
self.friction = env_param[1]
self.hole_size = env_param[2]
self.sigma_noise = m.sqrt(env_param[-1])
def step(self, action, render=False):
action = np.clip(action, self.min_action, self.max_action / 2)
noise = 10
while abs(noise) > 1:
noise = self.np_random.randn() * self.sigma_noise
u = action * self.putter_length * (1 + noise)
deceleration = 5 / 7 * self.friction * 9.81
t = u / deceleration
xn = self.state - u * t + 0.5 * deceleration * t ** 2
reward = 0
done = True
if self.state > 0:
reward = -1
done = False
elif self.state < -4:
reward = -100
self.state = xn
return self.get_state(), float(reward), done, {'state': self.get_state(), 'action': action, 'danger': float(self.state) < -4}
# Custom param for transfer
def getEnvParam(self):
return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
np.ravel(self.sigma_noise ** 2)])
def reset(self, state=None):
if state is None:
self.state = np.array([self.np_random.uniform(low=self.min_pos,
high=self.max_pos)])
else:
self.state = np.array(state)
return self.get_state()
def get_state(self):
return np.array(self.state)
def get_true_state(self):
"""For testing purposes"""
return np.array(self.state)
def clip_state(self, state):
return state
# return np.clip(state, self.min_pos, self.max_pos)
def clip_action(self, action):
return action
# return np.clip(action, self.min_action, self.max_action)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def getDensity_old(self, env_parameters, state, action, next_state):
if state < next_state:
return 0
action = np.clip(action, self.min_action, self.max_action / 2)
action = 1e-8 if action == 0 else action
putter_length = env_parameters[0]
friction = env_parameters[1]
sigma_noise = env_parameters[-1]
deceleration = 5 / 7 * friction * 9.81
u = np.sqrt(2 * deceleration * (state - next_state))
noise = (u / (action * putter_length) - 1) / sigma_noise
return norm.pdf(noise)
def density_old(self, env_parameters, state, action, next_state):
"""
:param env_parameters: list of env_params
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
mask = state < next_state
action = np.clip(action, self.min_action, self.max_action / 2)
action[action == 0] = 1e-8
pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
diff = np.abs(state - next_state) # take the abs for the sqrt, but mask negative values later
for i in range(env_parameters.shape[0]):
deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
u = np.sqrt(2 * deceleration * diff[:, :, :, i])
noise = (u / (action[:, :, np.newaxis, i] * env_parameters[i, 0]) - 1) / env_parameters[i, -1]
pdf[:, :, :, i] = norm.pdf(noise) * (1 - mask[:, :, :, i]) # set to zero impossible transitions
return pdf[:, :, 0, :]
def densityCurrent_old(self, state, action, next_state):
"""
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
mask = state < next_state
action = np.clip(action, self.min_action, self.max_action / 2)
action[action == 0] = 1e-8
diff = np.abs(state - next_state) # take the abs for the sqrt, but mask negative values later
deceleration = 5 / 7 * self.friction * 9.81
u = np.sqrt(2 * deceleration * diff)
noise = (u / (action[:, :, np.newaxis] * self.putter_length) - 1) / self.sigma_noise
pdf = norm.pdf(noise) * (1 - mask) # set to zero impossible transitions
return pdf[:, :, 0]
def stepDenoisedCurrent_old(self, state, action):
"""
Computes steps without noise.
"""
assert state.ndim == 3 and action.ndim == 2
action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
u = action * self.putter_length
deceleration = 5 / 7 * self.friction * 9.81
t = u / deceleration
return state - u * t + 0.5 * deceleration * t ** 2
def stepDenoisedCurrent(self, state, action):
"""
Computes the mean transitions.
"""
assert state.ndim == 3 and action.ndim == 2
action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
u = action * self.putter_length
deceleration = 5 / 7 * self.friction * 9.81
return state - 0.5 * u ** 2 * (1 + self.sigma_noise ** 2) / deceleration
def variance(self, action):
"""
Next-state variance given the action
"""
assert action.ndim == 2
deceleration = 5 / 7 * self.friction * 9.81
action = np.clip(action, self.min_action, self.max_action / 2)
k = action ** 2 * self.putter_length ** 2 / (2 * deceleration)
return 2 * k ** 2 * self.sigma_noise ** 2 * (self.sigma_noise ** 2 + 2) + self.min_variance
def densityCurrent(self, state, action, next_state):
"""
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
mean_ns = self.stepDenoisedCurrent(state, action)
var_ns = self.variance(action)
return norm.pdf((next_state - mean_ns)[:, :, 0] / np.sqrt(var_ns))
def density(self, env_parameters, state, action, next_state):
"""
:param env_parameters: list of env_params
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
action = np.clip(action, self.min_action, self.max_action / 2)
pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
for i in range(env_parameters.shape[0]):
deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
k = action ** 2 * env_parameters[i, 0] ** 2 / (2 * deceleration)
# Compute mean next-state
mean_ns = state[:, :, :, i] - k[:, :, np.newaxis, i] * (1 + env_parameters[i, -1])
# Compute variance next-state
var_ns = 2 * k[:, :, np.newaxis, i] ** 2 * env_parameters[i, -1] * (
env_parameters[i, -1] + 2) + self.min_variance
pdf[:, :, :, i] = norm.pdf((next_state[:, :, :, i] - mean_ns) / np.sqrt(var_ns))
return pdf[:, :, 0, :]
class ComplexMiniGolf(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.horizon = 20
self.gamma = 0.99
self.min_pos = 0.0
self.max_pos = 20.0
self.min_action = 1e-5
self.max_action = 10.0
self.putter_length = 1.0 # [0.7:1.0]
# self.friction = 0.131 # [0.065:0.196]
self.friction_low = 0.131
self.friction_high = 0.19 # 0.190
self.hole_size = 0.10 # [0.10:0.15]
self.sigma_noise = 0.3
self.ball_radius = 0.02135
self.min_variance = 1e-2 # Minimum variance for computing the densities
# gym attributes
self.viewer = None
low = np.array([self.min_pos])
high = np.array([self.max_pos])
self.action_space = spaces.Box(low=self.min_action,
high=self.max_action,
shape=(1,))
self.observation_space = spaces.Box(low=low, high=high)
# initialize state
self.seed()
self.reset()
def setParams(self, env_param):
self.putter_length = env_param[0]
self.friction = env_param[1]
self.hole_size = env_param[2]
self.sigma_noise = m.sqrt(env_param[-1])
def computeFriction(self, state):
# if state < (self.max_pos - self.min_pos) / 3:
# friction = self.friction_low
# elif state < (self.max_pos - self.min_pos) * 2 / 3:
# friction = self.friction_low
# else:
# friction = self.friction_high
# return friction
delta_f = self.friction_high - self.friction_low
delta_p = self.max_pos - self.min_pos
return self.friction_low + (delta_f / delta_p) * state
def step(self, action, render=False):
action = np.clip(action, self.min_action, self.max_action / 2)
noise = 10
while abs(noise) > 1:
noise = self.np_random.randn() * self.sigma_noise
u = action * self.putter_length * (1 + noise)
friction = self.computeFriction(self.state)
deceleration = 5 / 7 * friction * 9.81
t = u / deceleration
xn = self.state - u * t + 0.5 * deceleration * t ** 2
# reward = 0
# done = True
# if u < v_min:
# reward = -1
# done = False
# elif u > v_max:
# reward = -100
reward = 0
done = True
if self.state > 0:
reward = -1
done = False
elif self.state < -4:
reward = -100
state = self.state
self.state = xn
# TODO the last three values should not be used
return self.get_state(), float(reward), done, {"state": state, "next_state": self.state, "action": action}
# Custom param for transfer
def getEnvParam(self):
return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
np.ravel(self.sigma_noise ** 2)])
def reset(self, state=None):
# TODO change reset
if state is None:
self.state = np.array([self.np_random.uniform(low=self.min_pos,
high=self.max_pos)])
else:
self.state = np.array(state)
return self.get_state()
def get_state(self):
return np.array(self.state)
def get_true_state(self):
"""For testing purposes"""
return np.array(self.state)
def clip_state(self, state):
return state
# return np.clip(state, self.min_pos, self.max_pos)
def clip_action(self, action):
return action
# return np.clip(action, self.min_action, self.max_action)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reward(self, state, action, next_state):
# FIXME: two problems. (1,probably fixed) When the next_state is less than state. (2) reward of -100 is never returned
friction = self.computeFriction(state)
deceleration = 5 / 7 * friction * 9.81
u = np.sqrt(2 * deceleration * max((state - next_state), 0))
v_min = np.sqrt(10 / 7 * friction * 9.81 * state)
v_max = np.sqrt((2 * self.hole_size - self.ball_radius) ** 2 * (9.81 / (2 * self.ball_radius)) + v_min ** 2)
reward = 0
done = True
if u < v_min:
reward = -1
done = False
elif u > v_max:
reward = -100
return reward, done | [
[
[
20,
26
]
],
[
[
35,
38
],
[
312,
315
],
[
8752,
8755
]
],
[
[
55,
61
],
[
1018,
1024
],
[
1208,
1214
],
[
9589,
9595
],
[
9766,
9772
]
],
[
[
84,
91
],
[
3281,
3288
],
[
12636,
12643
]
],
[
[
99,
110
],
[
925,
927
],
[
965,
967
],
[
1584,
1586
],
[
2357,
2359
],
[
2369,
2371
],
[
2399,
2401
],
[
2424,
2426
],
[
2477,
2479
],
[
2596,
2598
],
[
2765,
2767
],
[
2855,
2857
],
[
2957,
2959
],
[
3472,
3474
],
[
3756,
3758
],
[
4299,
4301
],
[
4402,
4404
],
[
4488,
4490
],
[
4705,
4707
],
[
4789,
4791
],
[
5337,
5339
],
[
5441,
5443
],
[
5594,
5596
],
[
5662,
5664
],
[
6018,
6020
],
[
6078,
6080
],
[
6455,
6457
],
[
6515,
6517
],
[
6905,
6907
],
[
7569,
7571
],
[
7945,
7947
],
[
8013,
8015
],
[
8363,
8365
],
[
8483,
8485
],
[
8679,
8681
],
[
9496,
9498
],
[
9536,
9538
],
[
10624,
10626
],
[
11684,
11686
],
[
11696,
11698
],
[
11726,
11728
],
[
11751,
11753
],
[
11804,
11806
],
[
11951,
11953
],
[
12120,
12122
],
[
12210,
12212
],
[
12312,
12314
],
[
12966,
12968
],
[
13040,
13042
],
[
13098,
13100
]
],
[
[
118,
127
],
[
1502,
1503
],
[
10047,
10048
]
],
[
[
152,
156
],
[
3886,
3890
],
[
4887,
4891
],
[
5734,
5738
],
[
7526,
7530
],
[
8633,
8637
]
],
[
[
303,
311
]
],
[
[
8736,
8751
]
]
] |
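The MiniGolf environment above follows the standard gym API (reset / step / seed). A minimal usage sketch, assuming the class is importable; the module name "minigolf_env" is hypothetical:

# Usage sketch for the MiniGolf environment defined above (illustrative only).
from minigolf_env import MiniGolf   # hypothetical module name

env = MiniGolf()
state = env.reset()
done = False
episode_return = 0.0
while not done:
    action = env.action_space.sample()            # random putt strength
    state, reward, done, info = env.step(action)  # reward is -1 until the ball stops at or past the hole
    episode_return += reward
print("episode return:", episode_return)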
class UnoInfo:
def __init__(self):
self.dataPins = 13
self.analogInPins = 5
self.GND = 3
self.pow = [3.3, 5]
self.TX = 1
self.RX = 0
def getMainInfo(self):
return {"0": self.dataPins, "1": self.GND, "2": self.pow}
def getDigitalPins(self):
return self.dataPins
def getAnalogPins(self):
return self.analogInPins
def getAmountGND(self):
return self.GND
def getPowOut(self):
return self.pow
def getTXSlot(self):
return self.TX
def getRXSlot(self):
return self.RX
| [
[
[
8,
15
]
]
] |
#!/usr/bin/env python3
import paho.mqtt.client as mqtt
import json
import random
import math
import time
import ssl
config_mqtt_broker_ip = "iot.fh-muenster.de"
config_mqtt_client_id = "dummy-receiver-" + str(random.randint(1000, 9999));
config_mqtt_topic = "sensor/60:01:94:4A:AF:7A"
ts_last_message = int(round(time.time() * 1000))
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe(config_mqtt_topic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
mqtt_c = mqtt.Client(config_mqtt_client_id)
mqtt_c.on_connect = on_connect
mqtt_c.on_message = on_message
mqtt_c.tls_set(ca_certs="ca.pem")
#mqtt_c.tls_insecure_set(True)
mqtt_c.connect(config_mqtt_broker_ip, 8883, 60)
mqtt_c.loop_forever();
| [
[
[
31,
55
],
[
716,
720
]
],
[
[
63,
67
]
],
[
[
75,
81
],
[
211,
217
]
],
[
[
89,
93
]
],
[
[
101,
105
],
[
320,
324
]
],
[
[
113,
116
]
],
[
[
118,
139
],
[
895,
916
]
],
[
[
163,
184
],
[
728,
749
]
],
[
[
240,
257
],
[
533,
550
]
],
[
[
292,
307
]
],
[
[
426,
436
],
[
771,
781
]
],
[
[
628,
638
],
[
802,
812
]
],
[
[
707,
713
],
[
751,
757
],
[
782,
788
],
[
813,
819
],
[
880,
886
],
[
929,
935
]
]
] |
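The script above only consumes messages; a companion publisher sketch is shown below. Host, port, topic and CA file mirror the receiver script; whether the broker accepts this anonymous publish is an assumption.

# Publish one test message to the topic the dummy receiver subscribes to (illustrative only).
import json
import paho.mqtt.publish as publish

payload = json.dumps({"temperature": 21.5, "humidity": 48})  # example payload
publish.single(
    "sensor/60:01:94:4A:AF:7A",        # topic the receiver subscribes to
    payload=payload,
    hostname="iot.fh-muenster.de",
    port=8883,
    tls={"ca_certs": "ca.pem"},        # same CA file as the receiver
)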
# Online References used :
# https://github.com/imadmali/movie-scraper/blob/master/MojoLinkExtract.py
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# https://nycdatascience.com/blog/student-works/scraping-box-office-mojo/
# https://www.youtube.com/watch?v=XQgXKtPSzUI
# https://www.youtube.com/watch?v=aIPqt-OdmS0
# https://www.youtube.com/watch?v=XQgXKtPSzUI
from bs4 import BeautifulSoup
import pandas as pd
import os
import requests
import glob
import re
def scrape_data_for_actors():
file_path = os.path.join(os.path.join(os.environ['USERPROFILE']),
'Desktop') # This is written in order to save the txt file in the user's specified location on the machine
file_path = os.path.join(file_path,
'BoxOfficeMojo2_virti_bipin') # Folder name to be created where the file will be stored
if not os.path.exists(str(file_path)):
os.mkdir(str(file_path)) # If path does not exist create the path
os.chdir(file_path) # Change the directory of the file path
if len(glob.glob(
"*")) != 0: # The glob module finds all the pathnames matching a specified pattern according to the rules used by the Unix shell
file_list = glob.glob("*")
for file in file_list:
os.remove(file)
# The url of the BoxOffice Mojo to be scraped
url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum=1&sort=sumgross&order=DESC&&p=.htm'
pages_data = [] # List to store the pages data
total_pages = []
response = requests.get(url) # Get the response of the url after passing the user input
soup = BeautifulSoup(response.content,
'html.parser') # Using the beautiful soup library to parse the html content and format it
for page in soup.find_all('a', href=lambda href: href and "page" in href): # find the href in a tags
pages_data.append(page['href']) # append the data in the pages_data list
for page in pages_data:
if 'page' in page: # If "page" found in href
index = page.find('page') # Take the index of that page if found
# print("Index", index)
if page[index:index + 10] not in total_pages:
# For extracting the total number of pages
total_pages.append(page[
                                    index:index + 10])  # for example: page=2; to get the total number of pages and iterate through them, it goes from 1 till the end of the pages for pagination
# print("Total Pages", total_pages)
average_gross_list = []
for num in range(1, len(total_pages) + 1, 1):
try:
url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum={}&sort=sumgross&order=DESC&&p=.htm'.format(num) # This one works well
# Get the Response
print("Page number {}".format(num))
response_from_url = requests.get(url)
html = response_from_url.text
soup = BeautifulSoup(html,
'lxml') # lxml is a pretty extensive library written for parsing XML and HTML documents very quickly
table = soup.find('table', {"cellspacing": "1"})
# Using dataframes
df = pd.read_html(str(table),skiprows=1)
df = df[0]
df = df.iloc[:, :6] # This is used to slice the dataframe to cut off the date sections.
df.columns = ['rank', 'person', 'total gross', 'number of movies', 'Average', 'number 1 picture']
df['id'] = ''
id_list = []
title_list = df['rank'].tolist()
new_index = [i for i in range(1,len(title_list)+1)]
df.index = new_index
for link in soup.findAll('a', {'href': re.compile("\?id=")}):
id_list.append(link.get('href'))
id_list = [x.split('=')[1] for x in id_list]
id_list = [x.split('.')[0] for x in id_list]
id_list = id_list[1:]
id_dict = dict(zip(title_list, id_list))
for index in df.index:
df.loc[index, 'id'] = id_dict[df.loc[index, 'rank']]
df.to_csv("actors.csv", index=False, mode='a')
except Exception as e:
print(e)
continue
file_list = glob.glob("*.csv")
df_container = []
for file in file_list:
df = pd.read_csv(file)
df_container.append(df)
df_combined = pd.concat(df_container)
df_combined.to_csv("actors.txt", index=False, sep="\t")
df = pd.read_csv("actors.txt", sep="\t")
# Data Cleaning
df['Average'] = df['Average'].apply(lambda x: x.replace('$', '')) # replace dollar signs
df['Average'] = df['Average'].apply(lambda x: x.replace(',', '')) # replace commas
df['Average'] = pd.to_numeric(df['Average'], errors='coerce')
df = df.sort_values(by='Average', ascending=False)
actor_with_highest_average_earning = df.iloc[0]['person']
print("actor(s) with the highest average earnings per movie is {}".format(actor_with_highest_average_earning))
new_df = pd.read_csv("actors.txt", sep="\t")
new_df['number of movies'] = pd.to_numeric(new_df['number of movies'], errors='coerce')
actor_most_movies = new_df.loc[new_df['number of movies'].idxmax()].person
print("actor(s) with the maximum number of movies is {}".format(actor_most_movies))
if __name__ == '__main__':
scrape_data_for_actors()
| [
[
[
388,
401
],
[
1714,
1727
],
[
3152,
3165
]
],
[
[
409,
421
],
[
3432,
3434
],
[
4657,
4659
],
[
4734,
4736
],
[
4836,
4838
],
[
5112,
5114
],
[
5422,
5424
],
[
5496,
5498
]
],
[
[
429,
431
],
[
523,
525
],
[
536,
538
],
[
549,
551
],
[
738,
740
],
[
899,
901
],
[
943,
945
],
[
1018,
1020
],
[
1342,
1344
]
],
[
[
439,
447
],
[
1621,
1629
],
[
3065,
3073
]
],
[
[
455,
459
],
[
1095,
1099
],
[
1276,
1280
],
[
4563,
4567
]
],
[
[
467,
469
],
[
3984,
3986
]
],
[
[
477,
499
],
[
5760,
5782
]
]
] |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class PyPyqt5(SIPPackage):
"""PyQt is a set of Python v2 and v3 bindings for The Qt Company's Qt
application framework and runs on all platforms supported by Qt including
Windows, OS X, Linux, iOS and Android. PyQt5 supports Qt v5."""
homepage = "https://www.riverbankcomputing.com/software/pyqt/intro"
url = "https://www.riverbankcomputing.com/static/Downloads/PyQt5/5.13.0/PyQt5_gpl-5.13.0.tar.gz"
list_url = "https://www.riverbankcomputing.com/software/pyqt/download5"
sip_module = 'PyQt5.sip'
import_modules = [
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtHelp',
'PyQt5.QtMultimedia', 'PyQt5.QtMultimediaWidgets', 'PyQt5.QtNetwork',
'PyQt5.QtOpenGL', 'PyQt5.QtPrintSupport', 'PyQt5.QtQml',
'PyQt5.QtQuick', 'PyQt5.QtSvg', 'PyQt5.QtTest', 'PyQt5.QtWebChannel',
'PyQt5.QtWebSockets', 'PyQt5.QtWidgets', 'PyQt5.QtXml',
'PyQt5.QtXmlPatterns'
]
version('5.13.0', sha256='0cdbffe5135926527b61cc3692dd301cd0328dd87eeaf1313e610787c46faff9')
version('5.12.3', sha256='0db0fa37debab147450f9e052286f7a530404e2aaddc438e97a7dcdf56292110')
variant('qsci', default=False, description='Build with QScintilla python bindings')
# Without opengl support, I got the following error:
# sip: QOpenGLFramebufferObject is undefined
depends_on('qt@5:+opengl')
depends_on('python@2.6:', type=('build', 'run'))
depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3')
depends_on('qscintilla', when='+qsci')
# For building Qscintilla python bindings
resource(name='qscintilla',
url='https://www.riverbankcomputing.com/static/Downloads/QScintilla/2.10.2/QScintilla_gpl-2.10.2.tar.gz',
sha256='14b31d20717eed95ea9bea4cd16e5e1b72cee7ebac647cba878e0f6db6a65ed0',
destination='spack-resource-qscintilla',
when='^qscintilla@2.10.2'
)
# https://www.riverbankcomputing.com/static/Docs/PyQt5/installation.html
def configure_args(self):
args = [
'--pyuic5-interpreter', self.spec['python'].command.path,
'--sipdir', self.prefix.share.sip.PyQt5,
'--stubsdir', join_path(site_packages_dir, 'PyQt5'),
]
if '+qsci' in self.spec:
args.extend(['--qsci-api-destdir', self.prefix.share.qsci])
return args
@run_after('install')
def make_qsci(self):
if '+qsci' in self.spec:
rsrc_py_path = os.path.join(
self.stage.source_path,
'spack-resource-qscintilla/QScintilla_gpl-' +
str(self.spec['qscintilla'].version), 'Python')
with working_dir(rsrc_py_path):
pydir = join_path(site_packages_dir, 'PyQt5')
python = self.spec['python'].command
python('configure.py', '--pyqt=PyQt5',
'--sip=' + self.prefix.bin.sip,
'--qsci-incdir=' +
self.spec['qscintilla'].prefix.include,
'--qsci-libdir=' + self.spec['qscintilla'].prefix.lib,
'--qsci-sipdir=' + self.prefix.share.sip.PyQt5,
'--apidir=' + self.prefix.share.qsci,
'--destdir=' + pydir,
'--pyqt-sipdir=' + self.prefix.share.sip.PyQt5,
'--sip-incdir=' + python_include_dir,
'--stubsdir=' + pydir)
# Fix build errors
# "QAbstractScrollArea: No such file or directory"
# "qprinter.h: No such file or directory"
# ".../Qsci.so: undefined symbol: _ZTI10Qsci...."
qscipro = FileFilter('Qsci/Qsci.pro')
link_qscilibs = 'LIBS += -L' + self.prefix.lib +\
' -lqscintilla2_qt5'
qscipro.filter('TEMPLATE = lib',
'TEMPLATE = lib\nQT += widgets' +
'\nQT += printsupport\n' + link_qscilibs)
make()
# Fix installation prefixes
makefile = FileFilter('Makefile')
makefile.filter(r'\$\(INSTALL_ROOT\)', '')
makefile = FileFilter('Qsci/Makefile')
makefile.filter(r'\$\(INSTALL_ROOT\)', '')
make('install')
| [
[
[
216,
217
],
[
244,
254
],
[
1175,
1182
],
[
1272,
1279
],
[
1370,
1377
],
[
1565,
1575
],
[
1596,
1606
],
[
1649,
1659
],
[
1722,
1732
],
[
1812,
1820
],
[
2600,
2609
],
[
2420,
2429
],
[
2430,
2447
],
[
2903,
2914
],
[
2954,
2963
],
[
2964,
2981
],
[
3627,
3645
],
[
3946,
3956
],
[
4285,
4289
],
[
4364,
4374
],
[
4473,
4483
],
[
4577,
4581
]
],
[
[
225,
227
],
[
2706,
2708
]
],
[
[
236,
243
]
]
] |
AUTHPLUGIN_FIXTURE = '{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1alpha1","spec":{},"status":{"token":"test"}}'
| [
[
[
0,
18
]
]
] |
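This one-line record makes the structure of the def_use_chains column easy to check: each inner pair reads as a [start, end) character span into the code string, covering a definition followed by its uses. A minimal decoding sketch (the span interpretation is an inference from the records, not documented here):

# Decode a def_use_chains entry as [start, end) character offsets into "code".
code = "AUTHPLUGIN_FIXTURE = '{...}'"      # abbreviated copy of the record above
def_use_chains = [[[0, 18]]]               # one chain with a single span

for chain in def_use_chains:
    for start, end in chain:
        print(code[start:end])             # -> AUTHPLUGIN_FIXTURE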
import sqlite3
from flask import Flask, jsonify, request
app = Flask(__name__)
app.debug = True
api_key = "RANDOM ACCESS KEY HERE"
def CheckAPIKey(key):
if key == api_key:
return True
else:
return False
@app.route('/')
def HomeDir():
return jsonify({'msg': "invalid_endpoint"})
@app.route('/api/v1/hwid')
def HwidDir():
db = sqlite3.connect('auth.db')
c = db.cursor()
opt = request.args.get('type')
hwid = request.args.get('hwid')
key = request.args.get('apikey')
if opt == 'add':
two_step = CheckAPIKey(key)
if two_step == True:
c.execute(f'INSERT INTO hwids VALUES ("{hwid}")')
db.commit()
return jsonify({'msg': "success"})
if two_step == False:
return jsonify({'msg': "invalid_apikey"})
if opt == 'check':
c.execute(f"SELECT * FROM hwids WHERE hwid='{hwid}'")
if hwid in str(c.fetchall()):
return jsonify({'msg': "success"})
else:
return jsonify({'msg': "invalid_hwid"})
else:
return jsonify({'msg': "invalid_type"})
if __name__ == "__main__":
app.run() | [
[
[
7,
14
],
[
382,
389
]
],
[
[
34,
39
],
[
67,
72
]
],
[
[
41,
48
],
[
289,
296
],
[
740,
747
],
[
819,
826
],
[
1002,
1009
],
[
1065,
1072
],
[
1127,
1134
]
],
[
[
50,
57
],
[
441,
448
],
[
478,
485
],
[
514,
521
]
],
[
[
61,
64
],
[
84,
87
],
[
246,
249
],
[
330,
333
],
[
1195,
1198
]
],
[
[
104,
111
],
[
179,
186
]
],
[
[
146,
157
],
[
585,
596
]
],
[
[
266,
273
]
],
[
[
361,
368
]
]
] |
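The two branches of the endpoint above can be exercised with plain GET requests. A client-side sketch, assuming the app runs on Flask's default development address (127.0.0.1:5000) and the apikey matches the value configured in the service:

# Client-side sketch for the HWID API above (illustrative only).
import requests

BASE = "http://127.0.0.1:5000/api/v1/hwid"
API_KEY = "RANDOM ACCESS KEY HERE"   # must match api_key in the service

# Register a HWID (requires the API key).
r = requests.get(BASE, params={"type": "add", "hwid": "ABC-123", "apikey": API_KEY})
print(r.json())   # {'msg': 'success'} or {'msg': 'invalid_apikey'}

# Check whether a HWID is known.
r = requests.get(BASE, params={"type": "check", "hwid": "ABC-123"})
print(r.json())   # {'msg': 'success'} or {'msg': 'invalid_hwid'}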
import os
import shutil
import subprocess
from pathlib import Path
from dotenv import dotenv_values
COMPOSAPY_ROOT_DIR = Path(__file__).parent
COMP_APP_PROD_DIR = COMPOSAPY_ROOT_DIR.parent.parent.joinpath("Product")
DATALAB_SERVICE_STATIC_DIR = COMP_APP_PROD_DIR.joinpath(
"CompAnalytics.DataLabService", "static"
)
TF_EXE_PATH = Path(dotenv_values(".local.env").get("TF_EXE_PATH"))
class CopyFileToSolutionException(Exception):
pass
class TfsException(Exception):
pass
def grant_permissions(path: Path) -> None:
subprocess.check_output(
["icacls", f"{path}", "/grant", "Everyone:F", "/t"],
stderr=subprocess.STDOUT,
)
def tfs_command(cwd: Path, *args) -> None:
run = subprocess.run([f"{TF_EXE_PATH}", *args], cwd=cwd, capture_output=True)
if run.returncode > 1:
raise TfsException(
f"Return code greater than 1, failed tf.exe with args: {args} and cwd: {cwd}."
f"Return Code: {run.returncode}\n"
f"StdOut: {run.stdout}\n"
f"StdErr: {run.stderr}\n"
)
def update_composapy_readme_artifacts(readme_artifacts: list[Path]) -> None:
for artifact in readme_artifacts:
destination_path = DATALAB_SERVICE_STATIC_DIR.joinpath(artifact.name)
shutil.copy(artifact, destination_path)
grant_permissions(destination_path)
def update_composapy_wheel(wheel: Path) -> None:
wheel_dest = DATALAB_SERVICE_STATIC_DIR.joinpath("wheels")
old_wheels = sorted(wheel_dest.glob("composapy-*.whl"))
# add new composapy wheel to local save_dir
try:
shutil.copy(wheel, wheel_dest)
grant_permissions(wheel_dest)
except Exception:
raise CopyFileToSolutionException(
f"Failed to copy wheel from {wheel} to {wheel_dest}."
)
# add new composapy wheel to tfs tracking
tfs_command(wheel_dest, "add", wheel.name)
if len(old_wheels) == 0:
return
# remove old composapy wheels from tfs tracking and local save_dir after new wheel was
# successfully loaded
# ...
# ...
# ... tfs is dumb
for old_wheel in old_wheels:
if old_wheel.name != wheel.name:
try:
tfs_command(wheel_dest, "delete", old_wheel.name)
except Exception:
pass
try:
tfs_command(wheel_dest, "undo", old_wheel.name)
except Exception:
pass
try:
os.remove(Path(old_wheel))
except Exception:
pass # if tfs did not fail to remove the file, this is expected
def update_static_wheel_deps() -> None:
tfs_command(DATALAB_SERVICE_STATIC_DIR, "add", "*", "/recursive")
def update_composapy_tests(tests: Path) -> None:
tests_dest = COMP_APP_PROD_DIR.joinpath("UnitTests", "TestData", "composapy")
# add/replace tests in local save_dir
try:
shutil.copytree(tests, tests_dest, dirs_exist_ok=True)
except Exception:
raise CopyFileToSolutionException(
f"Failed to copy tests from {tests} to {tests_dest}."
)
grant_permissions(tests_dest)
# add specific tfs test file dependencies here
tfs_command(tests_dest, "add", "test_*.py") # tests/test_*.py
tfs_command(tests_dest, "add", "conftest.py") # tests/conftest.py
tfs_command(tests_dest, "add", "__init__.py") # tests/__init__.py
tfs_command(tests_dest, "add", ".test.env") # tests/.test.env
tfs_command(tests_dest, "add", "TestFiles", "/recursive") # tests/TestFiles/*
## cleanup unwanted cache from previous command
tfs_command(tests_dest.joinpath("TestFiles"), "undo", ".pytest_cache", "/recursive")
| [
[
[
7,
9
],
[
2477,
2479
]
],
[
[
17,
23
],
[
1271,
1277
],
[
1595,
1601
],
[
2921,
2927
]
],
[
[
31,
41
],
[
536,
546
],
[
637,
647
],
[
717,
727
]
],
[
[
62,
66
],
[
122,
126
],
[
335,
339
],
[
517,
521
],
[
685,
689
],
[
1131,
1135
],
[
1391,
1395
],
[
2487,
2491
],
[
2764,
2768
]
],
[
[
86,
99
],
[
340,
353
]
],
[
[
101,
119
],
[
164,
182
]
],
[
[
144,
161
],
[
246,
263
],
[
2796,
2813
]
],
[
[
217,
243
],
[
1212,
1238
],
[
1423,
1449
],
[
2674,
2700
]
],
[
[
321,
332
],
[
736,
747
]
],
[
[
396,
423
],
[
1700,
1727
],
[
3012,
3039
]
],
[
[
453,
465
],
[
830,
842
]
],
[
[
493,
510
],
[
1319,
1336
],
[
1634,
1651
],
[
3121,
3138
]
],
[
[
668,
679
],
[
1856,
1867
],
[
2211,
2222
],
[
2345,
2356
],
[
2662,
2673
],
[
3207,
3218
],
[
3274,
3285
],
[
3345,
3356
],
[
3416,
3427
],
[
3483,
3494
],
[
3619,
3630
]
],
[
[
1074,
1107
]
],
[
[
1361,
1383
]
],
[
[
2622,
2646
]
],
[
[
2734,
2756
]
]
] |
from sympy import Symbol, gamma, oo, nan, zoo, factorial, sqrt, Rational, log,\
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin, cos, \
O, cancel
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1,2)) == sqrt(pi)
assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x-1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x+1)*gamma(x)
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - x*EulerGamma + x**2*EulerGamma**2/2 + pi**2*x**2/12 + O(x**3)
def test_lowergamma():
pass
def test_uppergamma():
assert uppergamma(4, 0) == 6
def test_polygamma():
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
assert polygamma(0, -9) == zoo
assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369,20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
def test_polygamma_expand_func():
assert polygamma(0, x).expand(func=True) == polygamma(0, x)
assert polygamma(0, 2*x).expand(func=True) == \
polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
assert polygamma(1, 2*x).expand(func=True) == \
polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
assert polygamma(2, x).expand(func=True) == \
polygamma(2, x)
assert polygamma(0, -1 + x).expand(func=True) == \
polygamma(0, x) - 1/(x - 1)
assert polygamma(0, 1 + x).expand(func=True) == \
1/x + polygamma(0, x )
assert polygamma(0, 2 + x).expand(func=True) == \
1/x + 1/(1 + x) + polygamma(0, x)
assert polygamma(0, 3 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
assert polygamma(0, 4 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
assert polygamma(1, 1 + x).expand(func=True) == \
polygamma(1, x) - 1/x**2
assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
1/(2 + x)**2 - 1/(3 + x)**2
assert polygamma(0, x + y).expand(func=True) == \
polygamma(0, x + y)
assert polygamma(1, x + y).expand(func=True) == \
polygamma(1, x + y)
assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4
e = polygamma(3, 4*x + y + S(3)/2)
assert e.expand(func=True) == e
e = polygamma(3, x + y + S(3)/4)
assert e.expand(func = True, basic = False) == e
def test_loggamma():
s1 = loggamma(1/(x+sin(x))+cos(x)).nseries(x,n=4)
s2 = (-log(2*x)-1)/(2*x) - log(x/pi)/2 + (4-log(2*x))*x/24 + O(x**2)
assert cancel(s1 - s2).removeO() == 0
s1 = loggamma(1/x).series(x)
s2 = (1/x-S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
x/12 - x**3/360 + x**5/1260 + O(x**7)
assert cancel(s1 - s2).removeO() == 0
def tN(N, M):
assert loggamma(1/x)._eval_nseries(x,n=N,logx=None).getn() == M
tN(0, 0)
tN(1, 1)
tN(2, 3)
tN(3, 3)
tN(4, 5)
tN(5, 5)
def test_polygamma_expansion():
# A. & S., pa. 259 and 260
assert polygamma(0, 1/x).nseries(x, n=3) \
== -log(x) - x/2 - x**2/12 + O(x**4)
assert polygamma(1, 1/x).series(x, n=5) \
== x + x**2/2 + x**3/6 + O(x**5)
assert polygamma(3, 1/x).nseries(x, n=8) \
== 2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
| [
[
[
18,
24
],
[
188,
194
],
[
204,
210
],
[
220,
226
]
],
[
[
26,
31
],
[
276,
281
],
[
305,
310
],
[
333,
338
],
[
363,
368
],
[
391,
396
],
[
416,
421
],
[
441,
446
],
[
467,
472
],
[
508,
513
],
[
553,
558
],
[
613,
618
],
[
673,
678
],
[
735,
740
],
[
784,
789
],
[
845,
850
],
[
909,
914
],
[
980,
985
],
[
1000,
1005
],
[
1037,
1042
],
[
1071,
1076
],
[
1097,
1102
],
[
1150,
1155
],
[
1183,
1188
],
[
1244,
1249
],
[
1294,
1299
],
[
1334,
1339
],
[
1418,
1423
]
],
[
[
33,
35
],
[
311,
313
],
[
318,
320
],
[
1701,
1703
],
[
1708,
1710
],
[
1735,
1737
],
[
1768,
1770
]
],
[
[
37,
40
],
[
282,
285
],
[
290,
293
],
[
1664,
1667
],
[
1672,
1675
]
],
[
[
42,
45
],
[
348,
351
],
[
375,
378
],
[
1809,
1812
],
[
1845,
1848
],
[
1880,
1883
],
[
1915,
1918
]
],
[
[
47,
56
],
[
481,
490
]
],
[
[
58,
62
],
[
532,
536
],
[
593,
597
],
[
653,
657
],
[
714,
718
],
[
764,
768
],
[
825,
829
],
[
888,
892
],
[
959,
963
]
],
[
[
64,
72
],
[
514,
522
],
[
559,
567
],
[
578,
586
],
[
619,
627
],
[
638,
646
],
[
679,
687
],
[
698,
706
],
[
741,
749
],
[
790,
798
],
[
810,
818
],
[
851,
859
],
[
872,
880
],
[
915,
923
],
[
936,
944
],
[
1193,
1201
],
[
1228,
1236
],
[
1254,
1262
],
[
1304,
1312
],
[
1340,
1348
],
[
1365,
1373
],
[
1992,
2000
],
[
2143,
2151
],
[
2232,
2240
],
[
2568,
2576
],
[
2695,
2703
]
],
[
[
74,
77
],
[
2592,
2595
],
[
4748,
4751
],
[
4768,
4771
],
[
4785,
4788
],
[
4907,
4910
],
[
4925,
4928
],
[
5326,
5329
]
],
[
[
88,
97
],
[
1009,
1018
],
[
1651,
1660
],
[
1688,
1697
],
[
1722,
1731
],
[
1755,
1764
],
[
1789,
1798
],
[
1825,
1834
],
[
1860,
1869
],
[
1896,
1905
],
[
1931,
1940
],
[
1973,
1982
],
[
2034,
2043
],
[
2072,
2081
],
[
2114,
2123
],
[
2169,
2178
],
[
2210,
2219
],
[
2278,
2287
],
[
2324,
2333
],
[
2355,
2364
],
[
2419,
2428
],
[
2456,
2465
],
[
2483,
2492
],
[
2535,
2544
],
[
2555,
2564
],
[
2610,
2619
],
[
2662,
2671
],
[
2682,
2691
],
[
2728,
2737
],
[
2778,
2787
],
[
2805,
2814
],
[
2860,
2869
],
[
2899,
2908
],
[
2959,
2968
],
[
2987,
2996
],
[
3059,
3068
],
[
3086,
3095
],
[
3140,
3149
],
[
3197,
3206
],
[
3251,
3260
],
[
3320,
3329
],
[
3374,
3383
],
[
3410,
3419
],
[
3483,
3492
],
[
3534,
3543
],
[
3607,
3616
],
[
3673,
3682
],
[
3746,
3755
],
[
3840,
3849
],
[
3894,
3903
],
[
3925,
3934
],
[
3979,
3988
],
[
4010,
4019
],
[
4089,
4098
],
[
4194,
4203
],
[
4273,
4282
],
[
4378,
4387
],
[
4457,
4466
],
[
4504,
4513
],
[
4579,
4588
],
[
5275,
5284
],
[
5370,
5379
],
[
5460,
5469
]
],
[
[
99,
109
],
[
1466,
1476
],
[
1484,
1494
],
[
1951,
1961
],
[
2011,
2021
]
],
[
[
111,
113
],
[
537,
539
],
[
598,
600
],
[
658,
660
],
[
719,
721
],
[
769,
771
],
[
830,
832
],
[
893,
895
],
[
964,
966
],
[
1502,
1504
],
[
2053,
2055
],
[
2091,
2093
],
[
2133,
2135
],
[
2188,
2190
],
[
2257,
2259
],
[
2301,
2303
],
[
4774,
4776
],
[
4931,
4933
]
],
[
[
115,
125
],
[
1594,
1604
]
],
[
[
127,
128
],
[
4527,
4528
],
[
4600,
4601
],
[
4899,
4900
]
],
[
[
130,
141
],
[
1171,
1182
],
[
1282,
1293
]
],
[
[
143,
151
],
[
4692,
4700
],
[
4861,
4869
],
[
5065,
5073
]
],
[
[
153,
156
],
[
4706,
4709
]
],
[
[
158,
161
],
[
4714,
4717
]
],
[
[
173,
174
],
[
1518,
1519
],
[
4802,
4803
],
[
4981,
4982
],
[
5351,
5352
],
[
5441,
5442
],
[
5555,
5556
]
],
[
[
176,
182
],
[
4821,
4827
],
[
5000,
5006
]
],
[
[
184,
185
],
[
986,
987
],
[
994,
995
],
[
1006,
1007
],
[
1022,
1023
],
[
1043,
1044
],
[
1077,
1078
],
[
1081,
1082
],
[
1103,
1104
],
[
1142,
1143
],
[
1145,
1146
],
[
1156,
1157
],
[
1189,
1190
],
[
1224,
1225
],
[
1250,
1251
],
[
1300,
1301
],
[
1357,
1358
],
[
1361,
1362
],
[
1424,
1425
],
[
1438,
1439
],
[
1464,
1465
],
[
1479,
1480
],
[
1508,
1509
],
[
1520,
1521
],
[
2339,
2340
],
[
2347,
2348
],
[
2370,
2371
],
[
2432,
2433
],
[
2469,
2470
],
[
2498,
2499
],
[
2548,
2549
],
[
2585,
2586
],
[
2625,
2626
],
[
2675,
2676
],
[
2712,
2713
],
[
2741,
2742
],
[
2791,
2792
],
[
2823,
2824
],
[
2873,
2874
],
[
2881,
2882
],
[
2916,
2917
],
[
2955,
2956
],
[
2972,
2973
],
[
3004,
3005
],
[
3043,
3044
],
[
3054,
3055
],
[
3072,
3073
],
[
3103,
3104
],
[
3153,
3154
],
[
3160,
3161
],
[
3171,
3172
],
[
3183,
3184
],
[
3214,
3215
],
[
3264,
3265
],
[
3271,
3272
],
[
3282,
3283
],
[
3294,
3295
],
[
3306,
3307
],
[
3337,
3338
],
[
3387,
3388
],
[
3394,
3395
],
[
3427,
3428
],
[
3496,
3497
],
[
3503,
3504
],
[
3517,
3518
],
[
3551,
3552
],
[
3620,
3621
],
[
3627,
3628
],
[
3641,
3642
],
[
3656,
3657
],
[
3690,
3691
],
[
3759,
3760
],
[
3766,
3767
],
[
3780,
3781
],
[
3808,
3809
],
[
3823,
3824
],
[
3853,
3854
],
[
3907,
3908
],
[
3938,
3939
],
[
3992,
3993
],
[
4029,
4030
],
[
4108,
4109
],
[
4122,
4123
],
[
4156,
4157
],
[
4177,
4178
],
[
4213,
4214
],
[
4292,
4293
],
[
4306,
4307
],
[
4340,
4341
],
[
4361,
4362
],
[
4393,
4394
],
[
4476,
4477
],
[
4490,
4491
],
[
4519,
4520
],
[
4592,
4593
],
[
4704,
4705
],
[
4710,
4711
],
[
4718,
4719
],
[
4730,
4731
],
[
4754,
4755
],
[
4763,
4764
],
[
4772,
4773
],
[
4791,
4792
],
[
4795,
4796
],
[
4804,
4805
],
[
4872,
4873
],
[
4882,
4883
],
[
4897,
4898
],
[
4913,
4914
],
[
4920,
4921
],
[
4950,
4951
],
[
4957,
4958
],
[
4968,
4969
],
[
4983,
4984
],
[
5290,
5291
],
[
5301,
5302
],
[
5330,
5331
],
[
5335,
5336
],
[
5341,
5342
],
[
5353,
5354
],
[
5385,
5386
],
[
5395,
5396
],
[
5419,
5420
],
[
5423,
5424
],
[
5432,
5433
],
[
5443,
5444
],
[
5475,
5476
],
[
5486,
5487
],
[
5512,
5513
],
[
5521,
5522
],
[
5530,
5531
],
[
5537,
5538
],
[
5546,
5547
],
[
5557,
5558
],
[
5076,
5077
],
[
5093,
5094
]
],
[
[
200,
201
],
[
3857,
3858
],
[
3911,
3912
],
[
3942,
3943
],
[
3996,
3997
],
[
4033,
4034
],
[
4102,
4103
],
[
4116,
4117
],
[
4150,
4151
],
[
4171,
4172
],
[
4217,
4218
],
[
4286,
4287
],
[
4300,
4301
],
[
4334,
4335
],
[
4355,
4356
],
[
4397,
4398
],
[
4470,
4471
],
[
4484,
4485
],
[
4523,
4524
],
[
4596,
4597
]
],
[
[
216,
217
],
[
1661,
1662
]
],
[
[
251,
261
]
],
[
[
1386,
1403
]
],
[
[
1531,
1546
]
],
[
[
1564,
1579
]
],
[
[
1621,
1635
]
],
[
[
2378,
2404
]
],
[
[
4666,
4679
]
],
[
[
5205,
5229
]
]
] |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
import re
import string
from jina.hub.crafters.nlp.Sentencizer import Sentencizer
import pickle
# class Splitter(Sentencizer):
# count = 0
# separator = "|"
#
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
#
# def craft(self, text: str, *args, **kwargs) -> Dict:
# print('================== test2')
# return dict(text=text, meta_info=text[:5].encode("utf-8"))
class SentenceSplitter(Sentencizer):
count = 0
separator = "|"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def craft(self, text: str, *args, **kwargs) -> Dict:
results = []
ret = text
with open("tokenizer/eng_sentence_tokenizer.pkl", 'rb') as f:
sent_tokenizer = pickle.load(f)
for ci, (s, e) in enumerate(sent_tokenizer.span_tokenize(ret)):
f = ret[s:e]
f = f[:self.max_sent_len]
if len(f) > self.min_sent_len:
results.append(dict(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e],
meta_info='testID'.encode("utf-8")
))
return results
| [
[
[
0,
13
]
],
[
[
75,
86
]
],
[
[
122,
126
],
[
766,
770
]
],
[
[
134,
136
]
],
[
[
144,
150
]
],
[
[
197,
208
],
[
582,
593
]
],
[
[
216,
222
],
[
911,
917
]
],
[
[
565,
581
]
]
] |
import os
import os.path
import requests
import time
from pathlib import Path
from talon import ctrl, ui, Module, Context, actions, clip
import tempfile
# Courtesy of https://github.com/anonfunc/talon-user/blob/master/apps/jetbrains.py
extendCommands = []
# Each IDE gets its own port, as otherwise you wouldn't be able
# to run two at the same time and switch between them.
# Note that MPS and IntelliJ ultimate will conflict...
port_mapping = {
"com.google.android.studio": 8652,
"com.jetbrains.AppCode": 8655,
"com.jetbrains.CLion": 8657,
"com.jetbrains.datagrip": 8664,
"com.jetbrains.goland-EAP": 8659,
"com.jetbrains.goland": 8659,
"com.jetbrains.intellij-EAP": 8653,
"com.jetbrains.intellij.ce": 8654,
"com.jetbrains.intellij": 8653,
"com.jetbrains.PhpStorm": 8662,
"com.jetbrains.pycharm": 8658,
"com.jetbrains.rider": 8660,
"com.jetbrains.rubymine": 8661,
"com.jetbrains.rubymine-EAP": 8661,
"com.jetbrains.WebStorm": 8663,
"google-android-studio": 8652,
"idea64.exe": 8654,
"IntelliJ IDEA": 8654,
"jetbrains-appcode": 8655,
"jetbrains-clion": 8657,
"jetbrains-datagrip": 8664,
"jetbrains-goland-eap": 8659,
"jetbrains-goland": 8659,
"jetbrains-idea-ce": 8654,
"jetbrains-idea-eap": 8654,
"jetbrains-idea": 8654,
"jetbrains-phpstorm": 8662,
"jetbrains-pycharm-ce": 8658,
"jetbrains-pycharm": 8658,
"jetbrains-rider": 8660,
"JetBrains Rider": 8660,
"jetbrains-rubymine": 8661,
"jetbrains-rubymine-eap": 8661,
"jetbrains-studio": 8652,
"jetbrains-webstorm": 8663,
"RubyMine": 8661,
"RubyMine-EAP": 8661,
"PyCharm": 8658,
"pycharm64.exe": 8658,
"WebStorm": 8663,
"webstorm64.exe": 8663,
}
def _get_nonce(port, file_prefix):
file_name = file_prefix + str(port)
try:
with open(os.path.join(tempfile.gettempdir(), file_name), "r") as fh:
return fh.read()
except FileNotFoundError as e:
try:
home = str(Path.home())
with open(os.path.join(home, file_name), "r") as fh:
return fh.read()
except FileNotFoundError as eb:
print(f"Could not find {file_name} in tmp or home")
return None
except IOError as e:
print(e)
return None
def send_idea_command(cmd):
print("Sending {}".format(cmd))
active_app = ui.active_app()
bundle = active_app.bundle or active_app.name
port = port_mapping.get(bundle, None)
nonce = _get_nonce(port, ".vcidea_") or _get_nonce(port, "vcidea_")
proxies = {"http": None, "https": None}
print(f"sending {bundle} {port} {nonce}")
if port and nonce:
response = requests.get(
"http://localhost:{}/{}/{}".format(port, nonce, cmd),
proxies=proxies,
timeout=(0.05, 3.05),
)
response.raise_for_status()
return response.text
def get_idea_location():
return send_idea_command("location").split()
def idea_commands(commands):
command_list = commands.split(",")
print("executing jetbrains", commands)
global extendCommands
extendCommands = command_list
for cmd in command_list:
if cmd:
send_idea_command(cmd.strip())
time.sleep(0.1)
ctx = Context()
mod = Module()
mod.apps.jetbrains = "app.name: /jetbrains/"
mod.apps.jetbrains = "app.name: IntelliJ IDEA"
mod.apps.jetbrains = "app.name: PyCharm"
mod.apps.jetbrains = "app.name: RubyMine"
mod.apps.jetbrains = "app.name: RubyMine-EAP"
mod.apps.jetbrains = """
os: mac
and app.bundle: com.google.android.studio
"""
# windows
mod.apps.jetbrains = "app.name: idea64.exe"
mod.apps.jetbrains = "app.name: PyCharm64.exe"
mod.apps.jetbrains = "app.name: pycharm64.exe"
mod.apps.jetbrains = "app.name: webstorm64.exe"
mod.apps.jetbrains = """
os: mac
and app.bundle: com.jetbrains.pycharm
"""
mod.apps.jetbrains = """
os: windows
and app.name: JetBrains Rider
os: windows
and app.exe: rider64.exe
"""
@mod.action_class
class Actions:
def idea(commands: str):
"""Send a command to Jetbrains product"""
idea_commands(commands)
def idea_grab(times: int):
"""Copies specified number of words to the left"""
old_clip = clip.get()
try:
original_line, original_column = get_idea_location()
for _ in range(times):
send_idea_command("action EditorSelectWord")
send_idea_command("action EditorCopy")
send_idea_command("goto {} {}".format(original_line, original_column))
send_idea_command("action EditorPaste")
finally:
clip.set(old_clip)
global extendCommands
extendCommands = []
ctx.matches = r"""
app: jetbrains
"""
@ctx.action_class("app")
class AppActions:
def tab_next():
actions.user.idea("action NextTab")
def tab_previous():
actions.user.idea("action PreviousTab")
def tab_close():
actions.user.idea("action CloseContent")
def tab_reopen():
actions.user.idea("action ReopenClosedTab")
@ctx.action_class("code")
class CodeActions:
# talon code actions
def toggle_comment():
actions.user.idea("action CommentByLineComment")
@ctx.action_class("edit")
class EditActions:
# talon edit actions
def copy():
actions.user.idea("action EditorCopy")
def cut():
actions.user.idea("action EditorCut")
def delete():
actions.user.idea("action EditorBackSpace")
def paste():
actions.user.idea("action EditorPaste")
def find_next():
actions.user.idea("action FindNext")
def find_previous():
actions.user.idea("action FindPrevious")
def find(text: str = None):
actions.user.idea("action Find")
def line_clone():
actions.user.idea("action EditorDuplicate")
def line_swap_down():
actions.user.idea("action MoveLineDown")
def line_swap_up():
actions.user.idea("action MoveLineUp")
def indent_more():
actions.user.idea("action EditorIndentLineOrSelection")
def indent_less():
actions.user.idea("action EditorUnindentSelection")
def select_line(n: int = None):
actions.user.idea("action EditorSelectLine")
def select_word():
actions.user.idea("action EditorSelectWord")
def select_all():
actions.user.idea("action $SelectAll")
def file_start():
actions.user.idea("action EditorTextStart")
def file_end():
actions.user.idea("action EditorTextEnd")
def extend_file_start():
actions.user.idea("action EditorTextStartWithSelection")
def extend_file_end():
actions.user.idea("action EditorTextEndWithSelection")
def jump_line(n: int):
actions.user.idea("goto {} 0".format(n))
        # move the cursor to the first non-whitespace character of the line
actions.user.idea("action EditorLineEnd")
actions.user.idea("action EditorLineStart")
@ctx.action_class("win")
class WinActions:
def filename():
title = actions.win.title()
result = title.split(" ")
# iterate over reversed result
# to support titles such as
# Class.Library2 – a.js
for word in reversed(result):
if "." in word:
return word
return ""
@ctx.action_class("user")
class UserActions:
def tab_jump(number: int):
# depends on plugin GoToTabs
if number < 10:
actions.user.idea("action GoToTab{}".format(number))
def extend_until_line(line: int):
actions.user.idea("extend {}".format(line))
def select_range(line_start: int, line_end: int):
# if it's a single line, select the entire thing including the ending new-line5
if line_start == line_end:
actions.user.idea("goto {} 0".format(line_start))
actions.user.idea("action EditorSelectLine"),
else:
actions.user.idea("range {} {}".format(line_start, line_end))
def extend_camel_left():
actions.user.idea("action EditorPreviousWordInDifferentHumpsModeWithSelection")
def extend_camel_right():
actions.user.idea("action EditorNextWordInDifferentHumpsModeWithSelection")
def camel_left():
actions.user.idea("action EditorPreviousWordInDifferentHumpsMode")
def camel_right():
actions.user.idea("action EditorNextWordInDifferentHumpsMode")
def line_clone(line: int):
actions.user.idea("clone {}".format(line))
| [
[
[
7,
9
]
],
[
[
17,
24
],
[
1866,
1868
],
[
2061,
2063
]
],
[
[
32,
40
],
[
2722,
2730
]
],
[
[
48,
52
],
[
3289,
3293
]
],
[
[
73,
77
],
[
2026,
2030
]
],
[
[
96,
100
]
],
[
[
102,
104
],
[
2410,
2412
]
],
[
[
106,
112
],
[
3329,
3335
]
],
[
[
114,
121
],
[
3313,
3320
]
],
[
[
123,
130
],
[
4872,
4879
],
[
4941,
4948
],
[
5011,
5018
],
[
5083,
5090
],
[
5233,
5240
],
[
5378,
5385
],
[
5441,
5448
],
[
5506,
5513
],
[
5576,
5583
],
[
5646,
5653
],
[
5717,
5724
],
[
5799,
5806
],
[
5863,
5870
],
[
5942,
5949
],
[
6016,
6023
],
[
6087,
6094
],
[
6175,
6182
],
[
6272,
6279
],
[
6349,
6356
],
[
6425,
6432
],
[
6495,
6502
],
[
6568,
6575
],
[
6648,
6655
],
[
6741,
6748
],
[
6832,
6839
],
[
6957,
6964
],
[
7007,
7014
],
[
7132,
7139
],
[
7558,
7565
],
[
7658,
7665
],
[
7892,
7899
],
[
7954,
7961
],
[
8026,
8033
],
[
8126,
8133
],
[
8245,
8252
],
[
8352,
8359
],
[
8451,
8458
],
[
8554,
8561
]
],
[
[
132,
136
],
[
4274,
4278
],
[
4674,
4678
]
],
[
[
144,
152
],
[
1879,
1887
]
],
[
[
238,
252
]
],
[
[
433,
445
],
[
2487,
2499
]
],
[
[
1768,
1778
],
[
2530,
2540
],
[
2562,
2572
]
],
[
[
2333,
2350
],
[
2978,
2995
],
[
3246,
3263
],
[
4414,
4431
],
[
4471,
4488
],
[
4522,
4539
],
[
4605,
4622
]
],
[
[
2946,
2963
],
[
4343,
4360
]
],
[
[
3022,
3035
],
[
4140,
4153
]
],
[
[
3307,
3310
],
[
4761,
4764
],
[
4802,
4805
],
[
5130,
5133
],
[
5285,
5288
],
[
7054,
7057
],
[
7410,
7413
]
],
[
[
3323,
3326
],
[
3339,
3342
],
[
3384,
3387
],
[
3431,
3434
],
[
3472,
3475
],
[
3514,
3517
],
[
3560,
3563
],
[
3649,
3652
],
[
3693,
3696
],
[
3740,
3743
],
[
3787,
3790
],
[
3835,
3838
],
[
3910,
3913
],
[
4021,
4024
]
],
[
[
4044,
4051
]
],
[
[
4832,
4842
]
],
[
[
5161,
5172
]
],
[
[
5316,
5327
]
],
[
[
7084,
7094
]
],
[
[
7441,
7452
]
],
[
[
3159,
3173
]
],
[
[
4739,
4753
]
]
] |
"""
2D Distributions
================
Some plots visualize a transformation of the original data set. Use a
stat parameter to choose a common transformation to visualize.
Each stat creates additional variables to map aesthetics to. These
variables use a common ..name.. syntax.
Look at the examples of 2D distributions below.
"""
# sphinx_gallery_thumbnail_path = "gallery_py\_stats\_2d_distributions.png"
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# %%
df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')
# %%
w, h = 400, 300
p = ggplot(df, aes('cty', 'hwy')) + ggsize(w, h)
p11 = p + geom_bin2d() + ggtitle('geom="bin2d" + default stat')
p12 = p + geom_point(aes(color='..count..'), stat='bin2d', size=3, shape=15) + \
ggtitle('geom="point" + stat="bin2d"')
p21 = p + geom_density2d() + ggtitle('geom="density2d" + default stat')
p22 = p + geom_point(stat='density2d', size=.5) + ggtitle('geom="point" + stat="density2d"')
bunch = GGBunch()
bunch.add_plot(p11, 0, 0)
bunch.add_plot(p12, w, 0)
bunch.add_plot(p21, 0, h)
bunch.add_plot(p22, w, h)
bunch | [
[
[
445,
457
],
[
524,
526
]
],
[
[
483,
484
],
[
486,
494
],
[
650,
656
],
[
661,
664
],
[
682,
688
],
[
706,
716
],
[
721,
728
],
[
771,
781
],
[
782,
785
],
[
853,
860
],
[
903,
917
],
[
922,
929
],
[
976,
986
],
[
1016,
1023
],
[
1070,
1077
]
],
[
[
519,
521
],
[
657,
659
]
],
[
[
629,
630
],
[
689,
690
],
[
1128,
1129
],
[
1182,
1183
]
],
[
[
632,
633
],
[
692,
693
],
[
1158,
1159
],
[
1185,
1186
]
],
[
[
646,
647
],
[
702,
703
],
[
767,
768
],
[
899,
900
],
[
972,
973
]
],
[
[
696,
699
],
[
1096,
1099
]
],
[
[
761,
764
],
[
1123,
1126
]
],
[
[
893,
896
],
[
1150,
1153
]
],
[
[
966,
969
],
[
1177,
1180
]
],
[
[
1062,
1067
],
[
1081,
1086
],
[
1108,
1113
],
[
1135,
1140
],
[
1162,
1167
],
[
1189,
1194
]
]
] |
from dataclasses import replace
from dataclass_abc import dataclass_abc
from rxbp.indexed.indexedflowable import IndexedFlowable
from rxbp.indexed.indexedsharedflowable import IndexedSharedFlowable
from rxbp.indexed.mixins.indexedflowablemixin import IndexedFlowableMixin
from rxbp.typing import ValueType
@dataclass_abc
class IndexedSharedFlowableImpl(IndexedSharedFlowable[ValueType]):
underlying: IndexedFlowableMixin
def _copy(
self,
is_shared: bool = None,
**kwargs,
):
return replace(self, **kwargs)
| [
[
[
24,
31
],
[
543,
550
]
],
[
[
59,
72
],
[
311,
324
]
],
[
[
115,
130
]
],
[
[
178,
199
],
[
357,
378
]
],
[
[
253,
273
],
[
408,
428
]
],
[
[
298,
307
],
[
379,
388
]
],
[
[
331,
356
]
]
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#@title Input protein sequence(s), then hit `Runtime` -> `Run all`
#from google.colab import files
import os.path
import re
import hashlib
import random
def add_hash(x,y):
return x+"_"+hashlib.sha1(y.encode()).hexdigest()[:5]
with open("protein") as f:
query_sequence = f.read()
#query_sequence = '' #@param {type:"string"}
#@markdown - Use `:` to specify inter-protein chainbreaks for **modeling complexes** (supports homo- and hetero-oligomers). For example **PI...SK:PI...SK** for a mono-dimer
# remove whitespaces
query_sequence = "".join(query_sequence.split())
jobname = 'test' #@param {type:"string"}
# remove whitespaces
basejobname = "".join(jobname.split())
basejobname = re.sub(r'\W+', '', basejobname)
jobname = add_hash(basejobname, query_sequence)
while os.path.isfile(f"{jobname}.csv"):
jobname = add_hash(basejobname, ''.join(random.sample(query_sequence,len(query_sequence))))
with open(f"{jobname}.csv", "w") as text_file:
text_file.write(f"id,sequence\n{jobname},{query_sequence}")
queries_path=f"{jobname}.csv"
# number of models to use
use_amber = False #@param {type:"boolean"}
use_templates = False #@param {type:"boolean"}
#save_to_google_drive = False #@param {type:"boolean"}
#@markdown - if the save_to_google_drive option was selected, the result zip will be uploaded to your Google Drive
#@markdown ### Advanced settings
msa_mode = "MMseqs2 (UniRef+Environmental)" #@param ["MMseqs2 (UniRef+Environmental)", "MMseqs2 (UniRef only)","single_sequence","custom"]
model_type = "auto" #@param ["auto", "AlphaFold2-ptm", "AlphaFold2-multimer"]
#@markdown - "auto" = protein structure prediction using "AlphaFold2-ptm" and complex prediction "AlphaFold-multimer". For complexes "AlphaFold-multimer" and "AlphaFold-ptm" can be used.
pair_mode = "unpaired+paired" #@param ["unpaired+paired","paired","unpaired"] {type:"string"}
#@markdown - "unpaired+paired" = pair sequences from same species and add unpaired MSA, "unpaired" = generate seperate MSA for each chain, "paired" - only use sequences that were sucessfully paired.
num_recycles = 1 #@param [1,3,6,12,24,48] {type:"raw"}
#@markdown Don't forget to hit `Runtime` -> `Run all` after updating the form.
# decide which a3m to use
if msa_mode.startswith("MMseqs2"):
a3m_file = f"{jobname}.a3m"
elif msa_mode == "custom":
a3m_file = f"{jobname}.custom.a3m"
if not os.path.isfile(a3m_file):
custom_msa_dict = files.upload()
custom_msa = list(custom_msa_dict.keys())[0]
header = 0
import fileinput
for line in fileinput.FileInput(custom_msa,inplace=1):
if line.startswith(">"):
header = header + 1
if not line.rstrip():
continue
if line.startswith(">") == False and header == 1:
query_sequence = line.rstrip()
print(line, end='')
os.rename(custom_msa, a3m_file)
queries_path=a3m_file
print(f"moving {custom_msa} to {a3m_file}")
else:
a3m_file = f"{jobname}.single_sequence.a3m"
with open(a3m_file, "w") as text_file:
text_file.write(">1\n%s" % query_sequence)
# Removed
#if save_to_google_drive:
# from pydrive.drive import GoogleDrive
# from pydrive.auth import GoogleAuth
# from google.colab import auth
# from oauth2client.client import GoogleCredentials
# auth.authenticate_user()
# gauth = GoogleAuth()
# gauth.credentials = GoogleCredentials.get_application_default()
# drive = GoogleDrive(gauth)
# print("You are logged into Google Drive and are good to go!")
# In[ ]:
#@title Run Prediction
import sys
from colabfold.download import download_alphafold_params, default_data_dir
from colabfold.utils import setup_logging
from colabfold.batch import get_queries, run, set_model_type
from colabfold.colabfold import plot_protein
from pathlib import Path
import matplotlib.pyplot as plt
# For some reason we need that to get pdbfixer to import
if use_amber and '/usr/local/lib/python3.7/site-packages/' not in sys.path:
sys.path.insert(0, '/usr/local/lib/python3.7/site-packages/')
def prediction_callback(unrelaxed_protein, length, prediction_result, input_features):
fig = plot_protein(unrelaxed_protein, Ls=length, dpi=100)
plt.show()
plt.close()
result_dir="."
setup_logging(Path(".").joinpath("log.txt"))
queries, is_complex = get_queries(queries_path)
model_type = set_model_type(is_complex, model_type)
download_alphafold_params(model_type, Path("."))
run(
queries=queries,
result_dir=result_dir,
use_templates=use_templates,
use_amber=use_amber,
msa_mode=msa_mode,
model_type=model_type,
num_models=5,
num_recycles=num_recycles,
model_order=[1, 2, 3, 4, 5],
is_complex=is_complex,
data_dir=Path("."),
keep_existing_results=False,
recompile_padding=1.0,
rank_by="auto",
pair_mode=pair_mode,
stop_at_score=float(100),
prediction_callback=prediction_callback,
)
# In[ ]:
| [
[
[
156,
163
],
[
831,
833
],
[
2430,
2432
],
[
2961,
2963
]
],
[
[
171,
173
],
[
745,
747
]
],
[
[
181,
188
],
[
240,
247
]
],
[
[
196,
202
],
[
909,
915
]
],
[
[
208,
216
],
[
787,
795
],
[
879,
887
]
],
[
[
307,
308
],
[
331,
332
]
],
[
[
314,
328
],
[
605,
619
]
],
[
[
580,
594
],
[
809,
823
],
[
923,
937
],
[
942,
956
],
[
1055,
1069
],
[
3207,
3221
]
],
[
[
630,
637
],
[
714,
721
]
],
[
[
692,
703
],
[
764,
775
]
],
[
[
731,
742
],
[
796,
807
],
[
888,
899
]
],
[
[
777,
784
],
[
849,
856
],
[
975,
982
],
[
1045,
1052
],
[
1090,
1097
],
[
2339,
2346
],
[
2398,
2405
],
[
3099,
3106
]
],
[
[
869,
876
],
[
849,
856
],
[
975,
982
],
[
1045,
1052
],
[
1090,
1097
],
[
2339,
2346
],
[
2398,
2405
],
[
3099,
3106
]
],
[
[
998,
1007
],
[
1013,
1022
]
],
[
[
1074,
1086
],
[
4474,
4486
]
],
[
[
1131,
1140
],
[
4059,
4068
],
[
4689,
4698
]
],
[
[
1174,
1187
],
[
4660,
4673
]
],
[
[
1426,
1434
],
[
2289,
2297
],
[
2358,
2366
],
[
4713,
4721
]
],
[
[
1565,
1575
],
[
4528,
4538
]
],
[
[
1831,
1840
],
[
4981,
4990
]
],
[
[
2124,
2136
],
[
4789,
4801
]
],
[
[
2325,
2333
]
],
[
[
2384,
2392
],
[
2445,
2453
],
[
2983,
2991
],
[
3014,
3022
],
[
3063,
3071
]
],
[
[
2464,
2479
],
[
2523,
2538
]
],
[
[
2505,
2515
],
[
2634,
2644
],
[
2971,
2981
],
[
3047,
3057
]
],
[
[
2558,
2564
],
[
2719,
2725
],
[
2838,
2844
]
],
[
[
2584,
2593
],
[
2614,
2623
]
],
[
[
2606,
2610
],
[
2672,
2676
],
[
2749,
2753
],
[
2804,
2808
],
[
2885,
2889
],
[
2938,
2942
]
],
[
[
2710,
2716
],
[
2838,
2844
],
[
2719,
2725
]
],
[
[
2868,
2882
],
[
3207,
3221
]
],
[
[
3001,
3013
],
[
4474,
4486
]
],
[
[
3085,
3093
],
[
3143,
3151
]
],
[
[
3161,
3170
],
[
3180,
3189
]
],
[
[
3711,
3714
],
[
4122,
4125
],
[
4136,
4139
]
],
[
[
3747,
3772
],
[
4540,
4565
]
],
[
[
3774,
3790
]
],
[
[
3819,
3832
],
[
4395,
4408
]
],
[
[
3861,
3872
],
[
4462,
4473
]
],
[
[
3874,
3877
],
[
4589,
4592
]
],
[
[
3879,
3893
],
[
4501,
4515
]
],
[
[
3927,
3939
],
[
4296,
4308
]
],
[
[
3960,
3964
],
[
4409,
4413
],
[
4578,
4582
],
[
4876,
4880
]
],
[
[
3972,
3996
],
[
4352,
4355
],
[
4367,
4370
]
],
[
[
4203,
4222
],
[
5046,
5065
]
],
[
[
4380,
4390
],
[
4630,
4640
]
],
[
[
4440,
4447
],
[
4606,
4613
]
],
[
[
4449,
4459
],
[
4516,
4526
],
[
4851,
4861
]
],
[
[
4488,
4498
],
[
4566,
4576
],
[
4742,
4752
]
]
] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.core.util import CLIError
from azure.cli.testsdk.base import execute
from azure.cli.testsdk.exceptions import CliTestError
from azure.cli.testsdk import (
JMESPathCheck,
JMESPathCheckExists,
JMESPathCheckGreaterThan,
NoneCheck,
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer,
TestCli,
LiveScenarioTest)
from azure.cli.testsdk.preparers import (
AbstractPreparer,
SingleValueReplacer)
from azure.cli.command_modules.sql.custom import (
ClientAuthenticationType,
ClientType)
from datetime import datetime, timedelta
from time import sleep
# Constants
server_name_prefix = 'clitestserver'
server_name_max_length = 63
class SqlServerPreparer(AbstractPreparer, SingleValueReplacer):
def __init__(self, name_prefix=server_name_prefix, parameter_name='server', location='westus',
admin_user='admin123', admin_password='SecretPassword123',
resource_group_parameter_name='resource_group', skip_delete=True):
super(SqlServerPreparer, self).__init__(name_prefix, server_name_max_length)
self.location = location
self.parameter_name = parameter_name
self.admin_user = admin_user
self.admin_password = admin_password
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
def create_resource(self, name, **kwargs):
group = self._get_resource_group(**kwargs)
template = 'az sql server create -l {} -g {} -n {} -u {} -p {}'
execute(TestCli(), template.format(self.location, group, name, self.admin_user, self.admin_password))
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
group = self._get_resource_group(**kwargs)
execute(TestCli(), 'az sql server delete -g {} -n {} --yes --no-wait'.format(group, name))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
            template = 'To create a sql server, a resource group is required. Please add ' \
                       'decorator @{} in front of this sql server preparer.'
raise CliTestError(template.format(ResourceGroupPreparer.__name__,
self.resource_group_parameter_name))
class SqlServerMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1')
@ResourceGroupPreparer(parameter_name='resource_group_2')
def test_sql_server_mgmt(self, resource_group_1, resource_group_2, resource_group_location):
server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
loc = 'westeurope'
user = admin_login
# test create sql server with minimal required parameters
server_1 = self.cmd('sql server create -g {} --name {} -l {} '
'--admin-user {} --admin-password {}'
.format(resource_group_1, server_name_1, loc, user, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity', None)]).get_output_in_json()
# test list sql server should be 1
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[JMESPathCheck('length(@)', 1)])
# test update sql server
self.cmd('sql server update -g {} --name {} --admin-password {} -i'
.format(resource_group_1, server_name_1, admin_passwords[1]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test update without identity parameter, validate identity still exists
# also use --id instead of -g/-n
self.cmd('sql server update --id {} --admin-password {}'
.format(server_1['id'], admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test create another sql server, with identity this time
self.cmd('sql server create -g {} --name {} -l {} -i '
'--admin-user {} --admin-password {}'
.format(resource_group_2, server_name_2, loc, user, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test list sql server in that group should be 1
self.cmd('sql server list -g {}'.format(resource_group_2), checks=[JMESPathCheck('length(@)', 1)])
# test list sql server in the subscription should be at least 2
self.cmd('sql server list', checks=[JMESPathCheckGreaterThan('length(@)', 1)])
# test show sql server
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name_1),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
self.cmd('sql server show --id {}'
.format(server_1['id']),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
self.cmd('sql server list-usages -g {} -n {}'
.format(resource_group_1, server_name_1),
checks=[JMESPathCheck('[0].resourceName', server_name_1)])
# test delete sql server
self.cmd('sql server delete --id {} --yes'
.format(server_1['id']), checks=NoneCheck())
self.cmd('sql server delete -g {} --name {} --yes'
.format(resource_group_2, server_name_2), checks=NoneCheck())
# test list sql server should be 0
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[NoneCheck()])
class SqlServerFirewallMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_firewall_mgmt(self, resource_group, resource_group_location, server):
rg = resource_group
firewall_rule_1 = 'rule1'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '255.255.255.255'
firewall_rule_2 = 'rule2'
start_ip_address_2 = '123.123.123.123'
end_ip_address_2 = '123.123.123.124'
# allow_all_azure_ips_rule = 'AllowAllAzureIPs'
# allow_all_azure_ips_address = '0.0.0.0'
# test sql server firewall-rule create
fw_rule_1 = self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, rg, server,
start_ip_address_1, end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)]).get_output_in_json()
# test sql server firewall-rule show by group/server/name
self.cmd('sql server firewall-rule show --name {} -g {} --server {}'
.format(firewall_rule_1, rg, server),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule show by id
self.cmd('sql server firewall-rule show --id {}'
.format(fw_rule_1['id']),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule update by group/server/name
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, rg, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule update by id
self.cmd('sql server firewall-rule update --id {} '
'--start-ip-address {}'
.format(fw_rule_1['id'], start_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_2)])
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--end-ip-address {}'
.format(firewall_rule_1, rg, server,
end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule create another rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_2, rg, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule list
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[JMESPathCheck('length(@)', 2)])
# # test sql server firewall-rule create azure ip rule
# self.cmd('sql server firewall-rule allow-all-azure-ips -g {} --server {} '
# .format(rg, server), checks=[
# JMESPathCheck('name', allow_all_azure_ips_rule),
# JMESPathCheck('resourceGroup', rg),
# JMESPathCheck('startIpAddress', allow_all_azure_ips_address),
# JMESPathCheck('endIpAddress', allow_all_azure_ips_address)])
# # test sql server firewall-rule list
# self.cmd('sql server firewall-rule list -g {} --server {}'
# .format(rg, server), checks=[JMESPathCheck('length(@)', 3)])
# test sql server firewall-rule delete
self.cmd('sql server firewall-rule delete --id {}'
.format(fw_rule_1['id']), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[JMESPathCheck('length(@)', 1)])
self.cmd('sql server firewall-rule delete --name {} -g {} --server {}'
.format(firewall_rule_2, rg, server), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[NoneCheck()])
class SqlServerDbMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_db_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
database_name_2 = "cliautomationdb02"
database_name_3 = "cliautomationdb03"
update_service_objective = 'S1'
update_storage = '10GB'
update_storage_bytes = str(10 * 1024 * 1024 * 1024)
rg = resource_group
loc_display = 'East US 2'
# test sql db commands
db1 = self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('zoneRedundant', False)]).get_output_in_json()
self.cmd('sql db list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[1].resourceGroup', rg)])
self.cmd('sql db list-usages -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[JMESPathCheck('[0].resourceName', database_name)])
# Show by group/server/name
self.cmd('sql db show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# Show by id
self.cmd('sql db show --id {}'
.format(db1['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# Update by group/server/name
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --max-size {}'
' --set tags.key1=value1'
.format(rg, server, database_name,
update_service_objective, update_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update by id
self.cmd('sql db update --id {} --set tags.key2=value2'
.format(db1['id']),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key2', 'value2')])
# Rename by group/server/name
db2 = self.cmd('sql db rename -g {} -s {} -n {} --new-name {}'
.format(rg, server, database_name, database_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2)]).get_output_in_json()
# Rename by id
db3 = self.cmd('sql db rename --id {} --new-name {}'
.format(db2['id'], database_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3)]).get_output_in_json()
# Delete by group/server/name
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name_3),
checks=[NoneCheck()])
# Delete by id
self.cmd('sql db delete --id {} --yes'
.format(db3['id']),
checks=[NoneCheck()])
class SqlServerDbOperationMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='southeastasia')
@SqlServerPreparer(location='southeastasia')
def test_sql_db_operation_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'S1'
# Create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# Update DB with --no-wait
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --no-wait'
.format(resource_group, server, database_name, update_service_objective))
# List operations
ops = list(
self.cmd('sql db op list -g {} -s {} -d {}'
.format(resource_group, server, database_name, update_service_objective),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].databaseName', database_name)
])
.get_output_in_json())
# Cancel operation
self.cmd('sql db op cancel -g {} -s {} -d {} -n {}'
.format(resource_group, server, database_name, ops[0]['name']))
class SqlServerConnectionPolicyScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_server_connection_policy(self, resource_group, resource_group_location, server):
# Show
self.cmd('sql server conn-policy show -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('connectionType', 'Default')])
# Update
for type in ('Proxy', 'Default', 'Redirect'):
self.cmd('sql server conn-policy update -g {} -s {} -t {}'
.format(resource_group, server, type),
checks=[JMESPathCheck('connectionType', type)])
class AzureActiveDirectoryAdministratorScenarioTest(LiveScenarioTest):
# convert to ScenarioTest and re-record when ISSUE #6011 is fixed
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_aad_admin(self, resource_group, server):
rg = resource_group
sn = server
oid = '5e90ef3b-9b42-4777-819b-25c36961ea4d'
oid2 = 'e4d43337-d52c-4a0c-b581-09055e0359a0'
user = 'DSEngAll'
user2 = 'TestUser'
self.cmd('sql server ad-admin create -s {} -g {} -i {} -u {}'
.format(sn, rg, oid, user),
checks=[JMESPathCheck('login', user),
JMESPathCheck('sid', oid)])
self.cmd('sql server ad-admin list -s {} -g {}'
.format(sn, rg),
checks=[JMESPathCheck('[0].login', user)])
self.cmd('sql server ad-admin update -s {} -g {} -u {} -i {}'
.format(sn, rg, user2, oid2),
checks=[JMESPathCheck('login', user2),
JMESPathCheck('sid', oid2)])
self.cmd('sql server ad-admin delete -s {} -g {}'
.format(sn, rg))
self.cmd('sql server ad-admin list -s {} -g {}'
.format(sn, rg),
checks=[JMESPathCheck('login', None)])
class SqlServerDbCopyScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1')
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer(parameter_name='server1', resource_group_parameter_name='resource_group_1')
@SqlServerPreparer(parameter_name='server2', resource_group_parameter_name='resource_group_2')
def test_sql_db_copy(self, resource_group_1, resource_group_2,
resource_group_location,
server1, server2):
database_name = "cliautomationdb01"
database_copy_name = "cliautomationdb02"
service_objective = 'S1'
rg = resource_group_1
loc_display = 'West US'
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server1, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
# copy database to same server (min parameters)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {}'
.format(rg, server1, database_name, database_copy_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_copy_name)
])
# copy database to other server (max parameters)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --dest-resource-group {} --dest-server {} '
'--service-objective {}'
.format(rg, server1, database_name, database_copy_name,
resource_group_2, server2, service_objective),
checks=[
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('name', database_copy_name),
JMESPathCheck('requestedServiceObjectiveName', service_objective)
])
def _get_earliest_restore_date(db):
return datetime.strptime(db['earliestRestoreDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
def _get_deleted_date(deleted_db):
return datetime.strptime(deleted_db['deletionDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
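# (Note, not in the original file: both helpers above parse REST timestamps of the form
# '2018-01-01T12:34:56.789+00:00'; because '+00:00' is matched as a literal, the result is a
# naive datetime that is implicitly UTC.)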
def _create_db_wait_for_first_backup(test, rg, server, database_name):
# create db
db = test.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Wait until earliestRestoreDate is in the past. When run live, this will take at least
    # 10 minutes. Unfortunately there's no way to speed this up.
earliest_restore_date = _get_earliest_restore_date(db)
while datetime.utcnow() <= earliest_restore_date:
sleep(10) # seconds
return db
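# (Illustrative sketch, not part of the original tests: the poll-and-sleep pattern above, and the
# timeout-bounded variant used inline in test_sql_db_restore_deleted further down, could be
# factored into a generic helper along these lines. The name `_wait_until` is hypothetical.)
def _wait_until(condition, timeout_seconds=15 * 60, poll_seconds=10):
    """Poll `condition` until it returns a truthy value, or fail once the timeout expires."""
    deadline = datetime.utcnow() + timedelta(seconds=timeout_seconds)
    while True:
        result = condition()
        if result:
            return result
        if datetime.utcnow() >= deadline:
            raise AssertionError('Condition not met before timeout expired.')
        sleep(poll_seconds)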
class SqlServerDbRestoreScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_db_restore(self, resource_group, resource_group_location, server):
rg = resource_group
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_standalone_database_name = 'cliautomationdb01restore1'
restore_pool_database_name = 'cliautomationdb01restore2'
elastic_pool = 'cliautomationpool1'
# create elastic pool
self.cmd('sql elastic-pool create -g {} -s {} -n {}'
.format(rg, server, elastic_pool))
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, rg, server, database_name)
# Restore to standalone db
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --service-objective {} --edition {}'
.format(rg, server, database_name, datetime.utcnow().isoformat(),
restore_standalone_database_name, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_standalone_database_name),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore to db into pool
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --elastic-pool {}'
.format(rg, server, database_name, datetime.utcnow().isoformat(),
restore_pool_database_name, elastic_pool),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_pool_database_name),
JMESPathCheck('elasticPoolName', elastic_pool),
JMESPathCheck('status', 'Online')])
class SqlServerDbRestoreDeletedScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_db_restore_deleted(self, resource_group, resource_group_location, server):
rg = resource_group
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_database_name1 = 'cliautomationdb01restore1'
restore_database_name2 = 'cliautomationdb01restore2'
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, rg, server, database_name)
# Delete database
self.cmd('sql db delete -g {} -s {} -n {} --yes'.format(rg, server, database_name))
# Wait for deleted database to become visible. When run live, this will take around
        # 5-10 minutes. Unfortunately there's no way to speed this up. Use a timeout to ensure
# test doesn't loop forever if there's a bug.
start_time = datetime.now()
timeout = timedelta(0, 15 * 60) # 15 minutes timeout
while True:
deleted_dbs = list(self.cmd('sql db list-deleted -g {} -s {}'.format(rg, server)).get_output_in_json())
if deleted_dbs:
# Deleted db found, stop polling
break
# Deleted db not found, sleep (if running live) and then poll again.
if self.is_live:
self.assertTrue(datetime.now() < start_time + timeout, 'Deleted db not found before timeout expired.')
sleep(10) # seconds
deleted_db = deleted_dbs[0]
# Restore deleted to latest point in time
self.cmd('sql db restore -g {} -s {} -n {} --deleted-time {} --dest-name {}'
' --service-objective {} --edition {}'
.format(rg, server, database_name, _get_deleted_date(deleted_db).isoformat(),
restore_database_name1, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_database_name1),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore deleted to earlier point in time
self.cmd('sql db restore -g {} -s {} -n {} -t {} --deleted-time {} --dest-name {}'
.format(rg, server, database_name, _get_earliest_restore_date(deleted_db).isoformat(),
_get_deleted_date(deleted_db).isoformat(), restore_database_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_database_name2),
JMESPathCheck('status', 'Online')])
class SqlServerDbSecurityScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer()
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer()
@StorageAccountPreparer()
@StorageAccountPreparer(parameter_name='storage_account_2',
resource_group_parameter_name='resource_group_2')
def test_sql_db_security_mgmt(self, resource_group, resource_group_2,
resource_group_location, server,
storage_account, storage_account_2):
database_name = "cliautomationdb01"
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# get audit policy
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update audit policy - enable
state_enabled = 'Enabled'
retention_days = 30
audit_actions_input = 'DATABASE_LOGOUT_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP'
audit_actions_expected = ['DATABASE_LOGOUT_GROUP',
'DATABASE_ROLE_MEMBER_CHANGE_GROUP']
self.cmd('sql db audit-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint={}'
' --retention-days={} --actions {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, audit_actions_input),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - specify storage account and resource group. use secondary key
storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
self.cmd('sql db audit-policy update -g {} -s {} -n {} --storage-account {}'
.format(resource_group, server, database_name, storage_account_2,
resource_group_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - disable
state_disabled = 'Disabled'
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get threat detection policy
self.cmd('sql db threat-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update threat detection policy - enable
disabled_alerts_input = 'Sql_Injection_Vulnerability Access_Anomaly'
disabled_alerts_expected = 'Sql_Injection_Vulnerability;Access_Anomaly'
email_addresses_input = 'test1@example.com test2@example.com'
email_addresses_expected = 'test1@example.com;test2@example.com'
email_account_admins = 'Enabled'
self.cmd('sql db threat-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint {}'
' --retention-days {} --email-addresses {} --disabled-alerts {}'
' --email-account-admins {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, email_addresses_input,
disabled_alerts_input, email_account_admins),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', key),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# update threat policy - specify storage account and resource group. use secondary key
key_2 = self._get_storage_key(storage_account_2, resource_group_2)
self.cmd('sql db threat-policy update -g {} -s {} -n {} --storage-account {}'
.format(resource_group, server, database_name, storage_account_2,
resource_group_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', key_2),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# update threat policy - disable
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
class SqlServerDwMgmtScenarioTest(ScenarioTest):
# pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_dw_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'DW200'
update_storage = '20TB'
update_storage_bytes = str(20 * 1024 * 1024 * 1024 * 1024)
rg = resource_group
loc_display = 'West US'
# test sql db commands
dw = self.cmd('sql dw create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('edition', 'DataWarehouse'),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Sanity check that the default max size is not equal to the size that we will update to
# later. That way we know that update is actually updating the size.
self.assertNotEqual(dw['maxSizeBytes'], update_storage_bytes,
'Initial max size in bytes is equal to the value we want to update to later,'
' so we will not be able to verify that update max size is actually updating.')
        # DataWarehouse is a little quirky and is considered to be both a database and its
        # own separate type of thing. (Why? Because it has the same REST endpoint as a regular
# database, so it must be a database. However it has only a subset of supported operations,
# so to clarify which operations are supported by dw we group them under `sql dw`.) So the
# dw shows up under both `db list` and `dw list`.
self.cmd('sql db list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 2), # includes dw and master
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[1].resourceGroup', rg)])
self.cmd('sql dw list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].resourceGroup', rg)])
self.cmd('sql db show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# pause/resume
self.cmd('sql dw pause -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show --id {}'
.format(dw['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('status', 'Paused')])
self.cmd('sql dw resume -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('status', 'Online')])
# Update DW storage
self.cmd('sql dw update -g {} -s {} -n {} --max-size {}'
' --set tags.key1=value1'
.format(rg, server, database_name, update_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update DW service objective
self.cmd('sql dw update --id {} --service-objective {}'
.format(dw['id'], update_service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Delete DW
self.cmd('sql dw delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw delete --id {} --yes'
.format(dw['id']),
checks=[NoneCheck()])
class SqlServerDnsAliasMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2")
def test_sql_server_dns_alias_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
alias_name = 'alias1'
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# Create server dns alias
self.cmd('sql server dns-alias create -n {} -s {} -g {}'
.format(alias_name, s1.name, s1.group),
checks=[
JMESPathCheck('name', alias_name),
JMESPathCheck('resourceGroup', s1.group)
])
# Check that alias is created on a right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the server within the same resource group
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[NoneCheck()])
# List the aliases on old server to check if alias is not pointing there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the same server (to check that operation is idempotent)
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[NoneCheck()])
# Check if alias is pointing to the right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
        # Repoint alias to a server in a different resource group
self.cmd('sql server dns-alias set -n {} --original-server {} --original-resource-group {} -s {} -g {}'
.format(alias_name, s2.name, s2.group, s3.name, s3.group),
checks=[NoneCheck()])
# List the aliases on old server to check if alias is not pointing there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Drop alias
self.cmd('sql server dns-alias delete -n {} -s {} -g {}'
.format(alias_name, s3.name, s3.group),
checks=[NoneCheck()])
# Verify that alias got dropped correctly
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 0)
])
class SqlServerDbReplicaMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2")
def test_sql_db_replica_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
database_name = "cliautomationdb01"
service_objective = 'S1'
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# create db in first server
self.cmd('sql db create -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s1.group)])
# create replica in second server with min params
        # partner resource group unspecified because s1.group == s2.group
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
.format(s1.group, s1.name, database_name,
s2.name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s2.group, s2.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# create replica in third server with max params
# --elastic-pool is untested
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
' --partner-resource-group {} --service-objective {}'
.format(s1.group, s1.name, database_name,
s3.name, s3.group, service_objective),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group),
JMESPathCheck('requestedServiceObjectiveName', service_objective)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group)])
# list replica links on s1 - it should link to s2 and s3
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# list replica links on s3 - it should link only to s1
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Secondary'),
JMESPathCheck('[0].partnerRole', 'Primary')])
# Failover to s3.
self.cmd('sql db replica set-primary -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# list replica links on s3 - it should link to s1 and s2
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# Stop replication from s3 to s2 twice. Second time should be no-op.
for _ in range(2):
# Delete link
self.cmd('sql db replica delete-link -g {} -s {} -n {} --partner-resource-group {}'
' --partner-server {} --yes'
.format(s3.group, s3.name, database_name, s2.group, s2.name),
checks=[NoneCheck()])
# Verify link was deleted. s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Failover to s3 again (should be no-op, it's already primary)
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Force failover back to s1
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s1.group, s1.name, database_name),
checks=[NoneCheck()])
class SqlElasticPoolsMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolsMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "cliautomationpool01"
def verify_activities(self, activities, resource_group, server):
        # Guard against wrong argument types: `activities` must be a list of dict entries.
        if not isinstance(activities, list):
            raise AssertionError("Actual value '{}' expected to be a list."
                                 .format(activities))
        for activity in activities:
            if not isinstance(activity, dict):
                raise AssertionError("Actual value '{}' expected to be a dict."
                                     .format(activity))
if activity['resourceGroup'] != resource_group:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['resourceGroup'], resource_group))
elif activity['serverName'] != server:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['serverName'], server))
elif activity['currentElasticPoolName'] != self.pool_name:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['currentElasticPoolName'], self.pool_name))
return True
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_elastic_pools_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb02"
pool_name2 = "cliautomationpool02"
edition = 'Standard'
dtu = 1200
db_dtu_min = 10
db_dtu_max = 50
storage = '1200GB'
storage_mb = 1228800
updated_dtu = 50
updated_db_dtu_min = 10
updated_db_dtu_max = 50
updated_storage = '50GB'
updated_storage_mb = 51200
db_service_objective = 'S1'
rg = resource_group
loc_display = 'East US 2'
# test sql elastic-pool commands
elastic_pool_1 = self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} '
'--storage {}'
.format(rg, server, self.pool_name, dtu,
edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb)]).get_output_in_json()
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('zoneRedundant', False)])
self.cmd('sql elastic-pool show --id {}'
.format(elastic_pool_1['id']),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb)])
self.cmd('sql elastic-pool list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', self.pool_name),
JMESPathCheck('[0].state', 'Ready'),
JMESPathCheck('[0].databaseDtuMin', db_dtu_min),
JMESPathCheck('[0].databaseDtuMax', db_dtu_max),
JMESPathCheck('[0].edition', edition),
JMESPathCheck('[0].storageMb', storage_mb)])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --storage {} --set tags.key1=value1'
.format(rg, server, self.pool_name,
updated_dtu, updated_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', updated_dtu),
JMESPathCheck('edition', edition),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('storageMb', updated_storage_mb),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update --id {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(elastic_pool_1['id'], dtu,
updated_db_dtu_min, updated_db_dtu_max,
storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('databaseDtuMin', updated_db_dtu_min),
JMESPathCheck('databaseDtuMax', updated_db_dtu_max),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--remove tags.key1'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('tags', {})])
# create a second pool with minimal params
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
.format(rg, server, pool_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name2),
JMESPathCheck('location', loc_display),
JMESPathCheck('state', 'Ready')])
self.cmd('sql elastic-pool list -g {} -s {}'.format(rg, server),
checks=[JMESPathCheck('length(@)', 2)])
# Create a database directly in an Azure sql elastic pool
self.cmd('sql db create -g {} --server {} --name {} '
'--elastic-pool {}'
.format(rg, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', self.pool_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# Move database to second pool. Specify service objective just for fun
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(rg, server, database_name, pool_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', pool_name2),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# Remove database from pool
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name, db_service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('requestedServiceObjectiveName', db_service_objective),
JMESPathCheck('status', 'Online')])
# Move database back into pool
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(rg, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', self.pool_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# List databases in a pool
self.cmd('sql elastic-pool list-dbs -g {} -s {} -n {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# List databases in a pool - alternative command
self.cmd('sql db list -g {} -s {} --elastic-pool {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# self.cmd('sql elastic-pool db show-activity -g {} --server {} --elastic-pool {}'
# .format(rg, server, pool_name),
# checks=[
# JMESPathCheck('length(@)', 1),
# JMESPathCheck('[0].resourceGroup', rg),
# JMESPathCheck('[0].serverName', server),
# JMESPathCheck('[0].currentElasticPoolName', pool_name)])
# activities = self.cmd('sql elastic-pools db show-activity -g {} '
# '--server-name {} --elastic-pool-name {}'
# .format(rg, server, pool_name),
# checks=[JMESPathCheck('type(@)', 'array')])
# self.verify_activities(activities, resource_group)
# delete sql server database
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name),
checks=[NoneCheck()])
# delete sql elastic pool
self.cmd('sql elastic-pool delete -g {} --server {} --name {}'
.format(rg, server, self.pool_name),
checks=[NoneCheck()])
# delete sql elastic pool by id
self.cmd('sql elastic-pool delete --id {}'
.format(elastic_pool_1['id']),
checks=[NoneCheck()])
class SqlElasticPoolOperationMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolOperationMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "operationtestep1"
@ResourceGroupPreparer(location='southeastasia')
@SqlServerPreparer(location='southeastasia')
def test_sql_elastic_pool_operation_mgmt(self, resource_group, resource_group_location, server):
edition = 'Premium'
dtu = 125
db_dtu_min = 0
db_dtu_max = 50
storage = '50GB'
storage_mb = 51200
update_dtu = 250
update_db_dtu_min = 50
update_db_dtu_max = 250
# Create elastic pool
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(resource_group, server, self.pool_name, dtu, edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('edition', edition),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('storageMb', storage_mb)])
# Update elastic pool
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {}'
.format(resource_group, server, self.pool_name, update_dtu, update_db_dtu_min, update_db_dtu_max))
# List operations on the elastic pool
ops = list(self.cmd('sql elastic-pool op list -g {} --server {} --elastic-pool {}'
.format(resource_group, server, self.pool_name)).get_output_in_json())
# Cancel operation
try:
self.cmd('sql elastic-pool op cancel -g {} --server {} --elastic-pool {} --name {}'
.format(resource_group, server, self.pool_name, ops[0]['name']))
except Exception as e:
expectedmessage = "Cannot cancel management operation {} in current state.".format(ops[0]['name'])
if expectedmessage in str(e):
pass
class SqlServerCapabilityScenarioTest(ScenarioTest):
@AllowLargeResponse()
def test_sql_capabilities(self):
location = 'westus'
# New capabilities are added quite frequently and the state of each capability depends
# on your subscription. So it's not a good idea to make strict checks against exactly
# which capabilities are returned. The idea is to just check the overall structure.
db_max_size_length_jmespath = 'length([].supportedServiceLevelObjectives[].supportedMaxSizes[])'
# Get all db capabilities
self.cmd('sql db list-editions -l {}'.format(location),
checks=[
# At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheckExists("[?name == 'Premium']"),
# At least s0 and p1 service objectives exist
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'P1']"),
# Max size data is omitted
JMESPathCheck(db_max_size_length_jmespath, 0)])
# Get all db capabilities with size data
self.cmd('sql db list-editions -l {} --show-details max-size'.format(location),
checks=[
# Max size data is included
JMESPathCheckGreaterThan(db_max_size_length_jmespath, 0)])
# Search for db edition - note that it's case insensitive
self.cmd('sql db list-editions -l {} --edition standard'.format(location),
checks=[
# Standard edition exists, other editions don't
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for db service objective - note that it's case insensitive
# Checked items:
# * Standard edition exists, other editions don't
# * S0 service objective exists, others don't exist
self.cmd('sql db list-editions -l {} --edition standard --service-objective s0'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheck("length([].supportedServiceLevelObjectives[] | [?name != 'S0'])", 0)])
pool_max_size_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedMaxSizes[])'
pool_db_max_dtu_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxDtus[])'
pool_db_min_dtu_length_jmespath = ('length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxDtus[]'
'.supportedPerDatabaseMinDtus[])')
pool_db_max_size_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxSizes[])'
# Get all elastic pool capabilities
self.cmd('sql elastic-pool list-editions -l {}'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Premium']"),
JMESPathCheck(pool_max_size_length_jmespath, 0), # Optional details are omitted
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Search for elastic pool edition - note that it's case insensitive
self.cmd('sql elastic-pool list-editions -l {} --edition standard'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # Standard edition exists, other editions don't
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for dtu limit
self.cmd('sql elastic-pool list-editions -l {} --dtu 100'.format(location),
checks=[
# All results have 100 dtu
JMESPathCheckGreaterThan('length([].supportedElasticPoolDtus[?limit == `100`][])', 0),
JMESPathCheck('length([].supportedElasticPoolDtus[?limit != `100`][])', 0)])
# Get all db capabilities with pool max size
self.cmd('sql elastic-pool list-editions -l {} --show-details max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max size
self.cmd('sql elastic-pool list-editions -l {} --show-details db-max-size'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max dtu
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-max-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db min dtu (which is nested under per db max dtu)
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with everything
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu db-max-dtu '
'db-max-size max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
class SqlServerImportExportMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
@StorageAccountPreparer()
def test_sql_db_import_export_mgmt(self, resource_group, resource_group_location, server, storage_account):
location_long_name = 'West US'
admin_login = 'admin123'
admin_password = 'SecretPassword123'
db_name = 'cliautomationdb01'
db_name2 = 'cliautomationdb02'
db_name3 = 'cliautomationdb03'
blob = 'testbacpac.bacpac'
blob2 = 'testbacpac2.bacpac'
container = 'bacpacs'
firewall_rule_1 = 'allowAllIps'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '0.0.0.0'
# create server firewall rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
start_ip_address_1, end_ip_address_1),
checks=[JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# create dbs
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name2),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name2),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name3),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name3),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
# get storage account endpoint
storage_endpoint = self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
bacpacUri = '{}{}/{}'.format(storage_endpoint, container, blob)
bacpacUri2 = '{}{}/{}'.format(storage_endpoint, container, blob2)
# get storage account key
storageKey = self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
# Set Expiry
expiryString = '9999-12-25T00:00:00Z'
# Get sas key
sasKey = self.cmd('storage blob generate-sas --account-name {} -c {} -n {} --permissions rw --expiry {}'.format(
storage_account, container, blob2, expiryString)).get_output_in_json()
# create storage account blob container
self.cmd('storage container create -n {} --account-name {} --account-key {} '
.format(container, storage_account, storageKey),
checks=[JMESPathCheck('created', True)])
# export database to blob container using both keys
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'Export'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'Export'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to second database using Storage Key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name2, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name2),
JMESPathCheck('name', 'import'),
JMESPathCheck('requestType', 'Import'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to third database using SAS key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name3, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name3),
JMESPathCheck('name', 'import'),
JMESPathCheck('requestType', 'Import'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
class SqlServerConnectionStringScenarioTest(ScenarioTest):
def test_sql_db_conn_str(self):
# ADO.NET, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;')
# ADO.NET, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Password"')
# ADO.NET, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Integrated"')
# SqlCmd, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -N -l 30')
# SqlCmd, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -G -N -l 30')
# SqlCmd, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -G -N -l 30')
# JDBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>@myserver;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30')
# JDBC, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryPassword')
# JDBC, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryIntegrated')
# PHP PDO, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo').get_output_in_json()
self.assertEqual(conn_str, '$conn = new PDO("sqlsrv:server = tcp:myserver.database.windows.net,1433; Database = mydb; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");')
# PHP PDO, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADPassword', expect_failure=True)
# PHP PDO, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADIntegrated', expect_failure=True)
# PHP, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php').get_output_in_json()
self.assertEqual(conn_str, '$connectionOptions = array("UID"=>"<username>@myserver", "PWD"=>"<password>", "Database"=>mydb, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:myserver.database.windows.net,1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);')
# PHP, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADPassword', expect_failure=True)
# PHP, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADIntegrated', expect_failure=True)
# ODBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;')
# ODBC, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryPassword')
# ODBC, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryIntegrated')
class SqlTransparentDataEncryptionScenarioTest(ScenarioTest):
def wait_for_encryption_scan(self, rg, sn, db_name):
active_scan = True
retry_attempts = 5
while active_scan:
tdeactivity = self.cmd('sql db tde list-activity -g {} -s {} -d {}'
.format(rg, sn, db_name)).get_output_in_json()
# if tdeactivity is an empty array, there is no ongoing encryption scan
active_scan = (len(tdeactivity) > 0)
time.sleep(10)
retry_attempts -= 1
if retry_attempts <= 0:
raise CliTestError("Encryption scan still ongoing: {}.".format(tdeactivity))
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_tde(self, resource_group, server):
rg = resource_group
sn = server
db_name = self.create_random_name("sqltdedb", 20)
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, sn, db_name))
# validate encryption is on by default
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# disable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Disabled'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Disabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# validate encryption is disabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Disabled')])
# enable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Enabled'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# validate encryption is enabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_tdebyok(self, resource_group, server):
resource_prefix = 'sqltdebyok'
# add identity to server
server_resp = self.cmd('sql server update -g {} -n {} -i'
.format(resource_group, server)).get_output_in_json()
server_identity = server_resp['identity']['principalId']
# create db
db_name = self.create_random_name(resource_prefix, 20)
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name))
# create vault and acl server identity
vault_name = self.create_random_name(resource_prefix, 24)
self.cmd('keyvault create -g {} -n {} --enable-soft-delete true'
.format(resource_group, vault_name))
self.cmd('keyvault set-policy -g {} -n {} --object-id {} --key-permissions wrapKey unwrapKey get list'
.format(resource_group, vault_name, server_identity))
# create key
key_name = self.create_random_name(resource_prefix, 32)
key_resp = self.cmd('keyvault key create -n {} -p software --vault-name {}'
.format(key_name, vault_name)).get_output_in_json()
kid = key_resp['key']['kid']
# add server key
server_key_resp = self.cmd('sql server key create -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault')])
server_key_name = server_key_resp.get_output_in_json()['name']
# validate show key
self.cmd('sql server key show -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('name', server_key_name)])
# validate list key (should return 2 items)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 2)])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# update encryption protector to akv key
self.cmd('sql server tde-key set -g {} -s {} -t AzureKeyVault -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# validate encryption protector is akv via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# update encryption protector to service managed
self.cmd('sql server tde-key set -g {} -s {} -t ServiceManaged'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# delete server key
self.cmd('sql server key delete -g {} -s {} -k {}'
.format(resource_group, server, kid))
# wait for key to be deleted
time.sleep(10)
# validate deleted server key via list (should return 1 item)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 1)])
class SqlServerVnetMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_vnet_mgmt(self, resource_group, resource_group_location, server):
rg = resource_group
vnet_rule_1 = 'rule1'
vnet_rule_2 = 'rule2'
        # Create vnets - vnet1 and vnet2
vnetName1 = 'vnet1'
vnetName2 = 'vnet2'
subnetName = 'subnet1'
addressPrefix = '10.0.1.0/24'
endpoint = 'Microsoft.Sql'
# Vnet 1 without service endpoints to test ignore-missing-vnet-service-endpoint feature
self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName1))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {}'
.format(rg, vnetName1, subnetName, addressPrefix))
vnet1 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName1, rg)).get_output_in_json()
vnet_id_1 = vnet1['id']
# Vnet 2
self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName2))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {} --service-endpoints {}'
.format(rg, vnetName2, subnetName, addressPrefix, endpoint),
checks=JMESPathCheck('serviceEndpoints[0].service', 'Microsoft.Sql'))
vnet2 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName2, rg)).get_output_in_json()
vnet_id_2 = vnet2['id']
# test sql server vnet-rule create using subnet name and vnet name and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {} --vnet-name {} -i'
.format(vnet_rule_1, rg, server, subnetName, vnetName1))
# test sql server vnet-rule show rule 1
self.cmd('sql server vnet-rule show --name {} -g {} --server {}'
.format(vnet_rule_1, rg, server),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule create using subnet id
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_2, rg, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 1 with vnet 2
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_1, rg, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 2 with vnet 1 and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {} -i'
.format(vnet_rule_2, rg, server, vnet_id_1),
checks=[JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_1),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule list
self.cmd('sql server vnet-rule list -g {} --server {}'.format(rg, server),
checks=[JMESPathCheck('length(@)', 2)])
# test sql server vnet-rule delete rule 1
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_1, rg, server),
checks=NoneCheck())
# test sql server vnet-rule delete rule 2
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_2, rg, server),
checks=NoneCheck())
class SqlSubscriptionUsagesScenarioTest(ScenarioTest):
def test_sql_subscription_usages(self):
self.cmd('sql list-usages -l westus',
checks=[JMESPathCheckGreaterThan('length(@)', 2)])
self.cmd('sql show-usage -l westus -u SubscriptionFreeDatabaseDaysLeft',
checks=[
JMESPathCheck('name', 'SubscriptionFreeDatabaseDaysLeft'),
JMESPathCheckGreaterThan('limit', 0)])
class SqlZoneResilienceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_zone_resilient_database(self, resource_group, resource_group_location, server):
database_name = "createUnzonedUpdateToZonedDb"
database_name_2 = "createZonedUpdateToUnzonedDb"
database_name_3 = "updateNoParamForUnzonedDb"
database_name_4 = "updateNoParamForZonedDb"
rg = resource_group
loc_display = "East US 2"
# Test creating database with zone resilience set to false. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant {}'
.format(rg, server, database_name, "Premium", False),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with zone resilience set to true. Expect zone resilience to update to true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --zone-redundant'
.format(rg, server, database_name, 'P1'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', True)])
# Test creating database with zone resilience set to true. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --z'
.format(rg, server, database_name_2, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --z {}'
.format(rg, server, database_name_2, 'P1', False),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', False)])
# Create database with no zone resilience set. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(rg, server, database_name_3, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with no zone resilience set. Expect zone resilience to stay false.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name_3, 'P2'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', False)])
# Create database with zone resilience set. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, database_name_4, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_4),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with no zone resilience set. Expect zone resilience to stay true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name_4, 'P2'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_4),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_zone_resilient_pool(self, resource_group, resource_group_location, server):
pool_name = "createUnzonedUpdateToZonedPool"
pool_name_2 = "createZonedUpdateToUnzonedPool"
pool_name_3 = "updateNoParamForUnzonedPool"
pool_name_4 = "updateNoParamForZonedPool"
rg = resource_group
# Test creating pool with zone resilience set to false. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --z {}'
.format(rg, server, pool_name, "Premium", False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with zone resilience set to true. Expect zone resilience to update to true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --z'
.format(rg, server, pool_name))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name),
JMESPathCheck('zoneRedundant', True)])
# Test creating pool with zone resilience set to true. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, pool_name_2, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --zone-redundant {}'
.format(rg, server, pool_name_2, False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('zoneRedundant', False)])
# Create pool with no zone resilience set. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(rg, server, pool_name_3, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with no zone resilience set. Expect zone resilience to stay false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
.format(rg, server, pool_name_3, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', False)])
# Create pool with zone resilience set. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, pool_name_4, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with no zone resilience set. Expect zone resilience to stay true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
                 .format(rg, server, pool_name_4, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', True)])
| [
[
[
353,
357
],
[
89923,
89927
],
[
95944,
95948
]
],
[
[
401,
419
],
[
69771,
69789
]
],
[
[
453,
461
]
],
[
[
497,
504
],
[
1979,
1986
],
[
2272,
2279
]
],
[
[
546,
558
],
[
2713,
2725
],
[
90028,
90040
]
],
[
[
595,
608
],
[
3843,
3856
],
[
3913,
3926
],
[
3995,
4008
],
[
4070,
4083
],
[
4244,
4257
],
[
4512,
4525
],
[
4571,
4584
],
[
4642,
4655
],
[
4706,
4719
],
[
5054,
5067
],
[
5113,
5126
],
[
5184,
5197
],
[
5248,
5261
],
[
5621,
5634
],
[
5680,
5693
],
[
5751,
5764
],
[
5815,
5828
],
[
5999,
6012
],
[
6380,
6393
],
[
6439,
6452
],
[
6510,
6523
],
[
6687,
6700
],
[
6746,
6759
],
[
6817,
6830
],
[
7000,
7013
],
[
8471,
8484
],
[
8544,
8557
],
[
8613,
8626
],
[
8699,
8712
],
[
9016,
9029
],
[
9077,
9090
],
[
9134,
9147
],
[
9208,
9221
],
[
9457,
9470
],
[
9518,
9531
],
[
9575,
9588
],
[
9649,
9662
],
[
10074,
10087
],
[
10135,
10148
],
[
10192,
10205
],
[
10266,
10279
],
[
10581,
10594
],
[
10642,
10655
],
[
10699,
10712
],
[
10773,
10786
],
[
11088,
11101
],
[
11149,
11162
],
[
11206,
11219
],
[
11280,
11293
],
[
11697,
11710
],
[
11758,
11771
],
[
11815,
11828
],
[
11889,
11902
],
[
12098,
12111
],
[
13140,
13153
],
[
14224,
14237
],
[
14287,
14300
],
[
14352,
14365
],
[
14419,
14432
],
[
14486,
14499
],
[
14548,
14561
],
[
14744,
14757
],
[
14796,
14809
],
[
14884,
14897
],
[
14945,
14958
],
[
15131,
15144
],
[
15378,
15391
],
[
15437,
15450
],
[
15619,
15632
],
[
15678,
15691
],
[
16052,
16065
],
[
16109,
16122
],
[
16168,
16181
],
[
16263,
16276
],
[
16337,
16350
],
[
16548,
16561
],
[
16605,
16618
],
[
16664,
16677
],
[
16759,
16772
],
[
16833,
16846
],
[
17117,
17130
],
[
17180,
17193
],
[
17446,
17459
],
[
17509,
17522
],
[
18439,
18452
],
[
18508,
18521
],
[
18567,
18580
],
[
19067,
19080
],
[
19123,
19136
],
[
19200,
19213
],
[
19843,
19856
],
[
20120,
20133
],
[
20761,
20774
],
[
20816,
20829
],
[
20960,
20973
],
[
21138,
21151
],
[
21194,
21207
],
[
21432,
21445
],
[
22375,
22388
],
[
22432,
22445
],
[
22491,
22504
],
[
22552,
22565
],
[
22613,
22626
],
[
22921,
22934
],
[
22978,
22991
],
[
23469,
23482
],
[
23540,
23553
],
[
23604,
23617
],
[
24192,
24205
],
[
24250,
24263
],
[
24310,
24323
],
[
25933,
25946
],
[
25990,
26003
],
[
26068,
26081
],
[
26199,
26212
],
[
26579,
26592
],
[
26636,
26649
],
[
26708,
26721
],
[
26777,
26790
],
[
28903,
28916
],
[
28960,
28973
],
[
29028,
29041
],
[
29159,
29172
],
[
29582,
29595
],
[
29639,
29652
],
[
29707,
29720
],
[
31239,
31252
],
[
31308,
31321
],
[
31367,
31380
],
[
31583,
31596
],
[
32352,
32365
],
[
32421,
32434
],
[
32481,
32494
],
[
32577,
32590
],
[
32650,
32663
],
[
32719,
32732
],
[
33231,
33244
],
[
33300,
33313
],
[
33360,
33373
],
[
33456,
33469
],
[
33531,
33544
],
[
33600,
33613
],
[
33945,
33958
],
[
34014,
34027
],
[
34075,
34088
],
[
34171,
34184
],
[
34246,
34259
],
[
34315,
34328
],
[
34572,
34585
],
[
35561,
35574
],
[
35630,
35643
],
[
35690,
35703
],
[
35758,
35771
],
[
35831,
35844
],
[
35900,
35913
],
[
35980,
35993
],
[
36060,
36073
],
[
36551,
36564
],
[
36620,
36633
],
[
36680,
36693
],
[
36750,
36763
],
[
36825,
36838
],
[
36894,
36907
],
[
36974,
36987
],
[
37054,
37067
],
[
37359,
37372
],
[
37428,
37441
],
[
37489,
37502
],
[
37585,
37598
],
[
37660,
37673
],
[
37729,
37742
],
[
38492,
38505
],
[
38554,
38567
],
[
38618,
38631
],
[
38684,
38697
],
[
38753,
38766
],
[
39845,
39858
],
[
39923,
39936
],
[
40011,
40024
],
[
40072,
40085
],
[
40248,
40261
],
[
40300,
40313
],
[
40363,
40376
],
[
40564,
40577
],
[
40623,
40636
],
[
40959,
40972
],
[
41018,
41031
],
[
41075,
41088
],
[
41425,
41438
],
[
41484,
41497
],
[
41541,
41554
],
[
41830,
41843
],
[
41887,
41900
],
[
41946,
41959
],
[
42020,
42033
],
[
42271,
42284
],
[
42328,
42341
],
[
42387,
42400
],
[
42482,
42495
],
[
42556,
42569
],
[
44832,
44845
],
[
44888,
44901
],
[
45134,
45147
],
[
45190,
45203
],
[
45457,
45470
],
[
45509,
45522
],
[
46056,
46069
],
[
46307,
46320
],
[
46359,
46372
],
[
46896,
46909
],
[
46948,
46961
],
[
47534,
47547
],
[
47785,
47798
],
[
47837,
47850
],
[
48278,
48291
],
[
50286,
50299
],
[
50342,
50355
],
[
50579,
50592
],
[
50638,
50651
],
[
51033,
51046
],
[
51092,
51105
],
[
51359,
51372
],
[
51418,
51431
],
[
51876,
51889
],
[
51935,
51948
],
[
51998,
52011
],
[
52290,
52303
],
[
52349,
52362
],
[
52606,
52619
],
[
52872,
52885
],
[
52924,
52937
],
[
52985,
52998
],
[
53435,
53448
],
[
54128,
54141
],
[
54184,
54197
],
[
54247,
54260
],
[
54761,
54774
],
[
54813,
54826
],
[
54872,
54885
],
[
57732,
57745
],
[
57806,
57819
],
[
57883,
57896
],
[
57961,
57974
],
[
58032,
58045
],
[
58097,
58110
],
[
58180,
58193
],
[
58263,
58276
],
[
58336,
58349
],
[
58569,
58582
],
[
58626,
58639
],
[
58686,
58699
],
[
58740,
58753
],
[
58806,
58819
],
[
58872,
58885
],
[
58928,
58941
],
[
58989,
59002
],
[
59174,
59187
],
[
59231,
59244
],
[
59291,
59304
],
[
59345,
59358
],
[
59411,
59424
],
[
59477,
59490
],
[
59533,
59546
],
[
59719,
59732
],
[
59780,
59793
],
[
59844,
59857
],
[
59902,
59915
],
[
59972,
59985
],
[
60042,
60055
],
[
60102,
60115
],
[
60440,
60453
],
[
60497,
60510
],
[
60557,
60570
],
[
60611,
60624
],
[
60667,
60680
],
[
60723,
60736
],
[
60789,
60802
],
[
60855,
60868
],
[
60924,
60937
],
[
61288,
61301
],
[
61345,
61358
],
[
61405,
61418
],
[
61459,
61472
],
[
61507,
61520
],
[
61581,
61594
],
[
61655,
61668
],
[
61716,
61729
],
[
61967,
61980
],
[
62024,
62037
],
[
62084,
62097
],
[
62138,
62151
],
[
62387,
62400
],
[
62444,
62457
],
[
62500,
62513
],
[
62561,
62574
],
[
62694,
62707
],
[
63008,
63021
],
[
63065,
63078
],
[
63124,
63137
],
[
63195,
63208
],
[
63279,
63292
],
[
63628,
63641
],
[
63685,
63698
],
[
63744,
63757
],
[
63811,
63824
],
[
63895,
63908
],
[
64164,
64177
],
[
64221,
64234
],
[
64280,
64293
],
[
64341,
64354
],
[
64432,
64445
],
[
64745,
64758
],
[
64802,
64815
],
[
64861,
64874
],
[
64932,
64945
],
[
65016,
65029
],
[
65252,
65265
],
[
65304,
65317
],
[
65365,
65378
],
[
65428,
65441
],
[
65703,
65716
],
[
65755,
65768
],
[
65816,
65829
],
[
65879,
65892
],
[
68307,
68320
],
[
68376,
68389
],
[
68436,
68449
],
[
68492,
68505
],
[
68546,
68559
],
[
68594,
68607
],
[
68660,
68673
],
[
68726,
68739
],
[
70905,
70918
],
[
71579,
71592
],
[
72057,
72070
],
[
72237,
72250
],
[
73171,
73184
],
[
73277,
73290
],
[
73353,
73366
],
[
73429,
73442
],
[
73797,
73810
],
[
74168,
74181
],
[
74507,
74520
],
[
74583,
74596
],
[
74659,
74672
],
[
74894,
74907
],
[
74968,
74981
],
[
75044,
75057
],
[
75383,
75396
],
[
75544,
75557
],
[
75620,
75633
],
[
75911,
75924
],
[
76159,
76172
],
[
77827,
77840
],
[
77892,
77905
],
[
77965,
77978
],
[
78043,
78056
],
[
78260,
78273
],
[
78333,
78346
],
[
78390,
78403
],
[
78462,
78475
],
[
78527,
78540
],
[
78710,
78723
],
[
78783,
78796
],
[
78841,
78854
],
[
78913,
78926
],
[
78978,
78991
],
[
79161,
79174
],
[
79234,
79247
],
[
79292,
79305
],
[
79364,
79377
],
[
79429,
79442
],
[
80627,
80640
],
[
81030,
81043
],
[
81092,
81105
],
[
81157,
81170
],
[
81222,
81235
],
[
81295,
81308
],
[
81357,
81370
],
[
81702,
81715
],
[
81765,
81778
],
[
81830,
81843
],
[
81895,
81908
],
[
81968,
81981
],
[
82030,
82043
],
[
82441,
82454
],
[
82503,
82516
],
[
82569,
82582
],
[
82627,
82640
],
[
82692,
82705
],
[
82765,
82778
],
[
82827,
82840
],
[
83229,
83242
],
[
83292,
83305
],
[
83358,
83371
],
[
83416,
83429
],
[
83481,
83494
],
[
83554,
83567
],
[
83616,
83629
],
[
90612,
90625
],
[
90873,
90886
],
[
91131,
91144
],
[
91335,
91348
],
[
91591,
91604
],
[
93201,
93214
],
[
93267,
93280
],
[
93576,
93589
],
[
93624,
93637
],
[
93694,
93707
],
[
93914,
93927
],
[
94167,
94180
],
[
94238,
94251
],
[
94518,
94531
],
[
94588,
94601
],
[
94658,
94671
],
[
94895,
94908
],
[
94965,
94978
],
[
95035,
95048
],
[
95290,
95303
],
[
95361,
95374
],
[
95633,
95646
],
[
95704,
95717
],
[
96156,
96169
],
[
97506,
97519
],
[
98284,
98297
],
[
98341,
98354
],
[
98398,
98411
],
[
98712,
98725
],
[
98769,
98782
],
[
98826,
98839
],
[
98899,
98912
],
[
99217,
99230
],
[
99274,
99287
],
[
99331,
99344
],
[
99404,
99417
],
[
99749,
99762
],
[
99810,
99823
],
[
99871,
99884
],
[
99948,
99961
],
[
100156,
100169
],
[
100924,
100937
],
[
101879,
101892
],
[
101936,
101949
],
[
101995,
102008
],
[
102056,
102069
],
[
102117,
102130
],
[
102175,
102188
],
[
102540,
102553
],
[
102597,
102610
],
[
102656,
102669
],
[
102717,
102730
],
[
102773,
102786
],
[
102848,
102861
],
[
103187,
103200
],
[
103244,
103257
],
[
103305,
103318
],
[
103366,
103379
],
[
103427,
103440
],
[
103485,
103498
],
[
103847,
103860
],
[
103904,
103917
],
[
103965,
103978
],
[
104026,
104039
],
[
104082,
104095
],
[
104157,
104170
],
[
104474,
104487
],
[
104531,
104544
],
[
104592,
104605
],
[
104653,
104666
],
[
104714,
104727
],
[
104772,
104785
],
[
105113,
105126
],
[
105170,
105183
],
[
105231,
105244
],
[
105292,
105305
],
[
105348,
105361
],
[
105423,
105436
],
[
105761,
105774
],
[
105818,
105831
],
[
105879,
105892
],
[
105940,
105953
],
[
106001,
106014
],
[
106059,
106072
],
[
106396,
106409
],
[
106453,
106466
],
[
106514,
106527
],
[
106575,
106588
],
[
106631,
106644
],
[
106706,
106719
],
[
107587,
107600
],
[
107644,
107657
],
[
107699,
107712
],
[
107753,
107766
],
[
107811,
107824
],
[
108254,
108267
],
[
108311,
108324
],
[
108366,
108379
],
[
108837,
108850
],
[
108894,
108907
],
[
108951,
108964
],
[
109005,
109018
],
[
109063,
109076
],
[
109532,
109545
],
[
109589,
109602
],
[
109646,
109659
],
[
110082,
110095
],
[
110139,
110152
],
[
110196,
110209
],
[
110250,
110263
],
[
110308,
110321
],
[
110756,
110769
],
[
110813,
110826
],
[
110870,
110883
],
[
110918,
110931
],
[
111375,
111388
],
[
111432,
111445
],
[
111489,
111502
],
[
111543,
111556
],
[
111601,
111614
],
[
112051,
112064
],
[
112108,
112121
],
[
112165,
112178
],
[
112213,
112226
]
],
[
[
614,
633
],
[
70457,
70476
],
[
70524,
70543
],
[
70657,
70676
],
[
70757,
70776
],
[
71512,
71531
],
[
71986,
72005
],
[
72133,
72152
],
[
72983,
73002
],
[
73101,
73120
],
[
73677,
73696
]
],
[
[
639,
663
],
[
6148,
6172
],
[
71187,
71211
],
[
74060,
74084
],
[
74422,
74446
],
[
75120,
75144
],
[
75457,
75481
],
[
75985,
76009
],
[
76072,
76096
],
[
76459,
76483
],
[
76544,
76568
],
[
76631,
76655
],
[
76718,
76742
],
[
100752,
100776
],
[
101004,
101028
]
],
[
[
669,
678
],
[
7185,
7194
],
[
7323,
7332
],
[
7455,
7464
],
[
13014,
13023
],
[
13314,
13323
],
[
13440,
13449
],
[
17757,
17766
],
[
17904,
17913
],
[
40822,
40831
],
[
41251,
41260
],
[
42761,
42770
],
[
42884,
42893
],
[
45811,
45820
],
[
46675,
46684
],
[
47289,
47298
],
[
48064,
48073
],
[
53207,
53216
],
[
53856,
53865
],
[
54534,
54543
],
[
55124,
55133
],
[
66901,
66910
],
[
67100,
67109
],
[
67279,
67288
],
[
100371,
100380
],
[
100567,
100576
]
],
[
[
684,
705
],
[
2912,
2933
],
[
2974,
2995
],
[
7531,
7552
],
[
13510,
13531
],
[
17983,
18004
],
[
19547,
19568
],
[
20309,
20330
],
[
21519,
21540
],
[
21581,
21602
],
[
24740,
24761
],
[
26879,
26900
],
[
30312,
30333
],
[
30341,
30362
],
[
37901,
37922
],
[
43055,
43076
],
[
43201,
43222
],
[
48485,
48506
],
[
48631,
48652
],
[
56545,
56566
],
[
67526,
67547
],
[
76848,
76869
],
[
90105,
90126
],
[
91634,
91655
],
[
96246,
96267
],
[
101101,
101122
],
[
106751,
106772
],
[
2742,
2763
]
],
[
[
711,
723
],
[
2892,
2904
],
[
7511,
7523
],
[
13490,
13502
],
[
17963,
17975
],
[
19527,
19539
],
[
21499,
21511
],
[
24720,
24732
],
[
26859,
26871
],
[
29783,
29795
],
[
37830,
37842
],
[
42940,
42952
],
[
48371,
48383
],
[
55178,
55190
],
[
67341,
67353
],
[
69751,
69763
],
[
76828,
76840
],
[
83701,
83713
],
[
89462,
89474
],
[
96226,
96238
],
[
100622,
100634
],
[
101081,
101093
]
],
[
[
729,
751
],
[
30428,
30450
],
[
30458,
30480
],
[
76902,
76924
]
],
[
[
757,
764
],
[
1987,
1994
],
[
2280,
2287
]
],
[
[
770,
786
],
[
20214,
20230
]
],
[
[
834,
850
],
[
1142,
1158
]
],
[
[
856,
875
],
[
1160,
1179
]
],
[
[
932,
956
]
],
[
[
962,
972
]
],
[
[
995,
1003
],
[
23739,
23747
],
[
23862,
23870
],
[
24593,
24601
],
[
25726,
25734
],
[
26433,
26441
],
[
27832,
27840
],
[
28289,
28297
]
],
[
[
1005,
1014
],
[
27865,
27874
]
],
[
[
1032,
1037
],
[
24645,
24650
],
[
28392,
28397
]
],
[
[
1051,
1069
],
[
1217,
1235
],
[
3176,
3194
],
[
3268,
3286
]
],
[
[
1088,
1110
],
[
1502,
1524
],
[
3196,
3218
],
[
3288,
3310
]
],
[
[
1124,
1141
],
[
7560,
7577
],
[
13557,
13574
],
[
18036,
18053
],
[
19576,
19593
],
[
20338,
20355
],
[
21643,
21660
],
[
21742,
21759
],
[
24769,
24786
],
[
26908,
26925
],
[
30403,
30420
],
[
37930,
37947
],
[
43347,
43364
],
[
43475,
43492
],
[
43603,
43620
],
[
48777,
48794
],
[
48905,
48922
],
[
49033,
49050
],
[
56592,
56609
],
[
67579,
67596
],
[
76877,
76894
],
[
90134,
90151
],
[
91663,
91680
],
[
96275,
96292
],
[
101148,
101165
],
[
106798,
106815
],
[
1455,
1472
]
],
[
[
2866,
2891
]
],
[
[
7477,
7510
]
],
[
[
13462,
13489
]
],
[
[
17926,
17962
]
],
[
[
19489,
19526
]
],
[
[
20168,
20213
]
],
[
[
21471,
21498
]
],
[
[
23696,
23722
],
[
24552,
24578
],
[
29390,
29416
]
],
[
[
23820,
23837
],
[
28694,
28711
],
[
29467,
29484
]
],
[
[
23944,
23976
],
[
25443,
25475
],
[
27387,
27419
]
],
[
[
24689,
24719
]
],
[
[
26821,
26858
]
],
[
[
29751,
29782
]
],
[
[
37802,
37829
]
],
[
[
42906,
42939
]
],
[
[
48336,
48370
]
],
[
[
55146,
55177
],
[
55244,
55275
]
],
[
[
67301,
67340
],
[
67407,
67446
]
],
[
[
69719,
69750
]
],
[
[
76790,
76827
]
],
[
[
83663,
83700
]
],
[
[
89421,
89461
]
],
[
[
96196,
96225
]
],
[
[
100588,
100621
]
],
[
[
101051,
101080
]
]
] |
default_app_config = "grandchallenge.evaluation.apps.EvaluationConfig"
| [
[
[
0,
18
]
]
] |
class StoryPytestError(Exception):
"""Base error of all stories-pytest errors."""
pass
| [
[
[
6,
22
]
]
] |
from datetime import datetime
import re
from urllib.parse import quote
from django.db import models
from django.utils.html import urlize
from django.utils.timezone import make_aware, utc
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import (
QUERY_TYPE_CHOICES,
QUERY_TYPE_USER,
QUERY_TYPE_LIST,
QUERY_TYPE_SEARCH,
)
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
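# Patterns and link templates used below to turn @usernames and #hashtags
# in tweet text into twitter.com links.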
re_usernames = re.compile(r"(^|\W)@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile(r"#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = '<a href="http://twitter.com/search?q=%23\\1">#\\1</a>'
replace_usernames = '\\1<a href="http://twitter.com/\\2">@\\2</a>'
class TwitterQueryException(Exception):
pass
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES, max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
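        # Map each query type to the Twitter API endpoint it should request.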
urls = {
QUERY_TYPE_USER: (
"https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")
),
QUERY_TYPE_LIST: (
"https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value
),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json["user"]
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json["user"]["screen_name"]
tweet.full_name = tweet_json["user"]["name"]
tweet.profile_image_url = tweet_json["user"]["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, "TWITTER_STRIP_HIGH_MULTIBYTE", False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = "".join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
tweet.created_at = make_aware(d, utc)
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True
)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True
)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True
)
query = models.ForeignKey("Query", on_delete=models.CASCADE, related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
| [
[
[
21,
29
],
[
4920,
4928
]
],
[
[
37,
39
],
[
575,
577
],
[
614,
616
],
[
643,
645
],
[
676,
678
]
],
[
[
66,
71
],
[
1470,
1475
]
],
[
[
95,
101
],
[
898,
904
],
[
925,
931
],
[
1008,
1014
],
[
1070,
1076
],
[
5213,
5219
],
[
5245,
5251
],
[
5311,
5317
],
[
5370,
5376
],
[
5436,
5442
],
[
5503,
5509
],
[
5579,
5585
],
[
5673,
5679
],
[
5779,
5785
],
[
5894,
5900
],
[
5995,
6001
],
[
6032,
6038
]
],
[
[
132,
138
],
[
4547,
4553
]
],
[
[
173,
183
],
[
5008,
5018
]
],
[
[
185,
188
],
[
5022,
5025
]
],
[
[
226,
244
],
[
942,
943
],
[
1025,
1026
],
[
1158,
1159
],
[
1207,
1208
],
[
5262,
5263
],
[
5332,
5333
],
[
5387,
5388
],
[
5452,
5453
],
[
5520,
5521
],
[
5596,
5597
],
[
5698,
5699
],
[
5805,
5806
],
[
5920,
5921
],
[
6141,
6142
],
[
6182,
6183
]
],
[
[
275,
281
],
[
3002,
3008
]
],
[
[
289,
297
],
[
2979,
2987
]
],
[
[
326,
334
],
[
4741,
4749
]
],
[
[
371,
389
],
[
961,
979
]
],
[
[
395,
410
],
[
1573,
1588
]
],
[
[
416,
431
],
[
1783,
1798
]
],
[
[
437,
454
],
[
1950,
1967
],
[
3945,
3962
]
],
[
[
488,
505
],
[
2231,
2248
]
],
[
[
545,
557
],
[
6086,
6098
]
],
[
[
560,
572
],
[
4599,
4611
]
],
[
[
629,
640
],
[
4672,
4683
]
],
[
[
691,
707
],
[
4688,
4704
]
],
[
[
766,
783
],
[
4616,
4633
]
],
[
[
841,
862
],
[
2147,
2168
],
[
2889,
2910
],
[
3082,
3103
],
[
3163,
3184
]
],
[
[
892,
897
]
],
[
[
5207,
5212
]
]
] |
"""carfinder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from finder import views
from finder import forms
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import include, url, handler404
# from django.views.generic.simple import direct_to_template
from django.views.generic.base import TemplateView
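# Single Index view instance shared by the predictImage and list routes below.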
index = views.Index()
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.Index.as_view(), name='index'),
path('upload/', forms.upload, name='upload'),
path('upload/predictImage', index.predict_image, name="predictImage"),
path('list/',index.list_of_cars, name='listOfCars')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# cmd to run the handler: python manage.py collectstatic
handler404 = 'finder.views.error_404' | [
[
[
659,
664
],
[
1045,
1050
]
],
[
[
689,
693
],
[
1030,
1034
],
[
1067,
1071
],
[
1118,
1122
],
[
1168,
1172
],
[
1243,
1247
]
],
[
[
713,
718
],
[
995,
1000
],
[
1076,
1081
]
],
[
[
738,
743
],
[
1134,
1139
]
],
[
[
780,
786
],
[
1299,
1305
]
],
[
[
811,
819
],
[
1306,
1314
],
[
1340,
1348
]
],
[
[
849,
856
]
],
[
[
858,
861
]
],
[
[
863,
873
]
],
[
[
973,
985
]
],
[
[
987,
992
],
[
1196,
1201
],
[
1256,
1261
]
],
[
[
1010,
1021
]
],
[
[
1416,
1426
]
]
] |
from rest_framework.views import APIView
from knox.auth import TokenAuthentication
from rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny
from rest_framework.response import Response
from jaseci.utils.utils import logger
from jaseci.api.public_api import public_api
from jaseci.element.element import element
from jaseci_serv.base.orm_hook import orm_hook
from jaseci_serv.base.models import JaseciObject, GlobalVars
from time import time
class JResponse(Response):
def __init__(self, master, *args, **kwargs):
super().__init__(*args, **kwargs)
self.master = master
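        # Push objects saved during this request to redis immediately; the
        # database commit itself is deferred until close() runs.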
for i in self.master._h.save_obj_list:
self.master._h.commit_obj_to_redis(i)
self.master._h.skip_redis_update = True
def close(self):
super(JResponse, self).close()
# Commit db changes after response to user
self.master._h.commit()
class AbstractJacAPIView(APIView):
"""
The builder set of Jaseci APIs
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def post(self, request):
"""
        General post function that parses api signature to load params
SuperSmart Post - can read signatures of master and process
bodies accordingly
"""
self.proc_request(request)
api_result = self.caller.general_interface_to_api(
self.cmd, type(self).__name__)
self.log_request_time()
return self.issue_response(api_result)
def log_request_time(self):
"""Api call preamble"""
TY = '\033[33m'
TG = '\033[32m'
EC = '\033[m' # noqa
tot_time = time()-self.start_time
save_count = 0
if(isinstance(self.caller, element)):
save_count = len(self.caller._h.save_obj_list)
logger.info(str(
f'API call to {TG}{type(self).__name__}{EC}'
f' completed in {TY}{tot_time:.3f} seconds{EC}'
f' saving {TY}{save_count}{EC} objects.'))
def proc_request(self, request):
"""Parse request to field set"""
pl_peek = str(dict(request.data))[:256]
logger.info(str(
f'Incoming call to {type(self).__name__} with {pl_peek}'))
self.start_time = time()
self.cmd = request.data
self.set_caller(request)
self.res = "Not valid interaction!"
def set_caller(self, request):
"""Assigns the calling api interface obj"""
self.caller = request.user.get_master()
def issue_response(self, api_result):
"""Issue response from call"""
# self.caller._h.commit()
# return Response(api_result)
# for i in self.caller._h.save_obj_list:
# self.caller._h.commit_obj_to_redis(i)
return JResponse(self.caller, api_result)
class AbstractAdminJacAPIView(AbstractJacAPIView):
"""
The abstract base for Jaseci Admin APIs
"""
permission_classes = (IsAuthenticated, IsAdminUser)
class AbstractPublicJacAPIView(AbstractJacAPIView):
"""
    The abstract base for Jaseci Public APIs
"""
permission_classes = (AllowAny,)
def set_caller(self, request):
"""Assigns the calling api interface obj"""
self.caller = public_api(orm_hook(
objects=JaseciObject.objects,
globs=GlobalVars.objects
))
def issue_response(self, api_result):
"""Issue response from call"""
# If committer set, results should be saved back
if(self.caller.committer):
return JResponse(self.caller.committer, api_result)
else:
return Response(api_result)
| [
[
[
33,
40
],
[
927,
934
]
],
[
[
63,
82
],
[
1018,
1037
]
],
[
[
122,
133
],
[
2992,
3003
]
],
[
[
135,
150
],
[
1066,
1081
],
[
2975,
2990
]
],
[
[
152,
160
],
[
3145,
3153
]
],
[
[
197,
205
],
[
480,
488
],
[
3648,
3656
]
],
[
[
237,
243
],
[
1841,
1847
],
[
2165,
2171
]
],
[
[
278,
288
],
[
3266,
3276
]
],
[
[
324,
331
],
[
1763,
1770
]
],
[
[
370,
378
],
[
3277,
3285
]
],
[
[
415,
427
],
[
3307,
3319
]
],
[
[
429,
439
],
[
3347,
3357
]
],
[
[
457,
461
],
[
1682,
1686
],
[
2279,
2283
]
],
[
[
470,
479
],
[
792,
801
],
[
2801,
2810
],
[
3570,
3579
]
],
[
[
908,
926
],
[
2868,
2886
],
[
3038,
3056
]
],
[
[
2844,
2867
]
],
[
[
3013,
3037
]
]
] |
# Generated by Django 3.2.7 on 2021-12-02 21:01
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('Author', '0001_initial'),
('Posts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='inbox',
name='iPosts',
field=models.ManyToManyField(blank=True, default=list, to='Posts.Post'),
),
migrations.AddField(
model_name='followers',
name='items',
field=models.ManyToManyField(blank=True, default=list, related_name='items', to=settings.AUTH_USER_MODEL),
),
]
| [
[
[
73,
81
],
[
670,
678
]
],
[
[
104,
114
],
[
141,
151
],
[
311,
321
],
[
495,
505
]
],
[
[
116,
122
],
[
409,
415
],
[
596,
602
]
],
[
[
131,
140
]
]
] |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from datetime import datetime
from DateTime.DateTime import DateTime
import six
MAX32 = int(2 ** 31 - 1)
def safe_callable(ob):
# Works with ExtensionClasses and Acquisition.
try:
ob.__class__
try:
return bool(ob.__call__)
except AttributeError:
return isinstance(ob, six.class_types)
except AttributeError:
return callable(ob)
def datetime_to_minutes(value, precision=1,
max_value=MAX32, min_value=-MAX32):
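    # Convert a date/datetime (or parseable date string) to integer minutes,
    # flattened to `precision` and checked against the allowed range.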
if value is None:
return value
if isinstance(value, (str, datetime)):
value = DateTime(value)
if isinstance(value, DateTime):
value = value.millis() / 1000 / 60 # flatten to minutes
# flatten to precision
if precision > 1:
value = value - (value % precision)
value = int(value)
if value > max_value or value < min_value:
        # value must be an integer fitting in the range (default 32-bit)
raise OverflowError(
'{0} is not within the range of dates allowed.'.format(value))
return value
| [
[
[
635,
643
],
[
1197,
1205
]
],
[
[
675,
683
],
[
1225,
1233
],
[
1267,
1275
]
],
[
[
691,
694
],
[
943,
946
]
],
[
[
696,
701
],
[
1096,
1101
],
[
1114,
1119
]
],
[
[
727,
740
]
],
[
[
1022,
1041
]
]
] |
# Given a list of pts text files, build a complete dataset from it.
import glob
import os
import PIL.Image
import cv2
import numpy as np
from time import time
from argparse import ArgumentParser
from scipy.spatial import cKDTree
import tensorflow as tf
import SaddlePoints
import errno
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if os.path.isdir(path):
pass
else:
raise
# Given chessboard corners, get all 7x7 = 49 internal x-corner positions.
def getXcorners(corners):
# Get Xcorners for image
ideal_corners = np.array([[0,1],[1,1],[1,0],[0,0]],dtype=np.float32)
M = cv2.getPerspectiveTransform(ideal_corners, corners) # From ideal to real.
  # 7x7 internal grid of 49 x-corners.
xx,yy = np.meshgrid(np.arange(7, dtype=np.float32), np.arange(7, dtype=np.float32))
all_ideal_grid_pts = np.vstack([xx.flatten(), yy.flatten()]).T
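  # Note: descriptive comment added here. Shifting indices 0..6 to 1..7 and dividing
  # by 8 places the 49 internal x-corners of an 8x8 board at fractions 1/8..7/8 of
  # the unit square, ready to be mapped into the image by the homography M.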
all_ideal_grid_pts = (all_ideal_grid_pts + 1) / 8.0
chess_xcorners = cv2.perspectiveTransform(np.expand_dims(all_ideal_grid_pts,0), M)[0,:,:]
return chess_xcorners
def getPointsNearPoints(ptsA, ptsB, MIN_DIST_PX=3):
# Returns a mask for points in A that are close by MIN_DIST_PX to points in B
min_dists, min_dist_idx = cKDTree(ptsB).query(ptsA, 1)
mask = min_dists < MIN_DIST_PX
return mask
# Load image from path
def loadImage(img_filepath):
print ("Processing %s" % (img_filepath))
img = PIL.Image.open(img_filepath)
if (img.size[0] > 640):
img = img.resize((640, 480), PIL.Image.BICUBIC)
gray = np.array(img.convert('L'))
rgb = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return rgb, gray
def getTiles(pts, img_gray, WINSIZE=10):
# NOTE : Assumes no point is within WINSIZE of an edge!
# Points Nx2, columns should be x and y, not r and c.
# WINSIZE = the number of pixels out from the point that a tile should be.
# Build tiles of size Nx(2*WINSIZE+1)x(2*WINSIZE+1)
img_shape = np.array([img_gray.shape[1], img_gray.shape[0]])
tiles = np.zeros([len(pts), WINSIZE*2+1, WINSIZE*2+1], dtype=img_gray.dtype)
for i, pt in enumerate(np.round(pts).astype(np.int64)):
tiles[i,:,:] = img_gray[pt[1]-WINSIZE:pt[1]+WINSIZE+1,
pt[0]-WINSIZE:pt[0]+WINSIZE+1]
return tiles
def getTilesColor(pts, img, WINSIZE=10):
# NOTE : Assumes no point is within WINSIZE of an edge!
# Points Nx2, columns should be x and y, not r and c.
# WINSIZE = the number of pixels out from the point that a tile should be.
# Build tiles of size Nx(2*WINSIZE+1)x(2*WINSIZE+1)
img_shape = np.array([img.shape[1], img.shape[0]])
tiles = np.zeros([len(pts), WINSIZE*2+1, WINSIZE*2+1, 3], dtype=img.dtype)
for i, pt in enumerate(np.round(pts).astype(np.int64)):
tiles[i,:,:,:] = img[pt[1]-WINSIZE:pt[1]+WINSIZE+1,
pt[0]-WINSIZE:pt[0]+WINSIZE+1, :]
return tiles
# View image with chessboard lines overlaid.
def addOverlay(idx, img, corners, good_xcorners, bad_pts):
for pt in np.round(bad_pts).astype(np.int64):
cv2.rectangle(img, tuple(pt-2),tuple(pt+2), (0,0,255), -1)
for pt in np.round(good_xcorners).astype(np.int64):
cv2.rectangle(img, tuple(pt-2),tuple(pt+2), (0,255,0), -1)
cv2.polylines(img,
[np.round(corners).astype(np.int32)],
isClosed=True, thickness=2, color=(255,0,255))
cv2.putText(img,
'Frame % 4d' % (idx),
(5,15), cv2.FONT_HERSHEY_PLAIN, 1.0,(255,255,255),0)
def visualizeTiles(tiles):
# Assumes no more than 49 tiles, only plots the first 49
N = len(tiles)
# assert N <= 49
assert tiles.shape[1] == tiles.shape[2] # square tiles
side = tiles.shape[1]
cols = 7#int(np.ceil(np.sqrt(N)))
rows = 7#int(np.ceil(N/(cols)))+1
tile_img = np.zeros([rows*side, cols*side, 3], dtype=tiles.dtype)
for i in range(min(N,49)):
r, c = side*(int(i/cols)), side*(i%cols)
tile_img[r:r+side, c:c+side,:] = tiles[i,:,:,:]
return tile_img
# Converting the values into features
# _int64 is used for numeric values
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# _bytes is used for string/char values
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def main(args):
for pointfile in args.pointfiles:
with open(pointfile, 'r') as f:
lines = f.readlines()
video_filepath = lines[0]
images_path = os.path.dirname(pointfile)
# Writing to TFrecord
video_filename = os.path.basename(video_filepath)[:-5]
folder_path = "%s/winsize_%s_color" % (args.tfrecords_path, args.winsize)
mkdir_p(folder_path)
tfrecord_path = "%s/%s_ws%d.tfrecords" % (folder_path, video_filename, args.winsize)
with tf.python_io.TFRecordWriter(tfrecord_path) as writer:
for line in lines[1:]:
tA = time()
parts = line.split(',')
idx = int(parts[0])
# if (idx < 260):
# continue
corners = np.array(parts[1:], dtype=np.float32).reshape([4,2])
xcorners = getXcorners(corners)
filename = "%s/frame_%03d.jpg" % (images_path, idx)
img, gray = loadImage(filename)
# Saddle points
spts, gx, gy = SaddlePoints.getFinalSaddlePoints(gray, WINSIZE=args.winsize)
good_spt_mask = getPointsNearPoints(spts, xcorners)
good_xcorners = spts[good_spt_mask]
bad_spts = spts[~good_spt_mask]
# Only keep the same # of bad points as good
# Shuffle bad points so we get a good smattering.
N = len(good_xcorners)
np.random.shuffle(bad_spts)
bad_spts = bad_spts[:N]
# good_xcorners, bad_xcorners, bad_spts, spts, keep_mask = getXcornersNearSaddlePts(gray, xcorners)
tiles = getTilesColor(good_xcorners, img, WINSIZE=args.winsize)
bad_tiles = getTilesColor(bad_spts, img, WINSIZE=args.winsize)
# Write tiles to tf-records
for tile in tiles:
feature = { 'label': _int64_feature(1),
'image': _bytes_feature(tf.compat.as_bytes(tile.tostring())) }
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
for tile in bad_tiles:
feature = { 'label': _int64_feature(0),
'image': _bytes_feature(tf.compat.as_bytes(tile.tostring())) }
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
if args.viztiles:
tile_img = visualizeTiles(tiles)
bad_tile_img = visualizeTiles(bad_tiles)
print('\t Took %.1f ms.' % ((time() - tA)*1000))
if args.vizoverlay:
overlay_img = img.copy()
addOverlay(idx, overlay_img, corners, good_xcorners, bad_spts)
cv2.imshow('frame',overlay_img)
if args.viztiles:
cv2.imshow('tiles', tile_img)
cv2.imshow('bad_tiles', bad_tile_img)
if (args.vizoverlay or args.viztiles):
if (cv2.waitKey(1) & 0xFF == ord('q')):
break
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("pointfiles", nargs='+',
help="All pts.txt points files containing filename and chessboard coordinates.")
parser.add_argument("-savetf",
action='store_true', help="Whether to save tfrecords")
parser.add_argument("-viztiles",
action='store_true', help="Whether to visualize tiles or not")
parser.add_argument("-vizoverlay",
action='store_true', help="Whether to visualize overlay")
parser.add_argument("--tfrecords_path", default='datasets/tfrecords',
help="Folder to store tfrecord output")
parser.add_argument("-ws", "--winsize", dest="winsize", default=10, type=int,
help="Half window size (full kernel = 2*winsize + 1)")
args = parser.parse_args()
print(args)
  main(args)
| [
[
[
75,
79
]
],
[
[
87,
89
],
[
317,
319
],
[
382,
384
],
[
4398,
4400
],
[
4473,
4475
]
],
[
[
97,
106
],
[
1423,
1426
],
[
1511,
1514
]
],
[
[
114,
117
],
[
642,
645
],
[
981,
984
],
[
1574,
1577
],
[
1602,
1605
],
[
3027,
3030
],
[
3145,
3148
],
[
3208,
3211
],
[
3329,
3332
],
[
3385,
3388
],
[
6818,
6821
],
[
6895,
6898
],
[
6935,
6938
],
[
7035,
7038
]
],
[
[
125,
136
],
[
583,
585
],
[
624,
626
],
[
766,
768
],
[
778,
780
],
[
797,
799
],
[
810,
812
],
[
829,
831
],
[
865,
867
],
[
1006,
1008
],
[
1539,
1541
],
[
1587,
1589
],
[
1942,
1944
],
[
2001,
2003
],
[
2095,
2097
],
[
2116,
2118
],
[
2563,
2565
],
[
2612,
2614
],
[
2704,
2706
],
[
2725,
2727
],
[
2987,
2989
],
[
3012,
3014
],
[
3099,
3101
],
[
3130,
3132
],
[
3235,
3237
],
[
3260,
3262
],
[
3719,
3721
],
[
4943,
4945
],
[
4969,
4971
],
[
5543,
5545
]
],
[
[
154,
158
],
[
4809,
4813
],
[
6650,
6654
]
],
[
[
180,
194
],
[
7128,
7142
]
],
[
[
221,
228
],
[
1240,
1247
]
],
[
[
236,
252
],
[
4029,
4031
],
[
4057,
4059
],
[
4169,
4171
],
[
4197,
4199
],
[
4713,
4715
],
[
6016,
6018
],
[
6075,
6077
],
[
6101,
6103
],
[
6317,
6319
],
[
6376,
6378
],
[
6402,
6404
]
],
[
[
260,
272
],
[
5185,
5197
]
],
[
[
280,
285
]
],
[
[
291,
298
],
[
4593,
4600
]
],
[
[
516,
527
],
[
5015,
5026
]
],
[
[
1084,
1103
],
[
5272,
5291
]
],
[
[
1344,
1353
],
[
5117,
5126
]
],
[
[
1645,
1653
]
],
[
[
2266,
2279
],
[
5729,
5742
],
[
5805,
5818
]
],
[
[
2920,
2930
],
[
6744,
6754
]
],
[
[
3435,
3449
],
[
6539,
6553
],
[
6586,
6600
]
],
[
[
3997,
4011
],
[
5951,
5965
],
[
6252,
6266
]
],
[
[
4137,
4151
],
[
6001,
6015
],
[
6302,
6316
]
],
[
[
4238,
4242
],
[
7978,
7982
]
],
[
[
7119,
7125
],
[
7147,
7153
],
[
7297,
7303
],
[
7407,
7413
],
[
7527,
7533
],
[
7644,
7650
],
[
7778,
7784
],
[
7942,
7948
]
],
[
[
7935,
7939
],
[
7970,
7974
],
[
7983,
7987
]
]
] |
# Question 1
# This function converts miles to kilometers (km).
# Complete the function to return the result of the conversion
# Call the function to convert the trip distance from miles to kilometers
# Fill in the blank to print the result of the conversion
# Calculate the round-trip in kilometers by doubling the result, and fill in the blank to print the result
# 1) Complete the function to return the result of the conversion
def convert_distance(miles):
km = miles * 1.6 # approximately 1.6 km in 1 mile
return km
my_trip_miles = 55
# 2) Convert my_trip_miles to kilometers by calling the function above
my_trip_km = convert_distance(my_trip_miles)
# 3) Fill in the blank to print the result of the conversion
print("The distance in kilometers is " + str(my_trip_km))
# 4) Calculate the round-trip in kilometers by doubling the result,
# and fill in the blank to print the result
print("The round-trip in kilometers is " + str(my_trip_km * 2))
| [
[
[
458,
474
],
[
651,
667
]
],
[
[
546,
559
],
[
668,
681
]
],
[
[
638,
648
],
[
791,
801
],
[
967,
977
]
]
] |
#!/usr/bin/env python
import tyrell.spec as Stuff
from tyrell.interpreter import PostOrderInterpreter
from tyrell.enumerator import SmtEnumerator, RelaxedRandomEnumerator
from tyrell.decider import Example, ExampleConstraintDecider, SimpleSpiceDecider, ExampleConstraintPruningDecider
from tyrell.synthesizer import Synthesizer
from tyrell.logger import get_logger
from skidl import *
from skidl.pyspice import *
logger = get_logger('tyrell')
class Circuitsv():
def __init__(self, op):
self.circuit = Circuit()
self.vin = Net('VI')
self.vout = Net('VO')
self.ground = Net('GND')
self.sanity = Net('Sanity')
self.pow = []
self.op = op
self.circuit += self.vin, self.vout, self.ground, self.sanity
def reinit(self):
self.circuit.reset()
self.circuit = Circuit()
self.vin = Net('VI')
self.vout = Net('VO')
self.ground = Net('GND')
self.sanity = Net('Sanity')
self.pow = []
self.circuit += self.vin, self.vout, self.ground, self.sanity
class ToyInterpreter(PostOrderInterpreter):
def __init__(self, circuit):
self.circuit = circuit
def eval_get_ground(self, node, args):
return self.circuit.ground
def eval_get_Resistance(self, node, args):
return int(args[0])
def eval_get_outnet(self, node, args):
return self.circuit.vout
def eval_get_Supply(self, node, args):
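        # Note: descriptive comment added here. Lazily creates a single 5 V DC supply
        # the first time it is requested, wires its negative terminal to ground, and
        # reuses the same part on later calls.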
if len(self.circuit.pow) == 0:
with self.circuit.circuit:
vdc = V(dc_value=5 @ u_V)
self.circuit.pow.append(vdc)
self.circuit.pow[0]['n']+=self.circuit.ground
return self.circuit.pow[0]
def eval_startCon(self, node, args):
if self.circuit.op == "mult":
return None
logger.info("startCon")
self.circuit.vin += args[0][1]
return self.circuit
def eval_startConPart(self, node, args):
logger.info("startConPart")
return self.circuit
def eval_toTransist(self, node, args):
logger.info("toTransist")
return args[0]
def eval_toResist(self, node, args):
return args[0]
def eval_Rout(self, node, args):
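        # Note: descriptive comment added here. Instantiates a resistor with value
        # args[1], connects pin 2 to the net passed in args[0], and returns the part
        # so the caller can wire the remaining pin.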
logger.info("Rout")
with self.circuit.circuit:
r = R(value = args[1])
r[2]+=args[0]
return r
def eval_Tout(self, node, args):
logger.info("Tout")
with self.circuit.circuit:
q = BJT(model='2n2222a')
q['c']+=args[0]
q['b']+=args[1]
q['e']+=args[2]
return q
def eval_NO(self, node, args):
with self.circuit.circuit:
self.circuit.vout+=args[0][1]
return self.circuit.vout
def eval_NI(self, node, args):
with self.circuit.circuit:
self.circuit.vin+=args[0][1]
return self.circuit.vin
def eval_Nout(self, node, args):
with self.circuit.circuit:
n = Net()
n += args[0][1]
return n
def eval_GR(self, node, args):
logger.info("GR")
self.circuit.ground += args[0][1]
return self.circuit.ground
def eval_Rpow(self, node, args):
with self.circuit.circuit:
r = R(value = args[1])
r[2]+=args[0]['p']
return r
#Abstract Interpreter
def apply_vin(self, val):
return val
def apply_vout(self, val):
return val
def apply_ground(self, val):
return val
def main():
logger.info('Parsing Spec...')
# TBD: parse the DSL definition file and store it to `spec`
spec = Stuff.parse_file('example/divmult3.tyrell')
logger.info('Parsing succeeded')
circ = Circuitsv("mult")
logger.info('Building synthesizer...')
synthesizer = Synthesizer(
enumerator=RelaxedRandomEnumerator(spec, max_depth=6, min_depth=4, seed=None),
decider=SimpleSpiceDecider(
spec=spec, # TBD: provide the spec here
interpreter=ToyInterpreter(circ),
examples=[Example(input=[circ],output=["mult", 16.5])] # TBD: provide the example here
)
)
logger.info('Synthesizing programs...')
prog = synthesizer.synthesize()
if prog is not None:
logger.info('Solution found: {}'.format(prog))
else:
logger.info('Solution not found!')
if __name__ == '__main__':
logger.setLevel('DEBUG')
main()
| [
[
[
30,
50
],
[
3643,
3648
]
],
[
[
82,
102
],
[
1091,
1111
]
],
[
[
133,
146
]
],
[
[
148,
171
],
[
3847,
3870
]
],
[
[
199,
206
],
[
4071,
4078
]
],
[
[
208,
232
]
],
[
[
234,
252
],
[
3931,
3949
]
],
[
[
254,
285
]
],
[
[
317,
328
],
[
3815,
3826
]
],
[
[
355,
365
],
[
423,
433
]
],
[
[
384,
385
]
],
[
[
412,
413
],
[
514,
521
],
[
543,
546
],
[
573,
576
],
[
605,
608
],
[
638,
641
],
[
839,
846
],
[
868,
871
],
[
898,
901
],
[
930,
933
],
[
963,
966
],
[
1554,
1555
],
[
1569,
1572
],
[
2311,
2312
],
[
2489,
2492
],
[
2990,
2993
],
[
3269,
3270
]
],
[
[
414,
420
],
[
4411,
4417
],
[
1828,
1834
],
[
1972,
1978
],
[
2080,
2086
],
[
2240,
2246
],
[
2418,
2424
],
[
3085,
3091
],
[
3537,
3543
],
[
3691,
3697
],
[
3758,
3764
],
[
4168,
4174
],
[
4278,
4284
],
[
4343,
4349
]
],
[
[
450,
459
],
[
3735,
3744
]
],
[
[
1076,
1090
],
[
4027,
4041
]
],
[
[
3525,
3529
],
[
4440,
4444
]
]
] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cf_managementgroups(cli_ctx, **_):
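    # Note: descriptive comment added here. Management groups are tenant-level
    # resources, so the SDK client is built without binding to a subscription
    # (subscription_bound=False).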
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azext_managementgroups.managementgroups import ManagementGroupsAPI
return get_mgmt_service_client(
cli_ctx,
ManagementGroupsAPI,
subscription_bound=False)
def management_groups_client_factory(cli_ctx, _):
return cf_managementgroups(cli_ctx).management_groups
def management_group_subscriptions_client_factory(cli_ctx, _):
return cf_managementgroups(cli_ctx).management_group_subscriptions
| [
[
[
351,
370
],
[
720,
739
],
[
843,
862
]
],
[
[
663,
695
]
],
[
[
773,
818
]
]
] |
import scipy.stats as stats
import numpy as np
import warnings
from ecdfgof import adtest, kstest
warnings.filterwarnings("ignore")
_long = [
("alpha", stats.alpha),
("anglit", stats.anglit),
("arcsine", stats.arcsine),
("argus", stats.argus),
("beta", stats.beta),
("betaprime", stats.betaprime),
("bradford", stats.bradford),
("burr", stats.burr),
("burr12", stats.burr12),
("cauchy", stats.cauchy),
("chi", stats.chi),
("chi2", stats.chi2),
("cosine", stats.cosine),
("crystalball", stats.crystalball),
("dgamma", stats.dgamma),
("dweibull", stats.dweibull),
# ("erlang", stats.erlang),
("expon", stats.expon),
("exponnorm", stats.exponnorm),
("exponweib", stats.exponweib),
("exponpow", stats.exponpow),
("f", stats.f),
("fatiguelife", stats.fatiguelife),
("fisk", stats.fisk),
("foldcauchy", stats.foldcauchy),
("foldnorm", stats.foldnorm),
# ("frechet_r", stats.frechet_r),
# ("frechet_l", stats.frechet_l),
("genlogistic", stats.genlogistic),
("gennorm", stats.gennorm),
("genpareto", stats.genpareto),
("genexpon", stats.genexpon),
("genextreme", stats.genextreme),
("gausshyper", stats.gausshyper),
("gamma", stats.gamma),
("gengamma", stats.gengamma),
("genhalflogistic", stats.genhalflogistic),
("gilbrat", stats.gilbrat),
("gompertz", stats.gompertz),
("gumbel_r", stats.gumbel_r),
("gumbel_l", stats.gumbel_l),
("halfcauchy", stats.halfcauchy),
("halflogistic", stats.halflogistic),
("halfnorm", stats.halfnorm),
("halfgennorm", stats.halfgennorm),
("hypsecant", stats.hypsecant),
("invgamma", stats.invgamma),
("invgauss", stats.invgauss),
("invweibull", stats.invweibull),
("johnsonsb", stats.johnsonsb),
("johnsonsu", stats.johnsonsu),
("kappa4", stats.kappa4),
("kappa3", stats.kappa3),
("ksone", stats.ksone),
("kstwobign", stats.kstwobign),
("laplace", stats.laplace),
("levy", stats.levy),
("levy_l", stats.levy_l),
("levy_stable", stats.levy_stable),
("logistic", stats.logistic),
("loggamma", stats.loggamma),
("loglaplace", stats.loglaplace),
("lognorm", stats.lognorm),
("lomax", stats.lomax),
("maxwell", stats.maxwell),
("mielke", stats.mielke),
("moyal", stats.moyal),
("nakagami", stats.nakagami),
("ncx2", stats.ncx2),
("ncf", stats.ncf),
("nct", stats.nct),
("norm", stats.norm),
("norminvgauss", stats.norminvgauss),
("pareto", stats.pareto),
("pearson3", stats.pearson3),
("powerlaw", stats.powerlaw),
("powerlognorm", stats.powerlognorm),
("powernorm", stats.powernorm),
# ("rdist", stats.rdist),
# ("reciprocal", stats.reciprocal),
("rayleigh", stats.rayleigh),
("rice", stats.rice),
("recipinvgauss", stats.recipinvgauss),
("semicircular", stats.semicircular),
("skewnorm", stats.skewnorm),
("t", stats.t),
("trapz", stats.trapz),
("triang", stats.triang),
("truncexpon", stats.truncexpon),
# ("truncnorm", stats.truncnorm),
("tukeylambda", stats.tukeylambda),
("uniform", stats.uniform),
# ("vonmises", stats.vonmises),
("vonmises_line", stats.vonmises_line),
("wald", stats.wald),
("weibull_min", stats.weibull_min),
("weibull_max", stats.weibull_max),
# ("wrapcauchy", stats.wrapcauchy),
]
_short = [
("alpha", stats.alpha),
("beta", stats.beta),
("cauchy", stats.cauchy),
("chi2", stats.chi2),
# ("cosine", stats.cosine),
("expon", stats.expon),
("exponnorm", stats.exponnorm),
("f", stats.f),
("gamma", stats.gamma),
("laplace", stats.laplace),
("levy", stats.levy),
("levy_stable", stats.levy_stable),
("logistic", stats.logistic),
("loggamma", stats.loggamma),
("loglaplace", stats.loglaplace),
("lognorm", stats.lognorm),
("norm", stats.norm),
("pareto", stats.pareto),
("powerlaw", stats.powerlaw),
("t", stats.t),
("triang", stats.triang),
("uniform", stats.uniform),
("weibull_min", stats.weibull_min),
("weibull_max", stats.weibull_max),
]
def fit(data, scipydist, name=None):
# fit distribution using maximum likelihood
params = scipydist.fit(data)
# create a "frozen" distribution object
dist = scipydist(*params)
# calculate log likelihood function and info criteria
loglike = dist.logpdf(data).sum()
bic = np.log(len(data)) * len(params) - 2.0 * loglike # Schwarz
aic = 2.0 * len(params) - 2.0 * loglike # Akaike
# p-values for GOF tests
ad_pval = adtest(data, dist)[1] # Anderson-Darling
ks_pval = kstest(data, dist)[1] # Kolmogorov-Smirnov
return {"bic": bic, "aic": aic, "ad_pval": ad_pval,
"ks_pval": ks_pval, "dist": dist, "name": name}
def _fit_all(data, dist_list):
results = list(map(lambda x: fit(data, x[1], x[0]), dist_list))
return sorted(results, key=lambda r: r["bic"]) # lowest BIC to highest
def _fstr(value):
return ("%.3f" % value).rjust(8)
def _result_line(r, header=False):
if header is True:
return " distribution, BIC, AIC, KS p-val, AD p-val\n"
else:
return ("%s, %s, %s, %s, %s\n" %
(r["name"].rjust(15), _fstr(r["bic"]), _fstr(r["aic"]),
_fstr(r["ks_pval"]), _fstr(r["ad_pval"])))
def compare(data, long=False):
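    # Note: descriptive comment added here. Fits every candidate distribution to the
    # data and returns a text report, one line per distribution, ordered from lowest
    # (best) BIC to highest.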
dist_list = _long if long is True else _short
results = _fit_all(data, dist_list)
lines = [_result_line(None, header=True)] + list(map(_result_line, results))
return "".join(lines)
| [
[
[
9,
29
],
[
179,
184
],
[
225,
230
],
[
272,
277
],
[
320,
325
],
[
366,
371
],
[
411,
416
],
[
461,
466
],
[
510,
515
],
[
555,
560
],
[
602,
607
],
[
649,
654
],
[
693,
698
],
[
738,
743
],
[
785,
790
],
[
837,
842
],
[
884,
889
],
[
982,
987
],
[
1028,
1033
],
[
1078,
1083
],
[
1128,
1133
],
[
1177,
1182
],
[
1219,
1224
],
[
1271,
1276
],
[
1316,
1321
],
[
1367,
1372
],
[
1520,
1525
],
[
1572,
1577
],
[
1620,
1625
],
[
1670,
1675
],
[
1719,
1724
],
[
1770,
1775
],
[
1821,
1826
],
[
1867,
1872
],
[
1916,
1921
],
[
1972,
1977
],
[
2020,
2025
],
[
2069,
2074
],
[
2118,
2123
],
[
2167,
2172
],
[
2218,
2223
],
[
2271,
2276
],
[
2320,
2325
],
[
2372,
2377
],
[
2422,
2427
],
[
2471,
2476
],
[
2520,
2525
],
[
2571,
2576
],
[
2621,
2626
],
[
2671,
2676
],
[
2718,
2723
],
[
2765,
2770
],
[
2811,
2816
],
[
2861,
2866
],
[
2909,
2914
],
[
2954,
2959
],
[
3001,
3006
],
[
3053,
3058
],
[
3102,
3107
],
[
3151,
3156
],
[
3202,
3207
],
[
3250,
3255
],
[
3296,
3301
],
[
3344,
3349
],
[
3391,
3396
],
[
3437,
3442
],
[
3486,
3491
],
[
3531,
3536
],
[
3575,
3580
],
[
3619,
3624
],
[
3664,
3669
],
[
3717,
3722
],
[
3764,
3769
],
[
3813,
3818
],
[
3862,
3867
],
[
3915,
3920
],
[
4066,
4071
],
[
4115,
4120
],
[
4160,
4165
],
[
4214,
4219
],
[
4267,
4272
],
[
4316,
4321
],
[
4358,
4363
],
[
4404,
4409
],
[
4451,
4456
],
[
4554,
4559
],
[
4606,
4611
],
[
4705,
4710
],
[
4759,
4764
],
[
4804,
4809
],
[
4856,
4861
],
[
4984,
4989
],
[
5031,
5036
],
[
5077,
5082
],
[
5125,
5130
],
[
5221,
5226
],
[
5268,
5273
],
[
5319,
5324
],
[
5362,
5367
],
[
5409,
5414
],
[
5458,
5463
],
[
5504,
5509
],
[
5557,
5562
],
[
5607,
5612
],
[
5657,
5662
],
[
5709,
5714
],
[
5758,
5763
],
[
5804,
5809
],
[
5852,
5857
],
[
5902,
5907
],
[
5945,
5950
],
[
5993,
5998
],
[
6042,
6047
],
[
6095,
6100
]
],
[
[
37,
48
],
[
6429,
6431
]
],
[
[
56,
64
],
[
101,
109
]
],
[
[
85,
91
],
[
6600,
6606
]
],
[
[
93,
99
],
[
6656,
6662
]
],
[
[
137,
142
],
[
7426,
7431
]
],
[
[
4940,
4946
],
[
7453,
7459
]
],
[
[
6132,
6135
],
[
6883,
6886
]
],
[
[
6823,
6831
],
[
7474,
7482
]
],
[
[
7000,
7005
],
[
7283,
7288
],
[
7300,
7305
],
[
7334,
7339
],
[
7355,
7360
]
],
[
[
7057,
7069
],
[
7513,
7525
],
[
7557,
7569
]
],
[
[
7383,
7390
]
]
] |
import pytest
from brownie import interface, RewardsManager, Contract
from utils.voting import create_vote
from utils.config import (lido_dao_voting_address,
lido_dao_agent_address,
balancer_deployed_manager,
lido_dao_token_manager_address,
ldo_token_address)
from utils.evm_script import encode_call_script
def test_erc_20_recover_via_voting(ldo_holder, rewards_manager, helpers, accounts, dao_voting, ldo_token, stranger):
# manager_contract = Contract.from_abi('RewardsManager', balancer_deployed_manager, RewardsManager.abi)
agent_contract = interface.Agent(lido_dao_agent_address)
ldo_token.transfer(rewards_manager, 10**18, {"from": ldo_holder})
assert ldo_token.balanceOf(rewards_manager) == 10**18
encoded_recover_calldata = rewards_manager.recover_erc20.encode_input(ldo_token_address, 10**18, stranger)
recover_script = encode_call_script([(rewards_manager.address, encoded_recover_calldata)])
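    # Note: descriptive comment added here. The recover call is wrapped in an EVM
    # script and forwarded through the DAO agent, which is assumed here to be the
    # owner permitted to call recover_erc20 on the rewards manager.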
forwrded_script = encode_call_script([(lido_dao_agent_address, agent_contract.forward.encode_input(recover_script))])
(vote_id, _) = create_vote(
voting=interface.Voting(lido_dao_voting_address),
token_manager=interface.TokenManager(lido_dao_token_manager_address),
vote_desc='',
evm_script=forwrded_script,
tx_params={"from": ldo_holder})
helpers.execute_vote(vote_id=vote_id,
accounts=accounts,
dao_voting=dao_voting)
assert ldo_token.balanceOf(rewards_manager) == 0
assert ldo_token.balanceOf(stranger) == 10**18
| [
[
[
7,
13
]
],
[
[
35,
44
],
[
666,
675
],
[
1216,
1225
],
[
1281,
1290
]
],
[
[
46,
60
]
],
[
[
62,
70
]
],
[
[
97,
108
],
[
1188,
1199
]
],
[
[
135,
158
],
[
1233,
1256
]
],
[
[
186,
208
],
[
682,
704
],
[
1085,
1107
]
],
[
[
236,
261
]
],
[
[
289,
319
],
[
1304,
1334
]
],
[
[
347,
364
],
[
910,
927
]
],
[
[
395,
413
],
[
968,
986
],
[
1064,
1082
]
],
[
[
419,
449
]
]
] |