from nose.tools import raises, eq_
from mhctools import NetMHC, NetMHC3, NetMHC4
from mhcnames import normalize_allele_name
def run_class_with_executable(mhc_class, mhc_executable):
alleles = [normalize_allele_name("HLA-A*02:01")]
predictor = mhc_class(
alleles=alleles,
program_name=mhc_executable)
sequence_dict = {
"SMAD4-001": "ASIINFKELA",
"TP53-001": "ASILLLVFYW"
}
return predictor.predict_subsequences(
sequence_dict=sequence_dict,
peptide_lengths=[9])
@raises(SystemError)
def test_executable_mismatch_3_4():
run_class_with_executable(NetMHC3, "netMHC")
@raises(SystemError)
def test_executable_mismatch_4_3():
run_class_with_executable(NetMHC4, "netMHC-3.4")
def test_wrapper_function():
alleles = [normalize_allele_name("HLA-A*02:01")]
wrapped_4 = NetMHC(
alleles=alleles,
default_peptide_lengths=[9],
program_name="netMHC")
eq_(type(wrapped_4), NetMHC4)
wrapped_3 = NetMHC(
alleles=alleles,
default_peptide_lengths=[9],
program_name="netMHC-3.4")
eq_(type(wrapped_3), NetMHC3)
@raises(SystemError, OSError)
def test_wrapper_failure():
alleles = [normalize_allele_name("HLA-A*02:01")]
NetMHC(alleles=alleles,
default_peptide_lengths=[9],
program_name="netMHC-none")
def test_multiple_lengths_netmhc3():
alleles = [normalize_allele_name("H-2-Kb")]
predictor = NetMHC3(alleles=alleles,
default_peptide_lengths=[9],
program_name="netMHC-3.4")
protein_sequence_dict = {
'seq': 'AETDEIKILLEE',
}
binding_predictions = predictor.predict_subsequences(
protein_sequence_dict,
peptide_lengths=[10, 11])
eq_(5, len(binding_predictions))
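# --- Illustrative usage sketch (not part of the original test module) ---
# A rough example of how predictions returned by predict_subsequences() might
# be inspected; the attribute names used here (peptide, affinity) are
# assumptions about the mhctools BindingPrediction interface, not taken from
# this file, so the loop is kept as a comment.
# for prediction in run_class_with_executable(NetMHC4, "netMHC"):
#     print(prediction.peptide, prediction.affinity)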
| {
"content_hash": "2cfb18b2cb70407ff02225104f230f92",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 57,
"avg_line_length": 30.338983050847457,
"alnum_prop": 0.6502793296089385,
"repo_name": "hammerlab/mhctools",
"id": "ac806fd742be433566bffbe8000c04643c9832d7",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_netmhc_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "141478"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
} |
"""This module contains functions for compiling circuits to prepare
Slater determinants and fermionic Gaussian states."""
import numpy
from openfermion.ops import QuadraticHamiltonian
from openfermion.ops._givens_rotations import (
fermionic_gaussian_decomposition, givens_decomposition)
def gaussian_state_preparation_circuit(
quadratic_hamiltonian,
occupied_orbitals=None,
spin_sector=None):
r"""Obtain the description of a circuit which prepares a fermionic Gaussian
state.
Fermionic Gaussian states can be regarded as eigenstates of quadratic
Hamiltonians. If the Hamiltonian conserves particle number, then these are
just Slater determinants. See arXiv:1711.05395 for a detailed description
of how this procedure works.
The circuit description is returned as a sequence of elementary
operations; operations that can be performed in parallel are grouped
together. Each elementary operation is either
- the string 'pht', indicating the particle-hole transformation
on the last fermionic mode, which is the operator :math:`\mathcal{B}`
such that
.. math::
\begin{align}
\mathcal{B} a_N \mathcal{B}^\dagger &= a_N^\dagger,\\
\mathcal{B} a_j \mathcal{B}^\dagger &= a_j, \quad
j = 1, \ldots, N-1,
\end{align}
or
- a tuple :math:`(i, j, \theta, \varphi)`, indicating the operation
.. math::
\exp[i \varphi a_j^\dagger a_j]
\exp[\theta (a_i^\dagger a_j - a_j^\dagger a_i)],
a Givens rotation of modes :math:`i` and :math:`j` by angles
:math:`\theta` and :math:`\varphi`.
Args:
quadratic_hamiltonian(QuadraticHamiltonian):
The Hamiltonian whose eigenstate is desired.
occupied_orbitals(list):
A list of integers representing the indices of the occupied
orbitals in the desired Gaussian state. If this is None
(the default), then it is assumed that the ground state is
desired, i.e., the orbitals with negative energies are filled.
        spin_sector (optional int): An optional integer specifying
a spin sector to restrict to: 0 for spin-up and 1 for
spin-down. If specified, the returned circuit acts on modes
indexed by spatial indices (rather than spin indices).
Should only be specified if the Hamiltonian
includes a spin degree of freedom and spin-up modes
do not interact with spin-down modes.
Returns
-------
circuit_description (list[tuple]):
A list of operations describing the circuit. Each operation
is a tuple of objects describing elementary operations that
can be performed in parallel. Each elementary operation
is either the string 'pht', indicating a particle-hole
transformation on the last fermionic mode, or a tuple of
the form :math:`(i, j, \theta, \varphi)`,
indicating a Givens rotation
of modes :math:`i` and :math:`j` by angles :math:`\theta`
and :math:`\varphi`.
start_orbitals (list):
The occupied orbitals to start with. This describes the
initial state that the circuit should be applied to: it should
be a Slater determinant (in the computational basis) with these
orbitals filled.
"""
if not isinstance(quadratic_hamiltonian, QuadraticHamiltonian):
raise ValueError('Input must be an instance of QuadraticHamiltonian.')
orbital_energies, transformation_matrix, _ = (
quadratic_hamiltonian.diagonalizing_bogoliubov_transform(
spin_sector=spin_sector)
)
if quadratic_hamiltonian.conserves_particle_number:
if occupied_orbitals is None:
# The ground state is desired, so we fill the orbitals that have
# negative energy
occupied_orbitals = numpy.where(orbital_energies < 0.0)[0]
# Get the unitary rows which represent the Slater determinant
slater_determinant_matrix = transformation_matrix[occupied_orbitals]
# Get the circuit description
circuit_description = slater_determinant_preparation_circuit(
slater_determinant_matrix)
start_orbitals = range(len(occupied_orbitals))
else:
# TODO implement this
if spin_sector is not None:
raise NotImplementedError(
'Specifying spin sector for non-particle-conserving '
'Hamiltonians is not yet supported.'
)
# Rearrange the transformation matrix because the circuit generation
# routine expects it to describe annihilation operators rather than
# creation operators
n_qubits = quadratic_hamiltonian.n_qubits
left_block = transformation_matrix[:, :n_qubits]
right_block = transformation_matrix[:, n_qubits:]
# Can't use numpy.block because that requires numpy>=1.13.0
new_transformation_matrix = numpy.empty((n_qubits, 2 * n_qubits),
dtype=complex)
new_transformation_matrix[:, :n_qubits] = numpy.conjugate(right_block)
new_transformation_matrix[:, n_qubits:] = numpy.conjugate(left_block)
# Get the circuit description
decomposition, left_decomposition, diagonal, left_diagonal = (
fermionic_gaussian_decomposition(new_transformation_matrix))
if occupied_orbitals is None:
# The ground state is desired, so the circuit should be applied
# to the vaccuum state
start_orbitals = []
circuit_description = list(reversed(decomposition))
else:
start_orbitals = occupied_orbitals
# The circuit won't be applied to the ground state, so we need to
# use left_decomposition
circuit_description = list(reversed(
decomposition + left_decomposition))
return circuit_description, start_orbitals
def slater_determinant_preparation_circuit(slater_determinant_matrix):
r"""Obtain the description of a circuit which prepares a Slater determinant.
The input is an :math:`N_f \times N` matrix :math:`Q` with orthonormal
rows. Such a matrix describes the Slater determinant
.. math::
b^\dagger_1 \cdots b^\dagger_{N_f} \lvert \text{vac} \rangle,
where
.. math::
b^\dagger_j = \sum_{k = 1}^N Q_{jk} a^\dagger_k.
The output is the description of a circuit which prepares this
Slater determinant, up to a global phase.
The starting state which the circuit should be applied to
is a Slater determinant (in the computational basis) with
the first :math:`N_f` orbitals filled.
Args:
slater_determinant_matrix: The matrix :math:`Q` which describes the
Slater determinant to be prepared.
Returns:
circuit_description:
A list of operations describing the circuit. Each operation
is a tuple of elementary operations that can be performed in
parallel. Each elementary operation is a tuple of the form
:math:`(i, j, \theta, \varphi)`, indicating a Givens rotation
of modes :math:`i` and :math:`j` by angles :math:`\theta`
and :math:`\varphi`.
"""
decomposition, left_unitary, diagonal = givens_decomposition(
slater_determinant_matrix)
circuit_description = list(reversed(decomposition))
return circuit_description
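# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, runnable example of calling slater_determinant_preparation_circuit
# with a hypothetical 2 x 3 matrix whose rows are orthonormal; the result is a
# list of tuples of parallel Givens rotations of the form (i, j, theta, phi).
if __name__ == '__main__':
    example_q = numpy.array([[1.0, 0.0, 0.0],
                             [0.0, 1.0, 0.0]])
    example_circuit = slater_determinant_preparation_circuit(example_q)
    print(example_circuit)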
| {
"content_hash": "ca4491d19aa3bb391a817c65e5d2c589",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 80,
"avg_line_length": 42.67039106145251,
"alnum_prop": 0.6492537313432836,
"repo_name": "jarrodmcc/OpenFermion",
"id": "31a9f862b7897546aa2da5316a89d1cd58e3b0fc",
"size": "8201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/openfermion/utils/_slater_determinants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1370322"
},
{
"name": "Shell",
"bytes": "10029"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write('myrpcgen.py', """
import getopt
import sys
cmd_opts, args = getopt.getopt(sys.argv[1:], 'chlmo:x', [])
for opt, arg in cmd_opts:
if opt == '-o': output = open(arg, 'wb')
output.write(" ".join(sys.argv) + "\\n")
for a in args:
contents = open(a, 'rb').read()
output.write(contents.replace('RPCGEN', 'myrpcgen.py'))
output.close()
sys.exit(0)
""")
test.write('SConstruct', """\
env = Environment(RPCGEN = r'%(_python_)s myrpcgen.py',
RPCGENCLIENTFLAGS = '-x',
tools=['default', 'rpcgen'])
env.RPCGenHeader('rpcif')
env.RPCGenClient('rpcif')
env.RPCGenService('rpcif')
env.RPCGenXDR('rpcif')
""" % locals())
test.write('rpcif.x', """\
RPCGEN
""")
test.run()
output = "myrpcgen.py %s -o %s rpcif.x\nmyrpcgen.py\n"
output_clnt = "myrpcgen.py %s -x -o %s rpcif.x\nmyrpcgen.py\n"
expect_clnt = output_clnt % ('-l', test.workpath('rpcif_clnt.c'))
expect_h = output % ('-h', test.workpath('rpcif.h'))
expect_svc = output % ('-m', test.workpath('rpcif_svc.c'))
expect_xdr = output % ('-c', test.workpath('rpcif_xdr.c'))
test.must_match('rpcif_clnt.c', expect_clnt)
test.must_match('rpcif.h', expect_h)
test.must_match('rpcif_svc.c', expect_svc)
test.must_match('rpcif_xdr.c', expect_xdr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "b75ffc7ab133cc334792dd472792e9c8",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 69,
"avg_line_length": 25.317460317460316,
"alnum_prop": 0.613166144200627,
"repo_name": "azatoth/scons",
"id": "7cf0144bda4c0bd5486c2b5d40468f1ca6cb717c",
"size": "2697",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/Rpcgen/RPCGENCLIENTFLAGS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6716123"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
} |
import re
from setuptools import setup, find_packages
REQUIRES = (
'marshmallow>=2.0.0b1',
'SQLAlchemy>=0.7',
)
def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
__version__ = find_version('marshmallow_sqlalchemy/__init__.py')
def read(fname):
with open(fname) as fp:
content = fp.read()
return content
setup(
name='marshmallow-sqlalchemy',
version=__version__,
description='SQLAlchemy integration with the marshmallow (de)serialization library',
long_description=read('README.rst'),
author='Steven Loria',
author_email='sloria1@gmail.com',
url='https://github.com/marshmallow-code/marshmallow-sqlalchemy',
packages=find_packages(exclude=("test*", )),
package_dir={'marshmallow-sqlalchemy': 'marshmallow-sqlalchemy'},
include_package_data=True,
install_requires=REQUIRES,
license=read("LICENSE"),
zip_safe=False,
keywords='sqlalchemy marshmallow',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
)
| {
"content_hash": "8e3c0a5a89b24193450ce77831dd1a1d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 88,
"avg_line_length": 29.442622950819672,
"alnum_prop": 0.6119153674832962,
"repo_name": "jmcarp/marshmallow-sqlalchemy",
"id": "ec79dbdc3aab405816ecfbe9d9a26d1d6b6e2adb",
"size": "1820",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32009"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
'''
Cellaret v0.1.3 Markdown Browser & Editor
preferences.py
'''
'''
Copyright 2014 Roman Verin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import wx
from environment import CONFIG, PNG_CELLARET_24, SELECT_DIRECTORY, WORKING_DIRECTORY, BROWSER_WIDTH, BROWSER_HEIGHT, BROWSER_FONT_SIZE, PRINT_FILENAME, WEBBROWSER_OPEN_LINK, EDITOR_WIDTH, EDITOR_HEIGHT, STYLE_HIGHLIGHTING, DATETIME_FORMAT
# Cellaret Preferences (child wx.Frame)
#==============================================================================
class CellaretPreferences(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, None, size = (440, 340), title = _('Cellaret Preferences'), style = wx.CAPTION | wx.MINIMIZE_BOX | wx.CLOSE_BOX)
self.parent = parent
favicon = PNG_CELLARET_24.GetIcon()
self.SetIcon(favicon)
self.Centre()
nb = wx.Notebook(self, wx.ID_ANY, style=wx.NB_TOP)
self.main = wx.Panel(nb)
self.browser = wx.Panel(nb)
self.editor = wx.Panel(nb)
nb.AddPage(self.main, _('Main'))
nb.AddPage(self.browser, _('Browser'))
nb.AddPage(self.editor, _('Editor'))
self.main.SetFocus()
# Main Panel (Path 'General')
#=============================
wx.StaticText(self.main, wx.ID_ANY, _('Select Working directory'), (20, 20))
self.cb1SelectDirectory = wx.CheckBox(self.main, wx.ID_ANY, '', (20, 40))
self.cb1SelectDirectory.SetValue(SELECT_DIRECTORY)
self.workingDirectory = wx.TextCtrl(self.main, wx.ID_ANY, str(WORKING_DIRECTORY), (40, 40), (385, -1))
closeButton = wx.Button(self.main, wx.ID_CLOSE, pos=(225, 240))
self.okMainButton = wx.Button(self.main, wx.ID_OK, pos=(325, 240))
self.okMainButton.SetDefault()
# Browser Panel (Path 'Browser')
#================================
self.cb1Browser = wx.CheckBox(self.browser, wx.ID_ANY, _('Print Filename'), (200, 15))
self.cb2Browser = wx.CheckBox(self.browser, wx.ID_ANY, _('Open link in a web browser'), (200, 45))
self.cb1Browser.SetValue(PRINT_FILENAME)
self.cb2Browser.SetValue(WEBBROWSER_OPEN_LINK)
wx.StaticText(self.browser, wx.ID_ANY, _('Width:'), (20, 20))
wx.StaticText(self.browser, wx.ID_ANY, _('Height:'), (20, 50))
wx.StaticText(self.browser, wx.ID_ANY, _('Font size:'), (20, 80))
self.sc1Browser = wx.SpinCtrl(self.browser, wx.ID_ANY, str(BROWSER_WIDTH), (100, 15), (60, -1), min=200, max=2000)
self.sc2Browser = wx.SpinCtrl(self.browser, wx.ID_ANY, str(BROWSER_HEIGHT), (100, 45), (60, -1), min=200, max=2000)
self.sc3Browser = wx.SpinCtrl(self.browser, wx.ID_ANY, str(BROWSER_FONT_SIZE), (100, 75), (60, -1), min=8, max=24)
closeButton = wx.Button(self.browser, wx.ID_CLOSE, pos=(225, 240))
self.okBrowserButton = wx.Button(self.browser, wx.ID_OK, pos=(325, 240))
self.okBrowserButton.SetDefault()
# Editor Panel (Path 'Editor')
#==============================
self.cb1Editor = wx.CheckBox(self.editor, wx.ID_ANY, _('Style highlighting'), (200, 15))
self.cb1Editor.SetValue(STYLE_HIGHLIGHTING)
wx.StaticText(self.editor, wx.ID_ANY, _('Width:'), (20, 20))
wx.StaticText(self.editor, wx.ID_ANY, _('Height:'), (20, 50))
self.sc1Editor = wx.SpinCtrl(self.editor, wx.ID_ANY, str(EDITOR_WIDTH), (100, 15), (60, -1), min=200, max=2000)
self.sc2Editor = wx.SpinCtrl(self.editor, wx.ID_ANY, str(EDITOR_HEIGHT), (100, 45), (60, -1), min=200, max=2000)
wx.StaticText(self.editor, wx.ID_ANY, _('Format Date and Time:'), (20, 80))
self.datetimeFormat = wx.TextCtrl(self.editor, wx.ID_ANY, str(DATETIME_FORMAT), (200, 75), (225, -1))
closeButton = wx.Button(self.editor, wx.ID_CLOSE, pos=(225, 240))
self.okEditorButton = wx.Button(self.editor, wx.ID_OK, pos=(325, 240))
self.okEditorButton.SetDefault()
# Buttons
#=========
self.okMainButton.Bind(wx.EVT_BUTTON, self.OnSaveMain, id=wx.ID_OK)
self.okBrowserButton.Bind(wx.EVT_BUTTON, self.OnSaveBrowser, id=wx.ID_OK)
self.okEditorButton.Bind(wx.EVT_BUTTON, self.OnSaveEditor, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=wx.ID_CLOSE)
self.statusbar = self.CreateStatusBar()
def OnSaveMain(self, event):
CONFIG.SetPath('General')
CONFIG.WriteInt('select_directory', self.cb1SelectDirectory.GetValue())
CONFIG.Write('working_directory', self.workingDirectory.GetValue())
CONFIG.SetPath('')
self.statusbar.SetStatusText(_('Main Configuration saved. Program restart required.'))
def OnSaveBrowser(self, event):
CONFIG.SetPath('Browser')
CONFIG.WriteInt('width', self.sc1Browser.GetValue())
CONFIG.WriteInt('height', self.sc2Browser.GetValue())
CONFIG.WriteInt('font_size', self.sc3Browser.GetValue())
CONFIG.WriteInt('print_filename', self.cb1Browser.GetValue())
CONFIG.WriteInt('open_link', self.cb2Browser.GetValue())
CONFIG.SetPath('')
self.statusbar.SetStatusText(_('Browser Configuration saved. Program restart required.'))
def OnSaveEditor(self, event):
CONFIG.SetPath('Editor')
CONFIG.WriteInt('width', self.sc1Editor.GetValue())
CONFIG.WriteInt('height', self.sc2Editor.GetValue())
CONFIG.WriteInt('style_highlighting', self.cb1Editor.GetValue())
CONFIG.Write('datetime_format', self.datetimeFormat.GetValue())
CONFIG.SetPath('')
self.statusbar.SetStatusText(_('Editor Configuration saved. Program restart required.'))
def OnCancel(self, event):
self.Destroy()
| {
"content_hash": "062f9acf49ac251aa9de6e7ce31e7daa",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 238,
"avg_line_length": 43.68181818181818,
"alnum_prop": 0.6918140825528963,
"repo_name": "cellabyte/Cellaret",
"id": "1421ebed36ff1cb1bf7fa19aa82023e55142610f",
"size": "5791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/preferences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "68777"
},
{
"name": "Shell",
"bytes": "4288"
}
],
"symlink_target": ""
} |
__author__ = 'Alexander Grießer, Markus Holtermann'
__email__ = 'gieser@bitigheimer-htc.de, info@markusholtermann.eu'
__version__ = '0.0.0'
| {
"content_hash": "c8e72a06698d04927af62356f05a7d43",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 65,
"avg_line_length": 46.666666666666664,
"alnum_prop": 0.6928571428571428,
"repo_name": "MarkusH/berlin-school-data",
"id": "25501aa0e56ee7724fef4770c44d7bae90f59cef",
"size": "166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "importer/importer/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47955"
},
{
"name": "JavaScript",
"bytes": "39529"
},
{
"name": "Python",
"bytes": "15462"
}
],
"symlink_target": ""
} |
"""
GPLv3 license (ASTRA toolbox)
Note that the TomoPhantom package is released under Apache License, Version 2.0
Script to generate 2D analytical phantoms and their sinograms with added noise and artifacts
Sinograms then reconstructed using ASTRA TOOLBOX
>>>>> Dependencies (reconstruction): <<<<<
1. ASTRA toolbox: conda install -c astra-toolbox astra-toolbox
2. tomobar: conda install -c dkazanc tomobar
or install from https://github.com/dkazanc/ToMoBAR
This demo demonstrates frequent inaccuracies which are associated with X-ray imaging:
zingers, rings and noise
@author: Daniil Kazantsev
"""
import numpy as np
import matplotlib.pyplot as plt
from tomophantom import TomoP2D
import os
import tomophantom
from tomophantom.supp.qualitymetrics import QualityTools
model = 15 # select a model
N_size = 256 # set dimension of the phantom
path = os.path.dirname(tomophantom.__file__)
path_library2D = os.path.join(path, "Phantom2DLibrary.dat")
phantom_2D = TomoP2D.Model(model, N_size, path_library2D)
plt.close('all')
plt.figure(1)
plt.rcParams.update({'font.size': 21})
plt.imshow(phantom_2D, vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('{}''{}'.format('2D Phantom using model no.',model))
# create sinogram analytically
angles_num = int(0.5*np.pi*N_size); # angles number
angles = np.linspace(0.0,179.9,angles_num,dtype='float32')
angles_rad = angles*(np.pi/180.0)
P = N_size #detectors
sino_an = TomoP2D.ModelSino(model, N_size, P, angles, path_library2D)
plt.figure(2)
plt.rcParams.update({'font.size': 21})
plt.imshow(sino_an, cmap="gray")
plt.colorbar(ticks=[0, 150, 250], orientation='vertical')
plt.title('{}''{}'.format('Analytical sinogram of model no.',model))
#%%
# Adding artifacts and noise
from tomophantom.supp.artifacts import _Artifacts_
plt.close('all')
# forming dictionaries with artifact types
_noise_ = {'noise_type' : 'Poisson',
'noise_amplitude' : 10000}
# misalignment dictionary
_sinoshifts_ = {'datashifts_maxamplitude_pixel' : 10}
[noisy_sino_misalign, shifts] = _Artifacts_(sino_an, **_noise_, **_sinoshifts_)
# adding zingers and stripes
_zingers_ = {'zingers_percentage' : 2,
'zingers_modulus' : 10}
_stripes_ = {'stripes_percentage' : 0.8,
'stripes_maxthickness' : 2.0,
'stripes_intensity' : 0.25,
'stripes_type' : 'full',
'stripes_variability' : 0.002}
noisy_zing_stripe = _Artifacts_(sino_an, **_noise_, **_zingers_, **_stripes_)
plt.figure()
plt.rcParams.update({'font.size': 21})
plt.imshow(noisy_zing_stripe,cmap="gray")
plt.colorbar(ticks=[0, 150, 250], orientation='vertical')
plt.title('{}''{}'.format('Analytical noisy sinogram with artefacts.',model))
#%%
# initialise tomobar DIRECT reconstruction class ONCE
from tomobar.methodsDIR import RecToolsDIR
RectoolsDIR = RecToolsDIR(DetectorsDimH = P, # DetectorsDimH # detector dimension (horizontal)
DetectorsDimV = None, # DetectorsDimV # detector dimension (vertical) for 3D case only
CenterRotOffset = None, # Center of Rotation (CoR) scalar (for 3D case only)
AnglesVec = angles_rad, # array of angles in radians
ObjSize = N_size, # a scalar to define reconstructed object dimensions
device_projector='cpu')
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing analytical sinogram using Fourier Slice method")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
RecFourier = RectoolsDIR.FOURIER(sino_an,'linear')
plt.figure()
plt.imshow(RecFourier, vmin=0, vmax=1, cmap="BuPu")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('Fourier slice reconstruction')
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing analytical sinogram using FBP (tomobar)...")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
FBPrec_ideal = RectoolsDIR.FBP(sino_an) # ideal reconstruction
FBPrec_error = RectoolsDIR.FBP(noisy_zing_stripe) # reconstruction with artifacts
FBPrec_misalign = RectoolsDIR.FBP(noisy_sino_misalign) # reconstruction with misalignment
plt.figure()
plt.subplot(131)
plt.imshow(FBPrec_ideal, vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('Ideal FBP reconstruction')
plt.subplot(132)
plt.imshow(FBPrec_error, vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('Erroneous data FBP Reconstruction')
plt.subplot(133)
plt.imshow(FBPrec_misalign, vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('Misaligned noisy FBP Reconstruction')
plt.show()
plt.figure()
plt.imshow(abs(FBPrec_ideal-FBPrec_error), vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('FBP reconstruction differences')
#%%
# initialise tomobar ITERATIVE reconstruction class ONCE
from tomobar.methodsIR import RecToolsIR
RectoolsIR = RecToolsIR(DetectorsDimH = P, # DetectorsDimH # detector dimension (horizontal)
DetectorsDimV = None, # DetectorsDimV # detector dimension (vertical) for 3D case only
CenterRotOffset = None, # Center of Rotation (CoR) scalar (for 3D case only)
AnglesVec = angles_rad, # array of angles in radians
ObjSize = N_size, # a scalar to define reconstructed object dimensions
datafidelity='LS',# data fidelity, choose LS, PWLS (wip), GH (wip), Student (wip)
device_projector='gpu')
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing analytical sinogram using SIRT (tomobar)...")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# prepare dictionaries with parameters:
_data_ = {'projection_norm_data' : sino_an} # data dictionary
_algorithm_ = {'iterations' : 250}
SIRTrec_ideal = RectoolsIR.SIRT(_data_,_algorithm_) # ideal reconstruction
_data_ = {'projection_norm_data' : noisy_zing_stripe} # data dictionary
SIRTrec_error = RectoolsIR.SIRT(_data_,_algorithm_) # error reconstruction
plt.figure()
plt.subplot(121)
plt.imshow(SIRTrec_ideal, vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('Ideal SIRT reconstruction (ASTRA)')
plt.subplot(122)
plt.imshow(SIRTrec_error, vmin=0, vmax=3, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('Erroneous data SIRT Reconstruction (ASTRA)')
plt.show()
plt.figure()
plt.imshow(abs(SIRTrec_ideal-SIRTrec_error), vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('SIRT reconstruction differences')
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing using ADMM method (tomobar)")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# Run ADMM reconstrucion algorithm with regularisation
_data_ = {'projection_norm_data' : noisy_zing_stripe} # data dictionary
_algorithm_ = {'iterations' : 15,
'ADMM_rho_const' : 5000.0}
# adding regularisation using the CCPi regularisation toolkit
_regularisation_ = {'method' : 'PD_TV',
'regul_param' : 0.1,
'iterations' : 150,
'device_regulariser': 'gpu'}
RecADMM_reg = RectoolsIR.ADMM(_data_, _algorithm_, _regularisation_)
plt.figure()
plt.imshow(RecADMM_reg, vmin=0, vmax=2, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 3], orientation='vertical')
plt.title('ADMM reconstruction')
plt.show()
# calculate errors
Qtools = QualityTools(phantom_2D, RecADMM_reg)
RMSE_ADMM_reg = Qtools.rmse()
print("RMSE for regularised ADMM is {}".format(RMSE_ADMM_reg))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing using FISTA method (tomobar)")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# prepare dictionaries with parameters:
_data_ = {'projection_norm_data' : noisy_zing_stripe,
'huber_threshold' : 4.5,
'ring_weights_threshold' : 10.0,
'ring_tuple_halfsizes': (9,5,0)
} # data dictionary
lc = RectoolsIR.powermethod(_data_) # calculate Lipschitz constant (run once to initialise)
_algorithm_ = {'iterations' : 350,
'lipschitz_const' : lc}
# adding regularisation using the CCPi regularisation toolkit
_regularisation_ = {'method' : 'PD_TV',
'regul_param' : 0.001,
'iterations' : 150,
'device_regulariser': 'gpu'}
# Run FISTA reconstrucion algorithm with regularisation
RecFISTA_reg = RectoolsIR.FISTA(_data_, _algorithm_, _regularisation_)
plt.figure()
plt.imshow(RecFISTA_reg, vmin=0, vmax=2, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 2], orientation='vertical')
plt.title('TV-Regularised FISTA reconstruction')
plt.show()
# calculate errors
Qtools = QualityTools(phantom_2D, RecFISTA_reg)
RMSE_FISTA_reg = Qtools.rmse()
print("RMSE for regularised FISTA is {}".format(RMSE_FISTA_reg))
#%%
from tomophantom.supp.artifacts import _Artifacts_
# forming dictionaries with artefact types
_noise_ = {'noise_type' : 'Poisson',
'noise_sigma' : 200000, # noise amplitude
'noise_seed' : 0}
# partial volume effect dictionary
_pve_ = {'pve_strength' : 1}
_fresnel_propagator_ = {'fresnel_dist_observation' : 10,
'fresnel_scale_factor' : 10,
'fresnel_wavelenght' : 0.003}
noisy_sino_pve = _Artifacts_(sino_an, **_noise_, **_pve_)
FBPrec_pve = RectoolsDIR.FBP(noisy_sino_pve)
plt.figure()
plt.imshow(FBPrec_pve, vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('FBP reconstruction from PVE sinogram')
plt.show()
#%%
from tomophantom.supp.artifacts import _Artifacts_
# forming dictionaries with artefact types
_noise_ = {'noise_type' : 'Poisson',
'noise_amplitude' : 200000, # noise amplitude
'noise_seed' : 0}
_fresnel_propagator_ = {'fresnel_dist_observation' : 20,
'fresnel_scale_factor' : 10,
'fresnel_wavelenght' : 0.003}
noisy_sino_fresnel = _Artifacts_(sino_an, **_noise_, **_fresnel_propagator_)
FBPrec_fresnel = RectoolsDIR.FBP(noisy_sino_fresnel)
plt.figure()
plt.imshow(FBPrec_fresnel , vmin=0, vmax=1, cmap="gray")
plt.colorbar(ticks=[0, 0.5, 1], orientation='vertical')
plt.title('FBP reconstruction from sinogram with Fresnel propagator')
plt.show()
#%%
| {
"content_hash": "6b56edfb825478b8579077235f7d9658",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 107,
"avg_line_length": 40.50950570342205,
"alnum_prop": 0.6466116012765158,
"repo_name": "dkazanc/TomoPhantom",
"id": "624be080e454eac0374e62c6bedb7ee9fbfa9ed0",
"size": "10701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Demos/Python/2D/Recon_artifacts_ASTRA.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "456"
},
{
"name": "C",
"bytes": "246720"
},
{
"name": "CMake",
"bytes": "13381"
},
{
"name": "Cython",
"bytes": "42752"
},
{
"name": "MATLAB",
"bytes": "12438"
},
{
"name": "Python",
"bytes": "53351"
},
{
"name": "Shell",
"bytes": "1697"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import lxmls.classifiers.linear_classifier as lc
from lxmls.util.my_math_utils import *
class Mira(lc.LinearClassifier):
def __init__(self, nr_rounds=10, regularizer=1.0, averaged=True):
lc.LinearClassifier.__init__(self)
self.trained = False
self.nr_rounds = nr_rounds
self.regularizer = regularizer
self.params_per_round = []
self.averaged = averaged
def train(self, x, y, seed=1):
self.params_per_round = []
x_orig = x[:, :]
x = self.add_intercept_term(x)
nr_x, nr_f = x.shape
nr_c = np.unique(y).shape[0]
w = np.zeros((nr_f, nr_c))
for round_nr in range(self.nr_rounds):
for nr in range(nr_x):
# use seed to generate permutation
np.random.seed(seed)
# generate a permutation on length nr_x
perm = np.random.permutation(nr_x)
# change the seed so next epoch we don't get the same permutation
seed += 1
inst = perm[nr]
scores = self.get_scores(x[inst:inst+1, :], w)
y_true = y[inst:inst+1, 0]
y_hat = self.get_label(x[inst:inst+1, :], w)
true_margin = scores[:, y_true]
predicted_margin = scores[:, y_hat]
dist = np.abs(y_true - y_hat)
# # Compute loss
loss = predicted_margin - true_margin + dist
# Compute stepsize
if y_hat != y_true:
if predicted_margin == true_margin:
stepsize = 1 / self.regularizer
else:
# stepsize = np.min([1/self.agress,loss/l2norm_squared(true_margin-predicted_margin)])
stepsize = np.min([1/self.regularizer, loss/l2norm_squared(x[inst:inst+1])])
w[:, y_true] += stepsize * x[inst:inst+1, :].transpose()
w[:, y_hat] -= stepsize * x[inst:inst+1, :].transpose()
self.params_per_round.append(w.copy())
self.trained = True
y_pred = self.test(x_orig, w)
acc = self.evaluate(y, y_pred)
self.trained = False
print("Rounds: %i Accuracy: %f" % (round_nr, acc))
self.trained = True
if self.averaged:
new_w = 0
for old_w in self.params_per_round:
new_w += old_w
new_w /= len(self.params_per_round)
return new_w
return w
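# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch, assuming x is an (n_examples, n_features) numpy array and
# y is an (n_examples, 1) array of integer class labels, which is the shape
# train() indexes into above. Kept as comments because no dataset is defined
# in this module; test() and evaluate() come from the LinearClassifier base.
#
#     mira = Mira(nr_rounds=10, regularizer=1.0, averaged=True)
#     w = mira.train(x, y)
#     y_pred = mira.test(x, w)
#     accuracy = mira.evaluate(y, y_pred)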
| {
"content_hash": "07c1b8b69c4988e9aabf050f4d7d9cc8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 110,
"avg_line_length": 37.492753623188406,
"alnum_prop": 0.5048318515655199,
"repo_name": "LxMLS/lxmls-toolkit",
"id": "bf6ee44ea827a1182c5d57751c24dc0b4dafef38",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lxmls/classifiers/mira.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "168589"
},
{
"name": "Perl",
"bytes": "19058"
},
{
"name": "Python",
"bytes": "386710"
},
{
"name": "Shell",
"bytes": "865"
}
],
"symlink_target": ""
} |
from sqlalchemy import Table, Column, MetaData, Integer
from nova import log as logging
new_columns = [
Column('free_ram_mb', Integer()),
Column('free_disk_gb', Integer()),
Column('current_workload', Integer()),
Column('running_vms', Integer()),
]
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta = MetaData()
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
for column in new_columns:
compute_nodes.create_column(column)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
for column in new_columns:
        compute_nodes.drop_column(column)
| {
"content_hash": "4439808623b406e0888b904731fe6d8e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 26.64516129032258,
"alnum_prop": 0.6767554479418886,
"repo_name": "rcbops/nova-buildpackage",
"id": "2b5dd09ad42a7efc566496d0d75eb713f91bc8a6",
"size": "1433",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5412903"
},
{
"name": "Shell",
"bytes": "24506"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class MorphologyExtractNPResponse(object):
"""
NOTE: This class is auto generated by the systran code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Systran model
:param dict systran_types: The key is attribute name and the value is attribute type.
:param dict attribute_map: The key is attribute name and the value is json key in definition.
"""
self.systran_types = {
'chunks': 'list[ChunkAnnotation]'
}
self.attribute_map = {
'chunks': 'chunks'
}
# List of extracted chunks
self.chunks = None # list[ChunkAnnotation]
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'systran_types' and p != 'attribute_map':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
| {
"content_hash": "dd18e4cf4a7fa3c947e3ba997aed6996",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 101,
"avg_line_length": 32.450980392156865,
"alnum_prop": 0.6259818731117824,
"repo_name": "SYSTRAN/nlp-api-python-client",
"id": "6004b9a3ed7b4fdc7972b691bbd58d925dd79259",
"size": "1694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "systran_nlp_api/models/morphology_extract_np_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "129963"
}
],
"symlink_target": ""
} |
from corehq.preindex import CouchAppsPreindexPlugin
CouchAppsPreindexPlugin.register('mvp_apps', __file__)
| {
"content_hash": "d6020f7d14b4f84fe01f52cbc28872de",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 54,
"avg_line_length": 36,
"alnum_prop": 0.8240740740740741,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "667a49b03295a5ac1c0a390c61a6f4918478df14",
"size": "108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/_legacy/mvp_apps/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
from vilya.libs import api_errors
from quixote.errors import TraversalError
from vilya.views.api.utils import jsonize
from ellen.utils import JagareError
class FileResolveUI(object):
_q_exports = []
def __init__(self, ret):
self._ret = ret
def __call__(self, request):
return self._ret
def _q_index(self, request):
return self._ret
def _q_lookup(self, request, path):
return self
class FilesUI(object):
_q_exports = []
def __init__(self, request, project):
self._project = project
def __call__(self, request):
return self._index(request)
def q_index(self, request):
return self._index(request)
@jsonize
def _index(self, request, path=None):
project = self._project
ref = request.get_form_var('ref') or project.default_branch
path = path or request.get_form_var('path') or ''
tree = None
file_ = None
type_ = None
readme = None
try:
recursive = bool(request.get_form_var('recursive'))
tree = project.repo.get_tree(ref, path, recursive=recursive,
recursive_with_tree_node=True)
type_ = "tree"
except JagareError, e:
if "Reference not found" in str(e):
raise api_errors.NotFoundError
else:
raise e
except TypeError:
file_ = project.repo.get_file(ref, path)
type_ = "blob"
except KeyError:
submodule = project.repo.get_submodule(ref, path)
if submodule:
return submodule.as_dic()
if type_ == "tree":
if isinstance(tree, basestring):
raise TraversalError("Got a blob instead of a tree")
for item in tree:
if item['type'] == "blob" and item['name'].startswith('README'): # noqa
readme = project.repo.get_file(ref, item["path"]).data
break
elif not file_:
raise TraversalError("file not found")
dic = {"ref": ref,
"type": type_,
"content": (tree.entries if type_ == "tree" \
else file_.data.encode('utf8'))}
if readme:
dic["readme"] = readme
return dic
def _q_lookup(self, request, path):
path = request.get_path().decode('utf8')
path_components = path.split('/')
path_components.reverse()
while path_components and path_components.pop() != "files":
continue
path_components.reverse()
path = '/'.join(path_components)
return FileResolveUI(self._index(request, path))
| {
"content_hash": "5822d520c2d1d21397fd360d2f38427f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 88,
"avg_line_length": 30.252747252747252,
"alnum_prop": 0.5412277515437705,
"repo_name": "douban/code",
"id": "98ab886e74802abce750ed3fc106cfde9d0c616c",
"size": "2753",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vilya/views/api/files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7956218"
},
{
"name": "HTML",
"bytes": "548630"
},
{
"name": "JavaScript",
"bytes": "7771620"
},
{
"name": "Makefile",
"bytes": "568"
},
{
"name": "Mako",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "1486693"
},
{
"name": "Shell",
"bytes": "61416"
}
],
"symlink_target": ""
} |
import sys
import os
from ratings import *
# import builtin
# urls = [ "http://127.0.0.1:%d/ratings" % port for port in [ 8001 ] ]
class ratingsListener (object):
def __init__(self, start):
self.start = start
def onFuture(self, future):
end = os.times()[4]
elapsed = (end - start) * 1000
print("in onFuture after %dms, rating %s is %s" %
(elapsed, future.thingID, future.rating))
ratings = RatingsClient("ratings", MyHub())
ratings.setTimeout(5000)
print("Grabbing 100 ratings")
start = os.times()[4]
futures = [ ratings.get(str(i)) for i in range(100) ]
first = futures[98].rating
print("Before waiting, rating 98 was %s" % first)
futures[98].onFinished(ratingsListener(start))
resolutions = [ future.await(1000) for future in futures ]
end = os.times()[4]
elapsed = (end - start) * 1000
print("After waiting %dms, rating 98 is %s" % (elapsed, futures[98].rating))
for i in range(100):
future = futures[i]
if future.getError():
print("%d: failed! %s" % (i, future.getError()))
elif (future.thingID != i) or (future.rating != (i % 5)):
print("%3d: wrong! %s, %s" % (i, future.thingID, future.rating))
| {
"content_hash": "5a48c94fe1e6bcd4a8d0a6ee72fcd2ef",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 24.72340425531915,
"alnum_prop": 0.6445783132530121,
"repo_name": "datawire/datawire-connect",
"id": "e49b5592f47ea7cc25b3e4b427293d2d61443de6",
"size": "1172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/market/ratings/ratingsTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1163"
},
{
"name": "JavaScript",
"bytes": "2052"
},
{
"name": "Kotlin",
"bytes": "1520"
},
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Python",
"bytes": "4789"
}
],
"symlink_target": ""
} |
import asyncio
import pytest
from spinach.queuey import Queuey
def test_sync():
q = Queuey(2)
q.put_sync(1)
q.put_sync(2)
assert len(q._items) == 2
assert len(q._putters) == 0
assert len(q._getters) == 0
assert q.get_sync() == 1
assert q.get_sync() == 2
assert len(q._items) == 0
assert len(q._putters) == 0
assert len(q._getters) == 0
def test_async():
q = Queuey(2)
loop = asyncio.get_event_loop()
loop.run_until_complete(q.put_async(1))
loop.run_until_complete(q.put_async(2))
assert len(q._items) == 2
assert len(q._putters) == 0
assert len(q._getters) == 0
assert loop.run_until_complete(q.get_async()) == 1
assert loop.run_until_complete(q.get_async()) == 2
assert len(q._items) == 0
assert len(q._putters) == 0
assert len(q._getters) == 0
def test_noblock():
q = Queuey(1)
item, future_get = q._get_noblock()
assert item is None
assert future_get is not None
assert future_get.done() is False
future_put = q._put_noblock(1)
assert future_put is None
assert future_get.done() is True
assert future_get.result() == 1
future_put = q._put_noblock(2)
assert future_put is None
future_put = q._put_noblock(3)
assert future_put is not None
assert future_put.done() is False
item, future_get = q._get_noblock()
assert item == 2
assert future_get is None
assert future_put.done() is True
item, future_get = q._get_noblock()
assert item == 3
assert future_get is None
def test_max_unfinished_queue():
q = Queuey(maxsize=2)
assert q.empty()
assert q.available_slots() == 2
q.put_sync(None)
assert not q.full()
assert not q.empty()
assert q.available_slots() == 1
q.put_sync(None)
assert q.full()
assert q.available_slots() == 0
q.get_sync()
assert q.full()
assert q.available_slots() == 0
q.task_done()
assert not q.full()
assert q.available_slots() == 1
q.get_sync()
assert not q.empty()
assert q.available_slots() == 1
q.task_done()
assert q.empty()
assert q.available_slots() == 2
def test_too_many_task_done():
q = Queuey(10)
with pytest.raises(ValueError):
q.task_done()
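# --- Illustrative usage sketch (not part of the original test module) ---
# A minimal sketch of the pattern these tests exercise: a synchronous producer
# feeding an asyncio consumer through the same Queuey instance. Kept as
# comments; it only uses methods already exercised above.
#
#     q = Queuey(maxsize=10)
#     q.put_sync('job')              # e.g. called from a worker thread
#
#     async def consumer():
#         item = await q.get_async() # awaited inside the event loop
#         process(item)              # hypothetical handler
#         q.task_done()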
| {
"content_hash": "ce2c3058e7d519e25bd16a409e61cba9",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 54,
"avg_line_length": 22.323529411764707,
"alnum_prop": 0.6047430830039525,
"repo_name": "NicolasLM/spinach",
"id": "934ef30abe66a70acdf1e53a5e58f3f5b94dae58",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queuey.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Lua",
"bytes": "12307"
},
{
"name": "Python",
"bytes": "145519"
}
],
"symlink_target": ""
} |
import copy
import mock
from nose import tools
from pylcp import api
from pylcp.crud import offers
from tests.crud import base as test_base
class TestOfferSetCRUD(object):
def setup(self):
self.mock_client = mock.create_autospec(api.Client)
self.offer_set_crud = offers.OfferSet(self.mock_client)
self.member_details = {
"firstName": "Frank",
"lastName": "Caron",
"memberId": "123123",
"membershipLevel": "G1",
"balance": 1234,
"memberValidation": "https://lcp.points.com/v1/lps/1/mvs/3"
}
self.session = {'channel': "storefront",
'referralCode': 'abc',
'clientIpAddress': '127.0.0.1',
'clientUserAgent': "FAST-WebCrawler/3.x Multimedia"}
self.offer_types = ["BUY", "GIFT"]
self.expected_payload = {'offerTypes': ['BUY', 'GIFT'],
'session': {'channel': 'storefront',
'clientIpAddress': '127.0.0.1',
'clientUserAgent': 'FAST-WebCrawler/3.x Multimedia',
'referralCode': 'abc'},
'user': {'balance': 1234,
'firstName': 'Frank',
'lastName': 'Caron',
'memberId': '123123',
'memberValidation': 'https://lcp.points.com/v1/lps/1/mvs/3',
'membershipLevel': 'G1'}}
def test_create_offerset_with_recipient(self):
recipient_details = copy.deepcopy(self.member_details)
recipient_details['memberId'] = '456456'
mocked_response = test_base.mock_response(headers={}, body=test_base.SAMPLE_RESPONSE)
self.mock_client.post.return_value = mocked_response
expected_payload = copy.deepcopy(self.expected_payload)
expected_payload.update({'recipient': recipient_details})
response = self.offer_set_crud.create(self.offer_types,
self.session,
self.member_details,
recipient_details)
tools.assert_equal(1, self.mock_client.post.call_count)
self.mock_client.post.assert_called_with('/offer-sets/', data=expected_payload, params=None)
test_base.assert_lcp_resource(mocked_response, response)
def test_create_offerset_without_recipient(self):
mocked_response = test_base.mock_response(headers={}, body=test_base.SAMPLE_RESPONSE)
self.mock_client.post.return_value = mocked_response
response = self.offer_set_crud.create(self.offer_types, self.session, self.member_details)
tools.assert_equal(1, self.mock_client.post.call_count)
self.mock_client.post.assert_called_with('/offer-sets/', data=self.expected_payload, params=None)
test_base.assert_lcp_resource(mocked_response, response)
def test_create_offer_set_with_optional_kwargs_add_top_level_params(self):
mocked_response = test_base.mock_response(headers={}, body=test_base.SAMPLE_RESPONSE)
self.mock_client.post.return_value = mocked_response
additional_params = {'a': 'b', 'c': 'd'}
response = self.offer_set_crud.create(self.offer_types,
self.session,
self.member_details,
**additional_params)
tools.assert_equal(1, self.mock_client.post.call_count)
expected_payload = copy.deepcopy(self.expected_payload)
expected_payload.update(additional_params)
self.mock_client.post.assert_called_with('/offer-sets/', data=expected_payload, params=None)
test_base.assert_lcp_resource(mocked_response, response)
def test_format_payload_with_recipient(self):
recipient_details = copy.deepcopy(self.member_details)
recipient_details['memberId'] = '456456'
expected_payload = {'offerTypes': self.offer_types,
'session': self.session,
'user': self.member_details,
'recipient': recipient_details}
payload = self.offer_set_crud._create_payload(self.offer_types,
self.session,
self.member_details,
recipient_details)
tools.assert_equal(expected_payload, payload)
def test_format_payload_no_recipient(self):
expected_payload = {'offerTypes': self.offer_types,
'session': self.session,
'user': self.member_details}
payload = self.offer_set_crud._create_payload(self.offer_types, self.session, self.member_details, None)
tools.assert_equal(expected_payload, payload)
def test_format_payload_with_optional_kwargs_adds_parameters_to_top_level(self):
additional_params = {'a': 'b', 'c': 'd'}
expected_payload = {'offerTypes': self.offer_types,
'session': self.session,
'user': self.member_details,
'a': 'b',
'c': 'd'}
payload = self.offer_set_crud._create_payload(self.offer_types,
self.session,
self.member_details,
None,
**additional_params)
tools.assert_equal(expected_payload, payload)
| {
"content_hash": "4b3ee9573ffce6f5804d286897d29f9d",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 112,
"avg_line_length": 46.95275590551181,
"alnum_prop": 0.5217172564145565,
"repo_name": "bradsokol/PyLCP",
"id": "753ce1bed2a12902f68777db9ae0b2a09cc03237",
"size": "5963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/crud/test_offers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "75098"
},
{
"name": "Shell",
"bytes": "851"
}
],
"symlink_target": ""
} |
import SCons.SConf
hdf5_fortran_prog_src = """
program HDF5_Test
use hdf5
end program HDF5_Test
"""
def CheckProg(context, prog_name):
"""
This function is from the latest version of SCons to support
older SCons version.
Configure check for a specific program.
Check whether program prog_name exists in path. If it is found,
returns the path for it, otherwise returns None.
"""
context.Message("Checking whether %s program exists..." % prog_name)
path = context.env.WhereIs(prog_name)
context.Result(bool(path))
return path
def CheckLibWithHeader(context, libs, header, language,
call = None, extra_libs = None, autoadd = 1):
"""
This function is from SCons but extended with additional flags, e.g.
the extra_libs.
Another (more sophisticated) test for a library.
Checks, if library and header is available for language (may be 'C'
or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
SCons.SConf.createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, extra_libs = extra_libs,
autoadd = autoadd)
context.did_show_result = 1
return not res
def CheckHDF5FortranInclude(context):
context.Message("Checking for Fortran HDF5 module... ")
ret = context.TryCompile(hdf5_fortran_prog_src, '.f90')
context.Result(ret)
return ret
def CheckV18API(context, h5cc):
context.Message('Checking for HDF5 v18 API... ')
ret = context.TryAction(h5cc + ' -showconfig | fgrep v18')[0]
context.Result(ret)
return ret
def generate(env, required = False, parallel = False, fortran = False, **kw):
if env.GetOption('help') or env.GetOption('clean'):
return
conf = env.Configure(custom_tests = {'CheckProg': CheckProg,
'CheckLibWithHeader': CheckLibWithHeader,
'CheckHDF5FortranInclude' : CheckHDF5FortranInclude,
'CheckV18API' : CheckV18API})
# Find h5cc or h5pcc
h5ccs = ['h5cc', 'h5pcc']
if parallel:
h5ccs[0], h5ccs[1] = h5ccs[1], h5ccs[0]
for h5cc in h5ccs:
h5cc = conf.CheckProg(h5cc)
if h5cc:
break
if not h5cc:
if required:
print 'Could not find h5cc or h5pcc. Make sure the path to the HDF5 library is correct!'
env.Exit(1)
else:
conf.Finish()
return
# Parse the output from the h5cc compiler wrapper
def parse_func(env, cmd):
# remove the compiler
cmd = cmd.partition(' ')[2]
return env.ParseFlags(cmd)
flags = env.ParseConfig([h5cc, '-show', '-shlib'], parse_func)
# Add the lib path
env.AppendUnique(LIBPATH=flags['LIBPATH'])
if fortran:
# Fortran module file
ret = conf.CheckHDF5FortranInclude()
if not ret:
if required:
print 'Could not find HDF5 for Fortran!'
env.Exit(1)
else:
conf.Finish()
return
# Fortran library
ret = conf.CheckLib('hdf5_fortran')
if not ret:
if required:
print 'Could not find HDF5 for Fortran!'
env.Exit(1)
else:
conf.Finish()
return
ret = conf.CheckLibWithHeader(flags['LIBS'][0], 'hdf5.h', 'c', extra_libs=flags['LIBS'][1:])
if not ret:
if required:
print 'Could not find the HDF5 library!'
env.Exit(1)
else:
conf.Finish()
return
# Check API Mapping
if not conf.CheckV18API(h5cc):
# TODO We might need to extent this list
conf.env.Append(CPPDEFINES=['H5Dcreate_vers=2',
'H5Dopen_vers=2',
'H5Gcreate_vers=2',
'H5Gopen_vers=2',
'H5Acreate_vers=2',
'H5Eget_auto_vers=2',
'H5Eset_auto_vers=2'])
conf.Finish()
def exists(env):
return True | {
"content_hash": "18aa96542b2777a6f8a1bbb8ab8cb90b",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 100,
"avg_line_length": 31.86013986013986,
"alnum_prop": 0.5581650570676031,
"repo_name": "TUM-I5/XdmfWriter",
"id": "ac726532cd3aadf27b13349b9837e2489d7bc65a",
"size": "6152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/site_scons/site_tools/Hdf5Tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3218"
},
{
"name": "C++",
"bytes": "124969"
},
{
"name": "CMake",
"bytes": "3343"
},
{
"name": "Python",
"bytes": "20195"
}
],
"symlink_target": ""
} |
from localq import LocalQServer, Status
from arteria.web.state import State as arteria_state
class JobRunnerAdapter:
"""
Specifies interface that should be used by jobrunners.
"""
def start(self, cmd, nbr_of_cores, run_dir, stdout=None, stderr=None):
"""
Start a job corresponding to cmd
:param cmd: to run
:param nbr_of_cores: cores the job needs
:param run_dir: where to run the job
:param stdout: Reroute stdout to here
:param stderr: Reroute stderr to here
:return: the jobid associated with it (None on failure).
"""
raise NotImplementedError("Subclasses should implement this!")
def stop(self, job_id):
"""
Stop job with job_id
:param job_id: of job to stop
:return: the job_id of the stopped job, or None if not found.
"""
raise NotImplementedError("Subclasses should implement this!")
def stop_all(self):
"""
Stop all jobs
:return: Nothing
"""
raise NotImplementedError("Subclasses should implement this!")
def status(self, job_id):
"""
Status of job with id
:param job_id: to get status for.
:return: It's status
"""
raise NotImplementedError("Subclasses should implement this!")
def status_all(self):
"""
Get status for all jobs
:param job_id: to get status for.
:return: A dict containing all jobs with job_id as key and status as value.
"""
raise NotImplementedError("Subclasses should implement this!")
class LocalQAdapter(JobRunnerAdapter):
"""
An implementation of `JobRunnerAdapter` running jobs through
localq (a jobrunner which will schedule jobs on a single node).
"""
@staticmethod
def localq2arteria_status(status):
"""
Convert a localq status to an arteria state
:param status: to convert
:return: the arteria state
"""
if status == Status.COMPLETED:
return arteria_state.DONE
elif status == Status.FAILED:
return arteria_state.ERROR
elif status == Status.PENDING:
return arteria_state.PENDING
elif status == Status.RUNNING:
return arteria_state.STARTED
elif status == Status.CANCELLED:
return arteria_state.CANCELLED
elif status == Status.NOT_FOUND:
return arteria_state.NONE
else:
return arteria_state.NONE
# TODO Make configurable
def __init__(self, nbr_of_cores, interval = 30, priority_method = "fifo"):
self.nbr_of_cores = nbr_of_cores
self.server = LocalQServer(nbr_of_cores, interval, priority_method)
self.server.run()
def start(self, cmd, nbr_of_cores, run_dir, stdout=None, stderr=None):
return self.server.add(cmd, nbr_of_cores, run_dir, stdout=stdout, stderr=stderr)
def stop(self, job_id):
return self.server.stop_job_with_id(job_id)
def stop_all(self):
return self.server.stop_all_jobs()
def status(self, job_id):
return LocalQAdapter.localq2arteria_status(self.server.get_status(job_id))
def status_all(self):
jobs_and_status = {}
for k, v in self.server.get_status_all().iteritems():
jobs_and_status[k] = LocalQAdapter.localq2arteria_status(v)
return jobs_and_status
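# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of driving the adapter defined above; the command and run
# directory are hypothetical placeholders, so the calls are kept as comments.
#
#     adapter = LocalQAdapter(nbr_of_cores=4)
#     job_id = adapter.start("echo hello", nbr_of_cores=1, run_dir="/tmp")
#     print(adapter.status(job_id))    # localq status mapped to an arteria state
#     print(adapter.status_all())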
| {
"content_hash": "db99b712c2945eb17d244484cec16c6f",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 88,
"avg_line_length": 33.09615384615385,
"alnum_prop": 0.6194073213248111,
"repo_name": "johandahlberg/arteria-bcl2fastq",
"id": "ae4952c5e8727ebde49ef2d881c14dfb5cf6063a",
"size": "3442",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bcl2fastq/lib/jobrunner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71168"
}
],
"symlink_target": ""
} |
'''common utility modules'''
__all__ = ['metrics', 'misc', 'topology', 'config', 'tracker_access',
'system_constants', 'system_config', 'tuple', 'proc', 'log']
| {
"content_hash": "643fa02fa43db9b4d72b7649f52fbc56",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 71,
"avg_line_length": 57,
"alnum_prop": 0.5847953216374269,
"repo_name": "mycFelix/heron",
"id": "223a4dd3b3a02b19a390923b133373c66e2b07c0",
"size": "956",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heron/instance/src/python/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14063"
},
{
"name": "C++",
"bytes": "1723731"
},
{
"name": "CSS",
"bytes": "77708"
},
{
"name": "HCL",
"bytes": "5314"
},
{
"name": "HTML",
"bytes": "39432"
},
{
"name": "Java",
"bytes": "4888188"
},
{
"name": "JavaScript",
"bytes": "1107904"
},
{
"name": "M4",
"bytes": "18741"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "2143"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1696662"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "130046"
},
{
"name": "Shell",
"bytes": "207639"
},
{
"name": "Smarty",
"bytes": "528"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from examples.concerts.policy import ConcertPolicy
from rasa_core.agent import Agent
from rasa_core.policies.memoization import MemoizationPolicy
if __name__ == '__main__':
logging.basicConfig(level="INFO")
training_data_file = 'examples/concerts/data/stories.md'
model_path = 'examples/concerts/models/policy/init'
agent = Agent("examples/concerts/concert_domain.yml",
policies=[MemoizationPolicy(), ConcertPolicy()])
agent.train(
training_data_file,
augmentation_factor=50,
max_history=2,
epochs=500,
batch_size=10,
validation_split=0.2
)
agent.persist(model_path)
| {
"content_hash": "aaf53233266c6efed862ca228a87152c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 28.533333333333335,
"alnum_prop": 0.6682242990654206,
"repo_name": "deepak02/rasa_core",
"id": "abfb823b920a3b34c658c5f15ed8e082cec22b83",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/concerts/train_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "273438"
}
],
"symlink_target": ""
} |
import torch
from torch import nn
from pose_format.torch.masked.tensor import MaskedTensor
from pose_format.torch.masked.torch import MaskedTorch
class DistanceRepresentation(nn.Module):
def distance(self, p1s: MaskedTensor, p2s: MaskedTensor) -> MaskedTensor:
diff = p1s - p2s # (..., Len, Dims)
square = diff.pow_(2)
sum_squares = square.sum(dim=-1)
return MaskedTorch.sqrt(sum_squares)
def forward(self, p1s: MaskedTensor, p2s: MaskedTensor) -> torch.Tensor:
"""
Euclidean distance between two points
:param p1s: MaskedTensor (Points, Batch, Len, Dims)
:param p2s: MaskedTensor (Points, Batch, Len, Dims)
:return: torch.Tensor (Points, Batch, Len)
"""
return self.distance(p1s, p2s).zero_filled()
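# Illustrative usage sketch (inputs are assumed to be MaskedTensor instances
# shaped as in the docstring above):
#
#   rep = DistanceRepresentation()
#   distances = rep(p1s, p2s)  # torch.Tensor of shape (Points, Batch, Len)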
| {
"content_hash": "1e98e9b0f4d218fb1f790301ae9a7f05",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 36.40909090909091,
"alnum_prop": 0.66167290886392,
"repo_name": "AmitMY/pose-format",
"id": "b839b138dae806f31e7189778d6e3d782a474e8e",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pose_format/torch/representation/distance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "294"
},
{
"name": "HTML",
"bytes": "3186"
},
{
"name": "Python",
"bytes": "167290"
},
{
"name": "Starlark",
"bytes": "10118"
},
{
"name": "TypeScript",
"bytes": "22828"
}
],
"symlink_target": ""
} |
import socket
import time
from api_tools.device import Device
from api_tools.ini_parser import Config
def backup():
# Get data from config
general = Config().get_general()
ftp = Config().get_ftp()
devices = Config().get_devices()
for dev in devices:
device = Device(dev['host'], 8728, dev['username'], dev['password'])
# Get date
now = time.strftime("%d-%m-%Y")
# Get hostname
dev_identity = device.execute(["/system/identity/print"])['name']
# Set fullname
backup_fullname = "{0}-{1}".format(dev_identity, now)
# Create backup file
device.execute(["/system/backup/save",
"=dont-encrypt=yes",
"=name={0}".format(backup_fullname)])
# Sleep while device working
time.sleep(1)
# Upload backup to ftp
device.execute(["/tool/fetch",
"=upload=yes",
"=address={0}".format(ftp['host']),
"=port={0}".format(ftp['port']),
"=user={0}".format(ftp['username']),
"=password={0}".format(ftp['password']),
"=mode=ftp",
"=src-path={0}.backup".format(backup_fullname),
"=dst-path={0}/{1}.backup".format(dev['dst-path'], backup_fullname)])
# Sleep while device working
time.sleep(1)
# Remove backup file
device.execute(["/file/remove",
"=numbers={0}.backup".format(backup_fullname)])
# Close socket
# device.close()
if __name__ == '__main__':
backup()
| {
"content_hash": "ef5d4d293aafc3c834fb62b6a48c67f5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 93,
"avg_line_length": 29.120689655172413,
"alnum_prop": 0.4997039668442866,
"repo_name": "voronovim/mikrotik-api-tools",
"id": "3b34ac2246b0d43a9390f69685059f8036ff1e65",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api_tools/backup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14848"
}
],
"symlink_target": ""
} |
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-06
Last_modify: 2016-03-06
******************************************
'''
'''
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along
the longest path from the root node down to the farthest leaf node.
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
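# Quick illustrative check (not part of the original solution):
#
#   root = TreeNode(1)
#   root.left = TreeNode(2)
#   root.left.left = TreeNode(3)
#   Solution().maxDepth(root)  # -> 3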
| {
"content_hash": "6e2e2605bed931320fd19aca045c7df7",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 24.242424242424242,
"alnum_prop": 0.535,
"repo_name": "zhlinh/leetcode",
"id": "27aed58214bdc2a6c91e3e639e77bf298cbbaacb",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0104.Maximum Depth of Binary Tree/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "478111"
}
],
"symlink_target": ""
} |
from openerp import tools
from openerp import models,fields,api
from openerp.tools.translate import _
class is_comparatif_tarif_facture(models.Model):
_name='is.comparatif.tarif.facture'
_order='invoice_id,product_id'
_auto = False
invoice_id = fields.Many2one('account.invoice', 'Facture')
invoice_date = fields.Date('Date Facture')
order_id = fields.Many2one('sale.order', 'Commande')
partner_id = fields.Many2one('res.partner', 'Client')
pricelist_id = fields.Many2one('product.pricelist', 'Liste de prix')
product_id = fields.Many2one('product.template', 'Article')
quantity = fields.Float('Quantité')
uos_id = fields.Many2one('product.uom', 'Unité')
invoice_price = fields.Float('Prix facture')
pricelist_price = fields.Float('Prix liste de prix')
price_delta = fields.Float('Ecart de prix')
lot_livraison = fields.Float('Lot de livraison')
prix_lot_livraison = fields.Float('Prix au lot de livraison')
def init(self, cr):
tools.drop_view_if_exists(cr, 'is_comparatif_tarif_facture')
cr.execute("""
CREATE OR REPLACE FUNCTION is_prix_vente(pricelistid integer, productid integer, qt float, date date) RETURNS float AS $$
BEGIN
RETURN (
select price_surcharge
from product_pricelist ppl inner join product_pricelist_version ppv on ppv.pricelist_id=ppl.id
inner join product_pricelist_item ppi on ppi.price_version_id=ppv.id
where ppi.product_id=productid
and ppl.id=pricelistid
and min_quantity<=qt
and ppl.type='sale' and ppl.active='t'
and (ppv.date_end is null or ppv.date_end >= date)
and (ppv.date_start is null or ppv.date_start <= date)
and (ppi.date_end is null or ppi.date_end >= date)
and (ppi.date_start is null or ppi.date_start <= date)
order by ppi.sequence limit 1
);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION get_lot_livraison(product_tmpl_id integer, partner_id integer) RETURNS float AS $$
BEGIN
RETURN (
coalesce((
select lot_livraison
from is_product_client ipc
where ipc.client_id=partner_id and ipc.product_id=product_tmpl_id limit 1
),0
)
);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE view is_comparatif_tarif_facture AS (
select
ail.id id,
ai.id invoice_id,
ai.date_invoice invoice_date,
so.id order_id,
so.partner_id partner_id,
so.pricelist_id pricelist_id,
pt.id product_id,
ail.quantity quantity,
ail.uos_id uos_id,
ail.price_unit invoice_price,
coalesce(
is_prix_vente(
so.pricelist_id,
ail.product_id,
ail.quantity,
ai.date_invoice
),
0
) as pricelist_price,
coalesce(
is_prix_vente(
so.pricelist_id,
ail.product_id,
ail.quantity,
ai.date_invoice
),
0
)-ail.price_unit as price_delta,
get_lot_livraison(pt.id, so.partner_id) lot_livraison,
coalesce(
is_prix_vente(
so.pricelist_id,
ail.product_id,
get_lot_livraison(pt.id, so.partner_id),
ai.date_invoice
),
0
) as prix_lot_livraison
from account_invoice_line ail inner join account_invoice ai on ail.invoice_id=ai.id
inner join stock_move sm on ail.is_move_id=sm.id
inner join sale_order_line sol on sm.is_sale_line_id=sol.id
inner join sale_order so on sol.order_id=so.id
inner join product_product pp on ail.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
where ai.state='draft' and ai.type in ('out_invoice', 'out_refund')
)
""")
| {
"content_hash": "38f7c56beb2fc6f5dd8854cee164b075",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 121,
"avg_line_length": 38.643478260869564,
"alnum_prop": 0.5378037803780378,
"repo_name": "tonygalmiche/is_plastigray",
"id": "fc1dd57fcecf27a645bb9d1518a66c579de00e0f",
"size": "4471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report/is_comparatif_tarif_facture.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1702"
},
{
"name": "JavaScript",
"bytes": "22445"
},
{
"name": "Python",
"bytes": "1418292"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
try:
unicode
except NameError:
basestring = unicode = str # Python 3
import six
class DoesNotExist(Exception):
"The requested object does not exist"
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"More than one object exists --> inconsistency"
silent_variable_failure = True
class Field(object):
def to_db(self, value=None):
if value is None:
value = ''
return value
def to_python(self, value=None):
return value
class Options(object):
def __init__(self, meta, attrs):
fields = []
for obj_name, obj in attrs.items():
if isinstance(obj, Field):
fields.append((obj_name, obj))
self.fields = dict(fields)
class ManagerDescriptor(object):
# This class ensures managers aren't accessible via model instances.
# Poll.objects works, but poll_obj.objects raises AttributeError.
def __init__(self, manager):
self.manager = manager
def __get__(self, instance, type=None):
if instance is not None:
raise AttributeError("Manager isn't accessible via %s instances"
% type.__name__)
return self.manager
class Manager(object):
def __init__(self):
self.model = None
self.reset_store()
def contribute_to_class(self, model, name):
self.model = model
setattr(model, name, ManagerDescriptor(self))
def reset_store(self):
self._storage = {}
def all(self):
return self._storage.values()
def filter_has_key(self, key):
key = unicode(key)
return [
obj for obj in self.all()
if key in [unicode(k) for k in obj.__dict__.keys()]
]
def filter(self, **kwargs):
def check_if_equals_or_in_set(key_n_value):
"""helper method for loop. Convenient but maybe hacky: checks
if value is in attr or if iterable inside the set/list"""
key, value = key_n_value
if hasattr(item, key):
items_value = getattr(item, key)
if type(items_value) in (list, set):
for items_value in items_value:
if items_value == value:
return True
else:
if items_value == value:
return True
return False
for item in self.all():
if all(map(check_if_equals_or_in_set, kwargs.items())):
yield item
def create(self, pk, **kwargs):
kwargs['pk'] = pk
instance = self.model(**kwargs)
assert pk not in self._storage, (
"%s object with pk %s already exists!" % (self.model, pk))
self._storage[pk] = instance
return instance
def get(self, pk):
if pk in self._storage:
return self._storage[pk]
else:
raise self.model.DoesNotExist
class ModelMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(ModelMeta, cls).__new__
parents = [b for b in bases if isinstance(b, ModelMeta)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
module = attrs.pop('__module__')
new_cls = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
if not attr_meta:
meta = getattr(new_cls, 'Meta', None)
else:
meta = attr_meta
setattr(new_cls, '_meta', Options(meta, attrs))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
if isinstance(obj, Manager):
obj.contribute_to_class(new_cls, obj_name)
else:
setattr(new_cls, obj_name, obj)
if not hasattr(new_cls, "__unicode__"):
new_cls.__unicode__ = lambda self: self.pk
if not hasattr(new_cls, '__str__'):
new_cls.__str__ = lambda self: self.__unicode__()
new_cls.__repr__ = lambda self: u'<%s: %s>' % (
self.__class__.__name__, self.__unicode__())
return new_cls
class Model(six.with_metaclass(ModelMeta)):
MultipleObjectsReturned = MultipleObjectsReturned
DoesNotExist = DoesNotExist
def __init__(self, pk=None, **kwargs):
self.pk = pk
# Set the defined modelfields properly
for attr_name, field in self._meta.fields.items():
if attr_name in kwargs:
attr = kwargs.pop(attr_name)
value = field.to_python(attr)
else:
value = field.to_python()
setattr(self, attr_name, value)
# Set the not kwargs values not defined as fields
for attr_name, value in kwargs.items():
setattr(self, attr_name, value)
if kwargs:
raise ValueError(
'%s are not part of the schema for %s' % (
', '.join(kwargs.keys()), self.__class__.__name__))
def __eq__(self, other):
if not type(other) == type(self):
return False
if set(other.__dict__.keys()) != set(self.__dict__.keys()):
return False
for key in self.__dict__.keys():
if (not hasattr(other, key) or
getattr(self, key) != getattr(other, key)):
return False
return True
def __ne__(self, other):
if type(other) != type(self):
return True
if set(other.__dict__.keys()) != set(self.__dict__.keys()):
return True
for key in self.__dict__.keys():
if (not hasattr(other, key) or
getattr(other, key) != getattr(self, key)):
return True
return False
def __hash__(self):
        # Deliberately not comparing the full object state here: hashing by pk
        # keeps instances usable as references in set() instances
return hash(self.pk)
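# Illustrative usage sketch (the Poll model below is hypothetical, not part of
# this module):
#
#   class Poll(Model):
#       question = Field()
#       objects = Manager()
#
#   poll = Poll.objects.create(pk=1, question="Favourite colour?")
#   Poll.objects.get(1) == poll                         # True
#   list(Poll.objects.filter(question="Favourite colour?"))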
| {
"content_hash": "345a193a68103ab63be5423ac0602e26",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 76,
"avg_line_length": 30.686567164179106,
"alnum_prop": 0.5429636835278858,
"repo_name": "dmr/Ldtools",
"id": "4d98f08148918e90181c188dd61e670090ebf376",
"size": "6192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldtools/metamodels.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "94570"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
from cz_urnnbn_api.api_structures import DigitalInstance
from ..test_xml_composer import data_context
# Fixtures ====================================================================
@pytest.fixture
def dig_inst_xml():
return data_context("digital_instances.xml")
@pytest.fixture
def no_dig_inst_xml():
return data_context("no_digital_instance.xml")
# Tests =======================================================================
def test_from_xml(dig_inst_xml):
res = DigitalInstance.from_xml(dig_inst_xml)
assert res
assert len(res) == 3
assert res[0] == DigitalInstance(
uid="33",
active=True,
url="http://kramerius.mzk.cz/search/handle/uuid:8ffd7a5b-82da-11e0-bc9f-0050569d679d",
digital_library_id="37",
created="2012-09-03T00:44:34.603+02:00",
)
assert res[1] == DigitalInstance(
uid="34",
active=True,
url="http://kramerius.mzk.cz/search/handle/uuid:8ffd7a5b-82da-11e0-bc9f-0050569d679d",
digital_library_id="38",
format="jpg;pdf",
created="2012-09-19T00:37:25.362+02:00",
accessibility="volně přístupné",
)
assert res[2] == DigitalInstance(
uid="35",
active=False,
url="http://kramerius.mzk.cz/search/handle/uuid:8ffd7a5b-82da-11e0-bc9f-0050569d679d",
digital_library_id="12",
format="jpg;pdf",
created="2012-09-19T00:39:41.117+02:00",
deactivated="2012-09-19T00:42:30.334+02:00",
accessibility="volně přístupné",
)
# test that access to non-existent attributes is dissabled
with pytest.raises(ValueError):
res[2].asd = 1
def test_from_xml_no_dig_instance(no_dig_inst_xml):
res = DigitalInstance.from_xml(no_dig_inst_xml)
assert res == []
def test_to_xml():
di = DigitalInstance(
url="http://kramerius3.mzk.cz/kramerius/handle/BOA001/935239",
digital_library_id="4",
format="jpg;pdf",
accessibility="volně přístupné",
)
assert di.to_xml() == """<?xml version="1.0" encoding="utf-8"?>
<digitalInstance xmlns="http://resolver.nkp.cz/v3/">
\t<url>http://kramerius3.mzk.cz/kramerius/handle/BOA001/935239</url>
\t<digitalLibraryId>4</digitalLibraryId>
\t<format>jpg;pdf</format>
\t<accessibility>volně přístupné</accessibility>
</digitalInstance>""".encode("utf-8")
def test_to_xml_only_required_arguments():
di = DigitalInstance(
url="http://kramerius3.mzk.cz/kramerius/handle/BOA001/935239",
digital_library_id="4",
)
assert di.to_xml() == """<?xml version="1.0" encoding="utf-8"?>
<digitalInstance xmlns="http://resolver.nkp.cz/v3/">
\t<url>http://kramerius3.mzk.cz/kramerius/handle/BOA001/935239</url>
\t<digitalLibraryId>4</digitalLibraryId>
</digitalInstance>""".encode("utf-8")
def test_to_xml_required_arguments_are_really_required():
with pytest.raises(AssertionError):
DigitalInstance(url="4", digital_library_id=None).to_xml()
with pytest.raises(AssertionError):
DigitalInstance(url=None, digital_library_id="4").to_xml()
| {
"content_hash": "b10ba254ed13c58d66a1c54d7d1fe448",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 94,
"avg_line_length": 30.475728155339805,
"alnum_prop": 0.6294998407136031,
"repo_name": "edeposit/cz-urnnbn-api",
"id": "f87e772fdb5ee6d7742ec538aad66d0edad75d80",
"size": "3320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/api_structures/test_digital_instance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67042"
},
{
"name": "Shell",
"bytes": "1197"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import pytest
from .. import message as msg
def test_invalid_subset_msg():
with pytest.raises(TypeError) as exc:
msg.SubsetMessage(None)
assert exc.value.args[0].startswith('Sender must be a subset')
def test_invalid_data_msg():
with pytest.raises(TypeError) as exc:
msg.DataMessage(None)
assert exc.value.args[0].startswith('Sender must be a data')
def test_invalid_data_collection_msg():
with pytest.raises(TypeError) as exc:
msg.DataCollectionMessage(None)
assert exc.value.args[0].startswith('Sender must be a DataCollection')
| {
"content_hash": "2412854bc62eb9f37bbdacd0987b6741",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 28.347826086956523,
"alnum_prop": 0.7147239263803681,
"repo_name": "saimn/glue",
"id": "4660b825bf1a5e031627c3620c78b68944deb5c7",
"size": "652",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "glue/core/tests/test_message.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1609137"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
} |
"""This module contains AWS SQS hook"""
from __future__ import annotations
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class SqsHook(AwsBaseHook):
"""
Interact with Amazon Simple Queue Service.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "sqs"
super().__init__(*args, **kwargs)
def create_queue(self, queue_name: str, attributes: dict | None = None) -> dict:
"""
Create queue using connection object
:param queue_name: name of the queue.
:param attributes: additional attributes for the queue (default: None)
For details of the attributes parameter see :py:meth:`SQS.create_queue`
:return: dict with the information about the queue
For details of the returned value see :py:meth:`SQS.create_queue`
"""
return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
def send_message(
self,
queue_url: str,
message_body: str,
delay_seconds: int = 0,
message_attributes: dict | None = None,
message_group_id: str | None = None,
) -> dict:
"""
Send message to the queue
:param queue_url: queue url
:param message_body: the contents of the message
:param delay_seconds: seconds to delay the message
:param message_attributes: additional attributes for the message (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:param message_group_id: This applies only to FIFO (first-in-first-out) queues. (default: None)
For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`
:return: dict with the information about the message sent
For details of the returned value see :py:meth:`botocore.client.SQS.send_message`
"""
params = {
"QueueUrl": queue_url,
"MessageBody": message_body,
"DelaySeconds": delay_seconds,
"MessageAttributes": message_attributes or {},
}
if message_group_id:
params["MessageGroupId"] = message_group_id
return self.get_conn().send_message(**params)
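# Illustrative usage sketch (connection id and queue name are placeholders):
#
#   hook = SqsHook(aws_conn_id="aws_default")
#   queue = hook.create_queue("my-test-queue")
#   hook.send_message(queue_url=queue["QueueUrl"], message_body="hello")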
| {
"content_hash": "d1a09ede76973b6509af77c5f15a7d4c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 103,
"avg_line_length": 38.196969696969695,
"alnum_prop": 0.6322887742959143,
"repo_name": "apache/airflow",
"id": "2d3fd9de20e0f235f5a2f21f65c3d14a04871f88",
"size": "3308",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/hooks/sqs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from .. import activations, initializations
from ..utils.theano_utils import shared_zeros
from ..layers.core import Layer
class Convolution1D(Layer):
def __init__(self, nb_filter, stack_size, filter_length,
init='uniform', activation='linear', weights=None,
image_shape=None, border_mode='valid', subsample_length=1,
W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None):
nb_row = 1
nb_col = filter_length
self.nb_filter = nb_filter
self.stack_size = stack_size
self.filter_length = filter_length
self.subsample_length = subsample_length
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = (1, subsample_length)
self.border_mode = border_mode
self.image_shape = image_shape
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = [W_regularizer, b_regularizer]
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
conv_out = theano.tensor.nnet.conv.conv2d(X, self.W,
border_mode=self.border_mode, subsample=self.subsample, image_shape=self.image_shape)
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
def get_config(self):
return {"name":self.__class__.__name__,
"nb_filter":self.nb_filter,
"stack_size":self.stack_size,
"filter_length":self.filter_length,
"init":self.init.__name__,
"activation":self.activation.__name__,
"image_shape":self.image_shape,
"border_mode":self.border_mode,
"subsample_length":self.subsample_length}
class MaxPooling1D(Layer):
def __init__(self, pool_length=2, ignore_border=True):
self.pool_length = pool_length
self.poolsize = (1, pool_length)
self.ignore_border = ignore_border
self.input = T.tensor4()
self.params = []
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"pool_length":self.pool_length,
"ignore_border":self.ignore_border}
class Convolution2D(Layer):
def __init__(self, nb_filter, stack_size, nb_row, nb_col,
init='glorot_uniform', activation='linear', weights=None,
image_shape=None, border_mode='valid', subsample=(1,1),
W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None):
super(Convolution2D,self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = subsample
self.border_mode = border_mode
self.image_shape = image_shape
self.nb_filter = nb_filter
self.stack_size = stack_size
self.nb_row = nb_row
self.nb_col = nb_col
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = [W_regularizer, b_regularizer]
self.constraints = [W_constraint, b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
conv_out = theano.tensor.nnet.conv.conv2d(X, self.W,
border_mode=self.border_mode, subsample=self.subsample, image_shape=self.image_shape)
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
def get_config(self):
return {"name":self.__class__.__name__,
"nb_filter":self.nb_filter,
"stack_size":self.stack_size,
"nb_row":self.nb_row,
"nb_col":self.nb_col,
"init":self.init.__name__,
"activation":self.activation.__name__,
"image_shape":self.image_shape,
"border_mode":self.border_mode,
"subsample":self.subsample}
class MaxPooling2D(Layer):
def __init__(self, poolsize=(2, 2), ignore_border=True):
super(MaxPooling2D,self).__init__()
self.input = T.tensor4()
self.poolsize = poolsize
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name":self.__class__.__name__,
"poolsize":self.poolsize,
"ignore_border":self.ignore_border}
# class ZeroPadding2D(Layer): TODO
# class Convolution3D: TODO
# class MaxPooling3D: TODO
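# Illustrative usage sketch (filter counts and sizes are arbitrary):
#
#   conv = Convolution2D(nb_filter=32, stack_size=3, nb_row=3, nb_col=3,
#                        activation='relu')
#   pool = MaxPooling2D(poolsize=(2, 2))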
| {
"content_hash": "2c170bcbde0ea6d6bd4722010e8d4e74",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 97,
"avg_line_length": 33.61875,
"alnum_prop": 0.6055028815765012,
"repo_name": "aleju/keras",
"id": "6d4db99ec74835212fb9c4c29fdcf75cb716e379",
"size": "5403",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keras/layers/convolutional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "195188"
}
],
"symlink_target": ""
} |
import pyrtcdc
import base64
def on_channel(peer, channel):
print 'new channel %s created' %(channel.label)
channel.on_message = on_message
def on_candidate(peer, candidate):
print 'local candidate sdp:\n%s' %(candidate)
def on_message(channel, datatype, data):
print 'received data from channel %s: %s' %(channel.label, data)
channel.send(pyrtcdc.DATATYPE_STRING, 'hi')
peer = pyrtcdc.PeerConnection(on_channel, on_candidate, stun_server='stun.services.mozilla.com')
offer = peer.generate_offer()
print 'base64 encoded local offer sdp:\n%s\n' %(base64.b64encode(offer))
print 'enter base64 encoded remote offer sdp:'
while True:
roffer64 = raw_input('> ')
roffer = base64.b64decode(roffer64)
print 'remote offer sdp:\n%s' %(roffer)
res = peer.parse_offer(roffer)
if res >= 0:
offer = peer.generate_offer()
print 'new base64 encoded local offer sdp:\n%s\n' %(base64.b64encode(offer))
break
print 'invalid remote offer sdp'
print 'enter base64 encoded remote offer sdp:'
print 'enter remote candidate sdp:'
while True:
rcand = raw_input('> ')
if peer.parse_candidates(rcand) > 0:
break
print 'invalid remote candidate sdp'
print 'enter remote candidate sdp:'
peer.loop()
| {
"content_hash": "ccfb3ce769fa00a1f86036e5fa0c4f3b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 96,
"avg_line_length": 29.214285714285715,
"alnum_prop": 0.7123064384678076,
"repo_name": "saghul/librtcdc",
"id": "738c6c679baf3a97c32ab25974c41783e01db12f",
"size": "1250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/example.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "48318"
},
{
"name": "Makefile",
"bytes": "673"
},
{
"name": "Python",
"bytes": "12034"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
from odin import nnet as N, backend as K
@N.Model
def gender(X, f, **kwargs):
nb_gender = kwargs.get('nb_gender', 4)
if f is None:
f = N.Sequence([
N.Dimshuffle(pattern=(0, 1, 2, 'x')),
N.Conv(num_filters=32, filter_size=3, strides=1, b_init=None, pad='valid'),
N.BatchNorm(activation=K.relu),
N.Pool(pool_size=2, mode='avg'),
N.Conv(num_filters=64, filter_size=3, strides=1, b_init=None, pad='valid'),
N.BatchNorm(activation=K.relu),
N.Pool(pool_size=2, mode='avg'),
N.Flatten(outdim=3),
N.Dense(num_units=512, b_init=None),
N.BatchNorm(axes=(0, 1)),
N.AutoRNN(num_units=128, rnn_mode='gru', num_layers=2,
input_mode='linear', direction_mode='unidirectional'),
N.Flatten(outdim=2),
N.Dense(num_units=nb_gender, activation=K.softmax)
], debug=True)
return f(X), f
| {
"content_hash": "eab2718dc0a828ede1d322d034ba68de",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 87,
"avg_line_length": 35.89655172413793,
"alnum_prop": 0.5581171950048031,
"repo_name": "imito/odin",
"id": "5a38b2edef6fb937adabe0f1ace7c2b88796c583",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/models/model_tidigits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1516670"
}
],
"symlink_target": ""
} |
import json
| {
"content_hash": "3b35ff97ae5bf6f29cd06575d2f4489e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 11,
"avg_line_length": 4.666666666666667,
"alnum_prop": 0.7142857142857143,
"repo_name": "xiaoyongaa/ALL",
"id": "ac6262d423e19cd6cd81b0fb5c60e46ad70c4856",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "网络编程第四周/1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "432257"
},
{
"name": "C++",
"bytes": "129981"
},
{
"name": "Groff",
"bytes": "26852"
},
{
"name": "HTML",
"bytes": "201234"
},
{
"name": "Python",
"bytes": "462513"
},
{
"name": "Shell",
"bytes": "9245"
}
],
"symlink_target": ""
} |
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1HorizontalPodAutoscalerSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'scale_target_ref': 'V1CrossVersionObjectReference',
'min_replicas': 'int',
'max_replicas': 'int',
'target_cpu_utilization_percentage': 'int'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'scale_target_ref': 'scaleTargetRef',
'min_replicas': 'minReplicas',
'max_replicas': 'maxReplicas',
'target_cpu_utilization_percentage': 'targetCPUUtilizationPercentage'
}
def __init__(self, scale_target_ref=None, min_replicas=None, max_replicas=None, target_cpu_utilization_percentage=None):
"""
V1HorizontalPodAutoscalerSpec - a model defined in Swagger
"""
self._scale_target_ref = scale_target_ref
self._min_replicas = min_replicas
self._max_replicas = max_replicas
self._target_cpu_utilization_percentage = target_cpu_utilization_percentage
@property
def scale_target_ref(self):
"""
Gets the scale_target_ref of this V1HorizontalPodAutoscalerSpec.
reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.
:return: The scale_target_ref of this V1HorizontalPodAutoscalerSpec.
:rtype: V1CrossVersionObjectReference
"""
return self._scale_target_ref
@scale_target_ref.setter
def scale_target_ref(self, scale_target_ref):
"""
Sets the scale_target_ref of this V1HorizontalPodAutoscalerSpec.
reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.
:param scale_target_ref: The scale_target_ref of this V1HorizontalPodAutoscalerSpec.
:type: V1CrossVersionObjectReference
"""
self._scale_target_ref = scale_target_ref
@property
def min_replicas(self):
"""
Gets the min_replicas of this V1HorizontalPodAutoscalerSpec.
lower limit for the number of pods that can be set by the autoscaler, default 1.
:return: The min_replicas of this V1HorizontalPodAutoscalerSpec.
:rtype: int
"""
return self._min_replicas
@min_replicas.setter
def min_replicas(self, min_replicas):
"""
Sets the min_replicas of this V1HorizontalPodAutoscalerSpec.
lower limit for the number of pods that can be set by the autoscaler, default 1.
:param min_replicas: The min_replicas of this V1HorizontalPodAutoscalerSpec.
:type: int
"""
self._min_replicas = min_replicas
@property
def max_replicas(self):
"""
Gets the max_replicas of this V1HorizontalPodAutoscalerSpec.
upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
:return: The max_replicas of this V1HorizontalPodAutoscalerSpec.
:rtype: int
"""
return self._max_replicas
@max_replicas.setter
def max_replicas(self, max_replicas):
"""
Sets the max_replicas of this V1HorizontalPodAutoscalerSpec.
upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
:param max_replicas: The max_replicas of this V1HorizontalPodAutoscalerSpec.
:type: int
"""
self._max_replicas = max_replicas
@property
def target_cpu_utilization_percentage(self):
"""
Gets the target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec.
target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.
:return: The target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec.
:rtype: int
"""
return self._target_cpu_utilization_percentage
@target_cpu_utilization_percentage.setter
def target_cpu_utilization_percentage(self, target_cpu_utilization_percentage):
"""
Sets the target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec.
target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.
:param target_cpu_utilization_percentage: The target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec.
:type: int
"""
self._target_cpu_utilization_percentage = target_cpu_utilization_percentage
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1HorizontalPodAutoscalerSpec.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
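# Illustrative usage sketch (values are arbitrary):
#
#   spec = V1HorizontalPodAutoscalerSpec(
#       min_replicas=1,
#       max_replicas=10,
#       target_cpu_utilization_percentage=80)
#   spec.to_dict()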
| {
"content_hash": "07eb19a84bfc03fc628d9a1aaba27634",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 179,
"avg_line_length": 34.54854368932039,
"alnum_prop": 0.6415624560910496,
"repo_name": "ftl-toolbox/lib_openshift",
"id": "49f76c746f5e71d142d3abf9103ad6c2cf8e83cf",
"size": "7134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib_openshift/models/v1_horizontal_pod_autoscaler_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61160"
},
{
"name": "Python",
"bytes": "6149288"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the data, converters convert the letter to a number
data= np.loadtxt('testdata.data', dtype= 'float32', delimiter = ',',
converters= {0: lambda ch: ord(ch)-ord('A')})
# split the data in two, one half for train and one for test
train, test = np.vsplit(data,2)
# for this run, override the split and use the full data set for both training and testing
train = data
test = data
# split trainData and testData to features and responses
responses, trainData = np.hsplit(train,[1])
labels, testData = np.hsplit(test,[1])
# Initiate the kNN, classify, measure accuracy.
knn = cv2.KNearest()
knn.train(trainData, responses)
import pdb; pdb.set_trace()
ret, result, neighbours, dist = knn.find_nearest(testData, k=5)
correct = np.count_nonzero(result == labels)
accuracy = correct*100.0/len(test)
print accuracy
| {
"content_hash": "5a8d0a2b4848a0e5c1a51c17cc97a080",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 30.807692307692307,
"alnum_prop": 0.7153558052434457,
"repo_name": "emschorsch/projector-hangman",
"id": "d099cdb32c1133451a4b88419354d5efec2df9a7",
"size": "938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "char-training.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67424"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import requests
from django.db import transaction
from api.files.serializers import get_file_download_link
from osf.models import ChronosJournal
from osf.models import ChronosSubmission
from osf.utils.workflows import ChronosSubmissionStatus, ReviewStates
from website.settings import (
CHRONOS_USE_FAKE_FILE, CHRONOS_FAKE_FILE_URL,
CHRONOS_API_KEY, CHRONOS_USERNAME, CHRONOS_PASSWORD, CHRONOS_HOST, VERIFY_CHRONOS_SSL_CERT
)
class ChronosSerializer(object):
@classmethod
def serialize_manuscript(cls, journal_id, preprint, status=ChronosSubmissionStatus.DRAFT.value):
"""Serialize an OSF preprint for submission to Chronos
It is currently unclear what ARTICLE_TYPE should be:
Possible options are:
* abstract
* addendum
* analytic-perspective
* announcement
* article-commentary
* Book
* brief-report
* case-report
* collection
* correction
* discussion
* dissertation
* editorial
* in-brief
* introduction
* letter
* meeting-report
* news
* oration
* partial-retraction
* product-review
* rapid-communication
* reply
* reprint
* research-article
* retraction
* review-article
* study-protocol
Returns:
dict: The serialized manuscript
"""
return {
'AUTHORS': [
cls.serialize_author(contrib)
for contrib in preprint.contributor_set.filter(visible=True).select_related('user')
],
'MANUSCRIPT_FILES': [
cls.serialize_file(preprint, preprint.primary_file)
],
'STATUS_CODE': status,
'ABSTRACT': preprint.description,
'ARTICLE_TYPE': 'research-article', # ??
'DOI': preprint.preprint_doi,
'MANUSCRIPT_TITLE': preprint.title,
'PROVIDER_MANUSCRIPT_ID': preprint._id,
'CHRONOS_JOURNAL_ID': journal_id,
'MANUSCRIPT_URL': preprint.url,
'KEYWORDS': ','.join(preprint.tags.all().values_list('name', flat=True)),
'ADDITIONAL_DATA': [
{
'DATA_NAME': 'Provider',
'DATA_TYPE': 'string',
'DATA_VALUE': preprint.provider.name,
}
],
'UNDERLYING_DATASET_URL': preprint.node.absolute_url if preprint.node else '',
}
@classmethod
def serialize_user(cls, user):
return {
'CHRONOS_USER_ID': user.chronos_user_id,
'EMAIL': user.username,
'GIVEN_NAME': user.given_name if str(user.given_name) and str(user.family_name) else user.fullname,
'ORCID_ID': user.social.get('orcid', None),
'PARTNER_USER_ID': user._id,
'SURNAME': user.family_name if str(user.given_name) and str(user.family_name) else None,
}
@classmethod
def serialize_author(cls, contributor):
ret = cls.serialize_user(contributor.user)
if contributor._order == 0:
contribution = 'firstAuthor'
else:
contribution = 'submittingAuthor'
ret.update({
'CONTRIBUTION': contribution,
'ORGANIZATION': '',
})
return ret
@classmethod
def serialize_file(cls, preprint, file_node):
"""Serialize an BaseFileNode for submission to Chronos.
It is currently unclear what MANUSCRIPT_FILE_CATEGORY should be.
Possible options are:
* supplementaryMaterial
* articleContent
* movie
* combinedPDF
* PublicationFiles
* supportingFile
* coverLetter
Note:
`FILE_DOWNLOAD_URL` MUST be accessible by Chronos as it attempts to download
all files given to it.
Args:
preprint: The Preprint that is being submitted
file_node: The AbstractFileNode to serialize. Should belong to `preprint`
Returns:
The serialized AbstractFileNode
"""
assert file_node.is_file
if CHRONOS_USE_FAKE_FILE:
file_url = CHRONOS_FAKE_FILE_URL
else:
file_url = get_file_download_link(file_node)
return {
'FILE_DOWNLOAD_URL': file_url,
'FILE_NAME': file_node.name,
'MANUSCRIPT_FILE_CATEGORY': 'Publication Files',
}
class ChronosClient(object):
def __init__(self, username=None, password=None, api_key=None, host=None):
username = username or CHRONOS_USERNAME
password = password or CHRONOS_PASSWORD
api_key = api_key or CHRONOS_API_KEY
host = host or CHRONOS_HOST
self._client = ChronosRestClient(username, password, api_key, host=host)
def sync_journals(self):
journals = []
for journal in self._client.get_journals():
journals.append(ChronosJournal.objects.update_or_create(journal_id=journal['JOURNAL_ID'], defaults={
'raw_response': journal,
'title': journal['TITLE'],
'name': journal['PUBLISHER_NAME'],
# Other Available fields: (Not currently used for anything so they are not parsed)
# 'E_ISSN':
# 'ISSN':
# 'JOURNAL_ID':
# 'JOURNAL_URL':
# 'PUBLISHER_ID':
# 'PUBLISHER_NAME':
})[0])
return journals
def sync_manuscript(self, submission):
return self._sync_manuscript(
submission,
self._client.get_manuscript(submission.publication_id)
)
def get_journals(self):
return ChronosJournal.objects.all()
def submit_manuscript(self, journal, preprint, submitter):
submission_qs = ChronosSubmission.objects.filter(preprint=preprint)
if submission_qs.filter(journal=journal).exists():
raise ValueError('{!r} already has an existing submission to {!r}.'.format(preprint, journal))
# 1 = draft, 2 = submitted, 3 = accepted, 4 = published
        # Disallow submission if the current preprint has submissions that are submitted, accepted or published,
        # regardless of the journal
if submission_qs.filter(status=2).exists():
raise ValueError('Cannot submit because a pending submission exists')
if submission_qs.filter(status=3).exists():
raise ValueError('Cannot submit because your submission was accepted')
if submission_qs.filter(status=4).exists():
raise ValueError('Cannot submit because your submission was published')
if preprint.machine_state != ReviewStates.ACCEPTED.value:
raise ValueError('Cannot submit to Chronos if the preprint is not accepted by moderators')
body = ChronosSerializer.serialize_manuscript(journal.journal_id, preprint)
body['USER'] = ChronosSerializer.serialize_user(submitter)
response = self._client.submit_manuscript(body)
with transaction.atomic():
submission = ChronosSubmission.objects.create(
journal=journal,
preprint=preprint,
submitter=submitter,
raw_response=response,
# Things parsed out of response
publication_id=response['PUBLICATION_ID'],
status=response['STATUS_CODE'],
submission_url=response['CHRONOS_SUBMISSION_URL'],
)
submitter.chronos_user_id = response['USER']['CHRONOS_USER_ID']
submitter.save()
for contrib, author in zip(preprint.contributor_set.filter(visible=True).select_related('user'), response['AUTHORS']):
assert author['PARTNER_USER_ID'] == contrib.user._id
contrib.user.chronos_user_id = author['CHRONOS_USER_ID']
contrib.user.save()
return submission
def update_manuscript(self, submission):
body = ChronosSerializer.serialize_manuscript(submission.journal.journal_id, submission.preprint, status=submission.status)
body['USER'] = ChronosSerializer.serialize_user(submission.submitter)
body['PUBLICATION_ID'] = submission.publication_id
return self._sync_manuscript(
submission,
self._client.update_manuscript(body),
)
def _sync_manuscript(self, submission, response):
with transaction.atomic():
            # TODO pick out any interesting fields
submission.status = response['STATUS_CODE']
# Not present when fetching from the API
if response['CHRONOS_SUBMISSION_URL']:
submission.submission_url = response['CHRONOS_SUBMISSION_URL']
submission.save()
return submission
class ChronosRestClient(object):
def __init__(self, username, password, api_key, host='https://sandbox.api.chronos-oa.com'):
self._auth_key = None
self._session = requests.Session()
self._session.verify = VERIFY_CHRONOS_SSL_CERT
self._api_key = api_key
self._host = host
self._password = password
self._username = username
def get_journals(self):
return self._do_request('GET', '/partners/journal/all').json()
def submit_manuscript(self, body):
return self._do_request('POST', '/partners/submission', json=body).json()
def update_manuscript(self, body):
return self._do_request('POST', '/partners/manuscript', json=body).json()
def get_manuscript(self, manuscript_id):
return self._do_request('GET', '/partners/manuscript/{}'.format(manuscript_id)).json()
def get_journals_by_publisher(self, publisher):
raise NotImplementedError
def get_journals_by_issn(self, issn):
raise NotImplementedError
def _refresh_auth_key(self):
if not self._auth_key:
resp = self._session.post(
'{}/partners/login'.format(self._host),
json={
'password': self._password,
'username': self._username,
},
headers={
'api_key': self._api_key,
},
)
resp.raise_for_status()
self._auth_key = resp.json()['auth_key']
return self._auth_key
def _do_request(self, method, path, json=None):
self._refresh_auth_key()
resp = self._session.request(
method,
'{}{}'.format(self._host, path),
json=json,
headers={
'api_key': self._api_key,
'auth_key': self._auth_key,
}
)
resp.raise_for_status()
return resp
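# Illustrative end-to-end flow (preprint and submitter are hypothetical OSF
# objects; credentials are read from website.settings):
#
#   client = ChronosClient()
#   journals = client.sync_journals()
#   submission = client.submit_manuscript(journals[0], preprint, submitter)
#   client.sync_manuscript(submission)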
| {
"content_hash": "593b03e868f47cc201ca4fe673d20740",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 131,
"avg_line_length": 35.5928338762215,
"alnum_prop": 0.5876269790427382,
"repo_name": "pattisdr/osf.io",
"id": "6e9df29c774e97cba151f09c1f24903533ec501a",
"size": "10927",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osf/external/chronos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "306671"
},
{
"name": "JavaScript",
"bytes": "1790426"
},
{
"name": "Mako",
"bytes": "647535"
},
{
"name": "Python",
"bytes": "9601810"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import os
import sys
import tempfile
import warnings
from typing import Union, List, Tuple, Iterable
from py4j.java_gateway import get_java_class, get_method
from pyflink.common.configuration import Configuration
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.sources import TableSource
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.data_stream import DataStream
from pyflink.java_gateway import get_gateway
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.table import Table, EnvironmentSettings, Expression, ExplainDetail, \
Module, ModuleEntry, TableSink, Schema, ChangelogMode
from pyflink.table.catalog import Catalog
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.statement_set import StatementSet
from pyflink.table.table_config import TableConfig
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.table_result import TableResult
from pyflink.table.types import _to_java_type, _create_type_verifier, RowType, DataType, \
_infer_schema_from_data, _create_converter, from_arrow_type, RowField, create_arrow_schema, \
_to_java_data_type
from pyflink.table.udf import UserDefinedFunctionWrapper, AggregateFunction, udaf, \
udtaf, TableAggregateFunction
from pyflink.table.utils import to_expression_jarray
from pyflink.util import java_utils
from pyflink.util.java_utils import get_j_env_configuration, is_local_deployment, load_java_class, \
to_j_explain_detail_arr, to_jarray, get_field
__all__ = [
'StreamTableEnvironment',
'TableEnvironment'
]
class TableEnvironment(object):
"""
A table environment is the base class, entry point, and central context for creating Table
and SQL API programs.
It is unified for bounded and unbounded data processing.
A table environment is responsible for:
- Connecting to external systems.
- Registering and retrieving :class:`~pyflink.table.Table` and other meta objects from a
catalog.
- Executing SQL statements.
- Offering further configuration options.
The path in methods such as :func:`create_temporary_view`
    should be a proper SQL identifier. The syntax is the following:
[[catalog-name.]database-name.]object-name, where the catalog name and database are optional.
For path resolution see :func:`use_catalog` and :func:`use_database`. All keywords or other
special characters need to be escaped.
Example: `cat.1`.`db`.`Table` resolves to an object named 'Table' (table is a reserved
keyword, thus must be escaped) in a catalog named 'cat.1' and database named 'db'.
.. note::
This environment is meant for pure table programs. If you would like to convert from or to
other Flink APIs, it might be necessary to use one of the available language-specific table
environments in the corresponding bridging modules.
"""
def __init__(self, j_tenv, serializer=PickleSerializer()):
self._j_tenv = j_tenv
self._serializer = serializer
# When running in MiniCluster, launch the Python UDF worker using the Python executable
# specified by sys.executable if users have not specified it explicitly via configuration
# python.executable.
self._set_python_executable_for_local_executor()
self._config_chaining_optimization()
self._open()
@staticmethod
def create(environment_settings: Union[EnvironmentSettings, Configuration]) \
-> 'TableEnvironment':
"""
Creates a table environment that is the entry point and central context for creating Table
and SQL API programs.
:param environment_settings: The configuration or environment settings used to instantiate
the :class:`~pyflink.table.TableEnvironment`, the name is for backward compatibility.
:return: The :class:`~pyflink.table.TableEnvironment`.
"""
gateway = get_gateway()
if isinstance(environment_settings, Configuration):
environment_settings = EnvironmentSettings.new_instance() \
.with_configuration(environment_settings).build()
elif not isinstance(environment_settings, EnvironmentSettings):
raise TypeError("argument should be EnvironmentSettings or Configuration")
j_tenv = gateway.jvm.TableEnvironment.create(environment_settings._j_environment_settings)
return TableEnvironment(j_tenv)
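    # Illustrative usage sketch (not part of the original docstring; assumes the
    # EnvironmentSettings helpers such as in_streaming_mode() are available):
    #
    #   t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())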
def from_table_source(self, table_source: 'TableSource') -> 'Table':
"""
Creates a table from a table source.
Example:
::
>>> csv_table_source = CsvTableSource(
... csv_file_path, ['a', 'b'], [DataTypes.STRING(), DataTypes.BIGINT()])
>>> table_env.from_table_source(csv_table_source)
:param table_source: The table source used as table.
:return: The result table.
"""
warnings.warn("Deprecated in 1.11.", DeprecationWarning)
return Table(self._j_tenv.fromTableSource(table_source._j_table_source), self)
def register_catalog(self, catalog_name: str, catalog: Catalog):
"""
Registers a :class:`~pyflink.table.catalog.Catalog` under a unique name.
All tables registered in the :class:`~pyflink.table.catalog.Catalog` can be accessed.
:param catalog_name: The name under which the catalog will be registered.
:param catalog: The catalog to register.
"""
self._j_tenv.registerCatalog(catalog_name, catalog._j_catalog)
def get_catalog(self, catalog_name: str) -> Catalog:
"""
Gets a registered :class:`~pyflink.table.catalog.Catalog` by name.
:param catalog_name: The name to look up the :class:`~pyflink.table.catalog.Catalog`.
:return: The requested catalog, None if there is no
registered catalog with given name.
"""
catalog = self._j_tenv.getCatalog(catalog_name)
if catalog.isPresent():
return Catalog(catalog.get())
else:
return None
def load_module(self, module_name: str, module: Module):
"""
Loads a :class:`~pyflink.table.Module` under a unique name. Modules will be kept
in the loaded order.
ValidationException is thrown when there is already a module with the same name.
:param module_name: Name of the :class:`~pyflink.table.Module`.
:param module: The module instance.
.. versionadded:: 1.12.0
"""
self._j_tenv.loadModule(module_name, module._j_module)
def unload_module(self, module_name: str):
"""
Unloads a :class:`~pyflink.table.Module` with given name.
ValidationException is thrown when there is no module with the given name.
:param module_name: Name of the :class:`~pyflink.table.Module`.
.. versionadded:: 1.12.0
"""
self._j_tenv.unloadModule(module_name)
def use_modules(self, *module_names: str):
"""
Use an array of :class:`~pyflink.table.Module` with given names.
        ValidationException is thrown when there is a duplicate name or no module with the given name.
:param module_names: Names of the modules to be used.
.. versionadded:: 1.13.0
"""
j_module_names = to_jarray(get_gateway().jvm.String, module_names)
self._j_tenv.useModules(j_module_names)
def create_java_temporary_system_function(self, name: str, function_class_name: str):
"""
Registers a java user defined function class as a temporary system function.
Compared to .. seealso:: :func:`create_java_temporary_function`, system functions are
identified by a global name that is independent of the current catalog and current
        database. Thus, this method allows extending the set of built-in system functions like
TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_java_temporary_system_function("func",
... "java.user.defined.function.class.name")
:param name: The name under which the function will be registered globally.
        :param function_class_name: The fully qualified Java class name of the function class
                                    containing the implementation. The function must have a
                                    public no-argument constructor and can be found in the current
Java classloader.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
self._j_tenv.createTemporarySystemFunction(name, java_function)
def create_temporary_system_function(self, name: str,
function: Union[UserDefinedFunctionWrapper,
AggregateFunction]):
"""
Registers a python user defined function class as a temporary system function.
        Compared to :func:`create_temporary_function`, system functions are identified
        by a global name that is independent of the current catalog and current database. Thus,
        this method allows extending the set of built-in system functions like TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_temporary_system_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_system_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_system_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function will be registered globally.
:param function: The function class containing the implementation. The function must have a
                         public no-argument constructor and can be found in the current Java
classloader.
.. versionadded:: 1.12.0
"""
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
self._j_tenv.createTemporarySystemFunction(name, java_function)
def drop_temporary_system_function(self, name: str) -> bool:
"""
Drops a temporary system function registered under the given name.
If a permanent function with the given name exists, it will be used from now on for any
queries that reference this name.
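        Example (an illustrative sketch; it assumes a temporary system function named
        "add_one" was registered earlier via :func:`create_temporary_system_function`):
        ::
            >>> table_env.drop_temporary_system_function("add_one")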
:param name: The name under which the function has been registered globally.
:return: true if a function existed under the given name and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropTemporarySystemFunction(name)
def create_java_function(self, path: str, function_class_name: str,
ignore_if_exists: bool = None):
"""
Registers a java user defined function class as a catalog function in the given path.
Compared to system functions with a globally defined name, catalog functions are always
(implicitly or explicitly) identified by a catalog and database.
There must not be another function (temporary or permanent) registered under the same path.
Example:
::
>>> table_env.create_java_function("func", "java.user.defined.function.class.name")
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
        :param function_class_name: The fully qualified Java class name of the function class
                                    containing the implementation. The function must have a
                                    public no-argument constructor and can be found in the current
Java classloader.
:param ignore_if_exists: If a function exists under the given path and this flag is set,
no operation is executed. An exception is thrown otherwise.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
if ignore_if_exists is None:
self._j_tenv.createFunction(path, java_function)
else:
self._j_tenv.createFunction(path, java_function, ignore_if_exists)
def drop_function(self, path: str) -> bool:
"""
Drops a catalog function registered in the given path.
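        Example (an illustrative sketch; it assumes a catalog function was registered earlier
        under the path "func", e.g. via :func:`create_java_function`):
        ::
            >>> table_env.drop_function("func")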
        :param path: The path under which the function has been registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:return: true if a function existed in the given path and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropFunction(path)
def create_java_temporary_function(self, path: str, function_class_name: str):
"""
Registers a java user defined function class as a temporary catalog function.
        Compared to :func:`create_java_temporary_system_function` with a globally
defined name, catalog functions are always (implicitly or explicitly) identified by a
catalog and database.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary function.
Example:
::
>>> table_env.create_java_temporary_function("func",
... "java.user.defined.function.class.name")
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
        :param function_class_name: The fully qualified Java class name of the function class
                                    containing the implementation. The function must have a
                                    public no-argument constructor and can be found in the current
Java classloader.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
self._j_tenv.createTemporaryFunction(path, java_function)
def create_temporary_function(self, path: str, function: Union[UserDefinedFunctionWrapper,
AggregateFunction]):
"""
Registers a python user defined function class as a temporary catalog function.
        Compared to :func:`create_temporary_system_function` with a globally defined
name, catalog functions are always (implicitly or explicitly) identified by a catalog and
database.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary function.
Example:
::
>>> table_env.create_temporary_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function: The function class containing the implementation. The function must have a
                         public no-argument constructor and can be found in the current Java
classloader.
.. versionadded:: 1.12.0
"""
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
self._j_tenv.createTemporaryFunction(path, java_function)
def drop_temporary_function(self, path: str) -> bool:
"""
        Drops a temporary catalog function registered in the given path.
        If a permanent function with the same path exists, it will be used from now on for any
        queries that reference this path.
        :param path: The path under which the function has been registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:return: true if a function existed in the given path and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropTemporaryFunction(path)
def create_temporary_table(self, path: str, descriptor: TableDescriptor):
"""
Registers the given :class:`~pyflink.table.TableDescriptor` as a temporary catalog table.
The TableDescriptor is converted into a CatalogTable and stored in the catalog.
Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
it will be inaccessible in the current session. To make the permanent object available again
one can drop the corresponding temporary object.
Examples:
::
>>> table_env.create_temporary_table("MyTable", TableDescriptor.for_connector("datagen")
... .schema(Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build())
... .option("rows-per-second", 10)
... .option("fields.f0.kind", "random")
... .build())
:param path: The path under which the table will be registered.
:param descriptor: Template for creating a CatalogTable instance.
.. versionadded:: 1.14.0
"""
self._j_tenv.createTemporaryTable(path, descriptor._j_table_descriptor)
def create_table(self, path: str, descriptor: TableDescriptor):
"""
Registers the given :class:`~pyflink.table.TableDescriptor` as a catalog table.
The TableDescriptor is converted into a CatalogTable and stored in the catalog.
If the table should not be permanently stored in a catalog, use
:func:`create_temporary_table` instead.
Examples:
::
>>> table_env.create_table("MyTable", TableDescriptor.for_connector("datagen")
... .schema(Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build())
... .option("rows-per-second", 10)
... .option("fields.f0.kind", "random")
... .build())
:param path: The path under which the table will be registered.
:param descriptor: Template for creating a CatalogTable instance.
.. versionadded:: 1.14.0
"""
self._j_tenv.createTable(path, descriptor._j_table_descriptor)
def register_table(self, name: str, table: Table):
"""
Registers a :class:`~pyflink.table.Table` under a unique name in the TableEnvironment's
catalog. Registered tables can be referenced in SQL queries.
Example:
::
>>> tab = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
>>> table_env.register_table("source", tab)
:param name: The name under which the table will be registered.
:param table: The table to register.
.. note:: Deprecated in 1.10. Use :func:`create_temporary_view` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_temporary_view instead.", DeprecationWarning)
self._j_tenv.registerTable(name, table._j_table)
def register_table_source(self, name: str, table_source: TableSource):
"""
Registers an external :class:`~pyflink.table.TableSource` in this
:class:`~pyflink.table.TableEnvironment`'s catalog. Registered tables can be referenced in
SQL queries.
Example:
::
>>> table_env.register_table_source("source",
... CsvTableSource("./1.csv",
... ["a", "b"],
... [DataTypes.INT(),
... DataTypes.STRING()]))
:param name: The name under which the table source is registered.
:param table_source: The table source to register.
.. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_table instead.", DeprecationWarning)
self._j_tenv.registerTableSourceInternal(name, table_source._j_table_source)
def register_table_sink(self, name: str, table_sink: TableSink):
"""
Registers an external :class:`~pyflink.table.TableSink` with given field names and types in
this :class:`~pyflink.table.TableEnvironment`'s catalog. Registered sink tables can be
referenced in SQL DML statements.
Example:
::
>>> table_env.register_table_sink("sink",
... CsvTableSink(["a", "b"],
... [DataTypes.INT(),
... DataTypes.STRING()],
... "./2.csv"))
:param name: The name under which the table sink is registered.
:param table_sink: The table sink to register.
.. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_table instead.", DeprecationWarning)
self._j_tenv.registerTableSinkInternal(name, table_sink._j_table_sink)
def scan(self, *table_path: str) -> Table:
"""
Scans a registered table and returns the resulting :class:`~pyflink.table.Table`.
A table to scan must be registered in the TableEnvironment. It can be either directly
registered or be an external member of a :class:`~pyflink.table.catalog.Catalog`.
See the documentation of :func:`~pyflink.table.TableEnvironment.use_database` or
:func:`~pyflink.table.TableEnvironment.use_catalog` for the rules on the path resolution.
Examples:
Scanning a directly registered table
::
>>> tab = table_env.scan("tableName")
Scanning a table from a registered catalog
::
>>> tab = table_env.scan("catalogName", "dbName", "tableName")
:param table_path: The path of the table to scan.
:throws: Exception if no table is found using the given table path.
:return: The resulting table.
.. note:: Deprecated in 1.10. Use :func:`from_path` instead.
"""
warnings.warn("Deprecated in 1.10. Use from_path instead.", DeprecationWarning)
gateway = get_gateway()
j_table_paths = java_utils.to_jarray(gateway.jvm.String, table_path)
j_table = self._j_tenv.scan(j_table_paths)
return Table(j_table, self)
def from_path(self, path: str) -> Table:
"""
Reads a registered table and returns the resulting :class:`~pyflink.table.Table`.
A table to scan must be registered in the :class:`~pyflink.table.TableEnvironment`.
See the documentation of :func:`use_database` or :func:`use_catalog` for the rules on the
path resolution.
Examples:
Reading a table from default catalog and database.
::
>>> tab = table_env.from_path("tableName")
Reading a table from a registered catalog.
::
>>> tab = table_env.from_path("catalogName.dbName.tableName")
        Reading a table from a registered catalog with escaping (`Table` is a reserved keyword).
        Dots in e.g. a database name must also be escaped.
::
>>> tab = table_env.from_path("catalogName.`db.Name`.`Table`")
:param path: The path of a table API object to scan.
:return: Either a table or virtual table (=view).
.. seealso:: :func:`use_catalog`
.. seealso:: :func:`use_database`
.. versionadded:: 1.10.0
"""
return Table(get_method(self._j_tenv, "from")(path), self)
def from_descriptor(self, descriptor: TableDescriptor) -> Table:
"""
Returns a Table backed by the given TableDescriptor.
The TableDescriptor is registered as an inline (i.e. anonymous) temporary table
(see :func:`create_temporary_table`) using a unique identifier and then read. Note that
calling this method multiple times, even with the same descriptor, results in multiple
temporary tables. In such cases, it is recommended to register it under a name using
        :func:`create_temporary_table` and reference it via :func:`from_path`.
Examples:
::
>>> table_env.from_descriptor(TableDescriptor.for_connector("datagen")
... .schema(Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build())
            ...                  .build())
        Note that the returned Table is an API object and only contains a pipeline description.
        It actually corresponds to a *view* in SQL terms. Call :func:`~pyflink.table.Table.execute`
        to trigger an execution.
:return: The Table object describing the pipeline for further transformations.
.. versionadded:: 1.14.0
"""
return Table(get_method(self._j_tenv, "from")(descriptor._j_table_descriptor), self)
def list_catalogs(self) -> List[str]:
"""
Gets the names of all catalogs registered in this environment.
:return: List of catalog names.
"""
j_catalog_name_array = self._j_tenv.listCatalogs()
return [item for item in j_catalog_name_array]
def list_modules(self) -> List[str]:
"""
Gets the names of all modules used in this environment.
:return: List of module names.
.. versionadded:: 1.10.0
"""
j_module_name_array = self._j_tenv.listModules()
return [item for item in j_module_name_array]
def list_full_modules(self) -> List[ModuleEntry]:
"""
Gets the names and statuses of all modules loaded in this environment.
:return: List of module names and use statuses.
.. versionadded:: 1.13.0
"""
j_module_entry_array = self._j_tenv.listFullModules()
return [ModuleEntry(entry.name(), entry.used()) for entry in j_module_entry_array]
def list_databases(self) -> List[str]:
"""
Gets the names of all databases in the current catalog.
:return: List of database names in the current catalog.
"""
j_database_name_array = self._j_tenv.listDatabases()
return [item for item in j_database_name_array]
def list_tables(self) -> List[str]:
"""
Gets the names of all tables and views in the current database of the current catalog.
It returns both temporary and permanent tables and views.
:return: List of table and view names in the current database of the current catalog.
"""
j_table_name_array = self._j_tenv.listTables()
return [item for item in j_table_name_array]
def list_views(self) -> List[str]:
"""
Gets the names of all views in the current database of the current catalog.
It returns both temporary and permanent views.
:return: List of view names in the current database of the current catalog.
.. versionadded:: 1.11.0
"""
j_view_name_array = self._j_tenv.listViews()
return [item for item in j_view_name_array]
def list_user_defined_functions(self) -> List[str]:
"""
Gets the names of all user defined functions registered in this environment.
:return: List of the names of all user defined functions registered in this environment.
"""
j_udf_name_array = self._j_tenv.listUserDefinedFunctions()
return [item for item in j_udf_name_array]
def list_functions(self) -> List[str]:
"""
Gets the names of all functions in this environment.
:return: List of the names of all functions in this environment.
.. versionadded:: 1.10.0
"""
j_function_name_array = self._j_tenv.listFunctions()
return [item for item in j_function_name_array]
def list_temporary_tables(self) -> List[str]:
"""
Gets the names of all temporary tables and views available in the current namespace
(the current database of the current catalog).
:return: A list of the names of all registered temporary tables and views in the current
database of the current catalog.
.. seealso:: :func:`list_tables`
.. versionadded:: 1.10.0
"""
j_table_name_array = self._j_tenv.listTemporaryTables()
return [item for item in j_table_name_array]
def list_temporary_views(self) -> List[str]:
"""
Gets the names of all temporary views available in the current namespace (the current
database of the current catalog).
:return: A list of the names of all registered temporary views in the current database
of the current catalog.
.. seealso:: :func:`list_tables`
.. versionadded:: 1.10.0
"""
j_view_name_array = self._j_tenv.listTemporaryViews()
return [item for item in j_view_name_array]
def drop_temporary_table(self, table_path: str) -> bool:
"""
Drops a temporary table registered in the given path.
If a permanent table with a given path exists, it will be used
from now on for any queries that reference this path.
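        Example (an illustrative sketch; it assumes a temporary table was registered earlier
        under the path "MyTempTable", e.g. via :func:`create_temporary_table`):
        ::
            >>> table_env.drop_temporary_table("MyTempTable")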
:param table_path: The path of the registered temporary table.
:return: True if a table existed in the given path and was removed.
.. versionadded:: 1.10.0
"""
return self._j_tenv.dropTemporaryTable(table_path)
def drop_temporary_view(self, view_path: str) -> bool:
"""
Drops a temporary view registered in the given path.
If a permanent table or view with a given path exists, it will be used
from now on for any queries that reference this path.
:return: True if a view existed in the given path and was removed.
.. versionadded:: 1.10.0
"""
return self._j_tenv.dropTemporaryView(view_path)
def explain_sql(self, stmt: str, *extra_details: ExplainDetail) -> str:
"""
Returns the AST of the specified statement and the execution plan.
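        Example (an illustrative sketch; the table "MyTable" is assumed to have been
        registered beforehand):
        ::
            >>> print(table_env.explain_sql(
            ...     "SELECT * FROM MyTable", ExplainDetail.ESTIMATED_COST))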
:param stmt: The statement for which the AST and execution plan will be returned.
:param extra_details: The extra explain details which the explain result should include,
e.g. estimated cost, changelog mode for streaming
        :return: The AST and the execution plan of the specified statement.
.. versionadded:: 1.11.0
"""
j_extra_details = to_j_explain_detail_arr(extra_details)
return self._j_tenv.explainSql(stmt, j_extra_details)
def sql_query(self, query: str) -> Table:
"""
Evaluates a SQL query on registered tables and retrieves the result as a
:class:`~pyflink.table.Table`.
All tables referenced by the query must be registered in the TableEnvironment.
A :class:`~pyflink.table.Table` is automatically registered when its
:func:`~Table.__str__` method is called, for example when it is embedded into a String.
Hence, SQL queries can directly reference a :class:`~pyflink.table.Table` as follows:
::
>>> table = ...
# the table is not registered to the table environment
>>> table_env.sql_query("SELECT * FROM %s" % table)
:param query: The sql query string.
:return: The result table.
"""
j_table = self._j_tenv.sqlQuery(query)
return Table(j_table, self)
def execute_sql(self, stmt: str) -> TableResult:
"""
Execute the given single statement, and return the execution result.
The statement can be DDL/DML/DQL/SHOW/DESCRIBE/EXPLAIN/USE.
For DML and DQL, this method returns TableResult once the job has been submitted.
For DDL and DCL statements, TableResult is returned once the operation has finished.
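        Example (an illustrative sketch; the table name and connector options are
        assumptions used for demonstration only):
        ::
            >>> table_env.execute_sql(
            ...     "CREATE TABLE Results (a BIGINT) WITH ('connector' = 'print')")
            >>> table_env.execute_sql("INSERT INTO Results VALUES (1), (2)").wait()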
        :return: The content for DQL/SHOW/DESCRIBE/EXPLAIN,
                 the affected row count for `DML` (-1 means unknown),
                 or a string message ("OK") for other statements.
.. versionadded:: 1.11.0
"""
self._before_execute()
return TableResult(self._j_tenv.executeSql(stmt))
def create_statement_set(self) -> StatementSet:
"""
        Creates a StatementSet instance which accepts DML statements or Tables. The planner
        can optimize all added statements and Tables together and then submit them as one job.
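        Example (an illustrative sketch; it assumes the source table "Source" and the sink
        tables "Sink1" and "Sink2" have been created beforehand):
        ::
            >>> stmt_set = table_env.create_statement_set()
            >>> stmt_set.add_insert_sql("INSERT INTO Sink1 SELECT a FROM Source")
            >>> stmt_set.add_insert_sql("INSERT INTO Sink2 SELECT b FROM Source")
            >>> stmt_set.execute().wait()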
        :return: The StatementSet instance.
.. versionadded:: 1.11.0
"""
_j_statement_set = self._j_tenv.createStatementSet()
return StatementSet(_j_statement_set, self)
def get_current_catalog(self) -> str:
"""
Gets the current default catalog name of the current session.
:return: The current default catalog name that is used for the path resolution.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
"""
return self._j_tenv.getCurrentCatalog()
def use_catalog(self, catalog_name: str):
"""
Sets the current catalog to the given value. It also sets the default
database to the catalog's default one.
See also :func:`~TableEnvironment.use_database`.
This is used during the resolution of object paths. Both the catalog and database are
optional when referencing catalog objects such as tables, views etc. The algorithm looks for
requested objects in following paths in that order:
* ``[current-catalog].[current-database].[requested-path]``
* ``[current-catalog].[requested-path]``
* ``[requested-path]``
Example:
Given structure with default catalog set to ``default_catalog`` and default database set to
``default_database``. ::
root:
|- default_catalog
|- default_database
|- tab1
|- db1
|- tab1
|- cat1
|- db1
|- tab1
The following table describes resolved paths:
+----------------+-----------------------------------------+
| Requested path | Resolved path |
+================+=========================================+
| tab1 | default_catalog.default_database.tab1 |
+----------------+-----------------------------------------+
| db1.tab1 | default_catalog.db1.tab1 |
+----------------+-----------------------------------------+
| cat1.db1.tab1 | cat1.db1.tab1 |
+----------------+-----------------------------------------+
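        Example (an illustrative sketch; "cat1" is assumed to be a catalog registered via
        :func:`register_catalog`):
        ::
            >>> table_env.use_catalog("cat1")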
:param catalog_name: The name of the catalog to set as the current default catalog.
:throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if a catalog with given
name could not be set as the default one.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
"""
self._j_tenv.useCatalog(catalog_name)
def get_current_database(self) -> str:
"""
Gets the current default database name of the running session.
:return: The name of the current database of the current catalog.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
"""
return self._j_tenv.getCurrentDatabase()
def use_database(self, database_name: str):
"""
Sets the current default database. It has to exist in the current catalog. That path will
be used as the default one when looking for unqualified object names.
This is used during the resolution of object paths. Both the catalog and database are
optional when referencing catalog objects such as tables, views etc. The algorithm looks for
requested objects in following paths in that order:
* ``[current-catalog].[current-database].[requested-path]``
* ``[current-catalog].[requested-path]``
* ``[requested-path]``
Example:
Given structure with default catalog set to ``default_catalog`` and default database set to
``default_database``. ::
root:
|- default_catalog
|- default_database
|- tab1
|- db1
|- tab1
|- cat1
|- db1
|- tab1
The following table describes resolved paths:
+----------------+-----------------------------------------+
| Requested path | Resolved path |
+================+=========================================+
| tab1 | default_catalog.default_database.tab1 |
+----------------+-----------------------------------------+
| db1.tab1 | default_catalog.db1.tab1 |
+----------------+-----------------------------------------+
| cat1.db1.tab1 | cat1.db1.tab1 |
+----------------+-----------------------------------------+
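        Example (an illustrative sketch; "db1" is assumed to exist in the current catalog):
        ::
            >>> table_env.use_database("db1")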
:throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if the given catalog and
database could not be set as the default ones.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
:param database_name: The name of the database to set as the current database.
"""
self._j_tenv.useDatabase(database_name)
def get_config(self) -> TableConfig:
"""
Returns the table config to define the runtime behavior of the Table API.
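        Example (an illustrative sketch; the configuration key and the timezone value are
        assumptions used for demonstration only):
        ::
            >>> table_env.get_config().set_local_timezone("Europe/Berlin")
            >>> table_env.get_config().set("parallelism.default", "2")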
:return: Current table config.
"""
if not hasattr(self, "table_config"):
table_config = TableConfig()
table_config._j_table_config = self._j_tenv.getConfig()
setattr(self, "table_config", table_config)
return getattr(self, "table_config")
def register_java_function(self, name: str, function_class_name: str):
"""
Registers a java user defined function under a unique name. Replaces already existing
        user-defined functions under this name. The acceptable function types include
**ScalarFunction**, **TableFunction** and **AggregateFunction**.
Example:
::
>>> table_env.register_java_function("func1", "java.user.defined.function.class.name")
:param name: The name under which the function is registered.
        :param function_class_name: The fully qualified Java class name of the function to register.
                                    The function must have a public no-argument constructor and can
                                    be found in the current Java classloader.
.. note:: Deprecated in 1.12. Use :func:`create_java_temporary_system_function` instead.
"""
warnings.warn("Deprecated in 1.12. Use :func:`create_java_temporary_system_function` "
"instead.", DeprecationWarning)
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader()\
.loadClass(function_class_name).newInstance()
# this is a temporary solution and will be unified later when we use the new type
# system(DataType) to replace the old type system(TypeInformation).
if not isinstance(self, StreamTableEnvironment) or self.__class__ == TableEnvironment:
if self._is_table_function(java_function):
self._register_table_function(name, java_function)
elif self._is_aggregate_function(java_function):
self._register_aggregate_function(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
def register_function(self, name: str, function: UserDefinedFunctionWrapper):
"""
        Registers a python user-defined function under a unique name. Replaces an already existing
        user-defined function under this name.
Example:
::
>>> table_env.register_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.register_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.register_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function is registered.
:param function: The python user-defined function to register.
.. versionadded:: 1.10.0
.. note:: Deprecated in 1.12. Use :func:`create_temporary_system_function` instead.
"""
warnings.warn("Deprecated in 1.12. Use :func:`create_temporary_system_function` "
"instead.", DeprecationWarning)
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
# this is a temporary solution and will be unified later when we use the new type
# system(DataType) to replace the old type system(TypeInformation).
if self.__class__ == TableEnvironment:
if self._is_table_function(java_function):
self._register_table_function(name, java_function)
elif self._is_aggregate_function(java_function):
self._register_aggregate_function(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
def create_temporary_view(self,
view_path: str,
table_or_data_stream: Union[Table, DataStream],
*fields_or_schema: Union[str, Expression, Schema]):
"""
1. When table_or_data_stream is a :class:`~pyflink.table.Table`:
Registers a :class:`~pyflink.table.Table` API object as a temporary view similar to SQL
temporary views.
Temporary objects can shadow permanent ones. If a permanent object in a given path
exists, it will be inaccessible in the current session. To make the permanent object
available again you can drop the corresponding temporary object.
2. When table_or_data_stream is a :class:`~pyflink.datastream.DataStream`:
2.1 When fields_or_schema is a str or a sequence of :class:`~pyflink.table.Expression`:
                Creates a view from the given :class:`~pyflink.datastream.DataStream` in a given
                path with specified field names. Registered views can be referenced in SQL queries.
1. Reference input fields by name: All fields in the schema definition are
               referenced by name (and possibly renamed using an alias (as)). Moreover, we can
define proctime and rowtime attributes at arbitrary positions using arbitrary names
(except those that exist in the result schema). In this mode, fields can be
reordered and projected out. This mode can be used for any input type, including
POJOs.
Example:
::
>>> stream = ...
# reorder the fields, rename the original 'f0' field to 'name' and add
# event-time attribute named 'rowtime'
# use str
>>> table_env.create_temporary_view(
... "cat.db.myTable",
... stream,
... "f1, rowtime.rowtime, f0 as 'name'")
# or use a sequence of expression
>>> table_env.create_temporary_view(
... "cat.db.myTable",
... stream,
... col("f1"),
... col("rowtime").rowtime,
... col("f0").alias('name'))
2. Reference input fields by position: In this mode, fields are simply renamed.
Event-time attributes can replace the field on their position in the input data
(if it is of correct type) or be appended at the end. Proctime attributes must be
appended at the end. This mode can only be used if the input type has a defined
field order (tuple, case class, Row) and none of the {@code fields} references a
field of the input type.
Example:
::
>>> stream = ...
# rename the original fields to 'a' and 'b' and extract the internally attached
# timestamp into an event-time attribute named 'rowtime'
# use str
>>> table_env.create_temporary_view(
... "cat.db.myTable", stream, "a, b, rowtime.rowtime")
# or use a sequence of expressions
>>> table_env.create_temporary_view(
... "cat.db.myTable",
... stream,
... col("a"),
... col("b"),
... col("rowtime").rowtime)
Temporary objects can shadow permanent ones. If a permanent object in a given path
exists, it will be inaccessible in the current session. To make the permanent object
available again you can drop the corresponding temporary object.
2.2 When fields_or_schema is a :class:`~pyflink.table.Schema`:
                Creates a view from the given :class:`~pyflink.datastream.DataStream` in a given
                path. Registered views can be referenced in SQL queries.
See :func:`from_data_stream` for more information on how a
:class:`~pyflink.datastream.DataStream` is translated into a table.
Temporary objects can shadow permanent ones. If a permanent object in a given path
exists, it will be inaccessible in the current session. To make the permanent object
available again you can drop the corresponding temporary object.
        .. note:: create_temporary_view by providing a Schema (case 2.) was added in Flink
1.14.0.
:param view_path: The path under which the view will be registered. See also the
:class:`~pyflink.table.TableEnvironment` class description for the format
of the path.
:param table_or_data_stream: The Table or DataStream out of which to create the view.
        :param fields_or_schema: The field expressions (str or Expression) that map the original
            fields of the DataStream to the fields of the view, or the customized schema for the
            final table.
.. versionadded:: 1.10.0
"""
if isinstance(table_or_data_stream, Table):
self._j_tenv.createTemporaryView(view_path, table_or_data_stream._j_table)
else:
j_data_stream = table_or_data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(j_data_stream.getExecutionEnvironment())
if len(fields_or_schema) == 0:
self._j_tenv.createTemporaryView(view_path, j_data_stream)
elif len(fields_or_schema) == 1 and isinstance(fields_or_schema[0], str):
self._j_tenv.createTemporaryView(
view_path,
j_data_stream,
fields_or_schema[0])
elif len(fields_or_schema) == 1 and isinstance(fields_or_schema[0], Schema):
self._j_tenv.createTemporaryView(
view_path,
j_data_stream,
fields_or_schema[0]._j_schema)
elif (len(fields_or_schema) > 0 and
all(isinstance(elem, Expression) for elem in fields_or_schema)):
self._j_tenv.createTemporaryView(
view_path,
j_data_stream,
to_expression_jarray(fields_or_schema))
else:
raise ValueError("Invalid arguments for 'fields': %r" %
','.join([repr(item) for item in fields_or_schema]))
def add_python_file(self, file_path: str):
"""
Adds a python dependency which could be python files, python packages or
local directories. They will be added to the PYTHONPATH of the python UDF worker.
Please make sure that these dependencies can be imported.
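        Example (an illustrative sketch; the file path is an assumption used for
        demonstration only):
        ::
            >>> table_env.add_python_file("/tmp/my_udf_utils.py")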
:param file_path: The path of the python dependency.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_files = self.get_config().get(jvm.PythonOptions.PYTHON_FILES.key(), None)
if python_files is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, python_files])
else:
python_files = file_path
self.get_config().set(jvm.PythonOptions.PYTHON_FILES.key(), python_files)
def set_python_requirements(self,
requirements_file_path: str,
requirements_cache_dir: str = None):
"""
Specifies a requirements.txt file which defines the third-party dependencies.
These dependencies will be installed to a temporary directory and added to the
PYTHONPATH of the python UDF worker.
For the dependencies which could not be accessed in the cluster, a directory which contains
the installation packages of these dependencies could be specified using the parameter
"requirements_cached_dir". It will be uploaded to the cluster to support offline
installation.
Example:
::
# commands executed in shell
$ echo numpy==1.16.5 > requirements.txt
$ pip download -d cached_dir -r requirements.txt --no-binary :all:
# python code
>>> table_env.set_python_requirements("requirements.txt", "cached_dir")
.. note::
            Please make sure the installation packages match the platform of the cluster
            and the python version used. These packages will be installed using pip,
            so also make sure that pip (version >= 20.3) and setuptools (version >= 37.0.0)
            are available.
:param requirements_file_path: The path of "requirements.txt" file.
:param requirements_cache_dir: The path of the local directory which contains the
installation packages.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_requirements = requirements_file_path
if requirements_cache_dir is not None:
python_requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[python_requirements, requirements_cache_dir])
self.get_config().set(
jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), python_requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
"""
Adds a python archive file. The file will be extracted to the working directory of
python UDF worker.
If the parameter "target_dir" is specified, the archive file will be extracted to a
directory named ${target_dir}. Otherwise, the archive file will be extracted to a
        directory with the same name as the archive file.
If python UDF depends on a specific python version which does not exist in the cluster,
this method can be used to upload the virtual environment.
Note that the path of the python interpreter contained in the uploaded environment
should be specified via the method :func:`pyflink.table.TableConfig.set_python_executable`.
The files uploaded via this method are also accessible in UDFs via relative path.
Example:
::
# command executed in shell
            # assume the relative path of the python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> table_env.add_python_archive("py_env.zip")
>>> table_env.get_config().set_python_executable("py_env.zip/py_env/bin/python")
# or
>>> table_env.add_python_archive("py_env.zip", "myenv")
>>> table_env.get_config().set_python_executable("myenv/py_env/bin/python")
# the files contained in the archive file can be accessed in UDF
>>> def my_udf():
... with open("myenv/py_env/data/data.txt") as f:
... ...
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
            is running on and that the python version is 3.5 or higher.
        .. note::
            Currently only zip-format archives are supported, i.e. zip, jar, whl, egg, etc.
            Other archive formats such as tar, tar.gz, 7z, rar, etc. are not supported.
:param archive_path: The archive file path.
:param target_dir: Optional, the target dir name that the archive file extracted to.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
if target_dir is not None:
archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[archive_path, target_dir])
python_archives = self.get_config().get(jvm.PythonOptions.PYTHON_ARCHIVES.key(), None)
if python_archives is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
[python_archives, archive_path])
else:
python_files = archive_path
self.get_config().set(jvm.PythonOptions.PYTHON_ARCHIVES.key(), python_files)
def from_elements(self, elements: Iterable, schema: Union[DataType, List[str]] = None,
verify_schema: bool = True) -> Table:
"""
Creates a table from a collection of elements.
        The element types must be acceptable atomic types or acceptable composite types.
        All elements must be of the same type.
        If the element types are composite types, the composite types must be strictly equal,
        and their subtypes must also be acceptable types.
        E.g. if the elements are tuples, the length of the tuples must be equal and the element
        types of the tuples must be equal in order.
        The built-in acceptable atomic element types contain:
**int**, **long**, **str**, **unicode**, **bool**,
**float**, **bytearray**, **datetime.date**, **datetime.time**, **datetime.datetime**,
**datetime.timedelta**, **decimal.Decimal**
        The built-in acceptable composite element types contain:
**list**, **tuple**, **dict**, **array**, :class:`~pyflink.table.Row`
If the element type is a composite type, it will be unboxed.
e.g. table_env.from_elements([(1, 'Hi'), (2, 'Hello')]) will return a table like:
+----+-------+
| _1 | _2 |
+====+=======+
| 1 | Hi |
+----+-------+
| 2 | Hello |
+----+-------+
"_1" and "_2" are generated field names.
Example:
::
# use the second parameter to specify custom field names
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
# use the second parameter to specify custom table schema
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING())]))
# use the third parameter to switch whether to verify the elements against the schema
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING())]),
... False)
# create Table from expressions
>>> table_env.from_elements([row(1, 'abc', 2.0), row(2, 'def', 3.0)],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING()),
... DataTypes.FIELD("c", DataTypes.FLOAT())]))
:param elements: The elements to create a table from.
:param schema: The schema of the table.
:param verify_schema: Whether to verify the elements against the schema.
:return: The result table.
"""
# verifies the elements against the specified schema
if isinstance(schema, RowType):
verify_func = _create_type_verifier(schema) if verify_schema else lambda _: True
def verify_obj(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
data_type = schema
schema = RowType().add("value", schema)
verify_func = _create_type_verifier(
data_type, name="field value") if verify_schema else lambda _: True
def verify_obj(obj):
verify_func(obj)
return obj
else:
def verify_obj(obj):
return obj
# infers the schema if not specified
if schema is None or isinstance(schema, (list, tuple)):
schema = _infer_schema_from_data(elements, names=schema)
converter = _create_converter(schema)
elements = map(converter, elements)
elif not isinstance(schema, RowType):
raise TypeError(
"schema should be RowType, list, tuple or None, but got: %s" % schema)
elements = list(elements)
# in case all the elements are expressions
if len(elements) > 0 and all(isinstance(elem, Expression) for elem in elements):
if schema is None:
return Table(self._j_tenv.fromValues(to_expression_jarray(elements)), self)
else:
return Table(self._j_tenv.fromValues(_to_java_data_type(schema),
to_expression_jarray(elements)),
self)
elif any(isinstance(elem, Expression) for elem in elements):
raise ValueError("It doesn't support part of the elements are Expression, while the "
"others are not.")
# verifies the elements against the specified schema
elements = map(verify_obj, elements)
# converts python data to sql data
elements = [schema.to_sql_type(element) for element in elements]
return self._from_elements(elements, schema)
def _from_elements(self, elements: List, schema: Union[DataType, List[str]]) -> Table:
"""
Creates a table from a collection of elements.
:param elements: The elements to create a table from.
:return: The result :class:`~pyflink.table.Table`.
"""
# serializes to a file, and we read the file in java
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = BatchedSerializer(self._serializer)
try:
with temp_file:
serializer.serialize(elements, temp_file)
row_type_info = _to_java_type(schema)
execution_config = self._get_j_env().getConfig()
gateway = get_gateway()
j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, True)
PythonTableUtils = gateway.jvm \
.org.apache.flink.table.utils.python.PythonTableUtils
PythonInputFormatTableSource = gateway.jvm \
.org.apache.flink.table.utils.python.PythonInputFormatTableSource
j_input_format = PythonTableUtils.getInputFormat(
j_objs, row_type_info, execution_config)
j_table_source = PythonInputFormatTableSource(
j_input_format, row_type_info)
return Table(self._j_tenv.fromTableSource(j_table_source), self)
finally:
os.unlink(temp_file.name)
def from_pandas(self, pdf,
schema: Union[RowType, List[str], Tuple[str], List[DataType],
Tuple[DataType]] = None,
splits_num: int = 1) -> Table:
"""
Creates a table from a pandas DataFrame.
Example:
::
>>> pdf = pd.DataFrame(np.random.rand(1000, 2))
# use the second parameter to specify custom field names
>>> table_env.from_pandas(pdf, ["a", "b"])
# use the second parameter to specify custom field types
            >>> table_env.from_pandas(pdf, [DataTypes.DOUBLE(), DataTypes.DOUBLE()])
# use the second parameter to specify custom table schema
>>> table_env.from_pandas(pdf,
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.DOUBLE()),
... DataTypes.FIELD("b", DataTypes.DOUBLE())]))
:param pdf: The pandas DataFrame.
:param schema: The schema of the converted table.
:param splits_num: The number of splits the given Pandas DataFrame will be split into. It
determines the number of parallel source tasks.
If not specified, the default parallelism will be used.
:return: The result table.
.. versionadded:: 1.11.0
"""
import pandas as pd
if not isinstance(pdf, pd.DataFrame):
raise TypeError("Unsupported type, expected pandas.DataFrame, got %s" % type(pdf))
import pyarrow as pa
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
if schema is not None:
if isinstance(schema, RowType):
result_type = schema
elif isinstance(schema, (list, tuple)) and isinstance(schema[0], str):
result_type = RowType(
[RowField(field_name, from_arrow_type(field.type, field.nullable))
for field_name, field in zip(schema, arrow_schema)])
elif isinstance(schema, (list, tuple)) and isinstance(schema[0], DataType):
result_type = RowType(
[RowField(field_name, field_type) for field_name, field_type in zip(
arrow_schema.names, schema)])
else:
raise TypeError("Unsupported schema type, it could only be of RowType, a "
"list of str or a list of DataType, got %s" % schema)
else:
result_type = RowType([RowField(field.name, from_arrow_type(field.type, field.nullable))
for field in arrow_schema])
# serializes to a file, and we read the file in java
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
import pytz
serializer = ArrowSerializer(
create_arrow_schema(result_type.field_names(), result_type.field_types()),
result_type,
pytz.timezone(self.get_config().get_local_timezone()))
step = -(-len(pdf) // splits_num)
pdf_slices = [pdf.iloc[start:start + step] for start in range(0, len(pdf), step)]
data = [[c for (_, c) in pdf_slice.iteritems()] for pdf_slice in pdf_slices]
try:
with temp_file:
serializer.serialize(data, temp_file)
jvm = get_gateway().jvm
data_type = jvm.org.apache.flink.table.types.utils.TypeConversions\
.fromLegacyInfoToDataType(_to_java_type(result_type)).notNull()
data_type = data_type.bridgedTo(
load_java_class('org.apache.flink.table.data.RowData'))
j_arrow_table_source = \
jvm.org.apache.flink.table.runtime.arrow.ArrowUtils.createArrowTableSource(
data_type, temp_file.name)
return Table(self._j_tenv.fromTableSource(j_arrow_table_source), self)
finally:
os.unlink(temp_file.name)
def _set_python_executable_for_local_executor(self):
jvm = get_gateway().jvm
j_config = get_j_env_configuration(self._get_j_env())
if not j_config.containsKey(jvm.PythonOptions.PYTHON_EXECUTABLE.key()) \
and is_local_deployment(j_config):
j_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), sys.executable)
def _add_jars_to_j_env_config(self, config_key):
jvm = get_gateway().jvm
jar_urls = self.get_config().get(config_key, None)
if jar_urls is not None:
# normalize
jar_urls_list = []
for url in jar_urls.split(";"):
url = url.strip()
if url != "":
jar_urls_list.append(jvm.java.net.URL(url).toString())
j_configuration = get_j_env_configuration(self._get_j_env())
if j_configuration.containsKey(config_key):
for url in j_configuration.getString(config_key, "").split(";"):
url = url.strip()
if url != "" and url not in jar_urls_list:
jar_urls_list.append(url)
j_configuration.setString(config_key, ";".join(jar_urls_list))
def _get_j_env(self):
return self._j_tenv.getPlanner().getExecEnv()
@staticmethod
def _is_table_function(java_function):
java_function_class = java_function.getClass()
j_table_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.TableFunction)
return j_table_function_class.isAssignableFrom(java_function_class)
@staticmethod
def _is_aggregate_function(java_function):
java_function_class = java_function.getClass()
j_aggregate_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.ImperativeAggregateFunction)
return j_aggregate_function_class.isAssignableFrom(java_function_class)
def _register_table_function(self, name, table_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfTableFunction(table_function)
function_catalog.registerTempSystemTableFunction(name, table_function, result_type)
def _register_aggregate_function(self, name, aggregate_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfAggregateFunction(aggregate_function)
acc_type = helper.getAccumulatorTypeOfAggregateFunction(aggregate_function)
function_catalog.registerTempSystemAggregateFunction(
name, aggregate_function, result_type, acc_type)
def _get_function_catalog(self):
function_catalog_field = self._j_tenv.getClass().getDeclaredField("functionCatalog")
function_catalog_field.setAccessible(True)
function_catalog = function_catalog_field.get(self._j_tenv)
return function_catalog
def _before_execute(self):
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
self._add_jars_to_j_env_config(jars_key)
self._add_jars_to_j_env_config(classpaths_key)
def _wrap_aggregate_function_if_needed(self, function) -> UserDefinedFunctionWrapper:
if isinstance(function, AggregateFunction):
function = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
elif isinstance(function, TableAggregateFunction):
function = udtaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
return function
def _config_chaining_optimization(self):
JChainingOptimizingExecutor = get_gateway().jvm.org.apache.flink.table.executor.python.\
ChainingOptimizingExecutor
exec_env_field = get_field(self._j_tenv.getClass(), "execEnv")
exec_env_field.set(self._j_tenv,
JChainingOptimizingExecutor(exec_env_field.get(self._j_tenv)))
def _open(self):
# start BeamFnLoopbackWorkerPoolServicer when executed in MiniCluster
def startup_loopback_server():
from pyflink.fn_execution.beam.beam_worker_pool_service import \
BeamFnLoopbackWorkerPoolServicer
self.get_config().set("python.loopback-server.address",
BeamFnLoopbackWorkerPoolServicer().start())
python_worker_execution_mode = os.environ.get('_python_worker_execution_mode')
if python_worker_execution_mode is None:
if is_local_deployment(get_j_env_configuration(self._get_j_env())):
startup_loopback_server()
elif python_worker_execution_mode == 'loopback':
if is_local_deployment(get_j_env_configuration(self._get_j_env())):
startup_loopback_server()
else:
raise ValueError("Loopback mode is enabled, however the job wasn't configured to "
"run in local deployment mode")
elif python_worker_execution_mode != 'process':
raise ValueError(
"It only supports to execute the Python worker in 'loopback' mode and 'process' "
"mode, unknown mode '%s' is configured" % python_worker_execution_mode)
class StreamTableEnvironment(TableEnvironment):
def __init__(self, j_tenv):
super(StreamTableEnvironment, self).__init__(j_tenv)
@staticmethod
def create(stream_execution_environment: StreamExecutionEnvironment = None, # type: ignore
environment_settings: EnvironmentSettings = None) -> 'StreamTableEnvironment':
"""
Creates a :class:`~pyflink.table.StreamTableEnvironment`.
Example:
::
# create with StreamExecutionEnvironment.
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> table_env = StreamTableEnvironment.create(env)
# create with StreamExecutionEnvironment and EnvironmentSettings.
>>> configuration = Configuration()
>>> configuration.set_string('execution.buffer-timeout', '1 min')
>>> environment_settings = EnvironmentSettings \\
... .new_instance() \\
... .in_streaming_mode() \\
... .with_configuration(configuration) \\
... .build()
>>> table_env = StreamTableEnvironment.create(
... env, environment_settings=environment_settings)
# create with EnvironmentSettings.
>>> table_env = StreamTableEnvironment.create(environment_settings=environment_settings)
:param stream_execution_environment: The
:class:`~pyflink.datastream.StreamExecutionEnvironment`
of the TableEnvironment.
:param environment_settings: The environment settings used to instantiate the
TableEnvironment.
:return: The StreamTableEnvironment created from given StreamExecutionEnvironment and
configuration.
"""
if stream_execution_environment is None and \
environment_settings is None:
raise ValueError("No argument found, the param 'stream_execution_environment' "
"or 'environment_settings' is required.")
gateway = get_gateway()
if environment_settings is not None:
if stream_execution_environment is None:
j_tenv = gateway.jvm.TableEnvironment.create(
environment_settings._j_environment_settings)
else:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment,
environment_settings._j_environment_settings)
else:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment)
return StreamTableEnvironment(j_tenv)
def from_data_stream(self,
data_stream: DataStream,
*fields_or_schema: Union[Expression, Schema]) -> Table:
"""
1. When fields_or_schema is a sequence of Expression:
Converts the given DataStream into a Table with specified field names.
There are two modes for mapping original fields to the fields of the Table:
1. Reference input fields by name:
                All fields in the schema definition are referenced by name (and possibly renamed
                using an alias (as)). Moreover, we can define proctime and rowtime attributes at
arbitrary positions using arbitrary names (except those that exist in the result
schema). In this mode, fields can be reordered and projected out. This mode can be
used for any input type.
2. Reference input fields by position:
In this mode, fields are simply renamed. Event-time attributes can replace the field
on their position in the input data (if it is of correct type) or be appended at the
end. Proctime attributes must be appended at the end. This mode can only be used if
the input type has a defined field order (tuple, case class, Row) and none of the
fields references a field of the input type.
2. When fields_or_schema is a Schema:
Converts the given DataStream into a Table.
Column names and types of the Table are automatically derived from the TypeInformation
of the DataStream. If the outermost record's TypeInformation is a CompositeType, it will
be flattened in the first level. Composite nested fields will not be accessible.
Since the DataStream API does not support changelog processing natively, this method
assumes append-only/insert-only semantics during the stream-to-table conversion. Records
of class Row must describe RowKind.INSERT changes.
By default, the stream record's timestamp and watermarks are not propagated unless
explicitly declared.
            This method allows declaring a Schema for the resulting table. The declaration is
            similar to a ``CREATE TABLE`` DDL in SQL and allows to:
1. enrich or overwrite automatically derived columns with a custom DataType
2. reorder columns
3. add computed or metadata columns next to the physical columns
4. access a stream record's timestamp
5. declare a watermark strategy or propagate the DataStream watermarks
It is possible to declare a schema without physical/regular columns. In this case, those
columns will be automatically derived and implicitly put at the beginning of the schema
declaration.
The following examples illustrate common schema declarations and their semantics:
Example:
::
=== EXAMPLE 1 ===
no physical columns defined, they will be derived automatically,
e.g. BigDecimal becomes DECIMAL(38, 18)
>>> Schema.new_builder() \
... .column_by_expression("c1", "f1 + 42") \
... .column_by_expression("c2", "f1 - 1") \
... .build()
equal to: CREATE TABLE (f0 STRING, f1 DECIMAL(38, 18), c1 AS f1 + 42, c2 AS f1 - 1)
=== EXAMPLE 2 ===
physical columns defined, input fields and columns will be mapped by name,
columns are reordered and their data type overwritten,
all columns must be defined to show up in the final table's schema
>>> Schema.new_builder() \
... .column("f1", "DECIMAL(10, 2)") \
... .column_by_expression("c", "f1 - 1") \
... .column("f0", "STRING") \
... .build()
equal to: CREATE TABLE (f1 DECIMAL(10, 2), c AS f1 - 1, f0 STRING)
=== EXAMPLE 3 ===
timestamp and watermarks can be added from the DataStream API,
physical columns will be derived automatically
>>> Schema.new_builder() \
... .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)") \
... .watermark("rowtime", "SOURCE_WATERMARK()") \
... .build()
equal to:
CREATE TABLE (
f0 STRING,
f1 DECIMAL(38, 18),
rowtime TIMESTAMP(3) METADATA,
WATERMARK FOR rowtime AS SOURCE_WATERMARK()
)
            .. note:: from_data_stream by providing a Schema (case 2.) was added in Flink
1.14.0.
:param data_stream: The datastream to be converted.
:param fields_or_schema: The fields expressions to map original fields of the DataStream to
the fields of the Table or the customized schema for the final table.
:return: The converted Table.
.. versionadded:: 1.12.0
"""
j_data_stream = data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(j_data_stream.getExecutionEnvironment())
if len(fields_or_schema) == 0:
return Table(j_table=self._j_tenv.fromDataStream(j_data_stream), t_env=self)
elif all(isinstance(f, Expression) for f in fields_or_schema):
return Table(j_table=self._j_tenv.fromDataStream(
j_data_stream, to_expression_jarray(fields_or_schema)), t_env=self)
elif len(fields_or_schema) == 1 and isinstance(fields_or_schema[0], Schema):
return Table(j_table=self._j_tenv.fromDataStream(
j_data_stream, fields_or_schema[0]._j_schema), t_env=self)
raise ValueError("Invalid arguments for 'fields': %r" % fields_or_schema)
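    # Illustrative usage sketch (added for this write-up, not part of the
    # original module). It shows the two calling styles documented above;
    # `env`, `t_env`, `Types` and `col` are assumed to be provided by the
    # caller (`from pyflink.table.expressions import col`).
    #
    #   ds = env.from_collection(
    #       [(1, "a"), (2, "b")],
    #       type_info=Types.ROW_NAMED(["id", "name"],
    #                                 [Types.INT(), Types.STRING()]))
    #   # 1) map input fields with expressions
    #   t1 = t_env.from_data_stream(ds, col("id"), col("name"))
    #   # 2) map input fields with a Schema, adding a rowtime metadata column
    #   t2 = t_env.from_data_stream(
    #       ds,
    #       Schema.new_builder()
    #             .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
    #             .watermark("rowtime", "SOURCE_WATERMARK()")
    #             .build())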
def from_changelog_stream(self,
data_stream: DataStream,
schema: Schema = None,
changelog_mode: ChangelogMode = None) -> Table:
"""
Converts the given DataStream of changelog entries into a Table.
Compared to :func:`from_data_stream`, this method consumes instances of Row and evaluates
the RowKind flag that is contained in every record during runtime. The runtime behavior is
similar to that of a DynamicTableSource.
If you don't specify the changelog_mode, the changelog containing all kinds of changes
        (enumerated in RowKind) is used as the default ChangelogMode.
Column names and types of the Table are automatically derived from the TypeInformation of
the DataStream. If the outermost record's TypeInformation is a CompositeType, it will be
flattened in the first level. Composite nested fields will not be accessible.
By default, the stream record's timestamp and watermarks are not propagated unless
explicitly declared.
This method allows to declare a Schema for the resulting table. The declaration is similar
to a {@code CREATE TABLE} DDL in SQL and allows to:
1. enrich or overwrite automatically derived columns with a custom DataType
2. reorder columns
3. add computed or metadata columns next to the physical columns
4. access a stream record's timestamp
5. declare a watermark strategy or propagate the DataStream watermarks
6. declare a primary key
See :func:`from_data_stream` for more information and examples of how to declare a Schema.
:param data_stream: The changelog stream of Row.
:param schema: The customized schema for the final table.
:param changelog_mode: The expected kinds of changes in the incoming changelog.
:return: The converted Table.
"""
j_data_stream = data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(j_data_stream.getExecutionEnvironment())
if schema is None:
return Table(self._j_tenv.fromChangelogStream(j_data_stream), t_env=self)
elif changelog_mode is None:
return Table(
self._j_tenv.fromChangelogStream(j_data_stream, schema._j_schema), t_env=self)
else:
return Table(
self._j_tenv.fromChangelogStream(
j_data_stream,
schema._j_schema,
changelog_mode._j_changelog_mode),
t_env=self)
def to_data_stream(self, table: Table) -> DataStream:
"""
Converts the given Table into a DataStream.
Since the DataStream API does not support changelog processing natively, this method
assumes append-only/insert-only semantics during the table-to-stream conversion. The records
of class Row will always describe RowKind#INSERT changes. Updating tables are
not supported by this method and will produce an exception.
Note that the type system of the table ecosystem is richer than the one of the DataStream
API. The table runtime will make sure to properly serialize the output records to the first
operator of the DataStream API. Afterwards, the Types semantics of the DataStream API
need to be considered.
If the input table contains a single rowtime column, it will be propagated into a stream
record's timestamp. Watermarks will be propagated as well.
:param table: The Table to convert.
:return: The converted DataStream.
"""
return DataStream(self._j_tenv.toDataStream(table._j_table))
def to_changelog_stream(self,
table: Table,
target_schema: Schema = None,
changelog_mode: ChangelogMode = None) -> DataStream:
"""
Converts the given Table into a DataStream of changelog entries.
Compared to :func:`to_data_stream`, this method produces instances of Row and sets the
RowKind flag that is contained in every record during runtime. The runtime behavior is
similar to that of a DynamicTableSink.
If you don't specify the changelog_mode, the changelog containing all kinds of changes
        (enumerated in RowKind) is used as the default ChangelogMode.
The given Schema is used to configure the table runtime to convert columns and internal data
structures to the desired representation. The following example shows how to
convert a table column into a Row type.
Example:
::
>>> table_env.to_changelog_stream(
... table,
... Schema.new_builder() \
... .column("id", DataTypes.BIGINT())
... .column("payload", DataTypes.ROW(
... [DataTypes.FIELD("name", DataTypes.STRING()),
... DataTypes.FIELD("age", DataTypes.INT())]))
... .build())
Note that the type system of the table ecosystem is richer than the one of the DataStream
API. The table runtime will make sure to properly serialize the output records to the first
operator of the DataStream API. Afterwards, the Types semantics of the DataStream API need
to be considered.
If the input table contains a single rowtime column, it will be propagated into a stream
record's timestamp. Watermarks will be propagated as well.
If the rowtime should not be a concrete field in the final Row anymore, or the schema should
be symmetrical for both :func:`from_changelog_stream` and :func:`to_changelog_stream`, the
rowtime can also be declared as a metadata column that will be propagated into a stream
record's timestamp. It is possible to declare a schema without physical/regular columns.
In this case, those columns will be automatically derived and implicitly put at the
beginning of the schema declaration.
The following examples illustrate common schema declarations and their semantics:
Example:
::
given a Table of (id INT, name STRING, my_rowtime TIMESTAMP_LTZ(3))
=== EXAMPLE 1 ===
no physical columns defined, they will be derived automatically,
the last derived physical column will be skipped in favor of the metadata column
>>> Schema.new_builder() \
... .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)") \
... .build()
equal to: CREATE TABLE (id INT, name STRING, rowtime TIMESTAMP_LTZ(3) METADATA)
=== EXAMPLE 2 ===
physical columns defined, all columns must be defined
>>> Schema.new_builder() \
... .column("id", "INT") \
... .column("name", "STRING") \
... .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)") \
... .build()
equal to: CREATE TABLE (id INT, name STRING, rowtime TIMESTAMP_LTZ(3) METADATA)
:param table: The Table to convert. It can be updating or insert-only.
:param target_schema: The Schema that decides about the final external representation in
DataStream records.
:param changelog_mode: The required kinds of changes in the result changelog. An exception
will be thrown if the given updating table cannot be represented in this changelog mode.
:return: The converted changelog stream of Row.
"""
if target_schema is None:
return DataStream(self._j_tenv.toChangelogStream(table._j_table))
elif changelog_mode is None:
return DataStream(
self._j_tenv.toChangelogStream(table._j_table, target_schema._j_schema))
else:
return DataStream(
self._j_tenv.toChangelogStream(
table._j_table,
target_schema._j_schema,
changelog_mode._j_changelog_mode))
def to_append_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
"""
Converts the given Table into a DataStream of a specified type. The Table must only have
insert (append) changes. If the Table is also modified by update or delete changes, the
conversion will fail.
The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields are
mapped by position, field types must match.
:param table: The Table to convert.
:param type_info: The TypeInformation that specifies the type of the DataStream.
:return: The converted DataStream.
.. versionadded:: 1.12.0
"""
j_data_stream = self._j_tenv.toAppendStream(table._j_table, type_info.get_java_type_info())
return DataStream(j_data_stream=j_data_stream)
def to_retract_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
"""
Converts the given Table into a DataStream of add and retract messages. The message will be
encoded as Tuple. The first field is a boolean flag, the second field holds the record of
the specified type.
A true flag indicates an add message, a false flag indicates a retract message.
The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields are
mapped by position, field types must match.
:param table: The Table to convert.
:param type_info: The TypeInformation of the requested record type.
:return: The converted DataStream.
.. versionadded:: 1.12.0
"""
j_data_stream = self._j_tenv.toRetractStream(table._j_table, type_info.get_java_type_info())
return DataStream(j_data_stream=j_data_stream)
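# Minimal usage sketch appended for illustration; it is not part of the
# original module. It assumes `t_env` is a StreamTableEnvironment created by
# the caller (e.g. via StreamTableEnvironment.create(env)).
def _example_table_to_stream_roundtrip(t_env):
    from pyflink.common.typeinfo import Types
    # Build a small insert-only table ...
    table = t_env.from_elements([(1, 'hello'), (2, 'world')], ['id', 'data'])
    # ... emit it as a plain append stream of Row ...
    append_ds = t_env.to_append_stream(
        table, Types.ROW([Types.INT(), Types.STRING()]))
    # ... and as a retract stream of (add/retract flag, Row) pairs, which is
    # the encoding described in to_retract_stream above.
    retract_ds = t_env.to_retract_stream(
        table, Types.ROW([Types.INT(), Types.STRING()]))
    return append_ds, retract_ds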
| {
"content_hash": "98ff4b95c8d2c63d12b8136aea98dd8b",
"timestamp": "",
"source": "github",
"line_count": 2030,
"max_line_length": 100,
"avg_line_length": 45.74729064039409,
"alnum_prop": 0.6035405472342167,
"repo_name": "godfreyhe/flink",
"id": "544b796bd0c5b391a229b6e1e72484b8eae62d56",
"size": "93825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/table/table_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "20596"
},
{
"name": "Batchfile",
"bytes": "1863"
},
{
"name": "C",
"bytes": "847"
},
{
"name": "Cython",
"bytes": "132975"
},
{
"name": "Dockerfile",
"bytes": "5579"
},
{
"name": "FreeMarker",
"bytes": "93941"
},
{
"name": "GAP",
"bytes": "139536"
},
{
"name": "HTML",
"bytes": "155679"
},
{
"name": "HiveQL",
"bytes": "123152"
},
{
"name": "Java",
"bytes": "93613871"
},
{
"name": "JavaScript",
"bytes": "7038"
},
{
"name": "Less",
"bytes": "68979"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "2812324"
},
{
"name": "Scala",
"bytes": "10688614"
},
{
"name": "Shell",
"bytes": "519375"
},
{
"name": "TypeScript",
"bytes": "326237"
},
{
"name": "q",
"bytes": "9630"
}
],
"symlink_target": ""
} |
"""Support for WaterHeater devices of (EMEA/EU) Honeywell TCC systems."""
from __future__ import annotations
import logging
from homeassistant.components.water_heater import (
SUPPORT_AWAY_MODE,
SUPPORT_OPERATION_MODE,
WaterHeaterEntity,
)
from homeassistant.const import PRECISION_TENTHS, PRECISION_WHOLE, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as dt_util
from . import EvoChild
from .const import DOMAIN, EVO_FOLLOW, EVO_PERMOVER
_LOGGER = logging.getLogger(__name__)
STATE_AUTO = "auto"
HA_STATE_TO_EVO = {STATE_AUTO: "", STATE_ON: "On", STATE_OFF: "Off"}
EVO_STATE_TO_HA = {v: k for k, v in HA_STATE_TO_EVO.items() if k != ""}
STATE_ATTRS_DHW = ["dhwId", "activeFaults", "stateStatus", "temperatureStatus"]
async def async_setup_platform(
hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Create a DHW controller."""
if discovery_info is None:
return
broker = hass.data[DOMAIN]["broker"]
_LOGGER.debug(
"Adding: DhwController (%s), id=%s",
broker.tcs.hotwater.zone_type,
broker.tcs.hotwater.zoneId,
)
new_entity = EvoDHW(broker, broker.tcs.hotwater)
async_add_entities([new_entity], update_before_add=True)
class EvoDHW(EvoChild, WaterHeaterEntity):
"""Base for a Honeywell TCC DHW controller (aka boiler)."""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize an evohome DHW controller."""
super().__init__(evo_broker, evo_device)
self._unique_id = evo_device.dhwId
self._name = "DHW controller"
self._icon = "mdi:thermometer-lines"
self._precision = PRECISION_TENTHS if evo_broker.client_v1 else PRECISION_WHOLE
self._supported_features = SUPPORT_AWAY_MODE | SUPPORT_OPERATION_MODE
@property
def state(self):
"""Return the current state."""
return EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]]
@property
def current_operation(self) -> str:
"""Return the current operating mode (Auto, On, or Off)."""
if self._evo_device.stateStatus["mode"] == EVO_FOLLOW:
return STATE_AUTO
return EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]]
@property
def operation_list(self) -> list[str]:
"""Return the list of available operations."""
return list(HA_STATE_TO_EVO)
@property
def is_away_mode_on(self):
"""Return True if away mode is on."""
is_off = EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]] == STATE_OFF
is_permanent = self._evo_device.stateStatus["mode"] == EVO_PERMOVER
return is_off and is_permanent
async def async_set_operation_mode(self, operation_mode: str) -> None:
"""Set new operation mode for a DHW controller.
Except for Auto, the mode is only until the next SetPoint.
"""
if operation_mode == STATE_AUTO:
await self._evo_broker.call_client_api(self._evo_device.set_dhw_auto())
else:
await self._update_schedule()
until = dt_util.parse_datetime(self.setpoints.get("next_sp_from", ""))
until = dt_util.as_utc(until) if until else None
if operation_mode == STATE_ON:
await self._evo_broker.call_client_api(
self._evo_device.set_dhw_on(until=until)
)
else: # STATE_OFF
await self._evo_broker.call_client_api(
self._evo_device.set_dhw_off(until=until)
)
async def async_turn_away_mode_on(self):
"""Turn away mode on."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_off())
async def async_turn_away_mode_off(self):
"""Turn away mode off."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_auto())
async def async_turn_on(self):
"""Turn on."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_on())
async def async_turn_off(self):
"""Turn off."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_off())
async def async_update(self) -> None:
"""Get the latest state data for a DHW controller."""
await super().async_update()
for attr in STATE_ATTRS_DHW:
self._device_state_attrs[attr] = getattr(self._evo_device, attr)
| {
"content_hash": "7965ffe53a4a6464295438c097cb4584",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 87,
"avg_line_length": 35.328125,
"alnum_prop": 0.6318000884564352,
"repo_name": "FreekingDean/home-assistant",
"id": "495df9e697e2f25026a5a22bdd0567dd3e1a7c9b",
"size": "4522",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/evohome/water_heater.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('apies', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='api',
name='active',
),
]
| {
"content_hash": "2489f2d2e539e8d9521204113eab11c2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 40,
"avg_line_length": 18.352941176470587,
"alnum_prop": 0.5737179487179487,
"repo_name": "Sult/daf",
"id": "abfbf672c0c16f20cd8c1b23aa9de665c1917ec6",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/apies/migrations/0002_remove_api_active.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "349255"
},
{
"name": "HTML",
"bytes": "233658"
},
{
"name": "JavaScript",
"bytes": "521786"
},
{
"name": "Makefile",
"bytes": "285"
},
{
"name": "Python",
"bytes": "309731"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "gvl.org.au",
"name": "gvldash"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
] | {
"content_hash": "76a4c2190878c2a7ff2d1526cd9aca82",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.5928338762214984,
"repo_name": "gvlproject/gvldash",
"id": "66c40443c25fe3cd57e20df44b4f04675daf884b",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gvldash/contrib/sites/migrations/0002_set_site_domain_and_name.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8574"
},
{
"name": "HTML",
"bytes": "46267"
},
{
"name": "JavaScript",
"bytes": "12625"
},
{
"name": "Python",
"bytes": "52742"
}
],
"symlink_target": ""
} |
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define,options
define("port",default=8000,help="run on the given port",type=int)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
greeting = self.get_argument('greeting','Hello')
self.write(greeting+', friendly user')
def write_error(self,status_code,**kwargs):
        self.write("Gosh darnit, user! You caused a %d error." % status_code)
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(handlers=[(r"/",IndexHandler)])
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
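# Usage sketch (illustrative comments only, not part of the original sample):
#
#   $ python http_code.py --port=8000
#   $ curl 'http://localhost:8000/?greeting=Salutations'
#   Salutations, friendly user
#
# write_error() only fires when this handler raises an uncaught exception or
# a tornado.web.HTTPError; with the default greeting the GET path never
# errors, so to see the custom message you would add something like
# `raise tornado.web.HTTPError(400)` inside get() for a test request.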
| {
"content_hash": "2b648dc3fe933b8b3b0fddb284d13ed1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 29.28,
"alnum_prop": 0.744535519125683,
"repo_name": "xiongerqi/enki",
"id": "e436f8b2cb4f17e379ee7d5b109baf8b648716e5",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/web/tornado/http_code.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "252"
},
{
"name": "HTML",
"bytes": "1857"
},
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "32972"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from djview.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from djview.models import Category, Page
def category(request, category_name_slug):
context_dict = {}
try:
category = Category.objects.get(slug=category_name_slug)
context_dict['category_name'] = category.name
pages = Page.objects.filter(category=category)
context_dict['pages'] = pages
context_dict['category'] = category
context_dict['category_name_slug'] = category_name_slug
except Category.DoesNotExist:
pass
return render(request, 'category.html', context_dict)
def add_category(request):
if not request.method == 'POST':
form = CategoryForm()
return render(request, 'add_category.html', {'form': form})
form = CategoryForm(request.POST)
if form.is_valid():
form.save(commit=True)
return djview_index(request)
else:
print form.errors
return render(request, 'add_category.html', {'form': form})
def add_page(request, category_name_slug):
try:
cat = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat = None
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if cat:
page = form.save(commit=False)
page.category = cat
page.views = 0
page.save()
return redirect('djview_index')
else:
print form.errors
else:
form = PageForm()
context_dict = {'form': form, 'category': cat}
return render(request, 'add_page.html', context_dict)
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
else:
print user_form.errors
print profile_form.errors
else:
user_form = UserForm()
profile_form = UserProfileForm()
context_dict = {'user_form': user_form,
'profile_form': profile_form,
'registered': registered}
return render(request, 'register.html', context_dict)
def user_login(request):
if not request.method == 'POST':
return render(request, 'login.html', {})
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if not user:
print 'Bad login credentials {0}:{1}'.format(username, password)
return HttpResponse('Invalid login details')
if not user.is_active:
        return HttpResponse('Your account is not active')
login(request, user)
return HttpResponseRedirect('/djview')
def djview_index(request):
category_list = Category.objects.all()
page_list = Page.objects.order_by('-views')[:5]
context_dict = {'categories': category_list,
'pages': page_list}
visits = request.session.get('visits')
if not visits:
visits = 1
reset_last_visit_time = False
last_visit = request.session.get('last_visit')
if last_visit:
last_visit_time = datetime.strptime(last_visit[:-7],
'%Y-%m-%d %H:%M:%S')
if (datetime.now() - last_visit_time).seconds > 0:
visits += 1
reset_last_visit_time = True
else:
reset_last_visit_time = True
if reset_last_visit_time:
request.session['last_visit'] = str(datetime.now())
request.session['visits'] = visits
context_dict['visits'] = visits
return render(request, 'index.html', context_dict)
def djview_about(request):
return render(request, 'about.html')
@login_required
def restricted(request):
return HttpResponse("Since you are logged in, you can see this text")
@login_required
def user_logout(request):
logout(request)
return HttpResponse('<a href="/djview/">Index</a>')
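# Illustrative urlconf sketch (hypothetical, not part of this module): how the
# views above would typically be wired up in the app's urls.py for the
# Django 1.x series this code targets.
#
#   from django.conf.urls import url
#   from djview import views
#
#   urlpatterns = [
#       url(r'^$', views.djview_index, name='djview_index'),
#       url(r'^about/$', views.djview_about, name='djview_about'),
#       url(r'^category/(?P<category_name_slug>[\w\-]+)/$',
#           views.category, name='category'),
#       url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$',
#           views.add_page, name='add_page'),
#       url(r'^add_category/$', views.add_category, name='add_category'),
#       url(r'^register/$', views.register, name='register'),
#       url(r'^login/$', views.user_login, name='user_login'),
#       url(r'^logout/$', views.user_logout, name='user_logout'),
#       url(r'^restricted/$', views.restricted, name='restricted'),
#   ]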
| {
"content_hash": "09931bcd9d5e376172cd517d64218392",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 74,
"avg_line_length": 26.942857142857143,
"alnum_prop": 0.6163308589607636,
"repo_name": "rklabs/djaaks",
"id": "bceca65ba7a7457e2e2ee6bfa77689a1a23475f8",
"size": "4715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djview/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11366"
},
{
"name": "Python",
"bytes": "18966"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import logging
import os
import signal
import sys
import time
import re
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '../api/zulip')):
sys.path.insert(0, '../api')
from zulip import Client
def exit_gracefully(signum, frame):
sys.exit(0)
class RateLimit(object):
def __init__(self, message_limit, interval_limit):
self.message_limit = message_limit
self.interval_limit = interval_limit
self.message_list = []
def is_legal(self):
self.message_list.append(time.time())
if len(self.message_list) > self.message_limit:
self.message_list.pop(0)
time_diff = self.message_list[-1] - self.message_list[0]
return time_diff >= self.interval_limit
else:
return True
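# Behaviour sketch for RateLimit (illustrative, not part of the original file):
# is_legal() returns False once the current send and the send message_limit
# calls earlier are closer together than interval_limit seconds, i.e. roughly
# "at most 20 messages per rolling 5-second window" for RateLimit(20, 5).
#
#   limit = RateLimit(2, 5)
#   limit.is_legal()   # True  (1st message)
#   limit.is_legal()   # True  (2nd message)
#   limit.is_legal()   # False unless >= 5 seconds passed since the 2nd call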
class BotHandlerApi(object):
def __init__(self, client):
# Only expose a subset of our Client's functionality
user_profile = client.get_profile()
self._rate_limit = RateLimit(20, 5)
self._client = client
try:
self.full_name = user_profile['full_name']
self.email = user_profile['email']
except KeyError:
logging.error('Cannot fetch user profile, make sure you have set'
' up the zuliprc file correctly.')
sys.exit(1)
def send_message(self, *args, **kwargs):
if self._rate_limit.is_legal():
self._client.send_message(*args, **kwargs)
else:
logging.error('-----> !*!*!*MESSAGE RATE LIMIT REACHED, EXITING*!*!*! <-----\n'
'Is your bot trapped in an infinite loop by reacting to'
' its own messages?')
sys.exit(1)
def run_message_handler_for_bot(lib_module, quiet, config_file):
# Make sure you set up your ~/.zuliprc
client = Client(config_file=config_file)
restricted_client = BotHandlerApi(client)
message_handler = lib_module.handler_class()
class StateHandler(object):
def __init__(self):
self.state = None
def set_state(self, state):
self.state = state
def get_state(self):
return self.state
state_handler = StateHandler()
if not quiet:
print(message_handler.usage())
def extract_message_if_mentioned(message, client):
bot_mention = r'^@(\*\*{0}\*\*\s|{0}\s)(?=.*)'.format(client.full_name)
start_with_mention = re.compile(bot_mention).match(message['content'])
if start_with_mention:
query = message['content'][len(start_with_mention.group()):]
return query
else:
bot_response = 'Please mention me first, then type the query.'
if message['type'] == 'private':
client.send_message(dict(
type='private',
to=message['sender_email'],
content=bot_response,
))
else:
client.send_message(dict(
type='stream',
to=message['display_recipient'],
subject=message['subject'],
content=bot_response,
))
return None
def is_private(message, client):
# bot will not reply if the sender name is the same as the bot name
# to prevent infinite loop
if message['type'] == 'private':
return client.full_name != message['sender_full_name']
return False
def handle_message(message):
logging.info('waiting for next message')
is_mentioned = message['is_mentioned']
is_private_message = is_private(message, restricted_client)
# Strip at-mention botname from the message
if is_mentioned:
message['content'] = extract_message_if_mentioned(message=message, client=restricted_client)
if message['content'] is None:
return
if is_private_message or is_mentioned:
message_handler.handle_message(
message=message,
client=restricted_client,
state_handler=state_handler
)
signal.signal(signal.SIGINT, exit_gracefully)
logging.info('starting message handling...')
client.call_on_each_message(handle_message)
| {
"content_hash": "e0525cd7b663009c8658945fe1dd9d6c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 104,
"avg_line_length": 33.62406015037594,
"alnum_prop": 0.575134168157424,
"repo_name": "aakash-cr7/zulip",
"id": "52d45b1454c527aeadc43656b5de4a2accad7f1d",
"size": "4472",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "contrib_bots/bot_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "311052"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "549346"
},
{
"name": "JavaScript",
"bytes": "1634217"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3555176"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
import os
import copy
import subprocess
from testlib.test import TestFunction
from testlib.suite import TestSuite
from testlib.helper import log_call
from testlib.config import constants, config
from fixture import TempdirFixture, Gem5Fixture, VariableFixture
import verifier
def gem5_verify_config(name,
config,
config_args,
verifiers,
gem5_args=tuple(),
fixtures=[],
valid_isas=constants.supported_isas,
valid_variants=constants.supported_variants,
length=constants.supported_lengths[0]):
'''
    Helper function to generate common gem5 tests using verifiers.
The generated TestSuite will run gem5 with the provided config and
config_args. After that it will run any provided verifiers to verify
details about the gem5 run.
.. seealso:: For the verifiers see :mod:`testlib.gem5.verifier`
:param name: Name of the test.
:param config: The config to give gem5.
:param config_args: A list of arguments to pass to the given config.
:param verifiers: An iterable with Verifier instances which will be placed
into a suite that will be ran after a gem5 run.
:param gem5_args: An iterable with arguments to give to gem5. (Arguments
that would normally go before the config path.)
    :param valid_isas: An iterable with the isas that this test can be run
        for. If None given, will run for all supported_isas.
    :param valid_variants: An iterable with the variant levels that
        this test can be run for. (E.g. opt, debug)
'''
fixtures = list(fixtures)
testsuites = []
for opt in valid_variants:
for isa in valid_isas:
# Create a tempdir fixture to be shared throughout the test.
tempdir = TempdirFixture()
gem5_returncode = VariableFixture(
name=constants.gem5_returncode_fixture_name)
# Common name of this generated testcase.
_name = '{given_name}-{isa}-{opt}'.format(
given_name=name,
isa=isa,
opt=opt)
# Create the running of gem5 subtest.
# NOTE: We specifically create this test before our verifiers so
# this is listed first.
tests = []
gem5_execution = TestFunction(
_create_test_run_gem5(config, config_args, gem5_args),
name=_name)
tests.append(gem5_execution)
# Create copies of the verifier subtests for this isa and
# variant.
for verifier in verifiers:
tests.append(verifier.instantiate_test(_name))
# Add the isa and variant to tags list.
tags = [isa, opt, length]
# Create the gem5 target for the specific architecture and
# variant.
_fixtures = copy.copy(fixtures)
_fixtures.append(Gem5Fixture(isa, opt))
_fixtures.append(tempdir)
_fixtures.append(gem5_returncode)
# Finally construct the self contained TestSuite out of our
# tests.
testsuites.append(TestSuite(
name=_name,
fixtures=_fixtures,
tags=tags,
tests=tests))
return testsuites
def _create_test_run_gem5(config, config_args, gem5_args):
def test_run_gem5(params):
'''
Simple \'test\' which runs gem5 and saves the result into a tempdir.
NOTE: Requires fixtures: tempdir, gem5
'''
fixtures = params.fixtures
if gem5_args is None:
_gem5_args = tuple()
elif isinstance(gem5_args, str):
# If just a single str, place it in an iterable
_gem5_args = (gem5_args,)
else:
_gem5_args = gem5_args
# FIXME/TODO: I don't like the idea of having to modify this test run
# or always collect results even if not using a verifier. There should
# be some configuration in here that only gathers certain results for
# certain verifiers.
#
# I.E. Only the returncode verifier will use the gem5_returncode
# fixture, but we always require it even if that verifier isn't being
# ran.
returncode = fixtures[constants.gem5_returncode_fixture_name]
tempdir = fixtures[constants.tempdir_fixture_name].path
gem5 = fixtures[constants.gem5_binary_fixture_name].path
command = [
gem5,
'-d', # Set redirect dir to tempdir.
tempdir,
            '-re',  # TODO: Change to const. Redirect stdout and stderr
]
command.extend(_gem5_args)
command.append(config)
# Config_args should set up the program args.
command.extend(config_args)
returncode.value = log_call(params.log, command)
return test_run_gem5
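# Illustrative caller sketch (comments only, not part of the original file).
# The verifier class, ISA tag and file paths below are assumptions used just
# to show the shape of a call; see the docstring of gem5_verify_config above.
#
#   verifiers = (verifier.MatchStdoutNoPerf('ref/simout'),)   # assumed class
#
#   gem5_verify_config(
#       name='hello-example',
#       config='configs/example/se.py',
#       config_args=['--cmd', 'tests/test-progs/hello/bin/x86/linux/hello'],
#       verifiers=verifiers,
#       valid_isas=('X86',),            # assumed ISA tag value
#   )
#
# Each (isa, variant) pair expands into its own TestSuite containing the gem5
# run followed by the verifier subtests, as built in the loops above.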
| {
"content_hash": "bbeee45825a72edbfd389a7469ef5097",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 78,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.5990079365079365,
"repo_name": "TUD-OS/gem5-dtu",
"id": "47cf421e16b83df92b8b3a3cb5873f5baccfb420",
"size": "6602",
"binary": false,
"copies": "2",
"ref": "refs/heads/dtu-mmu",
"path": "tests/gem5/suite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
'''
Change Return Program
The user enters a cost and then the amount of money given in dollars. The program
will figure out the change and the number of quarters, dimes, nickels, pennies needed
for the change.
'''
import sys
from decimal import *
coins = ['quarters', 'dimes', 'nickels', 'pennies']
values = {'quarters' : 25, 'dimes' : 10, 'nickels' : 5, 'pennies' : 1}
def main(cost, given):
change = given - cost
print("change: ${0}".format(format(change/100, '.2f')))
nums = {'quarters' : 0, 'dimes' : 0, 'nickels': 0, 'pennies' : 0}
for coin in coins:
if(change >= values[coin]):
nums[coin] += int(change / values[coin])
change = change % values[coin]
for coin in coins:
print("{0}: {1}".format(coin, nums[coin]))
if __name__ == "__main__":
c = Decimal(sys.argv[1])
g = Decimal(sys.argv[2])
main(c*100, g*100)
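# Worked example (added for illustration): greedy coin breakdown of the change.
#   $ python change_return.py 4.37 5.00
#   change: $0.63
#   quarters: 2
#   dimes: 1
#   nickels: 0
#   pennies: 3
# 63 cents -> 2*25 + 1*10 + 0*5 + 3*1.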
| {
"content_hash": "ec1ba1619a7c41727b9eb7cecfdc6fb1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 29.03448275862069,
"alnum_prop": 0.6377672209026128,
"repo_name": "mkmathur/Projects",
"id": "c9c2842e152544c60d08af8123d8ae8bd9ba15f9",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Numbers/change_return.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5771"
}
],
"symlink_target": ""
} |
from typing import MutableMapping, MutableSequence
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.batch.v1alpha",
manifest={
"Volume",
"NFS",
"PD",
"GCS",
},
)
class Volume(proto.Message):
r"""Volume describes a volume and parameters for it to be mounted
to a VM.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
nfs (google.cloud.batch_v1alpha.types.NFS):
A Network File System (NFS) volume. For
example, a Filestore file share.
This field is a member of `oneof`_ ``source``.
pd (google.cloud.batch_v1alpha.types.PD):
Deprecated: please use device_name instead.
This field is a member of `oneof`_ ``source``.
gcs (google.cloud.batch_v1alpha.types.GCS):
A Google Cloud Storage (GCS) volume.
This field is a member of `oneof`_ ``source``.
device_name (str):
Device name of an attached disk volume, which should align
with a device_name specified by
job.allocation_policy.instances[0].policy.disks[i].device_name
or defined by the given instance template in
job.allocation_policy.instances[0].instance_template.
This field is a member of `oneof`_ ``source``.
mount_path (str):
The mount path for the volume, e.g.
/mnt/disks/share.
mount_options (MutableSequence[str]):
For Google Cloud Storage (GCS), mount options
are the options supported by the gcsfuse tool
(https://github.com/GoogleCloudPlatform/gcsfuse).
For existing persistent disks, mount options
provided by the mount command
(https://man7.org/linux/man-pages/man8/mount.8.html)
except writing are supported. This is due to
restrictions of multi-writer mode
(https://cloud.google.com/compute/docs/disks/sharing-disks-between-vms).
For other attached disks and Network File System
(NFS), mount options are these supported by the
mount command
(https://man7.org/linux/man-pages/man8/mount.8.html).
"""
nfs: "NFS" = proto.Field(
proto.MESSAGE,
number=1,
oneof="source",
message="NFS",
)
pd: "PD" = proto.Field(
proto.MESSAGE,
number=2,
oneof="source",
message="PD",
)
gcs: "GCS" = proto.Field(
proto.MESSAGE,
number=3,
oneof="source",
message="GCS",
)
device_name: str = proto.Field(
proto.STRING,
number=6,
oneof="source",
)
mount_path: str = proto.Field(
proto.STRING,
number=4,
)
mount_options: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=5,
)
class NFS(proto.Message):
r"""Represents an NFS volume.
Attributes:
server (str):
The IP address of the NFS.
remote_path (str):
Remote source path exported from the NFS,
e.g., "/share".
"""
server: str = proto.Field(
proto.STRING,
number=1,
)
remote_path: str = proto.Field(
proto.STRING,
number=2,
)
class PD(proto.Message):
r"""Deprecated: please use device_name instead.
Attributes:
disk (str):
PD disk name, e.g. pd-1.
device (str):
PD device name, e.g. persistent-disk-1.
existing (bool):
Whether this is an existing PD. Default is
false. If false, i.e., new PD, we will format it
into ext4 and mount to the given path. If true,
i.e., existing PD, it should be in ext4 format
and we will mount it to the given path.
"""
disk: str = proto.Field(
proto.STRING,
number=1,
)
device: str = proto.Field(
proto.STRING,
number=2,
)
existing: bool = proto.Field(
proto.BOOL,
number=3,
)
class GCS(proto.Message):
r"""Represents a Google Cloud Storage volume.
Attributes:
remote_path (str):
Remote path, either a bucket name or a subdirectory of a
bucket, e.g.: bucket_name, bucket_name/subdirectory/
"""
remote_path: str = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
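# Construction sketch (illustrative, not part of the generated file). Since
# these are proto-plus messages, fields can be set via keyword arguments; the
# bucket path and the gcsfuse mount option below are example values only.
#
#   vol = Volume(
#       gcs=GCS(remote_path="my-bucket/subdirectory/"),
#       mount_path="/mnt/disks/share",
#       mount_options=["implicit-dirs"],
#   )
#
# `gcs`, `nfs`, `pd` and `device_name` share the `source` oneof, so assigning
# one of them clears whichever other member was previously set.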
| {
"content_hash": "66f163a32e9d781fba10c662c35ff393",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 110,
"avg_line_length": 28.724550898203592,
"alnum_prop": 0.5776526996039191,
"repo_name": "googleapis/python-batch",
"id": "a379dfc23cc621fed40a6effd5ac8513bff109b0",
"size": "5397",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/batch_v1alpha/types/volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1240465"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
import os
import sys
import inspect
__version__ = 'git'
#{ Initialization
def _init_externals():
"""Initialize external projects by putting them into the path"""
sys.path.append(os.path.join(os.path.dirname(__file__), 'ext', 'gitdb'))
try:
import gitdb
except ImportError:
raise ImportError("'gitdb' could not be found in your PYTHONPATH")
#END verify import
#} END initialization
#################
_init_externals()
#################
#{ Imports
from git.config import GitConfigParser
from git.objects import *
from git.refs import *
from git.diff import *
from git.exc import *
from git.db import *
from git.cmd import Git
from git.repo import Repo
from git.remote import *
from git.index import *
from git.util import (
LockFile,
BlockingLockFile,
Stats,
Actor
)
#} END imports
__all__ = [ name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)) ]
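# Typical usage sketch (illustrative, not part of the original module):
#
#   import git
#   repo = git.Repo('/path/to/checkout')      # path is a placeholder
#   head_commit = repo.head.commit
#   diff_index = head_commit.diff(None)       # compare HEAD to the work tree
#
# Everything re-exported above (Repo, Git, the refs/objects/index helpers,
# ...) is reachable from the top-level `git` package once _init_externals()
# has located gitdb.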
| {
"content_hash": "5cf864a623fe860f3e9df6201f267016",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 19.75,
"alnum_prop": 0.6635021097046413,
"repo_name": "qwinner/GitPython",
"id": "0658c3306964ff16f0b9f7c72fe824ee78b3651f",
"size": "1169",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.3",
"path": "git/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import macropy.activate
import testFunctions
import testSanitization
| {
"content_hash": "a9dd785434790b022253867ad0922302",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 23,
"avg_line_length": 23,
"alnum_prop": 0.8985507246376812,
"repo_name": "jeanqasaur/jeeves",
"id": "d3f3c270fd4865ff901fe2b92a5582e3a1cbec92",
"size": "69",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/gallery/endorsement/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9025"
},
{
"name": "Python",
"bytes": "358080"
}
],
"symlink_target": ""
} |
"""
do the unit tests!
"""
import os
import re
import sys
import unittest
from optparse import OptionParser
import paramiko
import threading
from paramiko.py3compat import PY2
sys.path.append('tests')
from tests.test_message import MessageTest
from tests.test_file import BufferedFileTest
from tests.test_buffered_pipe import BufferedPipeTest
from tests.test_util import UtilTest
from tests.test_hostkeys import HostKeysTest
from tests.test_pkey import KeyTest
from tests.test_kex import KexTest
from tests.test_packetizer import PacketizerTest
from tests.test_auth import AuthTest
from tests.test_transport import TransportTest
from tests.test_client import SSHClientTest
default_host = 'localhost'
default_user = os.environ.get('USER', 'nobody')
default_keyfile = os.path.join(os.environ.get('HOME', '/'), '.ssh/id_rsa')
default_passwd = None
def iter_suite_tests(suite):
"""Return all tests in a suite, recursing through nested suites"""
for item in suite._tests:
if isinstance(item, unittest.TestCase):
yield item
elif isinstance(item, unittest.TestSuite):
for r in iter_suite_tests(item):
yield r
else:
raise Exception('unknown object %r inside test suite %r'
% (item, suite))
def filter_suite_by_re(suite, pattern):
result = unittest.TestSuite()
filter_re = re.compile(pattern)
for test in iter_suite_tests(suite):
if filter_re.search(test.id()):
result.addTest(test)
return result
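# Example (illustrative): given the full suite assembled in main() below,
#
#   sftp_only = filter_suite_by_re(suite, 'SFTP')
#
# keeps only the test cases whose id() matches the pattern (e.g. the
# tests.test_sftp.SFTPTest cases), which is exactly what happens when
# positional arguments are passed on the command line.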
def main():
parser = OptionParser('usage: %prog [options]')
parser.add_option('--verbose', action='store_true', dest='verbose', default=False,
help='verbose display (one line per test)')
parser.add_option('--no-pkey', action='store_false', dest='use_pkey', default=True,
help='skip RSA/DSS private key tests (which can take a while)')
parser.add_option('--no-transport', action='store_false', dest='use_transport', default=True,
help='skip transport tests (which can take a while)')
parser.add_option('--no-sftp', action='store_false', dest='use_sftp', default=True,
help='skip SFTP client/server tests, which can be slow')
parser.add_option('--no-big-file', action='store_false', dest='use_big_file', default=True,
help='skip big file SFTP tests, which are slow as molasses')
parser.add_option('-R', action='store_false', dest='use_loopback_sftp', default=True,
help='perform SFTP tests against a remote server (by default, SFTP tests ' +
'are done through a loopback socket)')
parser.add_option('-H', '--sftp-host', dest='hostname', type='string', default=default_host,
metavar='<host>',
help='[with -R] host for remote sftp tests (default: %s)' % default_host)
parser.add_option('-U', '--sftp-user', dest='username', type='string', default=default_user,
metavar='<username>',
help='[with -R] username for remote sftp tests (default: %s)' % default_user)
parser.add_option('-K', '--sftp-key', dest='keyfile', type='string', default=default_keyfile,
metavar='<keyfile>',
help='[with -R] location of private key for remote sftp tests (default: %s)' %
default_keyfile)
parser.add_option('-P', '--sftp-passwd', dest='password', type='string', default=default_passwd,
metavar='<password>',
help='[with -R] (optional) password to unlock the private key for remote sftp tests')
options, args = parser.parse_args()
# setup logging
paramiko.util.log_to_file('test.log')
if options.use_sftp:
from tests.test_sftp import SFTPTest
if options.use_loopback_sftp:
SFTPTest.init_loopback()
else:
SFTPTest.init(options.hostname, options.username, options.keyfile, options.password)
if not options.use_big_file:
SFTPTest.set_big_file_test(False)
if options.use_big_file:
from tests.test_sftp_big import BigSFTPTest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MessageTest))
suite.addTest(unittest.makeSuite(BufferedFileTest))
suite.addTest(unittest.makeSuite(BufferedPipeTest))
suite.addTest(unittest.makeSuite(UtilTest))
suite.addTest(unittest.makeSuite(HostKeysTest))
if options.use_pkey:
suite.addTest(unittest.makeSuite(KeyTest))
suite.addTest(unittest.makeSuite(KexTest))
suite.addTest(unittest.makeSuite(PacketizerTest))
if options.use_transport:
suite.addTest(unittest.makeSuite(AuthTest))
suite.addTest(unittest.makeSuite(TransportTest))
suite.addTest(unittest.makeSuite(SSHClientTest))
if options.use_sftp:
suite.addTest(unittest.makeSuite(SFTPTest))
if options.use_big_file:
suite.addTest(unittest.makeSuite(BigSFTPTest))
verbosity = 1
if options.verbose:
verbosity = 2
runner = unittest.TextTestRunner(verbosity=verbosity)
if len(args) > 0:
filter = '|'.join(args)
suite = filter_suite_by_re(suite, filter)
result = runner.run(suite)
# Clean up stale threads from poorly cleaned-up tests.
# TODO: make that not a problem, jeez
for thread in threading.enumerate():
if thread is not threading.currentThread():
if PY2:
thread._Thread__stop()
else:
thread._stop()
# Exit correctly
if not result.wasSuccessful():
sys.exit(1)
if __name__ == '__main__':
main()
| {
"content_hash": "aa5feb1c61f807df1d51e8dda6608b3f",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 107,
"avg_line_length": 40.443661971830984,
"alnum_prop": 0.6430437053804632,
"repo_name": "mytliulei/DCNRobotInstallPackages",
"id": "2b3d4ed47e64a64ec9ea3e5dc379771f89b57ea8",
"size": "6582",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "windows/win32/paramiko-1.14.0/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4187"
},
{
"name": "CSS",
"bytes": "13968"
},
{
"name": "HTML",
"bytes": "890167"
},
{
"name": "Makefile",
"bytes": "2272"
},
{
"name": "Python",
"bytes": "3559800"
},
{
"name": "Shell",
"bytes": "521"
}
],
"symlink_target": ""
} |
"""Functionality to check for the availability and version of dependencies."""
from __future__ import print_function
import re
# Keep urllib2 here since this code should be able to be used
# by a default Python setup.
import urllib2
# The dictionary values are:
# module_name: minimum_version
LIBYAL_DEPENDENCIES = {
u'pybde': 20140531,
u'pyesedb': 20150409,
u'pyevt': 20120410,
u'pyevtx': 20141112,
u'pyewf': 20131210,
u'pyfsntfs': 20150831,
u'pyfwsi': 20150606,
u'pylnk': 20150830,
u'pymsiecf': 20150314,
u'pyolecf': 20150413,
u'pyqcow': 20131204,
u'pyregf': 20150315,
u'pysigscan': 20150627,
u'pysmdev': 20140529,
u'pysmraw': 20140612,
u'pyvhdi': 20131210,
u'pyvmdk': 20140421,
u'pyvshadow': 20131209}
# The tuple values are:
# module_name, version_attribute_name, minimum_version, maximum_version
PYTHON_DEPENDENCIES = [
(u'artifacts', u'__version__', u'20150409', None),
# The bencode module does not appear to have version information.
(u'bencode', u'', u'', None),
(u'binplist', u'__version__', u'0.1.4', None),
(u'construct', u'__version__', u'2.5.2', None),
(u'dateutil', u'__version__', u'1.5', None),
(u'dfvfs', u'__version__', u'20150915', None),
(u'dpkt', u'__version__', u'1.8', None),
# The protobuf module does not appear to have version information.
(u'google.protobuf', u'', u'', None),
(u'hachoir_core', u'__version__', u'1.3.3', None),
(u'hachoir_parser', u'__version__', u'1.3.4', None),
(u'hachoir_metadata', u'__version__', u'1.3.3', None),
(u'IPython', u'__version__', u'1.2.1', None),
(u'pefile', u'__version__', u'1.2.10-139', None),
(u'psutil', u'__version__', u'1.2.1', None),
(u'pyparsing', u'__version__', u'2.0.3', None),
# TODO: determine the version of pytz.
# pytz uses __version__ but has a different version indicator e.g. 2012d
(u'pytz', u'', u'', None),
(u'requests', u'__version__', u'2.2.1', None),
(u'six', u'__version__', u'1.1.0', None),
(u'sqlite3', u'sqlite_version', u'3.7.8', None),
(u'xlsxwriter', u'__version__', u'0.6.5', None),
(u'yaml', u'__version__', u'3.10', None),
(u'zmq', u'__version__', u'2.1.11', None)]
# The tuple values are:
# module_name, version_attribute_name, minimum_version, maximum_version
PYTHON_TEST_DEPENDENCIES = [
(u'mock', u'__version__', u'0.7.1', None)]
def _DownloadPageContent(download_url):
"""Downloads the page content.
Args:
download_url: the URL where to download the page content.
Returns:
The page content if successful, None otherwise.
"""
if not download_url:
return
url_object = urllib2.urlopen(download_url)
if url_object.code != 200:
return
return url_object.read()
def _GetLibyalGithubReleasesLatestVersion(library_name):
"""Retrieves the latest version number of a libyal library on GitHub releases.
Args:
library_name: the name of the libyal library.
Returns:
The latest version for a given libyal library on GitHub releases
or 0 on error.
"""
download_url = (
u'https://github.com/libyal/{0:s}/releases').format(library_name)
page_content = _DownloadPageContent(download_url)
if not page_content:
return 0
# The format of the project download URL is:
# /libyal/{project name}/releases/download/{git tag}/
# {project name}{status-}{version}.tar.gz
# Note that the status is optional and will be: beta, alpha or experimental.
expression_string = (
u'/libyal/{0:s}/releases/download/[^/]*/{0:s}-[a-z-]*([0-9]+)'
u'[.]tar[.]gz').format(library_name)
matches = re.findall(expression_string, page_content)
if not matches:
return 0
return int(max(matches))
# TODO: Remove when Google Drive support is no longer needed.
def _GetLibyalGoogleDriveLatestVersion(library_name):
"""Retrieves the latest version number of a libyal library on Google Drive.
Args:
library_name: the name of the libyal library.
Returns:
The latest version for a given libyal library on Google Drive
or 0 on error.
"""
download_url = u'https://code.google.com/p/{0:s}/'.format(library_name)
page_content = _DownloadPageContent(download_url)
if not page_content:
return 0
# The format of the library downloads URL is:
# https://googledrive.com/host/{random string}/
expression_string = (
b'<a href="(https://googledrive.com/host/[^/]*/)"[^>]*>Downloads</a>')
matches = re.findall(expression_string, page_content)
if not matches or len(matches) != 1:
return 0
page_content = _DownloadPageContent(matches[0])
if not page_content:
return 0
# The format of the library download URL is:
# /host/{random string}/{library name}-{status-}{version}.tar.gz
# Note that the status is optional and will be: beta, alpha or experimental.
expression_string = b'/host/[^/]*/{0:s}-[a-z-]*([0-9]+)[.]tar[.]gz'.format(
library_name)
matches = re.findall(expression_string, page_content)
if not matches:
return 0
return int(max(matches))
def _CheckLibyal(libyal_python_modules, latest_version_check=False):
"""Checks the availability of libyal libraries.
Args:
libyal_python_modules: a dictionary of libyal python module names (keys) and
versions (values).
latest_version_check: optional boolean value to indicate if the project
site should be checked for the latest version.
The default is False.
Returns:
True if the libyal libraries are available, False otherwise.
"""
connection_error = False
result = True
for module_name, module_version in sorted(libyal_python_modules.items()):
try:
module_object = map(__import__, [module_name])[0]
except ImportError:
print(u'[FAILURE]\tmissing: {0:s}.'.format(module_name))
result = False
continue
libyal_name = u'lib{0:s}'.format(module_name[2:])
installed_version = int(module_object.get_version())
latest_version = None
if latest_version_check:
try:
latest_version = _GetLibyalGithubReleasesLatestVersion(libyal_name)
except urllib2.URLError:
latest_version = None
if not latest_version:
try:
latest_version = _GetLibyalGoogleDriveLatestVersion(libyal_name)
except urllib2.URLError:
latest_version = None
if not latest_version:
print((
u'Unable to determine latest version of {0:s} ({1:s}).\n').format(
libyal_name, module_name))
latest_version = None
connection_error = True
if module_version is not None and installed_version < module_version:
print((
u'[FAILURE]\t{0:s} ({1:s}) version: {2:d} is too old, {3:d} or '
u'later required.').format(
libyal_name, module_name, installed_version, module_version))
result = False
elif latest_version and installed_version != latest_version:
print((
u'[INFO]\t\t{0:s} ({1:s}) version: {2:d} installed, '
u'version: {3:d} available.').format(
libyal_name, module_name, installed_version, latest_version))
else:
print(u'[OK]\t\t{0:s} ({1:s}) version: {2:d}'.format(
libyal_name, module_name, installed_version))
if connection_error:
print((
u'[INFO] to check for the latest versions this script needs Internet '
u'access.'))
return result
def _CheckPythonModule(
module_name, version_attribute_name, minimum_version,
maximum_version=None):
"""Checks the availability of a Python module.
Args:
module_name: the name of the module.
version_attribute_name: the name of the attribute that contains the module
version.
minimum_version: the minimum required version.
maximum_version: the maximum required version. This attribute is optional
and should only be used if there is a recent API change
that prevents the tool from running if a later version
is used.
Returns:
True if the Python module is available and conforms to the minimum required
version. False otherwise.
"""
try:
module_object = map(__import__, [module_name])[0]
except ImportError:
print(u'[FAILURE]\tmissing: {0:s}.'.format(module_name))
return False
if version_attribute_name and minimum_version:
module_version = getattr(module_object, version_attribute_name, None)
if not module_version:
print((
u'[FAILURE]\tunable to determine version information '
u'for: {0:s}').format(module_name))
return False
# Split the version string and convert every digit into an integer.
# A string compare of both version strings will yield an incorrect result.
split_regex = re.compile(r'\.|\-')
module_version_map = map(int, split_regex.split(module_version))
minimum_version_map = map(int, split_regex.split(minimum_version))
if module_version_map < minimum_version_map:
print((
u'[FAILURE]\t{0:s} version: {1:s} is too old, {2:s} or later '
u'required.').format(module_name, module_version, minimum_version))
return False
if maximum_version:
maximum_version_map = map(int, split_regex.split(maximum_version))
if module_version_map > maximum_version_map:
print((
u'[FAILURE]\t{0:s} version: {1:s} is too recent, {2:s} or earlier '
u'required.').format(module_name, module_version, maximum_version))
return False
print(u'[OK]\t\t{0:s} version: {1:s}'.format(module_name, module_version))
else:
print(u'[OK]\t\t{0:s}'.format(module_name))
return True
def _CheckPytsk(module_name, minimum_version_libtsk, minimum_version_pytsk):
"""Checks the availability of pytsk.
Args:
module_name: the name of the module.
minimum_version_libtsk: the minimum required version of libtsk.
minimum_version_pytsk: the minimum required version of pytsk.
Returns:
True if the pytsk Python module is available, False otherwise.
"""
try:
module_object = map(__import__, [module_name])[0]
except ImportError:
print(u'[FAILURE]\tmissing: {0:s}.'.format(module_name))
return False
module_version = module_object.TSK_VERSION_STR
# Split the version string and convert every digit into an integer.
# A string compare of both version strings will yield an incorrect result.
module_version_map = map(int, module_version.split(u'.'))
minimum_version_map = map(int, minimum_version_libtsk.split(u'.'))
if module_version_map < minimum_version_map:
print((
u'[FAILURE]\tSleuthKit (libtsk) version: {0:s} is too old, {1:s} or '
u'later required.').format(module_version, minimum_version_libtsk))
return False
print(u'[OK]\t\tSleuthKit version: {0:s}'.format(module_version))
if not hasattr(module_object, u'get_version'):
print(u'[FAILURE]\t{0:s} is too old, {1:s} or later required.'.format(
module_name, minimum_version_pytsk))
return False
module_version = module_object.get_version()
if module_version < minimum_version_pytsk:
print((
u'[FAILURE]\t{0:s} version: {1:s} is too old, {2:s} or later '
u'required.').format(
module_name, module_version, minimum_version_pytsk))
return False
print(u'[OK]\t\t{0:s} version: {1:s}'.format(module_name, module_version))
return True
def CheckDependencies(latest_version_check=False):
"""Checks the availability of the dependencies.
Args:
latest_version_check: Optional boolean value to indicate if the project
site should be checked for the latest version.
The default is False.
Returns:
True if the dependencies are available, False otherwise.
"""
print(u'Checking availability and versions of plaso dependencies.')
check_result = True
for values in PYTHON_DEPENDENCIES:
if not _CheckPythonModule(
values[0], values[1], values[2], maximum_version=values[3]):
check_result = False
if not _CheckPytsk(u'pytsk3', u'4.1.2', u'20140506'):
check_result = False
libyal_check_result = _CheckLibyal(
LIBYAL_DEPENDENCIES, latest_version_check=latest_version_check)
if not libyal_check_result:
check_result = False
print(u'')
return check_result
def CheckModuleVersion(module_name):
"""Checks the version requirements of a module.
Args:
module_name: the name of the module.
Raises:
    ImportError: if the module does not exist or does not meet
the version requirements.
"""
# TODO: add support for non libyal dependencies.
if module_name not in LIBYAL_DEPENDENCIES:
return
try:
    module_object = __import__(module_name)
except ImportError:
raise
module_version = module_object.get_version()
try:
module_version = int(module_version, 10)
except ValueError:
    raise ImportError(
        u'Unable to determine version of module {0:s}'.format(module_name))
  if module_version < LIBYAL_DEPENDENCIES[module_name]:
    raise ImportError(
        u'Module {0:s} version {1!s} is too old, minimum required version '
        u'{2!s}'.format(
            module_name, module_version, LIBYAL_DEPENDENCIES[module_name]))
def CheckTestDependencies(latest_version_check=False):
"""Checks the availability of the dependencies when running tests.
Args:
latest_version_check: Optional boolean value to indicate if the project
site should be checked for the latest version.
The default is False.
Returns:
True if the dependencies are available, False otherwise.
"""
if not CheckDependencies(latest_version_check):
return False
print(u'Checking availability and versions of plaso test dependencies.')
for values in PYTHON_TEST_DEPENDENCIES:
if not _CheckPythonModule(
values[0], values[1], values[2], maximum_version=values[3]):
return False
return True
def GetInstallRequires():
"""Returns the install_requires for setup.py"""
install_requires = []
for values in PYTHON_DEPENDENCIES:
module_name = values[0]
module_version = values[2]
# Map the import name to the pypi name.
if module_name == u'yaml':
module_name = u'PyYAML'
elif module_name == u'sqlite3':
# Override the pysqlite version since it does not match
# the sqlite3 version.
module_name = u'pysqlite'
module_version = None
if not module_version:
install_requires.append(module_name)
else:
install_requires.append(u'{0:s} >= {1:s}'.format(
module_name, module_version))
install_requires.append(u'pytsk3 >= 4.1.2')
for module_name, module_version in sorted(LIBYAL_DEPENDENCIES.items()):
if not module_version:
install_requires.append(module_name)
else:
install_requires.append(u'{0:s} >= {1:d}'.format(
module_name, module_version))
return sorted(install_requires)
| {
"content_hash": "a73dc5f0a99f092b91de9689be2580f5",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 80,
"avg_line_length": 32.54112554112554,
"alnum_prop": 0.6515232140481575,
"repo_name": "8u1a/plaso",
"id": "f1ed0466ea1f7df71d541a87b4173f85e5dc1905",
"size": "15058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/dependencies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13930"
},
{
"name": "Python",
"bytes": "3179107"
},
{
"name": "Shell",
"bytes": "47305"
}
],
"symlink_target": ""
} |
"""A wrapper of Session API which runs monitors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.learn.python.learn.wrapped_session import WrappedSession
from tensorflow.python.framework import ops
class MonitoredSession(WrappedSession):
"""A WrappedSession that calls monitors during calls to run().
The list of monitors to call is passed in the constructor. Before each call
to `run()` the session calls the `step_begin()` method of the monitors, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `step_end()` methods of
the monitors, passing the values returned by the `run()` call corresponding to
the ops and tensors that each monitor requested.
If any call to the `step_end()` methods returns `True` the session will be
marked as needing to stop and its `should_stop()` method will now return
`True`.
  This wrapped session requires a "global step" tensor on construction. It
  should evaluate to a scalar int value and is added to the list of tensors to
  fetch in calls to `run()`.
"""
def __init__(self, sess, monitors, global_step_tensor):
"""Initializes a MonitoredSession object.
Args:
sess: A `tf.Session` or a `WrappedSession` object.
      monitors: An iterable of `tf.contrib.learn.BaseMonitor` objects.
global_step_tensor: A 'Tensor' which holds a scalar int value.
"""
WrappedSession.__init__(self, sess)
self._monitors = monitors
self._should_stop = False
self._global_step_tensor = global_step_tensor
self._last_step = None
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
if self._last_step is None:
self._last_step = WrappedSession.run(self, self._global_step_tensor)
monitors_step = self._last_step + 1
monitor_fetches = []
for monitor in self._monitors:
monitor_requests = monitor.step_begin(monitors_step)
if monitor_requests:
# TODO(ispir): remove following restriction after b/30136815 fixed
if not isinstance(monitor_requests, list):
raise ValueError('Monitor.step_begin should return a list.')
monitor_fetches.extend(monitor_requests)
actual_fetches = {
'caller': fetches,
self._global_step_tensor: self._global_step_tensor,
'monitors': [_as_graph_element(f, self.graph) for f in monitor_fetches]
}
# Do session run.
outputs = WrappedSession.run(self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
self._last_step = outputs[self._global_step_tensor]
# Call monitors step_end and stop if one of them tells to stop.
if monitor_fetches:
monitor_outputs = dict(zip(monitor_fetches, outputs['monitors']))
else:
monitor_outputs = {}
for monitor in self._monitors:
induce_stop = monitor.step_end(monitors_step, monitor_outputs)
self._should_stop = self._should_stop or induce_stop
# Call the post_step methods.
for monitor in self._monitors:
monitor.post_step(monitors_step, self._sess)
return outputs['caller']
# TODO(ispir): Remove following logic after forcing monitors returns tensors.
def _as_graph_element(obj, graph):
"""Retrieves Graph element."""
graph = graph or ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, 'graph') or obj.graph != graph:
raise ValueError('Passed %s should have graph attribute that is equal '
'to current graph %s.' % (obj, graph))
return obj
if ':' in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ':0')
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ':1')
except (KeyError, ValueError):
pass
else:
raise ValueError('Name %s is ambiguous, '
'as this `Operation` has multiple outputs '
'(at least 2).' % obj)
return element
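# Hypothetical usage sketch (not part of this module; the session, monitor and
# tensors below are placeholders):
#
#   monitored = MonitoredSession(sess, [my_monitor], global_step_tensor)
#   while not monitored.should_stop():
#       monitored.run(train_op)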
| {
"content_hash": "71b6fdc03ba020386437f8168d337cac",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 80,
"avg_line_length": 36.443548387096776,
"alnum_prop": 0.6565611861031202,
"repo_name": "natanielruiz/android-yolo",
"id": "e4fbf606276961c60bd8ec8eefafd4bf2da91d66",
"size": "5244",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jni-build/jni/include/tensorflow/contrib/learn/python/learn/monitored_session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "179103"
},
{
"name": "C++",
"bytes": "11498765"
},
{
"name": "CMake",
"bytes": "36462"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "HTML",
"bytes": "520176"
},
{
"name": "Java",
"bytes": "95359"
},
{
"name": "JavaScript",
"bytes": "12951"
},
{
"name": "Jupyter Notebook",
"bytes": "1773504"
},
{
"name": "Makefile",
"bytes": "23603"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45677"
},
{
"name": "Python",
"bytes": "9531722"
},
{
"name": "Shell",
"bytes": "238167"
},
{
"name": "TypeScript",
"bytes": "632244"
}
],
"symlink_target": ""
} |
"""
Functions in this module are helpers intended to be used in conjunction with
the "getLinks.py" script to collect links to movie pages on Box Office Mojo,
so that you can scrape data from each individual movie's page.
expected usage:
from src.lib import mojoScrapeLinks
"""
def harvestMovieLinks(candidateURL, iYear, iType, pattern):
"""
Takes a box office mojo URL for a given year and release type and parses it to
collect the links for each movie within the table.
"""
from bs4 import BeautifulSoup as bs
import urllib.request
import requests
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
sess.mount('http://', adapter)
# scrape:
links = []
#soup = bs(urllib.request.urlopen(candidateURL).read(), "lxml")
response = sess.get(candidateURL)
if response.status_code != 200:
return None
page = response.text
soup = bs(page,"lxml")
allTables = soup.findChildren('table')
linksTable = allTables[6]
nEntries = 0
for link in linksTable.find_all('a', href=pattern):
links.append("http://www.boxofficemojo.com" + link['href'])
        nEntries += 1
return links
def writeLinksToCSV(iYear, iType, movieLinks, datadir):
"""
Takes the harvested links and writes the output as a csv file.
"""
import csv
print('All links found, writing csv with', len(movieLinks), ' links...')
# Write the csv file here
csvBaseName = datadir + "/bom-links-" + iType
csvfile = csvBaseName + "-" + str(iYear)
# Assuming a flat list
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for iFullLink in movieLinks:
writer.writerow([iFullLink])
print('Saved to', csvfile, '...Done!')
return
def scrapeLinks(typeString, iYear, iType, datadir):
"""
For a given year and release type this function will run through all possible
pages of data on Box Office mojo and parse the links to every movie's own page.
The links are collected on a year-releaseType basis and saved as a csv
For every year release-type pair:
Function calls
- harvestMovieLinks() to collect the links
- writeLinksToCSV() to save the collected links to a csv
"""
import re
import os
import time
from random import randint
# starting point for search, can be any year
baseURL="http://www.boxofficemojo.com/yearly/chart/"
## wide releases need to obey the structure
# http://www.boxofficemojo.com/yearly/chart/?page=1&view=widedate&view2=domestic&yr=2016&p=.htm
## limited need to obey
## http://www.boxofficemojo.com/yearly/chart/?view=limited&view2=domestic&page=1&yr=2016&p=.htm
# pattern to search for
pattern = re.compile("/movies")
# initialize as an empty list
movieLinks=[]
for iPageNum in range(1,10):
candidateURL = baseURL + '?page=' + str(iPageNum) + '&view=' + typeString + '&view2=domestic&yr=' + str(iYear) + '&p=.htm'
print('point to:', candidateURL)
        try:
            newMovieLinks = harvestMovieLinks(candidateURL, iYear, iType, pattern)
            # harvestMovieLinks() returns None when the page request fails
            if newMovieLinks:
                movieLinks.extend(newMovieLinks)
            time.sleep(randint(5,15))
        except IndexError:
            print("there is not a page", iPageNum)
            continue
writeLinksToCSV(iYear, iType, movieLinks, datadir)
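# Hedged usage sketch (the year, release type and data directory below are
# illustrative values only, not part of this module):
#
#   from src.lib import mojoScrapeLinks
#   mojoScrapeLinks.scrapeLinks('widedate', 2016, 'wide', 'data')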
| {
"content_hash": "fe486fd4aab06fcf397276b385cd41ad",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 130,
"avg_line_length": 29.620689655172413,
"alnum_prop": 0.6583236321303841,
"repo_name": "lachlandeer/bom-scraper",
"id": "0c4424c2952f46457d7d766edb8fbdc0034db1aa",
"size": "3436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/mojoScrapeLinks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3050"
},
{
"name": "Python",
"bytes": "29412"
}
],
"symlink_target": ""
} |
import logging
import shlex
from typing import Any, List, Optional
import wandb
from wandb.errors import LaunchError
from .abstract import AbstractRun, AbstractRunner
from .local_container import _run_entry_point
from .._project_spec import get_entry_point_command, LaunchProject
from ..builder.build import get_env_vars_dict
from ..utils import (
_is_wandb_uri,
download_wandb_python_deps,
parse_wandb_uri,
PROJECT_SYNCHRONOUS,
sanitize_wandb_api_key,
validate_wandb_python_deps,
)
_logger = logging.getLogger(__name__)
class LocalProcessRunner(AbstractRunner):
"""Runner class, uses a project to create a LocallySubmittedRun.
LocalProcessRunner is very similar to a LocalContainerRunner, except it does not
run the command inside a docker container. Instead, it runs the
command specified as a process directly on the bare metal machine.
"""
def run( # type: ignore
self,
launch_project: LaunchProject,
*args,
**kwargs,
) -> Optional[AbstractRun]:
        if args:
            _logger.warning(f"LocalProcessRunner.run received unused args {args}")
        if kwargs:
            _logger.warning(f"LocalProcessRunner.run received unused kwargs {kwargs}")
synchronous: bool = self.backend_config[PROJECT_SYNCHRONOUS]
entry_point = launch_project.get_single_entry_point()
cmd: List[Any] = []
if launch_project.project_dir is None:
raise LaunchError("Launch LocalProcessRunner received empty project dir")
        # Check to make sure local python dependencies match run's requirements.txt
if launch_project.uri and _is_wandb_uri(launch_project.uri):
source_entity, source_project, run_name = parse_wandb_uri(
launch_project.uri
)
run_requirements_file = download_wandb_python_deps(
source_entity,
source_project,
run_name,
self._api,
launch_project.project_dir,
)
validate_wandb_python_deps(
run_requirements_file,
launch_project.project_dir,
)
elif launch_project.job:
assert launch_project._job_artifact is not None
validate_wandb_python_deps(
"requirements.frozen.txt",
launch_project.project_dir,
)
env_vars = get_env_vars_dict(launch_project, self._api)
for env_key, env_value in env_vars.items():
cmd += [f"{shlex.quote(env_key)}={shlex.quote(env_value)}"]
if not self.ack_run_queue_item(launch_project):
return None
entry_cmd = get_entry_point_command(entry_point, launch_project.override_args)
cmd += entry_cmd
command_str = " ".join(cmd).strip()
wandb.termlog(
"Launching run as a local process with command: {}".format(
sanitize_wandb_api_key(command_str)
)
)
run = _run_entry_point(command_str, launch_project.project_dir)
if synchronous:
run.wait()
return run
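# Illustrative command shape (hypothetical values, not produced verbatim by
# this module): with env_vars of {"WANDB_API_KEY": "XXXX"} and an entry point
# command of "python train.py", the command string assembled in run() looks
# like
#     WANDB_API_KEY=XXXX python train.py
# and is executed by _run_entry_point() from launch_project.project_dir.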
| {
"content_hash": "6b0d87c4c522faab9a143a318fb8c214",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 86,
"avg_line_length": 33.90425531914894,
"alnum_prop": 0.6209601506118607,
"repo_name": "wandb/client",
"id": "40ea7c611a7b6bef5353d7d30e524f028cf356bd",
"size": "3187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wandb/sdk/launch/runner/local_process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
} |
"""Provides FCO API Client and decorator."""
from functools import wraps
from fcoclient.exceptions import (NonRecoverableError, RecoverableError)
import requests
import json
from requests import codes as rsc
from time import sleep
# REST Client default settings
REST_RETRY_COUNT = 5
REST_RETRY_DELAY = 30
REST_FAILURE_EXCEPTION = NonRecoverableError
REST_INTERNAL_RETRY = True
REST_API_VERSION = '5.0'
REST_HEADERS = {'content-type': 'application/json'}
# Configurable properties dict keys
PROP_CLIENT_CONFIG = 'auth'
PROP_CLIENT_TOKEN = 'token'
PROP_CLIENT_USERNAME = 'username'
PROP_CLIENT_PASSWORD = 'password'
PROP_CLIENT_API_USERNAME = 'api_uuid'
PROP_CLIENT_API_PASSWORD = 'password'
PROP_CLIENT_CUSTOMER = 'customer'
PROP_CLIENT_SERVICE_URL = 'url'
PROP_CLIENT_CA_CERT = 'ca_cert'
# Configurable kwargs keys for client
KW_PAYLOAD = 'payload'
KW_PATTERN = 'pattern'
def _rest_client_retry_and_auth(f):
"""Authenticate, log and retry requests."""
@wraps(f)
def wrapper(self, endpoint, data=None, **kwargs):
# TODO: remove legacy block
        try:
            endpoint = endpoint.format(**kwargs.get(KW_PATTERN, {}))
        except (IndexError, KeyError):
            raise NonRecoverableError(
                'Unable to format endpoint, pattern: {}, data: {}.'.format(
                    endpoint, kwargs.get(KW_PATTERN)))
url = '{service_url}/rest/user/{api_version}/{endpoint}'.format(
service_url=self.service_url, api_version=REST_API_VERSION,
endpoint=endpoint)
retry_count = self.retry_count
payload = kwargs.get(KW_PAYLOAD)
if data:
payload = data
while retry_count:
terminate = False
self.logger.info('Client function: %s', f.__name__)
self.logger.info('Client URL: %s', url)
self.logger.info('Client data: %s', data)
r = f(self, url, payload, self.auth, self.headers,
self.verify)
self.logger.debug('Client final URL: {}'.format(r.url))
if len(r.content) > 60:
self.logger.info('Content: {}'.format(r.content[:57] + '...'))
self.logger.debug('Full content: {}'.format(r.content))
else:
self.logger.info('Content: {}'.format(r.content))
self.logger.info('Status code: {}'.format(r.status_code))
if r.status_code == rsc.accepted or r.status_code == rsc.ok:
self.logger.debug('=' * 60)
# TODO: convert everything to unicode?
content = json.loads(r.content)
                def to_str(uni):
                    """Recursively turn unicode to str."""
if isinstance(uni, list):
gen = enumerate(uni)
string = [None]*len(uni)
elif isinstance(uni, dict):
gen = uni.items()
string = {}
elif isinstance(uni, basestring):
return str(uni)
else:
return uni
for k, v in gen:
string[to_str(k)] = to_str(v)
return string
return to_str(content)
if r.status_code == rsc.too_many_requests:
error = 'Server busy (too many requests); waiting and ' \
'retrying {} more time(s).'.format(retry_count)
elif r.status_code == rsc.bad_request:
error = 'Server responded with bad request; will not ' \
'retry.'
terminate = True
elif r.status_code == rsc.not_implemented:
                error = 'Server responded with not implemented; will not ' \
                        'retry.'
terminate = True
elif r.status_code == rsc.forbidden:
error = 'Server responded with forbidden; will not retry.'
terminate = True
elif r.status_code == rsc.service_unavailable:
error = 'Server responded with service unavailable; waiting ' \
'and retrying {} more time(s).'.format(retry_count)
else:
                error = 'Other error; waiting and retrying {} more time(s).' \
                    .format(retry_count)
try:
error += ' (Message: {})'.format(
json.loads(r.content)['message'].strip())
            except (KeyError, ValueError):
pass
if terminate:
self.logger.error(error)
raise NonRecoverableError(error)
elif self.internal_retry:
self.logger.warn(error)
retry_count -= 1
sleep(self.retry_delay)
else:
self.logger.warn(error)
raise RecoverableError(message=error,
retry_after=self.retry_delay)
self.logger.error('Giving up on client API request')
        raise REST_FAILURE_EXCEPTION('Giving up on client API request')
return wrapper
# "Abstract" Client Classes
class APIClient(object):
"""FCO API Client."""
REQUIRED_AUTH = [None]
def __init__(self, auth, retry_count=REST_RETRY_COUNT,
retry_delay=REST_RETRY_DELAY, rest_headers=REST_HEADERS,
logger=None, internal_retry=REST_INTERNAL_RETRY):
"""Initialise FCO API Client."""
self.retry_count = retry_count
self.retry_delay = retry_delay
self.headers = rest_headers
self.logger = logger
self.internal_retry = internal_retry
self.auth2 = auth
@classmethod
def can_handle(cls, auth):
"""Determine if a class is suitable to handle authentication type."""
return all([auth.get(k) is not None for k in cls.REQUIRED_AUTH])
class RESTClient(APIClient):
"""FCO REST API Client."""
REQUIRED_AUTH = [None]
def __init__(self, *args, **kwargs):
"""Initialise FCO REST API Client."""
super(RESTClient, self).__init__(*args, **kwargs)
self.auth = ('', '')
self.service_url = None
self.verify = self.auth2.get(PROP_CLIENT_CA_CERT, True)
@_rest_client_retry_and_auth
def post(self, url, data, auth, headers, verify):
"""Make POST request to FCO API."""
return requests.post(url, data, auth=auth, headers=headers,
verify=verify)
@_rest_client_retry_and_auth
def get(self, url, data, auth, headers, verify):
"""Make GET request to FCO API."""
return requests.get(url, params=data, auth=auth, headers=headers,
verify=verify)
@_rest_client_retry_and_auth
def put(self, url, data, auth, headers, verify):
"""Make PUT request to FCO API."""
return requests.put(url, data, auth=auth, headers=headers,
verify=verify)
@_rest_client_retry_and_auth
def delete(self, url, data, auth, headers, verify):
"""Make DELETE request to FCO API."""
return requests.delete(url, params=data, auth=auth, headers=headers,
verify=verify)
# "Usable" Client Classes
class UserPassRESTClient(RESTClient):
"""Username and password based authentication REST client."""
REQUIRED_AUTH = [PROP_CLIENT_USERNAME, PROP_CLIENT_PASSWORD,
PROP_CLIENT_CUSTOMER, PROP_CLIENT_SERVICE_URL]
def __init__(self, *args, **kwargs):
"""Initialise UserPassRESTClient."""
super(UserPassRESTClient, self).__init__(*args, **kwargs)
try:
self.auth = ('{}/{}'.format(self.auth2[PROP_CLIENT_USERNAME],
self.auth2[PROP_CLIENT_CUSTOMER]),
self.auth2[PROP_CLIENT_PASSWORD])
self.service_url = self.auth2[PROP_CLIENT_SERVICE_URL]
except:
raise NonRecoverableError('Invalid auth to create REST client: {}'
.format(str(self.auth2)))
class APIUserPassRESTClient(RESTClient):
"""API user based authentication REST client."""
REQUIRED_AUTH = [PROP_CLIENT_API_USERNAME, PROP_CLIENT_API_PASSWORD,
PROP_CLIENT_CUSTOMER, PROP_CLIENT_SERVICE_URL]
def __init__(self, *args, **kwargs):
"""Initialise APIUserPassRESTClient."""
super(APIUserPassRESTClient, self).__init__(*args, **kwargs)
try:
self.auth = ('{}/{}'.format(self.auth2[PROP_CLIENT_API_USERNAME],
self.auth2[PROP_CLIENT_CUSTOMER]),
self.auth2[PROP_CLIENT_API_PASSWORD])
self.service_url = self.auth2[PROP_CLIENT_SERVICE_URL]
except:
raise NonRecoverableError('Invalid auth to create REST client: {}'
.format(str(self.auth2)))
class APITokenRESTClient(RESTClient):
"""API token based authentication REST client."""
REQUIRED_AUTH = [PROP_CLIENT_TOKEN, PROP_CLIENT_SERVICE_URL]
def __init__(self, *args, **kwargs):
"""Initialise APIUserPassRESTClient."""
super(APITokenRESTClient, self).__init__(*args, **kwargs)
try:
self.auth = (self.auth2[PROP_CLIENT_TOKEN], '')
self.service_url = self.auth2[PROP_CLIENT_SERVICE_URL]
except:
raise NonRecoverableError('Invalid auth to create REST client: {}'
.format(str(self.auth2)))
# Client functions
def get_client(auth, logger):
"""Get an instance of the appropriate API Client."""
for cls in UserPassRESTClient, APIUserPassRESTClient, APITokenRESTClient:
if cls.can_handle(auth):
logger.info('Using client: {}'.format(cls.__name__))
return cls(auth, logger=logger)
raise NonRecoverableError('Failed to determine FCO Client class based on '
'the following authentication arguments: {}'
.format(str(auth)))
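# Hedged usage sketch (the credentials, endpoint and logger below are
# placeholder assumptions, not part of this module):
#
#   import logging
#   auth = {'username': 'jane', 'password': 'secret',
#           'customer': 'acme', 'url': 'https://cp.example.com'}
#   client = get_client(auth, logging.getLogger('fco'))
#   client.get('resources/vdc', data={})  # GET <url>/rest/user/5.0/resources/vdc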
| {
"content_hash": "5332d21b65273e868f1033d1dffcc3ca",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 79,
"avg_line_length": 36.60071942446043,
"alnum_prop": 0.5585257985257985,
"repo_name": "b-77/cloudify-flexiant-plugin",
"id": "eee57c9c719417a6db6c4be9156076b23a4e7f19",
"size": "10191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fcoclient/clients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "586357"
}
],
"symlink_target": ""
} |
import urllib
######################################################
# This script will load the oculus &
# the inmoov arms so that you can teleoperate
# kwatters
# Env specific config
######################################################
leftCameraIndex = 1
leftCameraAngle = 180
rightCameraIndex = 0
rightCameraAngle = 0
# mjpeg-streamer seems to work well on ras pi, these are the urls
leftCameraUrl = "http://192.168.4.112:8081/?action=stream"
rightCameraUrl = "http://192.168.4.112:8080/?action=stream"
# The remote uri for MyRobotLab running on the InMoov
inMoovAddress = "tcp://192.168.4.112:6767"
######################################################
# HELPER FUNCTIONS
######################################################
def readUrl(url):
u = urllib.urlopen(url)
# just send the request, ignore the response
u.read(0)
u.close()
# Define a callback method for the oculus head tracking info
def onOculusData(data):
print(data)
# amplify the pitch recorded from the rift by a factor of 3
# To account for gear ratio of neck piston in inmoov (adjust as needed)
pitch = -1 * data.pitch * 3
  # neutral/center offset for the neck servo (nominally 90 degrees; 130 here)
neckOffset = 130
neckPos = int(pitch + neckOffset)
# update the neck position
neckUrl = "http://192.168.4.112:8888/api/service/i01.head.neck/moveTo/" + str(neckPos)
print neckUrl
readUrl(neckUrl)
# neck.moveTo(neckPos)
# track the yaw
yaw = data.yaw
yaw = -1 * yaw
# center position (yaw = 0 / servo = 90 degrees)
rotHeadOffset = 90
rotheadPos = int(rotHeadOffset + yaw)
rotheadUrl = "http://192.168.4.112:8888/api/service/i01.head.rothead/moveTo/" + str(rotheadPos)
print rotheadUrl
readUrl(rotheadUrl)
# turn head left/right to track yaw
# rothead.moveTo(rotHeadPos)
# Track the Roll in software
rollgain = 1
roll = data.roll * rollgain
# left camera is 180 degrees rotated from the right camera
# as you roll clockwise, this counter balances that
# by rolling the camera counter clockwise
# rift.leftOpenCV.getFilter("left").setAngle(-roll+180);
# rift.rightOpenCV.getFilter("right").setAngle(-roll)
# TODO: track the affine filters for roll
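# Worked example for the mapping above (illustrative numbers only): a rift
# pitch reading of -10 degrees gives pitch = -1 * -10 * 3 = 30, so
# neckPos = int(30 + 130) = 160 and the neck servo is driven to 160 by the
# REST call in onOculusData().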
######################################################
# Create the Rift
rift = Runtime.createAndStart("rift", "OculusRift")
rift.setLeftCameraIndex(leftCameraIndex)
rift.setLeftCameraAngle(leftCameraAngle)
rift.setRightCameraIndex(rightCameraIndex)
rift.setRightCameraAngle(rightCameraAngle)
# TODO: other calibration as necessary / desired.
# set the frame grabber
rift.setFrameGrabberType("org.myrobotlab.opencv.MJpegFrameGrabber");
rift.setLeftEyeURL(leftCameraUrl)
rift.setRightEyeURL(rightCameraUrl)
rift.setCvInputSource("network")
# TODO: rename this .. this is a lame name.
rift.initContext()
# Create the remote adapter for distributed MRL.
# remoteInMoov = Runtime.createAndStart("remoteInMoov", "RemoteAdapter")
# remoteInMoov.connect(inMoovAddress)
# add the callback to python from the rift.
rift.addListener("publishOculusData", "python", "onOculusData")
# let's add a joystick that can handle some inputs to the arms.
| {
"content_hash": "0ad2cfcea41a0f043c006efc6f1af3f4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 99,
"avg_line_length": 37.632183908045974,
"alnum_prop": 0.6487477092241906,
"repo_name": "MyRobotLab/pyrobotlab",
"id": "6ed50093c9f1c2dc7505ba956b527bd06c01bda7",
"size": "3615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/kwatters/robotsforgood/RobotsForGood.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1827"
},
{
"name": "C",
"bytes": "126258"
},
{
"name": "C++",
"bytes": "373018"
},
{
"name": "Java",
"bytes": "156911"
},
{
"name": "Processing",
"bytes": "17022"
},
{
"name": "Python",
"bytes": "3309101"
},
{
"name": "Shell",
"bytes": "4635"
},
{
"name": "VBA",
"bytes": "11115"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curve are currently not covered by invariance test
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
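# Illustrative example (not itself part of the test suite): each entry maps a
# display name to a callable, so a metric can be fetched and invoked directly,
# e.g. ALL_METRICS['mean_absolute_error']([0, 1], [0, 2]) evaluates to 0.5,
# which is exactly what the generic invariance tests below rely on.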
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"balanced_accuracy_score",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"balanced_accuracy_score",
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same results with string
    # labels as with numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name not in METRIC_UNDEFINED_BINARY:
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO those metrics doesn't support string label yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
    invalids = [([0, 1], [np.inf, np.inf]),
                ([0, 1], [np.nan, np.nan]),
                ([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Classification metrics can't handle a mix "
"of binary and continuous targets",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred has at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
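# Worked example of the averaging modes exercised above (illustrative values):
# for y_true = [0, 0, 1] and y_pred = [0, 1, 1], per-class recall is
# [0.5, 1.0], so macro recall = 0.75, micro recall = 2/3 (pooled over all
# samples) and weighted recall = (2 * 0.5 + 1 * 1.0) / 3 = 2/3.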
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # check that unit weights give the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer weights are equivalent to repeating samples
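    # (illustrative: a sample with weight 3 should contribute exactly like three
    # unit-weight copies of that sample)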
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that if the number of samples in y_true and sample_weight are not
    # equal, a meaningful error is raised.
error_message = ("Found input variables with inconsistent numbers of "
"samples: [{}, {}, {}]".format(
_num_samples(y1), _num_samples(y2),
_num_samples(sample_weight) * 2))
assert_raise_message(ValueError, error_message, metric, y1, y2,
sample_weight=np.hstack([sample_weight,
sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name not in REGRESSION_METRICS:
continue
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_score)
else:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| {
"content_hash": "6e03d5ad4ae270a596f1239d79e7a42f",
"timestamp": "",
"source": "github",
"line_count": 1121,
"max_line_length": 79,
"avg_line_length": 39.410347903657446,
"alnum_prop": 0.6076190045044025,
"repo_name": "zorroblue/scikit-learn",
"id": "e68f4024b24af0f72dc1023806ecc7207ba9d78a",
"size": "44179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/metrics/tests/test_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7346700"
},
{
"name": "Shell",
"bytes": "20756"
}
],
"symlink_target": ""
} |
import os
import sys
import warnings
import time
import socket
import re
import urllib2
import types
import SocketServer
import threading
import tempfile
import zipfile
import pickle
import glob #for directory scanning
import abc #abstract base class
import colorsys
import logging
import pygame as pg
import scipy.ndimage as ndimage
import scipy.stats.stats as sss #for auto white balance
import scipy.cluster.vq as scv
import scipy.linalg as nla # for linear algebra / least squares
import math # math... who does that
import copy # for deep copy
import numpy as np
import scipy.spatial.distance as spsd
import scipy.cluster.vq as cluster #for kmeans
import pygame as pg
import platform
import copy
import types
import time
import itertools #for track
from numpy import linspace
from scipy.interpolate import UnivariateSpline
from warnings import warn
from copy import copy
from math import *
from pkg_resources import load_entry_point
from SimpleHTTPServer import SimpleHTTPRequestHandler
from types import IntType, LongType, FloatType, InstanceType
from cStringIO import StringIO
from numpy import int32
from numpy import uint8
from EXIF import *
from pygame import gfxdraw
from pickle import *
# SimpleCV library includes
try:
import cv2
import cv2.cv as cv
except ImportError:
try:
import cv
except ImportError:
raise ImportError("Cannot load OpenCV library which is required by SimpleCV")
#optional libraries
PIL_ENABLED = True
try:
from PIL import Image as pil
from PIL import ImageFont as pilImageFont
from PIL import ImageDraw as pilImageDraw
from PIL import GifImagePlugin
getheader = GifImagePlugin.getheader
getdata = GifImagePlugin.getdata
except ImportError:
try:
import Image as pil
from GifImagePlugin import getheader, getdata
except ImportError:
PIL_ENABLED = False
FREENECT_ENABLED = True
try:
import freenect
except ImportError:
FREENECT_ENABLED = False
ZXING_ENABLED = True
try:
import zxing
except ImportError:
ZXING_ENABLED = False
OCR_ENABLED = True
try:
import tesseract
except ImportError:
OCR_ENABLED = False
PYSCREENSHOT_ENABLED = True
try:
import pyscreenshot
except ImportError:
PYSCREENSHOT_ENABLED = False
ORANGE_ENABLED = True
try:
try:
import orange
except ImportError:
import Orange; import orange
import orngTest #for cross validation
import orngStat
import orngEnsemble # for bagging / boosting
except ImportError:
ORANGE_ENABLED = False
class InitOptionsHandler(object):
"""
**summary**
    This handler stores global options. For now, its values track whether SimpleCV
    is being run in an IPython notebook and whether it should run headless.
"""
def __init__(self):
self.on_notebook = False
self.headless = False
def enable_notebook(self):
self.on_notebook = True
def set_headless(self):
# set SDL to use the dummy NULL video driver,
# so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
self.headless = True
init_options_handler = InitOptionsHandler()
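# Illustrative calls (assumed call sites, not part of the original module):
#   init_options_handler.enable_notebook()   # when running inside an IPython notebook
#   init_options_handler.set_headless()      # when no windowing system is available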
try:
import pygame as pg
except ImportError:
init_options_handler.set_headless()
#couple quick typecheck helper functions
def is_number(n):
"""
    Determines whether the argument is a number
    Returns: Boolean
"""
return type(n) in (IntType, LongType, FloatType)
def is_tuple(n):
"""
Determines if it is a tuple or not
Returns: Boolean
"""
return type(n) == tuple
def reverse_tuple(n):
"""
Reverses a tuple
Returns: Tuple
"""
return tuple(reversed(n))
def find(f, seq):
"""
Search for item in a list
Returns: Boolean
"""
for item in seq:
if (f == item):
return True
return False
def test():
"""
This function is meant to run builtin unittests
"""
print 'unit test'
def download_and_extract(URL):
"""
This function takes in a URL for a zip file, extracts it and returns
the temporary path it was extracted to
"""
if URL == None:
logger.warning("Please provide URL")
return None
tmpdir = tempfile.mkdtemp()
filename = os.path.basename(URL)
path = tmpdir + "/" + filename
zdata = urllib2.urlopen(URL)
print "Saving file to disk please wait...."
with open(path, "wb") as local_file:
local_file.write(zdata.read())
zfile = zipfile.ZipFile(path)
print "Extracting zipfile"
try:
zfile.extractall(tmpdir)
except:
logger.warning("Couldn't extract zip file")
return None
return tmpdir
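# Usage sketch for download_and_extract (hypothetical URL, for illustration only):
#   tmpdir = download_and_extract("http://example.com/sampleimages.zip")
#   images = glob.glob(tmpdir + "/*.jpg")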
def int_to_bin(i):
"""Integer to two bytes"""
i1 = i % 256
i2 = int(i/256)
return chr(i1) + chr(i2)
def npArray2cvMat(inputMat, dataType=cv.CV_32FC1):
"""
This function is a utility for converting numpy arrays to the cv.cvMat format.
Returns: cvMatrix
"""
if( type(inputMat) == np.ndarray ):
sz = len(inputMat.shape)
temp_mat = None
if( dataType == cv.CV_32FC1 or dataType == cv.CV_32FC2 or dataType == cv.CV_32FC3 or dataType == cv.CV_32FC4 ):
temp_mat = np.array(inputMat, dtype='float32')
        elif( dataType == cv.CV_8UC1 or dataType == cv.CV_8UC2 or dataType == cv.CV_8UC3 or dataType == cv.CV_8UC4):
temp_mat = np.array(inputMat,dtype='uint8')
else:
logger.warning("MatrixConversionUtil: the input matrix type is not supported")
return None
if( sz == 1 ): #this needs to be changed so we can do row/col vectors
retVal = cv.CreateMat(inputMat.shape[0], 1, dataType)
cv.SetData(retVal, temp_mat.tostring(), temp_mat.dtype.itemsize * temp_mat.shape[0])
elif( sz == 2 ):
retVal = cv.CreateMat(temp_mat.shape[0], temp_mat.shape[1], dataType)
cv.SetData(retVal, temp_mat.tostring(), temp_mat.dtype.itemsize * temp_mat.shape[1])
elif( sz > 2 ):
logger.warning("MatrixConversionUtil: the input matrix type is not supported")
return None
return retVal
else:
logger.warning("MatrixConversionUtil: the input matrix type is not supported")
#Logging system - Global elements
consoleHandler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s: %(message)s')
consoleHandler.setFormatter(formatter)
logger = logging.getLogger('Main Logger')
logger.addHandler(consoleHandler)
try:
import IPython
ipython_version = IPython.__version__
except ImportError:
ipython_version = None
#This is used with sys.excepthook to log all uncaught exceptions.
#By default, error messages ARE printed to stderr.
def exception_handler(excType, excValue, traceback):
logger.error("", exc_info=(excType, excValue, traceback))
#print "Hey!",excValue
#excValue has the most important info about the error.
#It'd be possible to display only that and hide all the (unfriendly) rest.
sys.excepthook = exception_handler
def ipython_exception_handler(shell, excType, excValue, traceback,tb_offset=0):
logger.error("", exc_info=(excType, excValue, traceback))
#The two following functions are used internally.
def init_logging(log_level):
logger.setLevel(log_level)
def read_logging_level(log_level):
levels_dict = {
1: logging.DEBUG, "debug": logging.DEBUG,
2: logging.INFO, "info": logging.INFO,
3: logging.WARNING, "warning": logging.WARNING,
4: logging.ERROR, "error": logging.ERROR,
5: logging.CRITICAL, "critical": logging.CRITICAL
}
if isinstance(log_level,str):
log_level = log_level.lower()
if log_level in levels_dict:
return levels_dict[log_level]
else:
print "The logging level given is not valid"
return None
def get_logging_level():
"""
This function prints the current logging level of the main logger.
"""
levels_dict = {
10: "DEBUG",
20: "INFO",
30: "WARNING",
40: "ERROR",
50: "CRITICAL"
}
print "The current logging level is:", levels_dict[logger.getEffectiveLevel()]
def set_logging(log_level,myfilename = None):
"""
This function sets the threshold for the logging system and, if desired,
directs the messages to a logfile. Level options:
'DEBUG' or 1
'INFO' or 2
'WARNING' or 3
'ERROR' or 4
'CRITICAL' or 5
If the user is on the interactive shell and wants to log to file, a custom
excepthook is set. By default, if logging to file is not enabled, the way
errors are displayed on the interactive shell is not changed.
"""
if myfilename and ipython_version:
try:
if ipython_version.startswith("0.10"):
__IPYTHON__.set_custom_exc((Exception,), ipython_exception_handler)
else:
ip = get_ipython()
ip.set_custom_exc((Exception,), ipython_exception_handler)
except NameError: #In case the interactive shell is not being used
sys.exc_clear()
level = read_logging_level(log_level)
if level and myfilename:
fileHandler = logging.FileHandler(filename=myfilename)
fileHandler.setLevel(level)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
logger.removeHandler(consoleHandler) #Console logging is disabled.
print "Now logging to",myfilename,"with level",log_level
elif level:
print "Now logging with level",log_level
logger.setLevel(level)
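# Usage sketch for set_logging (values taken from the docstring above):
#   set_logging("warning")                       # console logging at WARNING level
#   set_logging(2, myfilename="simplecv.log")    # INFO level, redirected to a logfile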
def system():
"""
**SUMMARY**
    The output of this function includes various information related to the system and libraries.
Main purpose:
    - While submitting a bug, report the output of this function
- Checking the current version and later upgrading the library based on the output
**RETURNS**
None
**EXAMPLE**
>>> import SimpleCV
>>> SimpleCV.system()
"""
try :
import platform
print "System : ", platform.system()
print "OS version : ", platform.version()
print "Python version :", platform.python_version()
try :
from cv2 import __version__
print "Open CV version : " + __version__
except ImportError :
print "Open CV2 version : " + "2.1"
if (PIL_ENABLED) :
print "PIL version : ", pil.VERSION
else :
print "PIL module not installed"
if (ORANGE_ENABLED) :
print "Orange Version : " + orange.version
else :
print "Orange module not installed"
try :
import pygame as pg
print "PyGame Version : " + pg.__version__
except ImportError:
print "PyGame module not installed"
try :
import pickle
print "Pickle Version : " + pickle.__version__
except :
print "Pickle module not installed"
except ImportError :
print "You need to install Platform to use this function"
print "to install you can use:"
print "easy_install platform"
return
class LazyProperty(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, klass=None):
if obj is None: return None
result = obj.__dict__[self.__name__] = self._func(obj)
return result
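# Usage sketch for LazyProperty (hypothetical class, for illustration only):
#   class Example(object):
#       @LazyProperty
#       def expensive(self):
#           return compute_something_costly()  # evaluated once, then cached on the instance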
#supported image formats regular expression
IMAGE_FORMATS = ('*.bmp','*.gif','*.jpg','*.jpe','*.jpeg','*.png','*.pbm','*.pgm','*.ppm','*.tif','*.tiff','*.webp')
#maximum image size
MAX_DIMENSION = 2*6000 # about twice the size of a full 35mm image - if you hit this, you have a lot of data.
LAUNCH_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__)))
| {
"content_hash": "f65aa4943882d6b1970f252da37d1e17",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 119,
"avg_line_length": 27.651972157772622,
"alnum_prop": 0.6505286121832522,
"repo_name": "jayrambhia/SimpleCV2",
"id": "43b4ce7eaf27325a1ab72de10989f456b1559365",
"size": "11964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimpleCV/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "46344"
},
{
"name": "JavaScript",
"bytes": "41038"
},
{
"name": "Perl",
"bytes": "5044"
},
{
"name": "Python",
"bytes": "1698883"
},
{
"name": "Shell",
"bytes": "18995"
}
],
"symlink_target": ""
} |
import os
import sys
from mic import chroot, msger
from mic.utils import cmdln, misc, errors, fs_related
from mic.imager import fs
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.pluginbase import ImagerPlugin
class FsPlugin(ImagerPlugin):
name = 'fs'
@classmethod
@cmdln.option("--include-src",
dest="include_src",
action="store_true",
default=False,
help="Generate a image with source rpms included")
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create fs image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == 'bootstrap':
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
# Called After setting the configmgr._ksconf as the creatoropts['name'] is reset there.
if creatoropts['release'] is not None:
creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = fs.FsImageCreator(creatoropts, pkgmgr)
creator._include_src = opts.include_src
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
self.check_image_exists(creator.destdir,
creator.pack_to,
[creator.name],
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
#Download the source packages ###private options
if opts.include_src:
installed_pkgs = creator.get_installed_packages()
msger.info('--------------------------------------------------')
msger.info('Generating the image with source rpms included ...')
if not misc.SrcpkgsDownload(installed_pkgs, creatoropts["repomd"], creator._instroot, creatoropts["cachedir"]):
msger.warning("Source packages can't be downloaded")
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.package(creatoropts["outdir"])
if creatoropts['release'] is not None:
creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
@classmethod
def do_chroot(self, target, cmd=[]):#chroot.py parse opts&args
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", target)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(target, None, cmdline)
finally:
chroot.cleanup_after_chroot("dir", None, None, None)
return 1
| {
"content_hash": "d254a8bd32be976020ef3fa9f630649d",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 127,
"avg_line_length": 35.80952380952381,
"alnum_prop": 0.5290336879432624,
"repo_name": "marcosbontempo/inatelos",
"id": "6bcaf007290183b24235db07bd42605179352ffd",
"size": "5224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poky-daisy/scripts/lib/mic/plugins/imager/fs_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "158"
},
{
"name": "BitBake",
"bytes": "1910696"
},
{
"name": "BlitzBasic",
"bytes": "4400"
},
{
"name": "C",
"bytes": "1751572"
},
{
"name": "C++",
"bytes": "354295"
},
{
"name": "CMake",
"bytes": "6537"
},
{
"name": "CSS",
"bytes": "27029"
},
{
"name": "Groff",
"bytes": "502444"
},
{
"name": "HTML",
"bytes": "141762"
},
{
"name": "JavaScript",
"bytes": "22555"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32254"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "Perl",
"bytes": "66300"
},
{
"name": "Perl6",
"bytes": "73"
},
{
"name": "Python",
"bytes": "3529760"
},
{
"name": "Shell",
"bytes": "598521"
},
{
"name": "Tcl",
"bytes": "60106"
},
{
"name": "VimL",
"bytes": "8506"
},
{
"name": "XSLT",
"bytes": "8814"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Union
from apache_beam.coders import PickleCoder
from pyflink.datastream.state import ListState, MapState
from pyflink.fn_execution.coders import from_proto
from pyflink.fn_execution.internal_state import InternalListState, InternalMapState
from pyflink.fn_execution.utils.operation_utils import is_built_in_function, load_aggregate_function
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend
from pyflink.table import FunctionContext
from pyflink.table.data_view import ListView, MapView, DataView
def extract_data_view_specs_from_accumulator(current_index, accumulator):
# for built in functions we extract the data view specs from their accumulator
i = -1
extracted_specs = []
for field in accumulator:
i += 1
# TODO: infer the coder from the input types and output type of the built-in functions
if isinstance(field, MapView):
extracted_specs.append(MapViewSpec(
"builtInAgg%df%d" % (current_index, i), i, PickleCoder(), PickleCoder()))
elif isinstance(field, ListView):
extracted_specs.append(ListViewSpec(
"builtInAgg%df%d" % (current_index, i), i, PickleCoder()))
return extracted_specs
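# Illustrative example (hypothetical accumulator layout): for an accumulator like
# [0, ListView(), MapView()] at udf index 2, the function above yields a
# ListViewSpec named "builtInAgg2f1" and a MapViewSpec named "builtInAgg2f2".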
def extract_data_view_specs(udfs):
extracted_udf_data_view_specs = []
current_index = -1
for udf in udfs:
current_index += 1
udf_data_view_specs_proto = udf.specs
if not udf_data_view_specs_proto:
if is_built_in_function(udf.payload):
built_in_function = load_aggregate_function(udf.payload)
accumulator = built_in_function.create_accumulator()
extracted_udf_data_view_specs.append(
extract_data_view_specs_from_accumulator(current_index, accumulator))
else:
extracted_udf_data_view_specs.append([])
else:
extracted_specs = []
for spec_proto in udf_data_view_specs_proto:
state_id = spec_proto.name
field_index = spec_proto.field_index
if spec_proto.HasField("list_view"):
element_coder = from_proto(spec_proto.list_view.element_type)
extracted_specs.append(ListViewSpec(state_id, field_index, element_coder))
elif spec_proto.HasField("map_view"):
key_coder = from_proto(spec_proto.map_view.key_type)
value_coder = from_proto(spec_proto.map_view.value_type)
extracted_specs.append(
MapViewSpec(state_id, field_index, key_coder, value_coder))
else:
raise Exception("Unsupported data view spec type: " + spec_proto.type)
extracted_udf_data_view_specs.append(extracted_specs)
if all([len(i) == 0 for i in extracted_udf_data_view_specs]):
return []
return extracted_udf_data_view_specs
N = TypeVar('N')
class StateDataView(DataView, Generic[N]):
@abstractmethod
def set_current_namespace(self, namespace: N):
"""
Sets current namespace for state.
"""
pass
class StateListView(ListView, StateDataView[N], ABC):
def __init__(self, list_state: Union[ListState, InternalListState]):
super().__init__()
self._list_state = list_state
def get(self):
return self._list_state.get()
def add(self, value):
self._list_state.add(value)
def add_all(self, values):
self._list_state.add_all(values)
def clear(self):
self._list_state.clear()
def __hash__(self) -> int:
        return hash(tuple(self.get()))
class KeyedStateListView(StateListView[N]):
"""
KeyedStateListView is a default implementation of StateListView whose underlying
representation is a keyed state.
"""
def __init__(self, list_state: ListState):
super(KeyedStateListView, self).__init__(list_state)
def set_current_namespace(self, namespace: N):
raise Exception("KeyedStateListView doesn't support set_current_namespace")
class NamespacedStateListView(StateListView[N]):
"""
NamespacedStateListView is a StateListView whose underlying representation is a keyed and
namespaced state. It also supports changing current namespace.
"""
def __init__(self, list_state: InternalListState):
super(NamespacedStateListView, self).__init__(list_state)
def set_current_namespace(self, namespace: N):
self._list_state.set_current_namespace(namespace)
class StateMapView(MapView, StateDataView[N], ABC):
def __init__(self, map_state: Union[MapState, InternalMapState]):
super().__init__()
self._map_state = map_state
def get(self, key):
return self._map_state.get(key)
def put(self, key, value) -> None:
self._map_state.put(key, value)
def put_all(self, dict_value) -> None:
self._map_state.put_all(dict_value)
def remove(self, key) -> None:
self._map_state.remove(key)
def contains(self, key) -> bool:
return self._map_state.contains(key)
def items(self):
return self._map_state.items()
def keys(self):
return self._map_state.keys()
def values(self):
return self._map_state.values()
def is_empty(self) -> bool:
return self._map_state.is_empty()
def clear(self) -> None:
return self._map_state.clear()
class KeyedStateMapView(StateMapView[N]):
"""
KeyedStateMapView is a default implementation of StateMapView whose underlying
representation is a keyed state.
"""
def __init__(self, map_state: MapState):
super(KeyedStateMapView, self).__init__(map_state)
def set_current_namespace(self, namespace: N):
raise Exception("KeyedStateMapView doesn't support set_current_namespace")
class NamespacedStateMapView(StateMapView[N]):
"""
NamespacedStateMapView is a StateMapView whose underlying representation is a keyed and
namespaced state. It also supports changing current namespace.
"""
def __init__(self, map_state: InternalMapState):
super(NamespacedStateMapView, self).__init__(map_state)
def set_current_namespace(self, namespace: N):
self._map_state.set_current_namespace(namespace)
class DataViewSpec(object):
def __init__(self, state_id, field_index):
self.state_id = state_id
self.field_index = field_index
class ListViewSpec(DataViewSpec):
def __init__(self, state_id, field_index, element_coder):
super(ListViewSpec, self).__init__(state_id, field_index)
self.element_coder = element_coder
class MapViewSpec(DataViewSpec):
def __init__(self, state_id, field_index, key_coder, value_coder):
super(MapViewSpec, self).__init__(state_id, field_index)
self.key_coder = key_coder
self.value_coder = value_coder
class StateDataViewStore(ABC):
"""
This interface contains methods for registering StateDataView with a managed store.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend: RemoteKeyedStateBackend):
self._function_context = function_context
self._keyed_state_backend = keyed_state_backend
def get_runtime_context(self):
return self._function_context
@abstractmethod
def get_state_list_view(self, state_name, element_coder):
"""
Creates a state list view.
:param state_name: The name of underlying state of the list view.
:param element_coder: The element coder
:return: a keyed list state
"""
pass
@abstractmethod
def get_state_map_view(self, state_name, key_coder, value_coder):
"""
Creates a state map view.
:param state_name: The name of underlying state of the map view.
:param key_coder: The key coder
:param value_coder: The value coder
:return: a keyed map state
"""
pass
class PerKeyStateDataViewStore(StateDataViewStore):
"""
Default implementation of StateDataViewStore.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend: RemoteKeyedStateBackend):
super(PerKeyStateDataViewStore, self).__init__(function_context, keyed_state_backend)
def get_state_list_view(self, state_name, element_coder):
return KeyedStateListView(
self._keyed_state_backend.get_list_state(state_name, element_coder))
def get_state_map_view(self, state_name, key_coder, value_coder):
return KeyedStateMapView(
self._keyed_state_backend.get_map_state(state_name, key_coder, value_coder))
class PerWindowStateDataViewStore(StateDataViewStore):
"""
An implementation of StateDataViewStore for window aggregates which forwards the state
registration to an underlying RemoteKeyedStateBackend. The state created by this store has the
ability to switch window namespaces.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend: RemoteKeyedStateBackend):
super(PerWindowStateDataViewStore, self).__init__(function_context, keyed_state_backend)
def get_state_list_view(self, state_name, element_coder):
return NamespacedStateListView(
self._keyed_state_backend.get_list_state(state_name, element_coder))
def get_state_map_view(self, state_name, key_coder, value_coder):
return NamespacedStateMapView(
self._keyed_state_backend.get_map_state(state_name, key_coder, value_coder))
| {
"content_hash": "98cc8963eddc453f2279654c3cb4f640",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 100,
"avg_line_length": 34.22027972027972,
"alnum_prop": 0.6532134464085011,
"repo_name": "tillrohrmann/flink",
"id": "44e7fac57e8282349c634bea455c88246dfd7504",
"size": "10745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/fn_execution/table/state_data_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "20448"
},
{
"name": "Batchfile",
"bytes": "1863"
},
{
"name": "C",
"bytes": "847"
},
{
"name": "Clojure",
"bytes": "84400"
},
{
"name": "Dockerfile",
"bytes": "5563"
},
{
"name": "FreeMarker",
"bytes": "86639"
},
{
"name": "GAP",
"bytes": "139514"
},
{
"name": "HTML",
"bytes": "135625"
},
{
"name": "HiveQL",
"bytes": "78611"
},
{
"name": "Java",
"bytes": "83158201"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Less",
"bytes": "65918"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "2433935"
},
{
"name": "Scala",
"bytes": "10501870"
},
{
"name": "Shell",
"bytes": "525933"
},
{
"name": "TypeScript",
"bytes": "288472"
},
{
"name": "q",
"bytes": "7406"
}
],
"symlink_target": ""
} |
import rabit
import numpy as np
rabit.init(lib='mock')
rank = rabit.get_rank()
n = 10
nround = 3
data = np.ones(n) * rank
version, model, local = rabit.load_checkpoint(True)
if version == 0:
model = np.zeros(n)
local = np.ones(n)
else:
print '[%d] restart from version %d' % (rank, version)
for i in xrange(version, nround):
res = rabit.allreduce(data + model+local, rabit.SUM)
print '[%d] iter=%d: %s' % (rank, i, str(res))
model = res
local[:] = i
rabit.checkpoint(model, local)
rabit.finalize()
| {
"content_hash": "e393827a95c1a093cf8350312ef1d8bb",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.6263940520446096,
"repo_name": "bssrdf/rabit",
"id": "e35bd31775c28a04a4f46fe1b84db1ea60228500",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/local_recover.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5298"
},
{
"name": "C++",
"bytes": "249357"
},
{
"name": "Java",
"bytes": "33709"
},
{
"name": "Makefile",
"bytes": "6093"
},
{
"name": "Python",
"bytes": "49027"
},
{
"name": "Shell",
"bytes": "991"
}
],
"symlink_target": ""
} |
import mock
from neutron.common import log as call_log
from neutron.tests import base
class TargetKlass(object):
@call_log.log
def test_method(self, arg1, arg2, *args, **kwargs):
pass
class TestCallLog(base.BaseTestCase):
def setUp(self):
super(TestCallLog, self).setUp()
self.klass = TargetKlass()
logger = self.klass.test_method.__func__.__closure__[0].cell_contents
self.log_debug = mock.patch.object(logger, 'debug').start()
def _test_call_log(self, *args, **kwargs):
expected_format = ('%(class_name)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s')
expected_data = {'class_name': '%s.%s' % (
__name__,
self.klass.__class__.__name__),
'method_name': 'test_method',
'args': args,
'kwargs': kwargs}
self.klass.test_method(*args, **kwargs)
self.log_debug.assert_called_once_with(expected_format, expected_data)
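        # (illustrative: with args=(10, 20) the rendered message would read
        #  "<test module path>.TargetKlass method test_method called with
        #  arguments (10, 20) {}")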
def test_call_log_all_args(self):
self._test_call_log(10, 20)
def test_call_log_all_kwargs(self):
self._test_call_log(arg1=10, arg2=20)
def test_call_log_known_args_unknown_args_kwargs(self):
self._test_call_log(10, 20, 30, arg4=40)
def test_call_log_known_args_kwargs_unknown_kwargs(self):
self._test_call_log(10, arg2=20, arg3=30, arg4=40)
| {
"content_hash": "9747e2291c0252b91dc2e34cc9c4a2d8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 34.325581395348834,
"alnum_prop": 0.5724932249322493,
"repo_name": "mandeepdhami/neutron",
"id": "b6ed65b43a346661349a5ccf82e35d31dabc92b7",
"size": "2094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/common/test_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7250115"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from djangobaselibrary.sample.models import Spam, Type
admin.site.register([Spam, Type])
| {
"content_hash": "5cadca59b5c22b22422c2696d30b5ec3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 54,
"avg_line_length": 24.8,
"alnum_prop": 0.8064516129032258,
"repo_name": "ella/django-base-library",
"id": "ef11b994851fe073b3e0043ffd86d1ebeb89fdb5",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangobaselibrary/sample/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30652"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
parse_iso8601,
update_url_query,
int_or_none,
determine_protocol,
unescapeHTML,
)
class SendtoNewsIE(InfoExtractor):
_VALID_URL = r'https?://embed\.sendtonews\.com/player2/embedplayer\.php\?.*\bSC=(?P<id>[0-9A-Za-z-]+)'
_TEST = {
# From http://cleveland.cbslocal.com/2016/05/16/indians-score-season-high-15-runs-in-blowout-win-over-reds-rapid-reaction/
'url': 'http://embed.sendtonews.com/player2/embedplayer.php?SC=GxfCe0Zo7D-175909-5588&type=single&autoplay=on&sound=YES',
'info_dict': {
'id': 'GxfCe0Zo7D-175909-5588'
},
'playlist_count': 8,
# test the first video only to prevent lengthy tests
'playlist': [{
'info_dict': {
'id': '240385',
'ext': 'mp4',
'title': 'Indians introduce Encarnacion',
'description': 'Indians president of baseball operations Chris Antonetti and Edwin Encarnacion discuss the slugger\'s three-year contract with Cleveland',
'duration': 137.898,
'thumbnail': r're:https?://.*\.jpg$',
'upload_date': '20170105',
'timestamp': 1483649762,
},
}],
'params': {
# m3u8 download
'skip_download': True,
},
}
_URL_TEMPLATE = '//embed.sendtonews.com/player2/embedplayer.php?SC=%s'
@classmethod
def _extract_url(cls, webpage):
mobj = re.search(r'''(?x)<script[^>]+src=([\'"])
(?:https?:)?//embed\.sendtonews\.com/player/responsiveembed\.php\?
.*\bSC=(?P<SC>[0-9a-zA-Z-]+).*
\1>''', webpage)
if mobj:
sc = mobj.group('SC')
return cls._URL_TEMPLATE % sc
def _real_extract(self, url):
playlist_id = self._match_id(url)
data_url = update_url_query(
url.replace('embedplayer.php', 'data_read.php'),
{'cmd': 'loadInitial'})
playlist_data = self._download_json(data_url, playlist_id)
entries = []
for video in playlist_data['playlistData'][0]:
info_dict = self._parse_jwplayer_data(
video['jwconfiguration'],
require_title=False, m3u8_id='hls', rtmp_params={'no_resume': True})
for f in info_dict['formats']:
if f.get('tbr'):
continue
tbr = int_or_none(self._search_regex(
r'/(\d+)k/', f['url'], 'bitrate', default=None))
if not tbr:
continue
f.update({
'format_id': '%s-%d' % (determine_protocol(f), tbr),
'tbr': tbr,
})
self._sort_formats(info_dict['formats'], ('tbr', 'height', 'width', 'format_id'))
thumbnails = []
if video.get('thumbnailUrl'):
thumbnails.append({
'id': 'normal',
'url': video['thumbnailUrl'],
})
if video.get('smThumbnailUrl'):
thumbnails.append({
'id': 'small',
'url': video['smThumbnailUrl'],
})
info_dict.update({
'title': video['S_headLine'].strip(),
'description': unescapeHTML(video.get('S_fullStory')),
'thumbnails': thumbnails,
'duration': float_or_none(video.get('SM_length')),
'timestamp': parse_iso8601(video.get('S_sysDate'), delimiter=' '),
})
entries.append(info_dict)
return self.playlist_result(entries, playlist_id)
| {
"content_hash": "c574a5b4568689064c277f64cac3ed9a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 170,
"avg_line_length": 36.70192307692308,
"alnum_prop": 0.5074665968037726,
"repo_name": "bosstb/HaberPush",
"id": "9d9652949bb64ca2a15c02b3e0733dd9b7f42493",
"size": "3833",
"binary": false,
"copies": "62",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/sendtonews.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53"
},
{
"name": "HTML",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "4295385"
}
],
"symlink_target": ""
} |
import json
# 3rd-party modules
from flask import Flask
from flask import render_template
from flask import request
from flask import Response
import arrow
# local modules
from mass.monitor import swf
app = Flask(__name__)
def response(status_code, data):
return Response(json.dumps(data),
status=status_code,
mimetype='application/json')
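# Usage sketch (illustrative payload):
#   return response(200, {'message': 'ok'})  # JSON body served as application/json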
@app.errorhandler(Exception)
def internal_server_error(err):
return response(500, {
'message': 'Internal Server Error',
'details': str(err)})
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/v1/<region>/<domain>/jobs', methods=['GET'])
def list_jobs(region, domain):
oldest = request.args.get('oldest', arrow.utcnow().replace(days=-1).timestamp)
jobs = swf.list_jobs(
region=region,
domain=domain,
oldest=arrow.get(oldest).datetime)
return response(200, jobs)
@app.route('/api/v1/<region>/<domain>/jobs/<workflow_id>', methods=['GET'])
def retrive_job(region, domain, workflow_id):
oldest = request.args.get('oldest', arrow.utcnow().replace(days=-1).timestamp)
jobs = swf.retrieve_jobs(
region=region,
domain=domain,
workflow_id=workflow_id,
oldest=oldest)
for job in jobs:
if job['executionStatus'] == 'OPEN':
continue
for event in swf.retrieve_job_history(
region=region,
domain=domain,
workflow_id=workflow_id,
run_id=job['execution']['runId'],
reverse=True):
attr_name = [k for k in event.keys() if k.endswith('Attributes')][0]
result = event[attr_name]
result = {k: v for k, v in result.items() if not k.startswith('decision')}
job['executionResult'] = result
break
return response(200, jobs)
if __name__ == '__main__':
app.run(debug=True)
| {
"content_hash": "d864578764f96df5531aa075cb0f8ff6",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 86,
"avg_line_length": 27.577464788732396,
"alnum_prop": 0.6057201225740552,
"repo_name": "KKBOX/mass",
"id": "8166afd4d347d15b88ed8f2ee2b3b45c698a628e",
"size": "2024",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mass/monitor/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "705"
},
{
"name": "JavaScript",
"bytes": "9470"
},
{
"name": "Python",
"bytes": "55341"
}
],
"symlink_target": ""
} |
import numpy as np
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.markers import Diamond
from bokeh.io import curdoc, show
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(
title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,
h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)
glyph = Diamond(x="x", y="y", size="sizes", line_color="#1c9099", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
| {
"content_hash": "d511e75efefc315ff6b8cd6d8adb5a61",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 96,
"avg_line_length": 25.571428571428573,
"alnum_prop": 0.7162011173184357,
"repo_name": "percyfal/bokeh",
"id": "803161c816ccf35e5982b879c3cb7e20f74de2a5",
"size": "895",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tests/glyphs/Diamond.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "407208"
},
{
"name": "CoffeeScript",
"bytes": "1037299"
},
{
"name": "HTML",
"bytes": "45854"
},
{
"name": "JavaScript",
"bytes": "34591"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "2089385"
},
{
"name": "Shell",
"bytes": "15350"
},
{
"name": "TypeScript",
"bytes": "71975"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import framework
import importlib
import json
import logging
import os
import thread
from collections import OrderedDict
import django
from api.caching import listeners # noqa
from django.apps import apps
from framework.addons.utils import render_addon_capabilities
from framework.celery_tasks import handlers as celery_task_handlers
from framework.django import handlers as django_handlers
from framework.csrf import handlers as csrf_handlers
from framework.flask import add_handlers, app
# Import necessary to initialize the root logger
from framework.logging import logger as root_logger # noqa
from framework.postcommit_tasks import handlers as postcommit_handlers
from framework.sentry import sentry
from framework.transactions import handlers as transaction_handlers
# Imports necessary to connect signals
from website.archiver import listeners # noqa
from website.mails import listeners # noqa
from website.notifications import listeners # noqa
from website.identifiers import listeners # noqa
from website.reviews import listeners # noqa
from werkzeug.contrib.fixers import ProxyFix
logger = logging.getLogger(__name__)
def init_addons(settings, routes=True):
"""Initialize each addon in settings.ADDONS_REQUESTED.
:param module settings: The settings module.
:param bool routes: Add each addon's routing rules to the URL map.
"""
settings.ADDONS_AVAILABLE = getattr(settings, 'ADDONS_AVAILABLE', [])
settings.ADDONS_AVAILABLE_DICT = getattr(settings, 'ADDONS_AVAILABLE_DICT', OrderedDict())
for addon_name in settings.ADDONS_REQUESTED:
try:
addon = apps.get_app_config('addons_{}'.format(addon_name))
except LookupError:
addon = None
if addon:
if addon not in settings.ADDONS_AVAILABLE:
settings.ADDONS_AVAILABLE.append(addon)
settings.ADDONS_AVAILABLE_DICT[addon.short_name] = addon
settings.ADDON_CAPABILITIES = render_addon_capabilities(settings.ADDONS_AVAILABLE)
def attach_handlers(app, settings):
"""Add callback handlers to ``app`` in the correct order."""
# Add callback handlers to application
add_handlers(app, django_handlers.handlers)
add_handlers(app, celery_task_handlers.handlers)
add_handlers(app, transaction_handlers.handlers)
add_handlers(app, postcommit_handlers.handlers)
add_handlers(app, csrf_handlers.handlers)
# Attach handler for checking view-only link keys.
    # NOTE: This must be attached AFTER the TokuMX handlers to avoid calling
# a commitTransaction (in toku's after_request handler) when no transaction
# has been created
add_handlers(app, {'before_request': framework.sessions.prepare_private_key})
# framework.session's before_request handler must go after
# prepare_private_key, else view-only links won't work
add_handlers(app, {'before_request': framework.sessions.before_request,
'after_request': framework.sessions.after_request})
return app
def setup_django():
# Django App config
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api.base.settings')
django.setup()
def init_app(settings_module='website.settings', set_backends=True, routes=True,
attach_request_handlers=True):
"""Initializes the OSF. A sort of pseudo-app factory that allows you to
bind settings, set up routing, and set storage backends, but only acts on
a single app instance (rather than creating multiple instances).
:param settings_module: A string, the settings module to use.
:param set_backends: Deprecated.
:param routes: Whether to set the url map.
"""
# Ensure app initialization only takes place once
if app.config.get('IS_INITIALIZED', False) is True:
return app
logger.info('Initializing the application from process {}, thread {}.'.format(
os.getpid(), thread.get_ident()
))
setup_django()
# The settings module
settings = importlib.import_module(settings_module)
init_addons(settings, routes)
with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'wb') as fp:
json.dump(settings.NODE_CATEGORY_MAP, fp)
app.debug = settings.DEBUG_MODE
# default config for flask app, however, this does not affect setting cookie using set_cookie()
app.config['SESSION_COOKIE_SECURE'] = settings.SESSION_COOKIE_SECURE
app.config['SESSION_COOKIE_HTTPONLY'] = settings.SESSION_COOKIE_HTTPONLY
if routes:
try:
from website.routes import make_url_map
make_url_map(app)
except AssertionError: # Route map has already been created
pass
if attach_request_handlers:
attach_handlers(app, settings)
if app.debug:
logger.info("Sentry disabled; Flask's debug mode enabled")
else:
sentry.init_app(app)
logger.info("Sentry enabled; Flask's debug mode disabled")
apply_middlewares(app, settings)
app.config['IS_INITIALIZED'] = True
return app
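# Usage sketch (assumed call, mirroring the defaults above):
#   app = init_app(routes=True, attach_request_handlers=True)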
def apply_middlewares(flask_app, settings):
# Use ProxyFix to respect X-Forwarded-Proto header
# https://stackoverflow.com/questions/23347387/x-forwarded-proto-and-flask
if settings.LOAD_BALANCER:
flask_app.wsgi_app = ProxyFix(flask_app.wsgi_app)
return flask_app
| {
"content_hash": "c9afebd7e88737378473ffe743d097d5",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 99,
"avg_line_length": 37.54545454545455,
"alnum_prop": 0.7204321102626188,
"repo_name": "erinspace/osf.io",
"id": "b3b43e0c19e18992507366c2f7d6d5f13d4e16f7",
"size": "5393",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "website/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92866"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "272808"
},
{
"name": "JavaScript",
"bytes": "1796633"
},
{
"name": "Mako",
"bytes": "665847"
},
{
"name": "Python",
"bytes": "8478871"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from .base import BaseCommand
class NodesInfoCommand(BaseCommand):
command_name = "elasticsearch:nodes-info"
def is_enabled(self):
return True
def run_request(self):
options = dict()
return self.client.nodes.info(**options)
| {
"content_hash": "a2163f84ba7fb637627fb63c6a74da27",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 22,
"alnum_prop": 0.6666666666666666,
"repo_name": "KunihikoKido/sublime-elasticsearch-client",
"id": "f1ea6fd6d885fa3628f9995971a24e5a35a15849",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/nodes_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "17574"
},
{
"name": "Python",
"bytes": "1492349"
}
],
"symlink_target": ""
} |
import os
import pickle
from datetime import datetime
from typing import (
Dict, Optional, Mapping, Callable, Any, List, Type, Union, MutableMapping
)
import dbt.exceptions
import dbt.flags
from dbt import deprecations
from dbt.adapters.factory import get_relation_class_by_name
from dbt.helper_types import PathSet
from dbt.include.global_project import PACKAGES
from dbt.logger import GLOBAL_LOGGER as logger, DbtProcessState
from dbt.node_types import NodeType
from dbt.clients.jinja import get_rendered
from dbt.clients.system import make_directory
from dbt.config import Project, RuntimeConfig
from dbt.context.docs import generate_runtime_docs
from dbt.contracts.graph.compiled import NonSourceNode
from dbt.contracts.graph.manifest import Manifest, FilePath, FileHash, Disabled
from dbt.contracts.graph.parsed import (
ParsedSourceDefinition, ParsedNode, ParsedMacro, ColumnInfo,
)
from dbt.parser.base import BaseParser, Parser
from dbt.parser.analysis import AnalysisParser
from dbt.parser.data_test import DataTestParser
from dbt.parser.docs import DocumentationParser
from dbt.parser.hooks import HookParser
from dbt.parser.macros import MacroParser
from dbt.parser.models import ModelParser
from dbt.parser.results import ParseResult
from dbt.parser.schemas import SchemaParser
from dbt.parser.search import FileBlock
from dbt.parser.seeds import SeedParser
from dbt.parser.snapshots import SnapshotParser
from dbt.parser.sources import patch_sources
from dbt.version import __version__
PARTIAL_PARSE_FILE_NAME = 'partial_parse.pickle'
PARSING_STATE = DbtProcessState('parsing')
DEFAULT_PARTIAL_PARSE = False
_parser_types: List[Type[Parser]] = [
ModelParser,
SnapshotParser,
AnalysisParser,
DataTestParser,
HookParser,
SeedParser,
DocumentationParser,
SchemaParser,
]
# TODO: this should be calculated per-file based on the vars() calls made in
# parsing, so changing one var doesn't invalidate everything. also there should
# be something like that for env_var - currently, changing env_vars in a way that
# impacts graph selection or configs will result in weird test failures.
# finally, we should hash the actual profile used, not just root project +
# profiles.yml + relevant args. While sufficient, it is definitely overkill.
def make_parse_result(
config: RuntimeConfig, all_projects: Mapping[str, Project]
) -> ParseResult:
"""Make a ParseResult from the project configuration and the profile."""
# if any of these change, we need to reject the parser
vars_hash = FileHash.from_contents(
'\x00'.join([
getattr(config.args, 'vars', '{}') or '{}',
getattr(config.args, 'profile', '') or '',
getattr(config.args, 'target', '') or '',
__version__
])
)
profile_path = os.path.join(config.args.profiles_dir, 'profiles.yml')
with open(profile_path) as fp:
profile_hash = FileHash.from_contents(fp.read())
project_hashes = {}
for name, project in all_projects.items():
path = os.path.join(project.project_root, 'dbt_project.yml')
with open(path) as fp:
project_hashes[name] = FileHash.from_contents(fp.read())
return ParseResult(
vars_hash=vars_hash,
profile_hash=profile_hash,
project_hashes=project_hashes,
)
class ManifestLoader:
def __init__(
self,
root_project: RuntimeConfig,
all_projects: Mapping[str, Project],
macro_hook: Optional[Callable[[Manifest], Any]] = None,
) -> None:
self.root_project: RuntimeConfig = root_project
self.all_projects: Mapping[str, Project] = all_projects
self.macro_hook: Callable[[Manifest], Any]
if macro_hook is None:
self.macro_hook = lambda m: None
else:
self.macro_hook = macro_hook
self.results: ParseResult = make_parse_result(
root_project, all_projects,
)
self._loaded_file_cache: Dict[str, FileBlock] = {}
def _load_macros(
self,
old_results: Optional[ParseResult],
internal_manifest: Optional[Manifest] = None,
) -> None:
projects = self.all_projects
if internal_manifest is not None:
projects = {
k: v for k, v in self.all_projects.items() if k not in PACKAGES
}
self.results.macros.update(internal_manifest.macros)
self.results.files.update(internal_manifest.files)
# TODO: go back to skipping the internal manifest during macro parsing
for project in projects.values():
parser = MacroParser(self.results, project)
for path in parser.search():
self.parse_with_cache(path, parser, old_results)
def parse_with_cache(
self,
path: FilePath,
parser: BaseParser,
old_results: Optional[ParseResult],
) -> None:
block = self._get_file(path, parser)
if not self._get_cached(block, old_results, parser):
parser.parse_file(block)
def _get_cached(
self,
block: FileBlock,
old_results: Optional[ParseResult],
parser: BaseParser,
) -> bool:
# TODO: handle multiple parsers w/ same files, by
# tracking parser type vs node type? Or tracking actual
# parser type during parsing?
if old_results is None:
return False
if old_results.has_file(block.file):
return self.results.sanitized_update(
block.file, old_results, parser.resource_type
)
return False
def _get_file(self, path: FilePath, parser: BaseParser) -> FileBlock:
if path.search_key in self._loaded_file_cache:
block = self._loaded_file_cache[path.search_key]
else:
block = FileBlock(file=parser.load_file(path))
self._loaded_file_cache[path.search_key] = block
return block
def parse_project(
self,
project: Project,
macro_manifest: Manifest,
old_results: Optional[ParseResult],
) -> None:
parsers: List[Parser] = []
for cls in _parser_types:
parser = cls(self.results, project, self.root_project,
macro_manifest)
parsers.append(parser)
# per-project cache.
self._loaded_file_cache.clear()
for parser in parsers:
for path in parser.search():
self.parse_with_cache(path, parser, old_results)
def load_only_macros(self) -> Manifest:
old_results = self.read_parse_results()
self._load_macros(old_results, internal_manifest=None)
# make a manifest with just the macros to get the context
macro_manifest = Manifest.from_macros(
macros=self.results.macros,
files=self.results.files
)
return macro_manifest
def load(self, internal_manifest: Optional[Manifest] = None):
old_results = self.read_parse_results()
if old_results is not None:
logger.debug('Got an acceptable cached parse result')
self._load_macros(old_results, internal_manifest=internal_manifest)
# make a manifest with just the macros to get the context
macro_manifest = Manifest.from_macros(
macros=self.results.macros,
files=self.results.files
)
self.macro_hook(macro_manifest)
for project in self.all_projects.values():
# parse a single project
self.parse_project(project, macro_manifest, old_results)
def write_parse_results(self):
path = os.path.join(self.root_project.target_path,
PARTIAL_PARSE_FILE_NAME)
make_directory(self.root_project.target_path)
with open(path, 'wb') as fp:
pickle.dump(self.results, fp)
def matching_parse_results(self, result: ParseResult) -> bool:
"""Compare the global hashes of the read-in parse results' values to
the known ones, and return if it is ok to re-use the results.
"""
try:
if result.dbt_version != __version__:
logger.debug(
'dbt version mismatch: {} != {}, cache invalidated'
.format(result.dbt_version, __version__)
)
return False
except AttributeError:
logger.debug('malformed result file, cache invalidated')
return False
valid = True
if self.results.vars_hash != result.vars_hash:
logger.debug('vars hash mismatch, cache invalidated')
valid = False
if self.results.profile_hash != result.profile_hash:
logger.debug('profile hash mismatch, cache invalidated')
valid = False
missing_keys = {
k for k in self.results.project_hashes
if k not in result.project_hashes
}
if missing_keys:
logger.debug(
'project hash mismatch: values missing, cache invalidated: {}'
.format(missing_keys)
)
valid = False
for key, new_value in self.results.project_hashes.items():
if key in result.project_hashes:
old_value = result.project_hashes[key]
if new_value != old_value:
logger.debug(
'For key {}, hash mismatch ({} -> {}), cache '
'invalidated'
.format(key, old_value, new_value)
)
valid = False
return valid
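    # Editorial summary (not in the original source): a cached ParseResult is
    # reused only when the dbt version, vars hash, profile hash, and every
    # per-project hash all match the current run; any single mismatch
    # invalidates the whole cache rather than just the affected project.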
def _partial_parse_enabled(self):
# if the CLI is set, follow that
if dbt.flags.PARTIAL_PARSE is not None:
return dbt.flags.PARTIAL_PARSE
# if the config is set, follow that
elif self.root_project.config.partial_parse is not None:
return self.root_project.config.partial_parse
else:
return DEFAULT_PARTIAL_PARSE
def read_parse_results(self) -> Optional[ParseResult]:
if not self._partial_parse_enabled():
logger.debug('Partial parsing not enabled')
return None
path = os.path.join(self.root_project.target_path,
PARTIAL_PARSE_FILE_NAME)
if os.path.exists(path):
try:
with open(path, 'rb') as fp:
result: ParseResult = pickle.load(fp)
# keep this check inside the try/except in case something about
# the file has changed in weird ways, perhaps due to being a
# different version of dbt
if self.matching_parse_results(result):
return result
except Exception as exc:
logger.debug(
'Failed to load parsed file from disk at {}: {}'
.format(path, exc),
exc_info=True
)
return None
def process_manifest(self, manifest: Manifest):
project_name = self.root_project.project_name
process_sources(manifest, project_name)
process_refs(manifest, project_name)
process_docs(manifest, self.root_project)
def create_manifest(self) -> Manifest:
# before we do anything else, patch the sources. This mutates
# results.disabled, so it needs to come before the final 'disabled'
# list is created
sources = patch_sources(self.results, self.root_project)
disabled = []
for value in self.results.disabled.values():
disabled.extend(value)
nodes: MutableMapping[str, NonSourceNode] = {
k: v for k, v in self.results.nodes.items()
}
manifest = Manifest(
nodes=nodes,
sources=sources,
macros=self.results.macros,
docs=self.results.docs,
generated_at=datetime.utcnow(),
metadata=self.root_project.get_metadata(),
disabled=disabled,
files=self.results.files,
)
manifest.patch_nodes(self.results.patches)
manifest.patch_macros(self.results.macro_patches)
self.process_manifest(manifest)
return manifest
@classmethod
def load_all(
cls,
root_config: RuntimeConfig,
internal_manifest: Optional[Manifest],
macro_hook: Callable[[Manifest], Any],
) -> Manifest:
with PARSING_STATE:
projects = root_config.load_dependencies()
v1_configs = []
for project in projects.values():
if project.config_version == 1:
v1_configs.append(f'\n\n - {project.project_name}')
if v1_configs:
deprecations.warn(
'dbt-project-yaml-v1',
project_names=''.join(v1_configs)
)
loader = cls(root_config, projects, macro_hook)
loader.load(internal_manifest=internal_manifest)
loader.write_parse_results()
manifest = loader.create_manifest()
_check_manifest(manifest, root_config)
manifest.build_flat_graph()
return manifest
@classmethod
def load_internal(cls, root_config: RuntimeConfig) -> Manifest:
with PARSING_STATE:
projects = load_internal_projects(root_config)
loader = cls(root_config, projects)
return loader.load_only_macros()
def _check_resource_uniqueness(
manifest: Manifest,
config: RuntimeConfig,
) -> None:
names_resources: Dict[str, NonSourceNode] = {}
alias_resources: Dict[str, NonSourceNode] = {}
for resource, node in manifest.nodes.items():
if node.resource_type not in NodeType.refable():
continue
# appease mypy - sources aren't refable!
assert not isinstance(node, ParsedSourceDefinition)
name = node.name
# the full node name is really defined by the adapter's relation
relation_cls = get_relation_class_by_name(config.credentials.type)
relation = relation_cls.create_from(config=config, node=node)
full_node_name = str(relation)
existing_node = names_resources.get(name)
if existing_node is not None:
dbt.exceptions.raise_duplicate_resource_name(
existing_node, node
)
existing_alias = alias_resources.get(full_node_name)
if existing_alias is not None:
dbt.exceptions.raise_ambiguous_alias(
existing_alias, node, full_node_name
)
names_resources[name] = node
alias_resources[full_node_name] = node
def _warn_for_unused_resource_config_paths(
manifest: Manifest, config: RuntimeConfig
) -> None:
resource_fqns: Mapping[str, PathSet] = manifest.get_resource_fqns()
disabled_fqns: PathSet = frozenset(tuple(n.fqn) for n in manifest.disabled)
config.warn_for_unused_resource_config_paths(resource_fqns, disabled_fqns)
def _check_manifest(manifest: Manifest, config: RuntimeConfig) -> None:
_check_resource_uniqueness(manifest, config)
_warn_for_unused_resource_config_paths(manifest, config)
def internal_project_names():
return iter(PACKAGES.values())
def _load_projects(config, paths):
for path in paths:
try:
project = config.new_project(path)
except dbt.exceptions.DbtProjectError as e:
raise dbt.exceptions.DbtProjectError(
'Failed to read package at {}: {}'
.format(path, e)
)
else:
yield project.project_name, project
def _get_node_column(node, column_name):
"""Given a ParsedNode, add some fields that might be missing. Return a
reference to the dict that refers to the given column, creating it if
it doesn't yet exist.
"""
if column_name in node.columns:
column = node.columns[column_name]
else:
        column = ColumnInfo(name=column_name)
        node.columns[column_name] = column
return column
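# Illustrative usage of the helper above (editorial sketch, not in the
# original source; the column name is hypothetical):
#
#   col = _get_node_column(node, 'order_id')
#   col.description = 'primary key'   # mutates node.columns['order_id']
#
# The helper guarantees the ColumnInfo entry exists before returning it.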
DocsContextCallback = Callable[
[Union[ParsedNode, ParsedSourceDefinition]],
Dict[str, Any]
]
def _process_docs_for_node(
context: Dict[str, Any],
node: NonSourceNode,
):
node.description = get_rendered(node.description, context)
for column_name, column in node.columns.items():
column.description = get_rendered(column.description, context)
def _process_docs_for_source(
context: Dict[str, Any],
source: ParsedSourceDefinition,
):
table_description = source.description
source_description = source.source_description
table_description = get_rendered(table_description, context)
source_description = get_rendered(source_description, context)
source.description = table_description
source.source_description = source_description
for column in source.columns.values():
column_desc = column.description
column_desc = get_rendered(column_desc, context)
column.description = column_desc
def _process_docs_for_macro(
context: Dict[str, Any], macro: ParsedMacro
) -> None:
macro.description = get_rendered(macro.description, context)
for arg in macro.arguments:
arg.description = get_rendered(arg.description, context)
def process_docs(manifest: Manifest, config: RuntimeConfig):
for node in manifest.nodes.values():
ctx = generate_runtime_docs(
config,
node,
manifest,
config.project_name,
)
_process_docs_for_node(ctx, node)
for source in manifest.sources.values():
ctx = generate_runtime_docs(
config,
source,
manifest,
config.project_name,
)
_process_docs_for_source(ctx, source)
for macro in manifest.macros.values():
ctx = generate_runtime_docs(
config,
macro,
manifest,
config.project_name,
)
_process_docs_for_macro(ctx, macro)
def _process_refs_for_node(
manifest: Manifest, current_project: str, node: NonSourceNode
):
"""Given a manifest and a node in that manifest, process its refs"""
for ref in node.refs:
target_model: Optional[Union[Disabled, NonSourceNode]] = None
target_model_name: str
target_model_package: Optional[str] = None
if len(ref) == 1:
target_model_name = ref[0]
elif len(ref) == 2:
target_model_package, target_model_name = ref
else:
raise dbt.exceptions.InternalException(
f'Refs should always be 1 or 2 arguments - got {len(ref)}'
)
target_model = manifest.resolve_ref(
target_model_name,
target_model_package,
current_project,
node.package_name,
)
if target_model is None or isinstance(target_model, Disabled):
# This may raise. Even if it doesn't, we don't want to add
# this node to the graph b/c there is no destination node
node.config.enabled = False
dbt.utils.invalid_ref_fail_unless_test(
node, target_model_name, target_model_package,
disabled=(isinstance(target_model, Disabled))
)
continue
target_model_id = target_model.unique_id
node.depends_on.nodes.append(target_model_id)
# TODO: I think this is extraneous, node should already be the same
# as manifest.nodes[node.unique_id] (we're mutating node here, not
# making a new one)
manifest.update_node(node)
def process_refs(manifest: Manifest, current_project: str):
for node in manifest.nodes.values():
_process_refs_for_node(manifest, current_project, node)
return manifest
def _process_sources_for_node(
manifest: Manifest, current_project: str, node: NonSourceNode
):
target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None
for source_name, table_name in node.sources:
target_source = manifest.resolve_source(
source_name,
table_name,
current_project,
node.package_name,
)
if target_source is None or isinstance(target_source, Disabled):
            # this follows the same pattern as refs
node.config.enabled = False
dbt.utils.invalid_source_fail_unless_test(
node,
source_name,
table_name,
disabled=(isinstance(target_source, Disabled))
)
continue
target_source_id = target_source.unique_id
node.depends_on.nodes.append(target_source_id)
manifest.update_node(node)
def process_sources(manifest: Manifest, current_project: str):
for node in manifest.nodes.values():
if node.resource_type == NodeType.Source:
continue
assert not isinstance(node, ParsedSourceDefinition)
_process_sources_for_node(manifest, current_project, node)
return manifest
def process_macro(
config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro
) -> None:
ctx = generate_runtime_docs(
config,
macro,
manifest,
config.project_name,
)
_process_docs_for_macro(ctx, macro)
def process_node(
config: RuntimeConfig, manifest: Manifest, node: NonSourceNode
):
_process_sources_for_node(
manifest, config.project_name, node
)
_process_refs_for_node(manifest, config.project_name, node)
ctx = generate_runtime_docs(config, node, manifest, config.project_name)
_process_docs_for_node(ctx, node)
def load_internal_projects(config):
return dict(_load_projects(config, internal_project_names()))
def load_internal_manifest(config: RuntimeConfig) -> Manifest:
return ManifestLoader.load_internal(config)
def load_manifest(
config: RuntimeConfig,
internal_manifest: Optional[Manifest],
macro_hook: Callable[[Manifest], Any],
) -> Manifest:
return ManifestLoader.load_all(config, internal_manifest, macro_hook)
| {
"content_hash": "0c33b61c85fc52a8fd13b3e125cbe562",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 79,
"avg_line_length": 34.83151326053042,
"alnum_prop": 0.6219375643839298,
"repo_name": "fishtown-analytics/dbt",
"id": "60ef395fdbdbe1d5ca790577b08f52a4419e650f",
"size": "22327",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/octavius-catto",
"path": "core/dbt/parser/manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1247"
},
{
"name": "HTML",
"bytes": "1343185"
},
{
"name": "Makefile",
"bytes": "997"
},
{
"name": "PLpgSQL",
"bytes": "1649"
},
{
"name": "Python",
"bytes": "2059566"
},
{
"name": "Shell",
"bytes": "2419"
},
{
"name": "TSQL",
"bytes": "396955"
}
],
"symlink_target": ""
} |
import requests
import urllib
from datetime import datetime
from test_base import TestBase
class TestClass(TestBase):
def __init__(self, *args, **kwargs):
super(TestClass, self).__init__(*args, **kwargs)
def plan_trip(self, test):
return None
| {
"content_hash": "06a2ce885faa65d379a6154a26d7f9bf",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 19.428571428571427,
"alnum_prop": 0.6691176470588235,
"repo_name": "plannerstack/testset",
"id": "d439b7a578e29d941d009efe3c9cc40592e35b9b",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmri/test_skel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "369"
},
{
"name": "Python",
"bytes": "32250"
},
{
"name": "Shell",
"bytes": "1579"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .setup_reflective_engine import setup_reflective_engine
| {
"content_hash": "bff24c64def7f2f2483757025738fb40",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 60,
"avg_line_length": 50,
"alnum_prop": 0.82,
"repo_name": "jackfirth/sqlalchemy-fp",
"id": "5e7b64ada1793779f7094bcdf5de0fdf8e177c60",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sqlalchemy_fp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3273"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Redirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('from_path', models.CharField(db_index=True, max_length=255)),
('to_path', models.CharField(max_length=255)),
('mode', models.CharField(choices=[('ABS', 'Absolute'), ('FWD', 'Forward')], default='FWD', max_length=20)),
('purpose', models.CharField(choices=[('MARKETING', 'Marketing'), ('LEGACY', 'Legacy')], max_length=20)),
],
options={
'verbose_name': 'redirect',
'verbose_name_plural': 'redirects',
'ordering': ('from_path',),
'unique_together': {('from_path',)},
},
),
]
| {
"content_hash": "de434f063e226a0340d8c72f37b2ee15",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 124,
"avg_line_length": 35.67857142857143,
"alnum_prop": 0.5195195195195195,
"repo_name": "sussexstudent/falmer",
"id": "9eab9cfbe6d90f241213efd19d86ec69d7d51c84",
"size": "1048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falmer/redirects/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "Dockerfile",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "8269"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "513792"
},
{
"name": "Shell",
"bytes": "8120"
}
],
"symlink_target": ""
} |
import sys
python3 = sys.version_info[0] == 3
python2 = sys.version_info[0] == 2
def main(verbose):
basenames = [
"test_template",
"test_engine",
"test_preprocess",
"test_htmlhelper",
"test_main",
"test_encoding",
"test_users_guide",
"test_faq",
"test_examples",
]
if python3:
basenames.remove("test_encoding")
if verbose:
import os
for basename in basenames:
print('')
print("************************************************* " + basename)
os.system("python %s.py" % basename)
else:
import unittest
suite = unittest.TestSuite()
for basename in basenames:
test_module = __import__(basename)
suite.addTest(unittest.findTestCases(test_module))
unittest.TextTestRunner(verbosity=1).run(suite)
#unittest.TextTestRunner(verbosity=2).run(test_template.TemplateTest)
if __name__ == '__main__':
import sys
verbose = len(sys.argv) > 1 and sys.argv[1] == '-v'
main(verbose)
sys.exit(0)
| {
"content_hash": "5bce40bed32565c721d644e725295f12",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 82,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.53125,
"repo_name": "mikedougherty/tenjin",
"id": "d46aa37aee4a09172ece8775eacd5b05db27852e",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "217942"
}
],
"symlink_target": ""
} |
# This code is part of CMPL
#
# Copyright (C) 2007, 2008, 2009, 2010, 2011
# Thomas Schleiff - Halle(Saale), Germany and
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# CMPL is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# CMPL is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CMPL is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
import sys
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def cleanBlanks(str):
return str.replace('%', ' ').strip()
if not module_exists('gurobipy'):
    print "Can't find Gurobi"
quit(-1)
from gurobipy import *
i = 1
for arg in sys.argv:
#print "%i %s\n" % (i, arg)
if i == 2:
os.chdir(cleanBlanks(arg))
if i == 3:
solutionPool = arg
if i == 4:
model = read(cleanBlanks(arg))
if i == 5:
solFile = cleanBlanks(arg)
if i > 5:
s = 'model.params.%s' % arg
exec(s)
i += 1
nrOfSolutions = 0
def writeSol():
f.write(' <solution>\n')
f.write(' <header')
s = ' idx="%g"' % model.SolCount
if model.status == GRB.status.OPTIMAL:
s = s + ' value="%g"' % model.objVal
if model.isMIP == 1:
s = s + ' status="integer optimal solution"/>\n'
else:
s = s + ' status="optimal solution"/>\n'
else:
s = s + ' value="0"'
s = s + ' status="Infeasible or unbounded model"/>\n'
f.write(s)
if model.status == GRB.status.OPTIMAL:
f.write(' <variables>\n')
i=0
for v in model.getVars():
if model.isMIP == 1:
s = ' <variable idx="%g" activity="%e"/>\n' % (i,v.x)
else:
s = ' <variable idx="%g" activity="%e" marginal="%e"/>\n' % (i,v.x, v.RC)
f.write(s)
i=i+1
f.write(' </variables>\n')
f.write(' <constraints>\n')
i=0
for c in model.getConstrs():
if model.isMIP == 1:
s = ' <constraint idx="%g" activity="%e"/>\n' % (i,c.RHS-c.Slack)
else:
s = ' <constraint idx="%g" activity="%e" marginal="%e"/>\n' % (i,c.RHS-c.Slack, c.Pi)
f.write(s)
i=i+1
f.write(' </constraints>\n')
f.write(' </solution>\n')
def mycallback(model, where):
if solutionPool == "1":
if where == GRB.callback.MIPSOL:
f.write(' <solution>\n')
f.write(' <header')
s = ' idx="%g"' % int(model.cbGet(GRB.callback.MIPSOL_SOLCNT))
s = s + ' value="%g"' % model.cbGet(GRB.callback.MIPSOL_OBJ)
s = s + ' status="integer feasible solution"/>\n'
f.write(s)
f.write(' <variables>\n')
#print model.cbGetSolution(model.getVars())
vList = model.cbGetSolution(model.getVars())
i=0
for v in vList:
s = ' <variable idx="%g" activity="%e"/>\n' % (i,v)
f.write(s)
i=i+1
f.write(' </variables>\n')
f.write(' </solution>\n')
f = open(solFile, 'w')
f.write('<?xml version = "1.0" encoding="UTF-8" standalone="yes"?>\n')
f.write('<CmplGurobiSolutions>\n')
model.optimize(mycallback)
print 'Write solution'
nrOfSolutions = nrOfSolutions + 1
writeSol()
f.write('</CmplGurobiSolutions>')
f.close()
print '...done' | {
"content_hash": "4fbe319acdc8c9adb64b8c6334abb127",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 91,
"avg_line_length": 25.926666666666666,
"alnum_prop": 0.6052969915145282,
"repo_name": "Mangara/ArboralExplorer",
"id": "6d3d7ae8af90e397dc906def258af9314dc16be5",
"size": "3962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Cmpl/bin/gurobiCmpl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2773"
},
{
"name": "Java",
"bytes": "140503"
},
{
"name": "Python",
"bytes": "194573"
},
{
"name": "Shell",
"bytes": "6500"
}
],
"symlink_target": ""
} |
import logging
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
logger = logging.getLogger(__name__)
try:
from py3o.formats import Formats
except ImportError:
logger.debug('Cannot import py3o.formats')
class IrActionsReportXml(models.Model):
""" Inherit from ir.actions.report.xml to allow customizing the template
    file. The user can choose a template from a list.
The list is configurable in the configuration tab, see py3o_template.py
"""
_inherit = 'ir.actions.report.xml'
@api.one
@api.constrains("py3o_filetype", "report_type")
def _check_py3o_filetype(self):
if self.report_type == "py3o" and not self.py3o_filetype:
raise ValidationError(_(
"Field 'Output Format' is required for Py3O report"))
@api.one
@api.constrains("py3o_is_local_fusion", "py3o_server_id",
"py3o_filetype")
def _check_py3o_server_id(self):
if self.report_type != "py3o":
return
is_native = Formats().get_format(self.py3o_filetype).native
if ((not is_native or not self.py3o_is_local_fusion) and
not self.py3o_server_id):
raise ValidationError(_(
"Can not use not native format in local fusion. "
"Please specify a Fusion Server"))
@api.model
def _get_py3o_filetypes(self):
formats = Formats()
names = formats.get_known_format_names()
selections = []
for name in names:
description = name
if formats.get_format(name).native:
description = description + " " + _("(Native)")
selections.append((name, description))
return selections
py3o_filetype = fields.Selection(
selection="_get_py3o_filetypes",
string="Output Format")
py3o_template_id = fields.Many2one(
'py3o.template',
"Template")
py3o_is_local_fusion = fields.Boolean(
"Local Fusion",
help="Native formats will be processed without a server. "
"You must use this mode if you call methods on your model into "
"the template.",
default=True)
py3o_server_id = fields.Many2one(
"py3o.server",
"Fusion Server")
module = fields.Char(
"Module",
help="The implementer module that provides this report")
py3o_template_fallback = fields.Char(
"Fallback",
size=128,
help=(
"If the user does not provide a template this will be used "
"it should be a relative path to root of YOUR module "
"or an absolute path on your server."
))
report_type = fields.Selection(selection_add=[('py3o', "Py3o")])
@api.model
def render_report(self, res_ids, name, data):
action_py3o_report = self.search(
[("report_name", "=", name),
("report_type", "=", "py3o")])
if action_py3o_report:
return self.env['py3o.report'].create({
'ir_actions_report_xml_id': action_py3o_report.id
}).create_report(res_ids, data)
return super(IrActionsReportXml, self).render_report(
res_ids, name, data)
| {
"content_hash": "d7f208c6d035936d0cd1a22432c017ea",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 35.78021978021978,
"alnum_prop": 0.5985872235872236,
"repo_name": "vileopratama/vitech",
"id": "cfbfeb41fc8523d56b12c6dfb0211e798c3a341b",
"size": "3401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "new-addons/reporting-engine-10.0/report_py3o/models/ir_actions_report_xml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
"""EmPOWER Runtime."""
import socket
import fcntl
import struct
import tornado.ioloop
from construct import Container
from construct import Struct
from construct import UBInt16
from construct import Bytes
from sqlalchemy.exc import IntegrityError
from empower.datatypes.etheraddress import EtherAddress
from empower.persistence import Session
from empower.persistence.persistence import TblTenant
from empower.persistence.persistence import TblAccount
from empower.persistence.persistence import TblBelongs
from empower.persistence.persistence import TblPendingTenant
from empower.core.account import Account
from empower.core.tenant import Tenant
from empower.core.acl import ACL
from empower.persistence.persistence import TblAllow
from empower.persistence.persistence import TblDeny
from empower.persistence.persistence import TblIMSI2MAC
import empower.logger
LOG = empower.logger.get_logger()
DEFAULT_PERIOD = 5000
CTRL_ADV = Struct("ctrl_adv", Bytes("dst", 6),
Bytes("src", 6),
UBInt16("eth_type"),
Bytes("ctrl", 4),
UBInt16("port"))
def generate_default_accounts():
"""Generate default accounts.
    Three default accounts (one root account and two user accounts) are
    created the first time the controller is started.
"""
if not Session().query(TblAccount).all():
LOG.info("Generating default accounts")
session = Session()
session.add(TblAccount(username="root",
password="root",
role="admin",
name="Administrator",
surname="",
email="admin@empower.net"))
session.add(TblAccount(username="foo",
password="foo",
role="user",
name="Foo",
surname="",
email="foo@empower.net"))
session.add(TblAccount(username="bar",
password="bar",
role="user",
name="Bar",
surname="",
email="bar@empower.net"))
session.commit()
class EmpowerRuntime(object):
"""EmPOWER Runtime."""
def __init__(self, options):
self.components = {}
self.accounts = {}
self.tenants = {}
self.lvaps = {}
self.ues = {}
self.wtps = {}
self.cpps = {}
self.vbses = {}
self.feeds = {}
self.allowed = {}
self.denied = {}
self.imsi2mac = {}
LOG.info("Starting EmPOWER Runtime")
# generate default users if database is empty
generate_default_accounts()
# load defaults
LOG.info("Loading EmPOWER Runtime defaults")
self.__load_accounts()
self.__load_tenants()
self.__load_acl()
self.__load_imsi2mac()
if options.ctrl_adv:
self.__ifname = options.ctrl_adv_iface
self.__ctrl_ip = options.ctrl_ip
self.__ctrl_port = options.ctrl_port
self.__start_adv()
def __start_adv(self):
"""Star ctrl advertising."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(sock.fileno(), 0x8927, struct.pack('256s',
self.__ifname[:15].encode('utf-8')))
src = EtherAddress(':'.join(['%02x' % char for char in info[18:24]]))
dst = EtherAddress("FF:FF:FF:FF:FF:FF")
adv = Container(dst=dst.to_raw(),
src=src.to_raw(),
eth_type=0xEEEE,
ctrl=self.__ctrl_ip.packed,
port=self.__ctrl_port)
self.__msg = CTRL_ADV.build(adv)
self.__auto_cfg = \
tornado.ioloop.PeriodicCallback(self.__auto_cfg_loop, 2000)
self.__auto_cfg.start()
def __auto_cfg_loop(self):
"""Send ctrl advertisement."""
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
sock.bind((self.__ifname, 0))
sock.send(self.__msg)
def __load_accounts(self):
"""Load accounts table."""
for account in Session().query(TblAccount).all():
self.accounts[account.username] = Account(account.username,
account.password,
account.name,
account.surname,
account.email,
account.role)
def __load_tenants(self):
"""Load Tenants."""
for tenant in Session().query(TblTenant).all():
if tenant.tenant_id in self.tenants:
raise KeyError(tenant.tenant_id)
self.tenants[tenant.tenant_id] = \
Tenant(tenant.tenant_id,
tenant.tenant_name,
tenant.owner,
tenant.desc,
tenant.bssid_type,
tenant.plmn_id)
def __load_imsi2mac(self):
"""Load IMSI to MAC mapped values."""
for entry in Session().query(TblIMSI2MAC).all():
self.imsi2mac[entry.imsi] = entry.addr
def add_imsi2mac(self, imsi, addr):
"""Add IMSI to MAC mapped value to table."""
imsi2mac = Session().query(TblIMSI2MAC) \
.filter(TblIMSI2MAC.imsi == imsi) \
.first()
if imsi2mac:
raise ValueError(imsi)
try:
session = Session()
session.add(TblIMSI2MAC(imsi=imsi, addr=addr))
session.commit()
except IntegrityError:
session.rollback()
raise ValueError("MAC address must be unique %s", addr)
self.imsi2mac[imsi] = addr
def remove_imsi2mac(self, imsi):
"""Remove IMSI to MAC mapped value from table."""
imsi2mac = Session().query(TblIMSI2MAC) \
.filter(TblIMSI2MAC.imsi == imsi) \
.first()
if not imsi2mac:
raise KeyError(imsi)
session = Session()
session.delete(imsi2mac)
session.commit()
del self.imsi2mac[imsi]
def __load_acl(self):
""" Load ACL list. """
for allow in Session().query(TblAllow).all():
if allow.addr in self.allowed:
raise ValueError(allow.addr_str)
acl = ACL(allow.addr, allow.label)
self.allowed[allow.addr] = acl
for deny in Session().query(TblDeny).all():
if deny.addr in self.denied:
raise ValueError(deny.addr_str)
acl = ACL(deny.addr, deny.label)
self.denied[deny.addr] = acl
def add_allowed(self, sta_addr, label):
""" Add entry to ACL. """
allow = Session().query(TblAllow) \
.filter(TblAllow.addr == sta_addr) \
.first()
if allow:
raise ValueError(sta_addr)
session = Session()
session.add(TblAllow(addr=sta_addr, label=label))
session.commit()
acl = ACL(sta_addr, label)
self.allowed[sta_addr] = acl
return acl
def remove_allowed(self, sta_addr):
""" Remove entry from ACL. """
allow = Session().query(TblAllow) \
.filter(TblAllow.addr == sta_addr) \
.first()
if not allow:
raise KeyError(sta_addr)
session = Session()
session.delete(allow)
session.commit()
del self.allowed[sta_addr]
def add_denied(self, sta_addr, label):
""" Add entry to ACL. """
deny = Session().query(TblDeny) \
.filter(TblDeny.addr == sta_addr) \
.first()
if deny:
raise ValueError(sta_addr)
session = Session()
session.add(TblDeny(addr=sta_addr, label=label))
session.commit()
acl = ACL(sta_addr, label)
self.denied[sta_addr] = acl
return acl
def remove_denied(self, sta_addr):
""" Remove entry from ACL. """
deny = Session().query(TblDeny) \
.filter(TblDeny.addr == sta_addr) \
.first()
if not deny:
raise KeyError(sta_addr)
session = Session()
session.delete(deny)
session.commit()
del self.denied[sta_addr]
def is_allowed(self, src):
""" Check if station is allowed. """
return (self.allowed and src in self.allowed) or not self.allowed
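    # Editorial note (not in the original source): an empty allow-list means
    # every station passes is_allowed(); once at least one entry exists, only
    # the listed stations are allowed.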
def is_denied(self, src):
""" Check if station is denied. """
return self.denied and src in self.denied
def create_account(self, username, password, role, name, surname, email):
"""Create a new account."""
if username in self.accounts:
LOG.error("'%s' already registered", username)
raise ValueError("%s already registered" % username)
session = Session()
account = TblAccount(username=username,
password=password,
role=role,
name=name,
surname=surname,
email=email)
session.add(account)
session.commit()
self.accounts[account.username] = Account(account.username,
account.password,
account.name,
account.surname,
account.email,
account.role)
def remove_account(self, username):
"""Remove an account."""
if username == 'root':
raise ValueError("Cannot removed root account")
account = Session().query(TblAccount) \
.filter(TblAccount.username == str(username)) \
.first()
if not account:
raise KeyError(username)
session = Session()
session.delete(account)
session.commit()
del self.accounts[username]
to_be_deleted = [x.tenant_id for x in self.tenants.values()
if x.owner == username]
for tenant_id in to_be_deleted:
self.remove_tenant(tenant_id)
def update_account(self, username, request):
"""Update an account."""
account = self.accounts[username]
for param in request:
setattr(account, param, request[param])
def register_app(self, name, init_method, params):
"""Register new component."""
tenant_id = params['tenant_id']
if tenant_id not in self.tenants:
return
if name in self.tenants[tenant_id].components:
LOG.error("'%s' already registered", name)
raise ValueError("%s already registered" % name)
LOG.info("Registering '%s'", name)
self.tenants[tenant_id].components[name] = init_method(**params)
if hasattr(self.tenants[tenant_id].components[name], "start"):
self.tenants[tenant_id].components[name].start()
def register(self, name, init_method, params):
"""Register new component."""
if name in self.components:
LOG.error("'%s' already registered", name)
raise ValueError("%s already registered" % name)
LOG.info("Registering '%s'", name)
self.components[name] = init_method(**params)
if hasattr(self.components[name], "start"):
self.components[name].start()
def unregister_app(self, tenant_id, app_id):
"""Unregister component."""
LOG.info("Unregistering: %s (%s)", app_id, tenant_id)
tenant = self.tenants[tenant_id]
app = tenant.components[app_id]
from empower.core.app import EmpowerApp
if not issubclass(type(app), EmpowerApp):
raise ValueError("Module %s cannot be removed", app_id)
app.stop()
del tenant.components[app_id]
def unregister(self, name):
"""Unregister component."""
LOG.info("Unregistering '%s'", name)
worker = self.components[name]
from empower.core.module import ModuleWorker
if not issubclass(type(worker), ModuleWorker):
raise ValueError("Module %s cannot be removed", name)
to_be_removed = []
for module in self.components[name].modules.values():
to_be_removed.append(module.module_id)
for remove in to_be_removed:
self.components[name].remove_module(remove)
self.components[name].remove_handlers()
del self.components[name]
def get_account(self, username):
"""Load user credential from the username."""
if username not in self.accounts:
return None
return self.accounts[username]
def check_permission(self, username, password):
"""Check if username/password match."""
if username not in self.accounts:
return False
if self.accounts[username].password != password:
return False
return True
def add_tenant(self, owner, desc, tenant_name, bssid_type,
tenant_id=None, plmn_id=None):
"""Create new Tenant."""
if tenant_id in self.tenants:
raise ValueError("Tenant %s exists", tenant_id)
try:
session = Session()
if tenant_id:
request = TblTenant(tenant_id=tenant_id,
tenant_name=tenant_name,
owner=owner,
desc=desc,
bssid_type=bssid_type,
plmn_id=plmn_id)
else:
request = TblTenant(owner=owner,
tenant_name=tenant_name,
desc=desc,
bssid_type=bssid_type,
plmn_id=plmn_id)
session.add(request)
session.commit()
except IntegrityError:
session.rollback()
raise ValueError("Tenant name %s exists", tenant_name)
self.tenants[request.tenant_id] = \
Tenant(request.tenant_id,
request.tenant_name,
self.accounts[owner].username,
desc,
request.bssid_type,
request.plmn_id)
return request.tenant_id
@classmethod
def load_pending_tenant(cls, tenant_id):
"""Load pending tenant request."""
return Session().query(TblPendingTenant) \
.filter(TblPendingTenant.tenant_id == tenant_id) \
.first()
@classmethod
def load_pending_tenants(cls, username=None):
"""Fetch pending tenants requests."""
if username:
return Session().query(TblPendingTenant) \
.filter(TblPendingTenant.owner == username) \
.all()
else:
return Session().query(TblPendingTenant).all()
def request_tenant(self, owner, desc, tenant_name, bssid_type,
tenant_id=None, plmn_id=None):
"""Request new Tenant."""
if tenant_id in self.tenants:
raise ValueError("Tenant %s exists", tenant_id)
if self.load_pending_tenant(tenant_id):
raise ValueError("Tenant %s exists", tenant_id)
try:
session = Session()
if tenant_id:
request = TblPendingTenant(tenant_id=tenant_id,
owner=owner,
tenant_name=tenant_name,
desc=desc,
bssid_type=bssid_type,
plmn_id=plmn_id)
else:
request = TblPendingTenant(owner=owner,
tenant_name=tenant_name,
desc=desc,
bssid_type=bssid_type,
plmn_id=plmn_id)
session.add(request)
session.commit()
except IntegrityError:
session.rollback()
raise ValueError("Tenant name %s exists", tenant_name)
return request.tenant_id
@classmethod
def reject_tenant(cls, tenant_id):
"""Reject previously requested Tenant."""
pending = Session().query(TblPendingTenant) \
.filter(TblPendingTenant.tenant_id == tenant_id) \
.first()
if not pending:
raise KeyError(tenant_id)
session = Session()
session.delete(pending)
session.commit()
def remove_tenant(self, tenant_id):
"""Delete existing Tenant."""
if tenant_id not in self.tenants:
raise KeyError(tenant_id)
tenant = self.tenants[tenant_id]
# remove pnfdev in this tenant
devs = Session().query(TblBelongs) \
.filter(TblBelongs.tenant_id == tenant_id)
for dev in devs:
session = Session()
session.delete(dev)
session.commit()
# remove tenant
del self.tenants[tenant_id]
tenant = Session().query(TblTenant) \
.filter(TblTenant.tenant_id == tenant_id) \
.first()
session = Session()
session.delete(tenant)
session.commit()
# remove running modules
for component in self.components.values():
if not hasattr(component, 'modules'):
continue
to_be_removed = []
for module in component.modules.values():
if module.tenant_id == tenant_id:
to_be_removed.append(module.module_id)
for module_id in to_be_removed:
component.remove_module(module_id)
def load_tenant(self, tenant_name):
"""Load tenant from network name."""
for tenant in self.tenants.values():
if tenant.tenant_name == tenant_name:
return tenant
return None
def load_tenant_by_plmn_id(self, plmn_id):
"""Load tenant from network name."""
for tenant in self.tenants.values():
if tenant.plmn_id == plmn_id:
return tenant
return None
def remove_lvap(self, lvap_addr):
"""Remove LVAP from the network"""
if lvap_addr not in self.lvaps:
return
lvap = self.lvaps[lvap_addr]
if lvap.tenant:
# removing LVAP from tenant, need first to look for right tenant
if lvap.addr in lvap.tenant.lvaps:
LOG.info("Removing %s from tenant %s", lvap.addr, lvap.ssid)
del lvap.tenant.lvaps[lvap.addr]
# Raise LVAP leave event
from empower.lvapp.lvappserver import LVAPPServer
lvapp_server = self.components[LVAPPServer.__module__]
lvapp_server.send_lvap_leave_message_to_self(lvap)
# Reset LVAP
LOG.info("Deleting LVAP (DL+UL): %s", lvap.addr)
lvap.clear_downlink()
lvap.clear_uplink()
del self.lvaps[lvap.addr]
def remove_ue(self, ue_addr):
"""Remove UE from the network"""
if ue_addr not in self.ues:
return
ue = self.ues[ue_addr]
# Raise UE leave event
from empower.vbsp.vbspserver import VBSPServer
vbsp_server = self.components[VBSPServer.__module__]
vbsp_server.send_ue_leave_message_to_self(ue)
# removing UE from tenant, need first to look for right tenant
if ue.addr in ue.tenant.ues:
LOG.info("Removing %s from tenant %u", ue.addr, ue.plmn_id)
del ue.tenant.ues[ue.addr]
del self.ues[ue.addr]
| {
"content_hash": "f79649a8cd5f3c0b28ec619c3dc6ec65",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 78,
"avg_line_length": 30.697142857142858,
"alnum_prop": 0.5217330603127327,
"repo_name": "Panagiotis-Kon/empower-runtime",
"id": "76158ce0eb820260fb4120d140b1ff321bfc2be3",
"size": "22097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empower/core/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67434"
},
{
"name": "HTML",
"bytes": "32485"
},
{
"name": "JavaScript",
"bytes": "4530426"
},
{
"name": "Python",
"bytes": "567426"
}
],
"symlink_target": ""
} |
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import collections
import math
import os
import re
import numpy as np
import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = '<PAD>'
_EOS = '<eos>'
_UNK = '<unk>'
PAD_ID = 0
EOS_ID = 1
UNK_ID = 2
START_VOCAB = [_PAD, _EOS, _UNK]
_DIGIT_RE = re.compile(r'\d')
def _read_words(filename, ptb=True):
""" helper to read the file and split into sentences. """
sentences = []
with codecs.open(filename, "rb", "utf-8") as f:
sentences = f.readlines()
sentences = [sent for sent in sentences if len(sent) > 0]
if ptb:
return [sentence.strip().split() + [_EOS]
for sentence in sentences]
else:
return [sentence.strip().split()
for sentence in sentences]
def read_vocabulary(data_filenames, vocab_size):
# normalize_digits=True):
""" Helper to build the vocabulary from the given filename. It makes use
of python collections.Counter to help counting the occurrences of each word.
"""
lines = []
for filename in data_filenames:
for line in codecs.open(filename, "r", "utf-8"):
lines.append(line)
words = []
for line in lines:
words += line.split()
counter = collections.Counter(words)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
ws, _ = list(zip(*count_pairs))
ws = [w for w in ws if w not in START_VOCAB]
words = START_VOCAB + [w for w in ws if w not in START_VOCAB]
words = words[:vocab_size]
word_dict = dict(zip(words, range(len(words))))
return word_dict
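# Illustrative example (editorial sketch, not part of the original module;
# 'train.txt' is a hypothetical path to any whitespace-tokenized text file):
#
#   vocab = read_vocabulary(['train.txt'], vocab_size=10000)
#   vocab['<PAD>']  # -> 0, the special symbols always take the first ids
#   vocab['<eos>']  # -> 1
#   vocab['<unk>']  # -> 2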
def sentence_to_token_ids(sentence,
vocabulary):
""" Convert a string to list of integers representing token-ids. """
return [vocabulary.get(w, UNK_ID) for w in sentence]
def _data_to_token_ids(data_path,
vocab,
print_info=True):
""" Tokenize data file and turn into token-ids using given the vocabulary. """
dataset = _read_words(data_path)
tokens = [sentence_to_token_ids(sentence, vocab) for sentence in dataset]
n_words = sum([len(tok) for tok in tokens])
if print_info:
n_samples = len(tokens)
print(" # of sentences : {0}".format(n_samples))
return tokens, n_words
def read_lm_data(data_files,
vocabulary,
print_info=True):
"""Read datasets from data_path, build the vocabulary based on the training
dataset and convert words in traning, validation and test sets into
python integers.
"""
train_path = data_files[0]
valid_path = data_files[1]
test_path = data_files[2]
print("\nReading training data from {0}".format(train_path))
train_data, _ = _data_to_token_ids(
train_path, vocabulary, print_info=print_info)
print("\nReading validation data from {0}".format(valid_path))
valid_data, _ = _data_to_token_ids(
valid_path, vocabulary, print_info=print_info)
print("\nReading test data from {0}".format(test_path))
test_data, _ = _data_to_token_ids(
test_path, vocabulary, print_info=print_info)
return train_data, valid_data, test_data
def lm_data_producer(raw_data,
batch_size,
num_steps,
name=None,
dtype=np.int32):
""" Iterate on the given raw data producing samples. """
# we pad or cut the sentences to be of length num_steps
raw_data = [sentence + [PAD_ID] * (num_steps + 1 - len(sentence))
if len(sentence) < num_steps + 1 else sentence[0:(num_steps + 1)]
for sentence in raw_data]
raw_data = np.array(raw_data, dtype=dtype)
data_len = len(raw_data)
epoch_size = int(math.ceil(data_len / batch_size))
data = np.zeros_like(raw_data, dtype=dtype)
for i in range(epoch_size):
dt = raw_data[batch_size * i:batch_size * (i + 1)]
data[batch_size * i:batch_size * (i + 1)] = dt
if len(dt) < batch_size:
shape = (batch_size - len(dt)), num_steps + 1
padding = np.ones(shape) * PAD_ID
data = np.concatenate([data, padding])
xtrain = data[:, 0:num_steps].astype(dtype)
ytrain = data[:, 1:num_steps + 1].astype(dtype)
return xtrain, ytrain
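# Shape note (editorial comment, not in the original source): lm_data_producer
# returns two int32 arrays of shape (ceil(len(raw_data) / batch_size) *
# batch_size, num_steps). Sentences shorter than num_steps + 1 tokens are
# padded with PAD_ID, longer ones are truncated, and ytrain is simply xtrain
# shifted left by one token.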
| {
"content_hash": "8ba7b01ef012e32f5095712f24f4df10",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 80,
"avg_line_length": 29.107382550335572,
"alnum_prop": 0.632234263315656,
"repo_name": "giancds/attentive_lm",
"id": "1b65f30a8390502c42b5125371a038a43e97b33f",
"size": "4361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62236"
},
{
"name": "Shell",
"bytes": "1187"
}
],
"symlink_target": ""
} |
import os
import json
import hashlib
from threading import Lock
from ethereum import _solidity
from ethereum.abi import event_id, normalize_name, ContractTranslator
from raiden.utils import get_contract_path
__all__ = (
'CONTRACT_MANAGER',
'CONTRACT_CHANNEL_MANAGER',
'CONTRACT_ENDPOINT_REGISTRY',
'CONTRACT_HUMAN_STANDARD_TOKEN',
'CONTRACT_NETTING_CHANNEL',
'CONTRACT_REGISTRY',
'EVENT_CHANNEL_NEW',
'EVENT_CHANNEL_NEW_BALANCE',
'EVENT_CHANNEL_CLOSED',
'EVENT_CHANNEL_SECRET_REVEALED',
'EVENT_CHANNEL_SETTLED',
'EVENT_TOKEN_ADDED',
)
CONTRACT_CHANNEL_MANAGER = 'channel_manager'
CONTRACT_ENDPOINT_REGISTRY = 'endpoint_registry'
CONTRACT_HUMAN_STANDARD_TOKEN = 'human_standard_token'
CONTRACT_NETTING_CHANNEL = 'netting_channel'
CONTRACT_REGISTRY = 'registry'
EVENT_CHANNEL_NEW = 'ChannelNew'
EVENT_CHANNEL_NEW_BALANCE = 'ChannelNewBalance'
EVENT_CHANNEL_CLOSED = 'ChannelClosed'
EVENT_CHANNEL_SECRET_REVEALED = 'ChannelSecretRevealed'
EVENT_CHANNEL_SETTLED = 'ChannelSettled'
EVENT_TOKEN_ADDED = 'TokenAdded'
def get_event(full_abi, event_name):
for description in full_abi:
name = description.get('name')
# skip constructors
if name is None:
continue
normalized_name = normalize_name(name)
if normalized_name == event_name:
return description
def get_eventname_types(event_description):
if 'name' not in event_description:
raise ValueError('Not an event description, missing the name.')
name = normalize_name(event_description['name'])
encode_types = [
element['type']
for element in event_description['inputs']
]
return name, encode_types
def get_static_or_compile(
contract_path,
contract_name,
**compiler_flags):
"""Search the path of `contract_path` for a file with the same name and the
extension `.static-abi.json`. If the file exists, and the recorded checksum
matches, this will return the precompiled contract, otherwise it will
compile it.
Writing compiled contracts to the desired file and path happens only when
the environment variable `STORE_PRECOMPILED` is set (to whatever value).
Users are not expected to ever set this value, the functionality is exposed
through the `setup.py compile_contracts` command.
Args:
contract_path (str): the path of the contract file
contract_name (str): the contract name
**compiler_flags (dict): flags that will be passed to the compiler
"""
# this will be set by `setup.py compile_contracts`
store_updated = os.environ.get('STORE_PRECOMPILED', False)
precompiled = None
precompiled_path = '{}.static-abi.json'.format(contract_path)
try:
with open(precompiled_path) as f:
precompiled = json.load(f)
except IOError:
pass
if precompiled or store_updated:
checksum = contract_checksum(contract_path)
if precompiled and precompiled['checksum'] == checksum:
return precompiled
if _solidity.get_solidity() is None:
raise RuntimeError("The solidity compiler, `solc`, is not available.")
compiled = _solidity.compile_contract(
contract_path,
contract_name,
combined='abi',
optimize=False
)
if store_updated:
compiled['checksum'] = checksum
with open(precompiled_path, 'w') as f:
json.dump(compiled, f)
print("'{}' written".format(precompiled_path))
return compiled
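# Illustrative usage (editorial sketch, not in the original source):
#
#   compiled = get_static_or_compile(
#       get_contract_path('Registry.sol'), 'Registry', combined='abi')
#   abi = compiled['abi']
#
# When the STORE_PRECOMPILED environment variable is set, the compiled ABI is
# also written next to the contract as 'Registry.sol.static-abi.json' together
# with the source checksum, which is what later runs validate against.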
def contract_checksum(contract_path):
with open(contract_path) as f:
checksum = hashlib.sha1(f.read()).hexdigest()
return checksum
class ContractManager():
def __init__(self):
self.is_instantiated = False
self.lock = Lock()
self.event_to_contract = dict(
ChannelNew=CONTRACT_CHANNEL_MANAGER,
ChannelNewBalance=CONTRACT_NETTING_CHANNEL,
ChannelClosed=CONTRACT_NETTING_CHANNEL,
ChannelSecretRevealed=CONTRACT_NETTING_CHANNEL,
ChannelSettled=CONTRACT_NETTING_CHANNEL,
TokenAdded=CONTRACT_REGISTRY,
)
def instantiate(self):
with self.lock:
if self.is_instantiated:
return
self.human_standard_token_compiled = get_static_or_compile(
get_contract_path('HumanStandardToken.sol'),
'HumanStandardToken',
combined='abi',
)
self.channel_manager_compiled = get_static_or_compile(
get_contract_path('ChannelManagerContract.sol'),
'ChannelManagerContract',
combined='abi',
)
self.endpoint_registry_compiled = get_static_or_compile(
get_contract_path('EndpointRegistry.sol'),
'EndpointRegistry',
combined='abi',
)
self.netting_channel_compiled = get_static_or_compile(
get_contract_path('NettingChannelContract.sol'),
'NettingChannelContract',
combined='abi',
)
self.registry_compiled = get_static_or_compile(
get_contract_path('Registry.sol'),
'Registry',
combined='abi',
)
self.is_instantiated = True
def get_abi(self, contract_name):
self.instantiate()
compiled = getattr(self, '{}_compiled'.format(contract_name))
return compiled['abi']
def get_event_id(self, event_name):
""" Not really generic, as it maps event names to events of specific contracts,
but it is good enough for what we want to accomplish.
"""
event = get_event(self.get_abi(self.event_to_contract[event_name]), event_name)
return event_id(*get_eventname_types(event))
def get_translator(self, contract_name):
return ContractTranslator(self.get_abi(contract_name))
CONTRACT_MANAGER = ContractManager()
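# Illustrative usage of the module-level singleton (editorial sketch, not in
# the original source):
#
#   abi = CONTRACT_MANAGER.get_abi(CONTRACT_REGISTRY)
#   translator = CONTRACT_MANAGER.get_translator(CONTRACT_NETTING_CHANNEL)
#   topic = CONTRACT_MANAGER.get_event_id(EVENT_TOKEN_ADDED)
#
# Compilation is deferred until the first of these calls because instantiate()
# is lazy and guarded by a lock.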
| {
"content_hash": "b8df698bd36086c802d62f7a14edc8f1",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 87,
"avg_line_length": 31.659685863874344,
"alnum_prop": 0.6389945427484703,
"repo_name": "tomashaber/raiden",
"id": "6697032082909b27f5238c4171e322b176f2dab8",
"size": "6071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raiden/blockchain/abi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4536"
},
{
"name": "HTML",
"bytes": "21998"
},
{
"name": "JavaScript",
"bytes": "1996"
},
{
"name": "Makefile",
"bytes": "5184"
},
{
"name": "Python",
"bytes": "1222610"
},
{
"name": "Shell",
"bytes": "4570"
},
{
"name": "TypeScript",
"bytes": "75150"
}
],
"symlink_target": ""
} |
from miniworld.model.network.backends.bridged.iproute2 import IPRoute2Commands
from miniworld.model.singletons.Singletons import singletons
from miniworld.model.network.backends.bridged.Connection import Connection
def ConnectionIproute2():
class ConnectionIproute2(Connection()):
def _tap_link_up(self, tap_x, tap_y, up=True):
cmd = IPRoute2Commands.get_interface_up_cmd(tap_x, state_down=not up)
self.add_shell_command(self.EVENT_CONN_STATE_CHANGE, cmd)
# remember that the device is up (or down)
singletons.network_backend.connection_book_keeper.interface_states.toggle_state(tap_x, up)
return ConnectionIproute2
| {
"content_hash": "25d6f0bb59917e3a5fc3ee60277ec428",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 102,
"avg_line_length": 42.8125,
"alnum_prop": 0.7372262773722628,
"repo_name": "miniworld-project/miniworld_core",
"id": "ebf0b23a9b78b72f5bb01b1a2460b54603af8e42",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniworld/model/network/backends/bridged/iproute2/Connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "696934"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
} |
"""
Support gathering system information of hosts which are running Glances.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.glances/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, STATE_UNKNOWN, CONF_NAME, CONF_RESOURCES)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'api/2/all'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Glances'
DEFAULT_PORT = '61208'
SENSOR_TYPES = {
'disk_use_percent': ['Disk Use', '%'],
'disk_use': ['Disk Use', 'GiB'],
'disk_free': ['Disk Free', 'GiB'],
'memory_use_percent': ['RAM Use', '%'],
'memory_use': ['RAM Use', 'MiB'],
'memory_free': ['RAM Free', 'MiB'],
'swap_use_percent': ['Swap Use', '%'],
'swap_use': ['Swap Use', 'GiB'],
'swap_free': ['Swap Free', 'GiB'],
'processor_load': ['CPU Load', None],
'process_running': ['Running', None],
'process_total': ['Total', None],
'process_thread': ['Thread', None],
'process_sleeping': ['Sleeping', None]
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_RESOURCES, default=['disk_use']):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
# Return cached results if last scan was less than this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Glances sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
url = 'http://{}:{}/{}'.format(host, port, _RESOURCE)
var_conf = config.get(CONF_RESOURCES)
try:
response = requests.get(url, timeout=10)
if not response.ok:
_LOGGER.error('Response status is "%s"', response.status_code)
return False
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to resource/endpoint: %s", url)
return False
rest = GlancesData(url)
dev = []
for resource in var_conf:
dev.append(GlancesSensor(rest, name, resource))
add_devices(dev)
class GlancesSensor(Entity):
"""Implementation of a Glances sensor."""
def __init__(self, rest, name, sensor_type):
"""Initialize the sensor."""
self.rest = rest
self._name = name
self.type = sensor_type
self._state = STATE_UNKNOWN
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
"""The name of the sensor."""
if self._name is None:
return SENSOR_TYPES[self.type][0]
else:
return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0])
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
# pylint: disable=too-many-branches, too-many-return-statements
@property
def state(self):
"""Return the state of the resources."""
value = self.rest.data
if value is not None:
if self.type == 'disk_use_percent':
return value['fs'][0]['percent']
elif self.type == 'disk_use':
return round(value['fs'][0]['used'] / 1024**3, 1)
elif self.type == 'disk_free':
try:
return round(value['fs'][0]['free'] / 1024**3, 1)
except KeyError:
return round((value['fs'][0]['size'] -
value['fs'][0]['used']) / 1024**3, 1)
elif self.type == 'memory_use_percent':
return value['mem']['percent']
elif self.type == 'memory_use':
return round(value['mem']['used'] / 1024**2, 1)
elif self.type == 'memory_free':
return round(value['mem']['free'] / 1024**2, 1)
elif self.type == 'swap_use_percent':
return value['memswap']['percent']
elif self.type == 'swap_use':
return round(value['memswap']['used'] / 1024**3, 1)
elif self.type == 'swap_free':
return round(value['memswap']['free'] / 1024**3, 1)
elif self.type == 'processor_load':
return value['load']['min15']
elif self.type == 'process_running':
return value['processcount']['running']
elif self.type == 'process_total':
return value['processcount']['total']
elif self.type == 'process_thread':
return value['processcount']['thread']
elif self.type == 'process_sleeping':
return value['processcount']['sleeping']
def update(self):
"""Get the latest data from REST API."""
self.rest.update()
# pylint: disable=too-few-public-methods
class GlancesData(object):
"""The class for handling the data retrieval."""
def __init__(self, resource):
"""Initialize the data object."""
self._resource = resource
self.data = dict()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the Glances REST API."""
try:
response = requests.get(self._resource, timeout=10)
self.data = response.json()
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to host/endpoint: %s", self._resource)
self.data = None
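# Editorial note (not in the original source): the Throttle decorator caches
# the REST payload for MIN_TIME_BETWEEN_UPDATES (60 s), so all GlancesSensor
# entities sharing one GlancesData object trigger at most one HTTP request
# per interval.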
| {
"content_hash": "3aef54aab38782a213d3e04284eb347d",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 74,
"avg_line_length": 34.94736842105263,
"alnum_prop": 0.5900267737617135,
"repo_name": "leoc/home-assistant",
"id": "51a8ac4d46fa996b16c27cec08f6a1f1a76fb48a",
"size": "5976",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/glances.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1366220"
},
{
"name": "Python",
"bytes": "3636900"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
from .brane import Brane
from .tracks import Target
| {
"content_hash": "3a16e9505f3e39bf083a6e49845348ab",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 26,
"avg_line_length": 26,
"alnum_prop": 0.8076923076923077,
"repo_name": "krosenfeld/scatterbrane",
"id": "d42db6d4750d8763f47d114261a210fa35938713",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scatterbrane/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40405"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from tensorflow.core.framework import summary_pb2
import matplotlib
import matplotlib.pyplot as plt
import itertools
def add_summary(tf_writer, tag, raw_value, global_step):
value = summary_pb2.Summary.Value(tag=tag, simple_value=raw_value)
summary = summary_pb2.Summary(value=[value])
tf_writer.add_summary(summary, global_step)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, init_lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = init_lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def mean_average_precision(outputs, targets, num_classes=101):
maps = np.zeros((num_classes, ))
for c in range(num_classes):
target = (targets == c)
output = outputs[:, c]
maps[c] = average_precision_score(target, output)
#preds = np.argmax(outputs, axis=1)
#conf_matrix = confusion_matrix(preds, targets)
return np.mean(maps)*100.0
def conf_matrix(outputs, targets, num_classes=101):
preds = np.argmax(outputs, axis=1)
conf_matrix = confusion_matrix(targets, preds)
return conf_matrix
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues,
image_name='confusion_matrix.png'):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
    if normalize:
        # Normalize before plotting so the displayed matrix matches the printed one.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90, fontproperties=matplotlib.font_manager.FontProperties(size=8))
    plt.yticks(tick_marks, classes, fontproperties=matplotlib.font_manager.FontProperties(size=8))
thresh = cm.max() / 2.
#for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, cm[i, j],
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black",
# fontproperties=matplotlib.font_manager.FontProperties(size='x-small') )
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(image_name) | {
"content_hash": "af2d0d23f82698b04bd450eb1515b498",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 111,
"avg_line_length": 32.970873786407765,
"alnum_prop": 0.6484098939929329,
"repo_name": "UCSB-VRL/action_recognition",
"id": "eb0c4048c0d7521a931e5f0db83a81fb7d624a3d",
"size": "3396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "2264"
},
{
"name": "Python",
"bytes": "33764"
}
],
"symlink_target": ""
} |
import click
from functional import seq
from collections import namedtuple
Edge = namedtuple('Edge', 'u v cap')
@click.command()
@click.argument('source_file')
@click.argument('destination_file')
def cli(source_file, destination_file):
with open(source_file, 'r') as f:
meta = f.readline().split()
n_vertexes = meta[2]
source = int(f.readline().split()[1])
sink = int(f.readline().split()[1])
edges = seq(f.readlines())\
.map(str.split)\
.filter(lambda e: len(e) == 4)\
.map(lambda e: Edge(int(e[1]), int(e[2]), int(e[3])))\
.filter(lambda e: e.cap != 0).cache()
vertex_map = edges.flat_map(lambda e: (e.u, e.v)).distinct().zip_with_index().to_dict()
source = vertex_map[source]
sink = vertex_map[sink]
new_edges = edges.map(
lambda e: "a {0} {1} {2}".format(vertex_map[e.u], vertex_map[e.v], e.cap))
with open(destination_file, 'w') as o:
print('p max {0} {1}'.format(n_vertexes, new_edges.len()), file=o)
print('n {0} s'.format(source), file=o)
print('n {0} t'.format(sink), file=o)
for e in new_edges:
print(e, file=o)
if __name__ == '__main__':
cli()
| {
"content_hash": "22e90b372180ea0067f236afeab91128",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 95,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5536858974358975,
"repo_name": "EntilZha/max_flow",
"id": "866c698e926b48599fdc7ca900c97c8d9ac97c06",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relabel_dimacs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6098"
},
{
"name": "Rust",
"bytes": "22972"
},
{
"name": "Shell",
"bytes": "426"
},
{
"name": "TeX",
"bytes": "69223"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='pg-table-markdown',
version='1.0.4',
author='Vokal',
author_email='pypi@vokal.io',
description='A command line tool that generates markdown documentation for Postgres tables in a given schema',
url='https://github.com/vokal/pg-table-markdown',
packages=['pg_table_markdown'],
py_modules=['pg_table_markdown'],
install_requires=[
'Click==6.2',
'psycopg2==2.6.1',
],
entry_points='''
[console_scripts]
pgtablemd=pg_table_markdown:cli
''',
)
| {
"content_hash": "7ece47019db7ddb9594179e61fad4fa5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 114,
"avg_line_length": 26.80952380952381,
"alnum_prop": 0.6269982238010657,
"repo_name": "projectweekend/pg-table-markdown",
"id": "58ce9ffed2215cbffe2760e93d2dc67c64ed6ed9",
"size": "563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8466"
}
],
"symlink_target": ""
} |
from allauth.account.models import EmailAddress
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import (ProviderAccount,
AuthAction)
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount.app_settings import QUERY_EMAIL
class Scope(object):
EMAIL = 'email'
PROFILE = 'profile'
class GoogleAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('link')
def get_avatar_url(self):
return self.account.extra_data.get('picture')
def to_str(self):
dflt = super(GoogleAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class GoogleProvider(OAuth2Provider):
id = 'google'
name = 'Google'
account_class = GoogleAccount
def get_default_scope(self):
scope = [Scope.PROFILE]
if QUERY_EMAIL:
scope.append(Scope.EMAIL)
return scope
def get_auth_params(self, request, action):
ret = super(GoogleProvider, self).get_auth_params(request,
action)
if action == AuthAction.REAUTHENTICATE:
ret['prompt'] = 'select_account'
return ret
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return dict(email=data.get('email'),
last_name=data.get('family_name'),
first_name=data.get('given_name'))
def extract_email_addresses(self, data):
ret = []
email = data.get('email')
if email and data.get('verified_email'):
ret.append(EmailAddress(email=email,
verified=True,
primary=True))
return ret
providers.registry.register(GoogleProvider)
| {
"content_hash": "9fd0b1ff0e3ad370decb2cb212069d65",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 30.887096774193548,
"alnum_prop": 0.6067885117493472,
"repo_name": "nimbis/django-allauth",
"id": "0e66930adcb25a5aa2cb98047548ed37ea9448f9",
"size": "1915",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/google/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42101"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "581551"
}
],
"symlink_target": ""
} |
'Parse the main CHANGELOG.md from stdin, outputting the Ubuntu changelog on stdout'
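# Illustrative input format assumed by the parser below (example values, not taken from the original file):
#   ## 0.6.1 (2013-08-23)
#   + Some change description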
import sys,re, datetime
on_block=False
for line in sys.stdin.readlines():
line = line.strip()
if line.startswith('# ') or len(line) == 0:
continue
if line.startswith('## '):
if on_block:
print '\n -- dotCloud <ops@dotcloud.com> {0}\n'.format(date)
version, date = line[3:].split()
date = datetime.datetime.strptime(date, '(%Y-%m-%d)').strftime(
'%a, %d %b %Y 00:00:00 -0700')
on_block = True
print 'lxc-docker ({0}-1) precise; urgency=low'.format(version)
continue
if on_block:
print ' ' + line
print '\n -- dotCloud <ops@dotcloud.com> {0}'.format(date)
| {
"content_hash": "da940172459d733c1d0e3f0fb9b5e0a1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 35.285714285714285,
"alnum_prop": 0.5843454790823212,
"repo_name": "DuCalixte/docker",
"id": "d19a3424e1bb2cfab3bf7e69581e5eb438d95407",
"size": "764",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "packaging/ubuntu/parse_changelog.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from camera import StereoCamera
if __name__ == "__main__":
print "testing stereo camera..."
cam = StereoCamera("/dev/video0", "/dev/video1", (320,240))
print "click.."
cam.capture("/home/pi/fishpi/imgs")
print "click.."
cam.capture("/home/pi/fishpi/imgs")
print "click.."
cam.capture("/home/pi/fishpi/imgs")
cam = "done"
print "and in YUV..."
cam = StereoCamera("/dev/video0", "/dev/video1", (320,240), "YUV")
print "click.."
cam.capture("/home/pi/fishpi/imgs")
cam = "done"
print "and in HSV..."
cam = StereoCamera("/dev/video0", "/dev/video1", (320,240), "HSV")
print "click.."
cam.capture("/home/pi/fishpi/imgs")
cam = "done"
| {
"content_hash": "8dd82b2e7963b6500645e9b7e97b8d22",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 70,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.5786516853932584,
"repo_name": "FishPi/FishPi-POCV---Command---Control",
"id": "a13130632fac3d875bdb7117808f1070f4a24afa",
"size": "819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fishpi/sensor/test_stereo_camera.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "420442"
},
{
"name": "Shell",
"bytes": "3066"
}
],
"symlink_target": ""
} |
from setuptools import setup
import sys
setup(
name='iron-worker',
py_modules=["iron_worker"],
packages=["testDir"],
version='1.3.8',
install_requires=["iron_core >= 1.1.0", "python-dateutil"],
description='The Python client for IronWorker, a cloud service for background processing.',
author='Iron.io',
author_email="support@iron.io",
url='https://www.github.com/iron-io/iron_worker_python',
keywords=['iron', 'ironio', 'iron.io', 'iron-io', 'ironworker', 'iron-worker', 'iron_worker', 'worker', 'cloud',
'task queue', 'background processing'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description="""IronWorker Python Library
-------------------------
This package offers a client interface to the Iron.io IronWorker service. It
offers a full, native interface to the IronWorker API, including creating
and uploading code packages, queuing and scheduling tasks, viewing task logs,
and more.
IronWorker is a background processing and task queuing system that lets your
applications use the cloud to do their heavy lifting. Find out more at
http://www.iron.io/products/worker.""",
)
| {
"content_hash": "3f7478ce00d4b8b281bd0d77ae42f1e9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 116,
"avg_line_length": 39.85,
"alnum_prop": 0.6537013801756587,
"repo_name": "iron-io/iron_worker_python",
"id": "88a986e10dcddee56ec5e8a64cf8e3aeecf7fb25",
"size": "1594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "31501"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import json
import os
import random
import string
from contextlib import contextmanager
import pytest
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.microsoft.azure.hooks.fileshare import AzureFileShareHook
from airflow.utils.process_utils import patch_environ
from tests.test_utils import AIRFLOW_MAIN_FOLDER
from tests.test_utils.system_tests_class import SystemTest
AZURE_DAG_FOLDER = os.path.join(
AIRFLOW_MAIN_FOLDER, "airflow", "providers", "microsoft", "azure", "example_dags"
)
WASB_CONNECTION_ID = os.environ.get("WASB_CONNECTION_ID", "wasb_default")
DATA_LAKE_CONNECTION_ID = os.environ.get("AZURE_DATA_LAKE_CONNECTION_ID", 'azure_data_lake_default')
DATA_LAKE_CONNECTION_TYPE = os.environ.get("AZURE_DATA_LAKE_CONNECTION_TYPE", 'azure_data_lake')
@contextmanager
def provide_wasb_default_connection(key_file_path: str):
"""
Context manager to provide a temporary value for wasb_default connection
:param key_file_path: Path to file with wasb_default credentials .json file.
"""
if not key_file_path.endswith(".json"):
raise AirflowException("Use a JSON key file.")
with open(key_file_path) as credentials:
creds = json.load(credentials)
conn = Connection(
conn_id=WASB_CONNECTION_ID,
conn_type="wasb",
host=creds.get("host", None),
login=creds.get("login", None),
password=creds.get("password", None),
extra=json.dumps(creds.get('extra', None)),
)
with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
yield
@contextmanager
def provide_azure_data_lake_default_connection(key_file_path: str):
"""
Context manager to provide a temporary value for azure_data_lake_default connection
:param key_file_path: Path to file with azure_data_lake_default credentials .json file.
"""
required_fields = {'login', 'password', 'extra'}
if not key_file_path.endswith(".json"):
raise AirflowException("Use a JSON key file.")
with open(key_file_path) as credentials:
creds = json.load(credentials)
missing_keys = required_fields - creds.keys()
if missing_keys:
message = f"{missing_keys} fields are missing"
raise AirflowException(message)
conn = Connection(
conn_id=DATA_LAKE_CONNECTION_ID,
conn_type=DATA_LAKE_CONNECTION_TYPE,
host=creds.get("host", None),
login=creds.get("login", None),
password=creds.get("password", None),
extra=json.dumps(creds.get('extra', None)),
)
with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
yield
@contextmanager
def provide_azure_fileshare(share_name: str, azure_fileshare_conn_id: str, file_name: str, directory: str):
AzureSystemTest.prepare_share(
share_name=share_name,
azure_fileshare_conn_id=azure_fileshare_conn_id,
file_name=file_name,
directory=directory,
)
yield
AzureSystemTest.delete_share(share_name=share_name, azure_fileshare_conn_id=azure_fileshare_conn_id)
@pytest.mark.system("azure")
class AzureSystemTest(SystemTest):
@classmethod
def create_share(cls, share_name: str, azure_fileshare_conn_id: str):
hook = AzureFileShareHook(azure_fileshare_conn_id=azure_fileshare_conn_id)
hook.create_share(share_name)
@classmethod
def delete_share(cls, share_name: str, azure_fileshare_conn_id: str):
hook = AzureFileShareHook(azure_fileshare_conn_id=azure_fileshare_conn_id)
hook.delete_share(share_name)
@classmethod
def create_directory(cls, share_name: str, azure_fileshare_conn_id: str, directory: str):
hook = AzureFileShareHook(azure_fileshare_conn_id=azure_fileshare_conn_id)
hook.create_directory(share_name=share_name, directory_name=directory)
@classmethod
def upload_file_from_string(
cls,
string_data: str,
share_name: str,
azure_fileshare_conn_id: str,
file_name: str,
directory: str,
):
hook = AzureFileShareHook(azure_fileshare_conn_id=azure_fileshare_conn_id)
hook.load_string(
string_data=string_data,
share_name=share_name,
directory_name=directory,
file_name=file_name,
)
@classmethod
def prepare_share(cls, share_name: str, azure_fileshare_conn_id: str, file_name: str, directory: str):
"""
        Create a share with a file in the given directory. If directory is None, the file is placed in the root directory.
"""
cls.create_share(share_name=share_name, azure_fileshare_conn_id=azure_fileshare_conn_id)
cls.create_directory(
share_name=share_name, azure_fileshare_conn_id=azure_fileshare_conn_id, directory=directory
)
string_data = "".join(random.choice(string.ascii_letters) for _ in range(1024))
cls.upload_file_from_string(
string_data=string_data,
share_name=share_name,
azure_fileshare_conn_id=azure_fileshare_conn_id,
file_name=file_name,
directory=directory,
)
| {
"content_hash": "8e2df2e3a7d0fc7c6654d25e032c5c7a",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 107,
"avg_line_length": 37.25,
"alnum_prop": 0.67535953978907,
"repo_name": "cfei18/incubator-airflow",
"id": "752115ce5f2c540e9e8df48f2ff68f3067dc41bf",
"size": "6000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils/azure_system_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
import sh
from sh_verbose import ShVerbose
def get_git(path=None):
return sh.git.bake(_tty_out=False, _cwd=path)
def get_grep():
return sh.grep.bake(_tty_out=False)
class OriginalBranch(object):
def __init__(self, git=None):
self.git = git or get_git()
self.original_branch = None
def __enter__(self):
self.original_branch = git_current_branch(self.git)
return self.original_branch
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.git.checkout(self.original_branch)
except Exception as err:
print "cannot checkout '{}': {}".format(self.original_branch, err)
def git_current_branch(git=None):
git = git or get_git()
grep = get_grep()
branch = grep(git.branch(), '^* ').strip()[2:]
if branch.startswith('('):
branch = git.log('--pretty=oneline', n=1).strip().split(' ')[0]
return branch
def git_submodules(git=None):
git = git or get_git()
submodules = []
for line in git.submodule().split('\n')[:-1]:
path = line[1:].split()[1]
submodules.append(path)
return submodules
def git_check_merge(branch1, branch2, git=None):
"""
returns True if branch1 would auto-merge cleanly into branch2,
False if the merge requires human assistance
Thanks to http://stackoverflow.com/a/501461/240553
"""
git = git or get_git()
with ShVerbose(False):
orig_branch = git_current_branch(git)
git.checkout(branch2)
is_behind = git.log('{0}..{1}'.format(branch2, branch1),
max_count=1).strip()
if is_behind:
try:
git.merge('--no-commit', '--no-ff', branch1).strip()
except sh.ErrorReturnCode_1:
# git merge returns 1 when there's a conflict
return False
else:
return True
finally:
git.merge('--abort')
git.checkout(orig_branch)
else:
return True
def git_bisect_merge_conflict(branch1, branch2, git=None):
"""
return the branch2 commit that prevents branch1 from being merged in
"""
git = git or get_git()
grep = get_grep()
with OriginalBranch(git):
try:
base = git('merge-base', branch1, branch2).strip()
if git_check_merge(branch1, branch2, git):
return None
assert git_check_merge(branch1, base, git)
git.bisect('reset')
txt = git.bisect('start', branch2, base, '--')
while 'is the first bad commit' not in txt:
commit = git_current_branch(git)
if git_check_merge(branch1, commit, git):
txt = git.bisect('good')
else:
txt = git.bisect('bad')
return grep(txt, '^commit ').strip().split(' ')[-1]
finally:
git.bisect('reset')
def _left_pad(padding, text):
return padding + ('\n' + padding).join(text.split('\n'))
def print_one_way_merge_details(branch1, branch2, git):
commit = git_bisect_merge_conflict(branch1, branch2, git)
if commit:
print ' * First conflicting commit on {0}:\n'.format(branch2)
print _left_pad(' ' * 4, git.log('-n1', commit))
else:
print ' * No conflicting commits on {0}'.format(branch2)
def print_merge_details(branch1, branch2, git):
print_one_way_merge_details(branch1, branch2, git)
print_one_way_merge_details(branch2, branch1, git)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
options = ['show-conflict']
try:
option = args.pop(0)
except IndexError:
option = None
if option == 'show-conflict':
if len(args) == 2:
print_merge_details(*args, git=get_git())
else:
print ('usage: python scripts/gitutils.py '
'show-conflict <branch1> <branch2>')
else:
print 'usage: python scripts/gitutils.py <command> [args...]\n'
print 'Available commands:'
print _left_pad(' ', '\n'.join(options))
| {
"content_hash": "d210190ac1b3f9403501f31ad52d4b0a",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 30.335766423357665,
"alnum_prop": 0.563041385948027,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "af84108dbe127ee2c06b5665ca9e1c8ce7be3a19",
"size": "4156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/gitutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('/notebooks')
import wave
import re
import struct
import glob
import params as par
from scipy import fromstring, int16
import numpy as np
import os.path
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import Callback
from keras import backend as K
test_files = glob.glob(par.base_data + 'input/*.wav')
print(test_files)
#test_files = test_files[5]
def get_dataset(filename):
wavfile = filename
wr = wave.open(wavfile, "rb")
origin = wr.readframes(wr.getnframes())
data = origin[:par.fr * 4 * 60]
wr.close()
X = np.frombuffer(data, dtype="int16")/ (par.bit_depth * 1.0)
X = np.reshape(X, (-1, par.l1_input_length, par.l1_channel_size))
#print(X.shape)
#print(len(X))
return X
import l3_model as level3
for test_file in test_files:
data = get_dataset(test_file)
temp = level3.encoder([data])
res = level3.decoder([temp[0]])
row_data = np.array(res)
mdata = np.reshape(res, (-1)) * par.bit_depth
mdata = mdata.astype('int16')
savename = re.sub('.*\/', '', test_file)
savename = re.sub('.*\\\\', '', savename)
outf = par.base_data + 'output/' + savename
outd = struct.pack("h" * len(mdata), *mdata)
ww = wave.open(outf, 'w')
ww.setnchannels(2)
ww.setsampwidth(2)
ww.setframerate(par.fr)
ww.writeframes(outd)
ww.close()
| {
"content_hash": "434ccbb6554511d0a7b3938916adb1b5",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 27.80392156862745,
"alnum_prop": 0.6558533145275035,
"repo_name": "niisan-tokyo/music_generator",
"id": "09441da049385db82953d8f418787e4b4474a2bd",
"size": "1442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/conv1d/autoencode_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "69"
},
{
"name": "Python",
"bytes": "46114"
}
],
"symlink_target": ""
} |
"""Support for Matrix notifications."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (ATTR_TARGET, PLATFORM_SCHEMA,
BaseNotificationService,
ATTR_MESSAGE)
_LOGGER = logging.getLogger(__name__)
CONF_DEFAULT_ROOM = 'default_room'
DOMAIN = 'matrix'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEFAULT_ROOM): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Get the Matrix notification service."""
return MatrixNotificationService(config.get(CONF_DEFAULT_ROOM))
class MatrixNotificationService(BaseNotificationService):
"""Send Notifications to a Matrix Room."""
def __init__(self, default_room):
"""Set up the notification service."""
self._default_room = default_room
def send_message(self, message="", **kwargs):
"""Send the message to the matrix server."""
target_rooms = kwargs.get(ATTR_TARGET) or [self._default_room]
service_data = {
ATTR_TARGET: target_rooms,
ATTR_MESSAGE: message
}
return self.hass.services.call(
DOMAIN, 'send_message', service_data=service_data)
| {
"content_hash": "c7c269eb9a638d25a750f716d6e1bd78",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 30.674418604651162,
"alnum_prop": 0.6413949962092494,
"repo_name": "auduny/home-assistant",
"id": "de2ac3bda2a0b07de0ba3f2dfdc06c63b148e992",
"size": "1319",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/matrix/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15129018"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
"""Test mempool limiting together/eviction with the wallet."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class MempoolLimitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
"-maxmempool=5",
"-spendzeroconfchange=0",
]]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        self.log.info('Check that mempoolminfee is minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
utxos = create_confirmed_utxos(self, relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
self.log.info('The tx should be evicted by now')
assert txid not in self.nodes[0].getrawmempool()
txdata = self.nodes[0].gettransaction(txid)
assert txdata['confirmations'] == 0 #confirmation should still be 0
        self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
self.log.info('Create a mempool tx that will not pass mempoolminfee')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
# specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex'])
if __name__ == '__main__':
MempoolLimitTest().main()
| {
"content_hash": "6f0a553ad3c241775d1897ead2ada6c3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 166,
"avg_line_length": 46.94285714285714,
"alnum_prop": 0.6463785757760194,
"repo_name": "yenliangl/bitcoin",
"id": "1b1ac23024782c585c18215ef570f85fcdd931ba",
"size": "3500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/mempool_limit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "694312"
},
{
"name": "C++",
"bytes": "6161382"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198099"
},
{
"name": "Makefile",
"bytes": "118152"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1537476"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "90713"
}
],
"symlink_target": ""
} |
"""
This subpackage contains developer-oriented utilities used by Astropy.
Public functions and classes in this subpackage are safe to be used by other
packages, but this subpackage is for utilities that are primarily of use for
developers or to implement python hacks.
This subpackage also includes the ``astropy.utils.compat`` package,
which houses utilities that provide compatibility and bugfixes across
all versions of Python that Astropy supports. However, the content of this
module is solely for internal use of ``astropy`` and subject to changes
without deprecations. Do not use it in external packages or code.
"""
from .codegen import * # noqa
from .decorators import * # noqa
from .introspection import * # noqa
from .misc import * # noqa
from .shapes import * # noqa
| {
"content_hash": "d6200975aaca49c6ad701794e6c6f538",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 39.45,
"alnum_prop": 0.7807351077313055,
"repo_name": "saimn/astropy",
"id": "04e269bc8c19ca85c6aac1bcc919ebb5c3395ff6",
"size": "853",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "astropy/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12214998"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
"""
A lightweight Python WMI module wrapper built on top of `pywin32` and `win32com` extensions.
**Specifications**
* Based on top of the `pywin32` and `win32com` third party extensions only
* Compatible with `Raw`* and `Formatted` Performance Data classes
* Dynamically resolve properties' counter types
* Hold the previous/current `Raw` samples to compute/format new values*
* Fast and lightweight
    * Avoid query overhead
* Cache connections and qualifiers
* Use `wbemFlagForwardOnly` flag to improve enumeration/memory performance
*\* `Raw` data formatting relies on the availability of the corresponding calculator.
Please refer to `checks.lib.wmi.counter_type` for more information*
Original discussion thread: https://github.com/DataDog/dd-agent/issues/1952
Credits to @TheCloudlessSky (https://github.com/TheCloudlessSky)
"""
# stdlib
from copy import deepcopy
from itertools import izip
import pywintypes
# 3p
import pythoncom
from win32com.client import Dispatch
# project
from checks.libs.wmi.counter_type import get_calculator, get_raw, UndefinedCalculator
from utils.timeout import timeout, TimeoutException
class CaseInsensitiveDict(dict):
def __setitem__(self, key, value):
super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(CaseInsensitiveDict, self).__getitem__(key.lower())
def __contains__(self, key):
return super(CaseInsensitiveDict, self).__contains__(key.lower())
def get(self, key):
return super(CaseInsensitiveDict, self).get(key.lower())
class ProviderArchitectureMeta(type):
"""
Metaclass for ProviderArchitecture.
"""
def __contains__(cls, provider):
"""
Support `Enum` style `contains`.
"""
return provider in cls._AVAILABLE_PROVIDER_ARCHITECTURES
class ProviderArchitecture(object):
"""
Enumerate WMI Provider Architectures.
"""
__metaclass__ = ProviderArchitectureMeta
# Available Provider Architecture(s)
DEFAULT = 0
_32BIT = 32
_64BIT = 64
_AVAILABLE_PROVIDER_ARCHITECTURES = frozenset([DEFAULT, _32BIT, _64BIT])
class WMISampler(object):
"""
WMI Sampler.
"""
# Properties
_provider = None
_formatted_filters = None
# Type resolution state
_property_counter_types = None
# Samples
_current_sample = None
_previous_sample = None
# Sampling state
_sampling = False
def __init__(self, logger, class_name, property_names, filters="", host="localhost",
namespace="root\\cimv2", provider=None,
username="", password="", and_props=[], timeout_duration=10):
self.logger = logger
# Connection information
self.host = host
self.namespace = namespace
self.provider = provider
self.username = username
self.password = password
self.is_raw_perf_class = "_PERFRAWDATA_" in class_name.upper()
# Sampler settings
# WMI class, properties, filters and counter types
# Include required properties for making calculations with raw
# performance counters:
# https://msdn.microsoft.com/en-us/library/aa394299(v=vs.85).aspx
if self.is_raw_perf_class:
property_names.extend([
"Timestamp_Sys100NS",
"Frequency_Sys100NS",
# IMPORTANT: To improve performance and since they're currently
# not needed, do not include the other Timestamp/Frequency
# properties:
# - Timestamp_PerfTime
# - Timestamp_Object
# - Frequency_PerfTime
# - Frequency_Object"
])
self.class_name = class_name
self.property_names = property_names
self.filters = filters
self._and_props = and_props
self._timeout_duration = timeout_duration
self._query = timeout(timeout_duration)(self._query)
@property
def provider(self):
"""
Return the WMI provider.
"""
return self._provider
@provider.setter
def provider(self, value):
"""
Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
"""
result = None
# `None` defaults to `ProviderArchitecture.DEFAULT`
defaulted_value = value or ProviderArchitecture.DEFAULT
try:
parsed_value = int(defaulted_value)
except ValueError:
pass
else:
if parsed_value in ProviderArchitecture:
result = parsed_value
if result is None:
self.logger.error(
u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.", value
)
self._provider = result or ProviderArchitecture.DEFAULT
@property
def connection(self):
"""
A property to retrieve the sampler connection information.
"""
return {
'host': self.host,
'namespace': self.namespace,
'username': self.username,
'password': self.password,
}
@property
def connection_key(self):
"""
Return an index key used to cache the sampler connection.
"""
return "{host}:{namespace}:{username}".format(
host=self.host,
namespace=self.namespace,
username=self.username
)
@property
def formatted_filters(self):
"""
Cache and return filters as a comprehensive WQL clause.
"""
if not self._formatted_filters:
filters = deepcopy(self.filters)
self._formatted_filters = self._format_filter(filters, self._and_props)
return self._formatted_filters
def sample(self):
"""
Compute new samples.
"""
self._sampling = True
try:
if self.is_raw_perf_class and not self._previous_sample:
self._current_sample = self._query()
self._previous_sample = self._current_sample
self._current_sample = self._query()
except TimeoutException:
self.logger.debug(
u"Query timeout after {timeout}s".format(
timeout=self._timeout_duration
)
)
raise
else:
self._sampling = False
def __len__(self):
"""
Return the number of WMI Objects in the current sample.
"""
# No data is returned while sampling
if self._sampling:
raise TypeError(
u"Sampling `WMISampler` object has no len()"
)
return len(self._current_sample)
def __iter__(self):
"""
Iterate on the current sample's WMI Objects and format the property values.
"""
# No data is returned while sampling
if self._sampling:
raise TypeError(
u"Sampling `WMISampler` object is not iterable"
)
if self.is_raw_perf_class:
# Format required
for previous_wmi_object, current_wmi_object in \
izip(self._previous_sample, self._current_sample):
formatted_wmi_object = self._format_property_values(
previous_wmi_object,
current_wmi_object
)
yield formatted_wmi_object
else:
# No format required
for wmi_object in self._current_sample:
yield wmi_object
def __getitem__(self, index):
"""
Get the specified formatted WMI Object from the current sample.
"""
if self.is_raw_perf_class:
previous_wmi_object = self._previous_sample[index]
current_wmi_object = self._current_sample[index]
formatted_wmi_object = self._format_property_values(
previous_wmi_object,
current_wmi_object
)
return formatted_wmi_object
else:
return self._current_sample[index]
def __eq__(self, other):
"""
Equality operator is based on the current sample.
"""
return self._current_sample == other
def __str__(self):
"""
Stringify the current sample's WMI Objects.
"""
return str(self._current_sample)
def _get_property_calculator(self, counter_type):
"""
Return the calculator for the given `counter_type`.
Fallback with `get_raw`.
"""
calculator = get_raw
try:
calculator = get_calculator(counter_type)
except UndefinedCalculator:
self.logger.warning(
u"Undefined WMI calculator for counter_type {counter_type}."
" Values are reported as RAW.".format(
counter_type=counter_type
)
)
return calculator
def _format_property_values(self, previous, current):
"""
Format WMI Object's RAW data based on the previous sample.
Do not override the original WMI Object !
"""
formatted_wmi_object = CaseInsensitiveDict()
for property_name, property_raw_value in current.iteritems():
counter_type = self._property_counter_types.get(property_name)
property_formatted_value = property_raw_value
if counter_type:
calculator = self._get_property_calculator(counter_type)
property_formatted_value = calculator(previous, current, property_name)
formatted_wmi_object[property_name] = property_formatted_value
return formatted_wmi_object
def get_connection(self):
"""
Create a new WMI connection
"""
self.logger.debug(
u"Connecting to WMI server "
u"(host={host}, namespace={namespace}, provider={provider}, username={username})."
.format(
host=self.host, namespace=self.namespace,
provider=self.provider, username=self.username
)
)
# Initialize COM for the current thread
# WARNING: any python COM object (locator, connection, etc) created in a thread
# shouldn't be used in other threads (can lead to memory/handle leaks if done
# without a deep knowledge of COM's threading model). Because of this and given
# that we run each query in its own thread, we don't cache connections
context = None
pythoncom.CoInitialize()
if self.provider != ProviderArchitecture.DEFAULT:
context = Dispatch("WbemScripting.SWbemNamedValueSet")
context.Add("__ProviderArchitecture", self.provider)
locator = Dispatch("WbemScripting.SWbemLocator")
connection = locator.ConnectServer(
self.host, self.namespace, self.username, self.password, None, "", 128, context
)
return connection
@staticmethod
def _format_filter(filters, and_props=[]):
"""
Transform filters to a comprehensive WQL `WHERE` clause.
Builds filter from a filter list.
- filters: expects a list of dicts, typically:
- [{'Property': value},...] or
- [{'Property': (comparison_op, value)},...]
        NOTE: If we just provide a value we default to the '=' comparison operator.
Otherwise, specify the operator in a tuple as above: (comp_op, value)
If we detect a wildcard character ('%') we will override the operator
to use LIKE
"""
def build_where_clause(fltr):
f = fltr.pop()
wql = ""
while f:
prop, value = f.popitem()
if isinstance(value, tuple):
oper = value[0]
value = value[1]
elif isinstance(value, basestring) and '%' in value:
oper = 'LIKE'
else:
oper = '='
if isinstance(value, list):
if not len(value):
continue
internal_filter = map(lambda x:
(prop, x) if isinstance(x, tuple)
else (prop, ('LIKE', x)) if '%' in x
else (prop, (oper, x)), value)
bool_op = ' OR '
for p in and_props:
if p.lower() in prop.lower():
bool_op = ' AND '
break
clause = bool_op.join(['{0} {1} \'{2}\''.format(k, v[0], v[1]) if isinstance(v,tuple)
else '{0} = \'{1}\''.format(k,v)
for k,v in internal_filter])
if bool_op.strip() == 'OR':
wql += "( {clause} )".format(
clause=clause)
else:
wql += "{clause}".format(
clause=clause)
else:
wql += "{property} {cmp} '{constant}'".format(
property=prop,
cmp=oper,
constant=value)
if f:
wql += " AND "
# empty list skipped
if wql.endswith(" AND "):
wql = wql[:-5]
if len(fltr) == 0:
return "( {clause} )".format(clause=wql)
return "( {clause} ) OR {more}".format(
clause=wql,
more=build_where_clause(fltr)
)
if not filters:
return ""
return " WHERE {clause}".format(clause=build_where_clause(filters))
def _query(self): # pylint: disable=E0202
"""
Query WMI using WMI Query Language (WQL) & parse the results.
Returns: List of WMI objects or `TimeoutException`.
"""
formated_property_names = ",".join(self.property_names)
wql = "Select {property_names} from {class_name}{filters}".format(
property_names=formated_property_names,
class_name=self.class_name,
filters=self.formatted_filters,
)
self.logger.debug(u"Querying WMI: {0}".format(wql))
try:
# From: https://msdn.microsoft.com/en-us/library/aa393866(v=vs.85).aspx
flag_return_immediately = 0x10 # Default flag.
flag_forward_only = 0x20
flag_use_amended_qualifiers = 0x20000
query_flags = flag_return_immediately | flag_forward_only
# For the first query, cache the qualifiers to determine each
            # property's "CounterType"
includes_qualifiers = self.is_raw_perf_class and self._property_counter_types is None
if includes_qualifiers:
self._property_counter_types = CaseInsensitiveDict()
query_flags |= flag_use_amended_qualifiers
raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags)
results = self._parse_results(raw_results, includes_qualifiers=includes_qualifiers)
except pywintypes.com_error:
self.logger.warning(u"Failed to execute WMI query (%s)", wql, exc_info=True)
results = []
return results
def _parse_results(self, raw_results, includes_qualifiers):
"""
Parse WMI query results in a more comprehensive form.
Returns: List of WMI objects
```
[
{
'freemegabytes': 19742.0,
'name': 'C:',
'avgdiskbytesperwrite': 1536.0
}, {
'freemegabytes': 19742.0,
'name': 'D:',
'avgdiskbytesperwrite': 1536.0
}
]
```
"""
results = []
for res in raw_results:
# Ensure all properties are available. Use case-insensitivity
# because some properties are returned with different cases.
item = CaseInsensitiveDict()
for prop_name in self.property_names:
item[prop_name] = None
for wmi_property in res.Properties_:
# IMPORTANT: To improve performance, only access the Qualifiers
# if the "CounterType" hasn't already been cached.
should_get_qualifier_type = (
includes_qualifiers and
wmi_property.Name not in self._property_counter_types
)
if should_get_qualifier_type:
# Can't index into "Qualifiers_" for keys that don't exist
# without getting an exception.
qualifiers = dict((q.Name, q.Value) for q in wmi_property.Qualifiers_)
# Some properties like "Name" and "Timestamp_Sys100NS" do
# not have a "CounterType" (since they're not a counter).
# Therefore, they're ignored.
if "CounterType" in qualifiers:
counter_type = qualifiers["CounterType"]
self._property_counter_types[wmi_property.Name] = counter_type
self.logger.debug(
u"Caching property qualifier CounterType: "
"{class_name}.{property_names} = {counter_type}"
.format(
class_name=self.class_name,
property_names=wmi_property.Name,
counter_type=counter_type,
)
)
else:
self.logger.debug(
u"CounterType qualifier not found for {class_name}.{property_names}"
.format(
class_name=self.class_name,
property_names=wmi_property.Name,
)
)
try:
item[wmi_property.Name] = float(wmi_property.Value)
except (TypeError, ValueError):
item[wmi_property.Name] = wmi_property.Value
results.append(item)
return results
| {
"content_hash": "34169653dc9cc582d4c13543a6f29f75",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 105,
"avg_line_length": 34.45387453874539,
"alnum_prop": 0.5411802506158295,
"repo_name": "indeedops/dd-agent",
"id": "4bd32cc9f93bc17bd0266f9098e200be12b9b287",
"size": "18698",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "checks/libs/wmi/sampler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8553"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2300561"
},
{
"name": "Ruby",
"bytes": "102896"
},
{
"name": "Shell",
"bytes": "61965"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
iz_dnevnika = Table('iz_dnevnika', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('ime_i_prezime', String(length=50)),
Column('razred', String(length=4)),
Column('ime', String(length=15)),
Column('prezime', String(length=30)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['iz_dnevnika'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['iz_dnevnika'].drop()
| {
"content_hash": "c310546edc4a0226cc751a423bfd4c75",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 68,
"avg_line_length": 29.79310344827586,
"alnum_prop": 0.6967592592592593,
"repo_name": "knadir/IIIgimnazija80",
"id": "2b5663740e81ee55c2d472e5c0b5b75925a8df64",
"size": "864",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "db_repository/versions/017_migration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16668"
},
{
"name": "HTML",
"bytes": "83814"
},
{
"name": "JavaScript",
"bytes": "40972"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "69884"
},
{
"name": "Shell",
"bytes": "464"
}
],
"symlink_target": ""
} |
from typing import Callable, Dict, Any, List, Union, Tuple
class Type:
def __init__(self, atomic_type: str): ...
@staticmethod
def pointer(other: 'Type') -> 'Type': ...
@staticmethod
def reference(other: 'Type') -> 'Type': ...
@staticmethod
def rvalue_reference(other: 'Type') -> 'Type': ...
@staticmethod
def const(other: 'Type') -> 'Type': ...
# E.g. Type.function(Type('int'), [Type('float')]) constructs the type 'int(float)'
@staticmethod
def function(return_type: 'Type', args: List['Type']) -> 'Type': ...
# E.g. Type.array(Type('int')) constructs the type 'int[]'
@staticmethod
def array(elem_type: 'Type') -> 'Type': ...
@staticmethod
def template_instantiation(template_atomic_type: str, args: List['Type']) -> 'Type': ...
# E.g. Type.template_member(Type('foo'), 'bar', [Type('int')]) constructs the type 'foo::bar<int>'.
@staticmethod
def template_member(type: 'Type', member_name: str, args: List['Type']) -> 'Type': ...
# E.g. Type('foo').bar is the type 'foo::bar'.
def __getattr__(self, member_name: str) -> 'Type': ...
def match(*types: Type) -> Callable[[Any, ...], Dict[Union[Type, Tuple[Type, ...]], Type]]: ...
| {
"content_hash": "06e903907403fea0e375e733696a3388",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 103,
"avg_line_length": 34.19444444444444,
"alnum_prop": 0.586515028432169,
"repo_name": "google/tmppy",
"id": "484337dd9a65e7b7b62c86f39f200cfa4f711d6b",
"size": "1830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_tmppy/type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2835"
},
{
"name": "CMake",
"bytes": "6123"
},
{
"name": "Python",
"bytes": "1243209"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import os
from urlparse import urlparse
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.copy_tarball import STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions.expect import expect
from resource_management.libraries import functions
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
# Default log4j version; put config files under /etc/hive/conf
log4j_version = '1'
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY
stack_root = status_params.stack_root
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
# node hostname
hostname = config["hostname"]
# This is expected to be of the form #.#.#.#
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted_major = status_params.stack_version_formatted_major
# this is not available on INSTALL action because <stack-selector-tool> is not available
stack_version_formatted = functions.get_stack_version('hive-server2')
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)
# When downgrading the 'version' is pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = upgrade_summary.get_downgrade_from_version("HIVE")
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
# component ROLE directory (like hive-metastore or hive-server2-hive2)
component_directory = status_params.component_directory
component_directory_interactive = status_params.component_directory_interactive
hadoop_home = '/usr/lib/hadoop'
hive_bin = '/usr/lib/hive/bin'
hive_schematool_ver_bin = '/usr/lib/hive/bin'
hive_schematool_bin = '/usr/lib/hive/bin'
hive_lib = '/usr/lib/hive/lib'
hive_version_lib = '/usr/lib/hive/lib'
#hadoop_home = format('{stack_root}/current/hadoop-client')
#hive_bin = format('{stack_root}/current/{component_directory}/bin')
#hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
#hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
#hive_lib = format('{stack_root}/current/{component_directory}/lib')
#hive_version_lib = format('{stack_root}/{version}/hive/lib')
hive_var_lib = '/var/lib/hive'
hive_user_home_dir = "/home/hive"
# starting on stacks where HSI is supported, we need to begin using the 'hive2' schematool
hive_server2_hive2_dir = None
hive_server2_hive2_lib = None
version = default("/commandParams/version", None)
if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, version_for_stack_feature_checks):
# the name of the hiveserver2-hive2 component
hive_server2_hive2_component = status_params.SERVER_ROLE_DIRECTORY_MAP["HIVE_SERVER_INTERACTIVE"]
# when using the version, we can just specify the component as "hive2"
hive_schematool_ver_bin = format('{stack_root}/{version}/hive2/bin')
# use the schematool which ships with hive2
hive_schematool_bin = format('{stack_root}/current/{hive_server2_hive2_component}/bin')
# <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
hive_server2_hive2_dir = format('{stack_root}/current/{hive_server2_hive2_component}')
# <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
hive_server2_hive2_version_dir = format('{stack_root}/{version}/hive2')
# <stack-root>/current/hive-server2-hive2/lib -> <stack-root>/<version>/hive2/lib
hive_server2_hive2_lib = format('{hive_server2_hive2_dir}/lib')
# <stack-root>/<version>/hive2/lib
hive_server2_hive2_version_lib = format('{hive_server2_hive2_version_dir}/lib')
hive_interactive_bin = format('{stack_root}/current/{component_directory_interactive}/bin')
hive_interactive_lib = format('{stack_root}/current/{component_directory_interactive}/lib')
# Hive Interactive related paths
hive_interactive_var_lib = '/var/lib/hive2'
# These tar folders were used in previous stack versions, e.g., HDP 2.1
hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
pig_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/pig.tar.gz')
hive_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/hive.tar.gz')
sqoop_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/sqoop*.tar.gz')
hive_metastore_site_supported = False
hive_etc_dir_prefix = "/etc/hive"
hive_interactive_etc_dir_prefix = "/etc/hive2"
limits_conf_dir = "/etc/security/limits.d"
hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
# use the directories from status_params as they are already calculated for
# the correct stack version
hadoop_conf_dir = status_params.hadoop_conf_dir
hadoop_bin_dir = status_params.hadoop_bin_dir
webhcat_conf_dir = status_params.webhcat_conf_dir
hive_conf_dir = status_params.hive_conf_dir
hive_home_dir = status_params.hive_home_dir
hive_config_dir = status_params.hive_config_dir
hive_client_conf_dir = status_params.hive_client_conf_dir
hive_server_conf_dir = status_params.hive_server_conf_dir
hcat_conf_dir = '/etc/hive-hcatalog/conf'
config_dir = '/etc/hive-webhcat/conf'
hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
# --- Tarballs ---
# DON'T CHANGE THESE VARIABLE NAMES
# Values don't change from those in copy_tarball.py
webhcat_apps_dir = "/apps/webhcat"
hive_tar_source = "{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
pig_tar_source = "{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
hive_tar_dest_file = "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
pig_tar_dest_file = "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
hadoop_streaming_tar_source = "{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
sqoop_tar_source = "{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
hadoop_streaming_tar_dest_dir = "/{0}/apps/{1}/mapreduce/".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
sqoop_tar_dest_dir = "/{0}/apps/{1}/sqoop/".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
tarballs_mode = 0444
purge_tables = "false"
# Starting with stack versions that support the hive_purge_table feature, DROP should be executed with PURGE
purge_tables = 'true'
# this is NOT a typo. Configs for hcatalog/webhcat point to a
# specific directory which is NOT called 'conf'
# FIXME: ODPi
# hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
# config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
hcat_conf_dir = format('/etc/hive-hcatalog/conf')
config_dir = format('/etc/hive-webhcat/conf')
hive_metastore_site_supported = True
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
#HACK Temporarily use dbType=azuredb while invoking schematool
if hive_metastore_db_type == "mssql":
hive_metastore_db_type = "azuredb"
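# Illustrative sketch (not part of the original file): the value above ends up as the
# -dbType argument when Hive's schematool is invoked, e.g. (hypothetical credentials):
#   /usr/lib/hive/bin/schematool -initSchema -dbType azuredb -userName hive -passWord <pw>
# so mapping "mssql" to "azuredb" here only changes that CLI argument.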
#users
hive_user = config['configurations']['hive-env']['hive_user']
#JDBC driver jar name
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
jdk_location = config['hostLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
hive_database_name = config['configurations']['hive-env']['hive_database_name']
hive_database = config['configurations']['hive-env']['hive_database']
hive_use_existing_db = hive_database.startswith('Existing')
default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
"com.mysql.jdbc.Driver":"mysql-connector-java.jar",
"org.postgresql.Driver":"postgresql-jdbc.jar",
"oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
"sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
# NOTE: it may not be a good idea to key this map by the driver class name; keying by
# DB type might be better, because the class paths could change
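# Illustrative sketch (not in the original file): the map above is keyed by the configured
# driver class, so a lookup behaves roughly like this:
#   default_connectors_map.get("com.mysql.jdbc.Driver")   # -> "mysql-connector-java.jar"
#   default_connectors_map.get("org.postgresql.Driver")   # -> "postgresql-jdbc.jar"
#   default_connectors_map.get("unknown.Driver")          # -> None (no default connector)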
sqla_db_used = False
hive_previous_jdbc_jar_name = None
if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
elif hive_jdbc_driver == "org.postgresql.Driver":
jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
sqla_db_used = True
default_mysql_jar_name = "mysql-connector-java.jar"
default_mysql_target = format("{hive_lib}/{default_mysql_jar_name}")
hive_previous_jdbc_jar = format("{hive_lib}/{hive_previous_jdbc_jar_name}")
if not hive_use_existing_db:
jdbc_jar_name = default_mysql_jar_name
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
hive_jdbc_target = format("{hive_lib}/{jdbc_jar_name}")
hive2_jdbc_target = None
if hive_server2_hive2_dir:
hive2_jdbc_target = format("{hive_server2_hive2_lib}/{jdbc_jar_name}")
# during upgrade / downgrade, use the specific version to copy the JDBC JAR to
if upgrade_direction:
hive_jdbc_target = format("{hive_version_lib}/{jdbc_jar_name}")
hive2_jdbc_target = format("{hive_server2_hive2_version_lib}/{jdbc_jar_name}") if hive2_jdbc_target is not None else None
hive2_previous_jdbc_jar = format("{hive_server2_hive2_lib}/{hive_previous_jdbc_jar_name}") if hive_server2_hive2_lib is not None else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
# normally, the JDBC driver would be referenced by <stack-root>/current/.../foo.jar,
# but during a rolling upgrade, if <stack-selector-tool> was called and the restart then fails,
# the 'current' pointer already points at the upgraded version's location; that breaks the cp command
version_for_source_jdbc_file = upgrade_summary.get_source_version(default_version = version_for_stack_feature_checks)
source_jdbc_file = format("{stack_root}/{version_for_source_jdbc_file}/hive/lib/{jdbc_jar_name}")
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
"org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
prepackaged_jdbc_name = "ojdbc6.jar"
prepackaged_ojdbc_symlink = format("{hive_lib}/{prepackaged_jdbc_name}")
templeton_port = config['configurations']['webhcat-site']['templeton.port']
#constants for type2 jdbc
jdbc_libs_dir = format("{hive_lib}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
if sqla_db_used:
jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
libs_in_hive_lib = format("{jdbc_libs_dir}/*")
# Start, Common Hosts and Ports
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
hive_metastore_hosts = default('/clusterHostInfo/hive_metastore_host', [])
hive_metastore_host = hive_metastore_hosts[0] if len(hive_metastore_hosts) > 0 else None
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris'])
hive_server_hosts = default("/clusterHostInfo/hive_server_host", [])
hive_server_host = hive_server_hosts[0] if len(hive_server_hosts) > 0 else None
hive_server_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
hive_server_interactive_host = hive_server_interactive_hosts[0] if len(hive_server_interactive_hosts) > 0 else None
# End, Common Hosts and Ports
hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
if hive_transport_mode.lower() == "http":
hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
else:
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
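# Illustrative note (not in the original file): in HTTP transport mode, JDBC clients
# typically append the transport parameters to the URL built above, e.g. (hypothetical host):
#   jdbc:hive2://hs2.example.com:10001/;transportMode=http;httpPath=cliservice
# In binary mode the plain jdbc:hive2://{host}:{port} form from hive_url is used as-is.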
hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
# ssl options
hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
fs_root = config['configurations']['core-site']['fs.defaultFS']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
hive_metastore_principal = config['configurations']['hive-site']['hive.metastore.kerberos.principal']
hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
#hive_env
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
hive_interactive_pid = status_params.hive_interactive_pid
#Default conf dir for client
hive_conf_dirs_list = [hive_client_conf_dir]
# These are the folders to which the configs will be written.
ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER']
if status_params.role == "HIVE_METASTORE" and hive_metastore_hosts is not None and hostname in hive_metastore_hosts:
hive_conf_dirs_list.append(hive_server_conf_dir)
elif status_params.role == "HIVE_SERVER" and hive_server_hosts is not None and hostname in hive_server_host:
hive_conf_dirs_list.append(hive_server_conf_dir)
elif status_params.role == "HIVE_SERVER_INTERACTIVE" and hive_server_interactive_hosts is not None and hostname in hive_server_interactive_hosts:
hive_conf_dirs_list.append(status_params.hive_server_interactive_conf_dir)
ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER_INTERACTIVE']
# log4j version is 2 for hive2; put config files under /etc/hive2/conf
if status_params.role == "HIVE_SERVER_INTERACTIVE":
log4j_version = '2'
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
# Hive Server Interactive
slider_am_container_mb = default("/configurations/hive-interactive-env/slider_am_container_mb", 341)
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
# Need this for yarn.nodemanager.recovery.dir in yarn-site
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
target_hive_interactive = format("{hive_interactive_lib}/{jdbc_jar_name}")
hive_intaractive_previous_jdbc_jar = format("{hive_interactive_lib}/{hive_previous_jdbc_jar_name}")
jars_in_hive_lib = format("{hive_lib}/*.jar")
start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
start_metastore_path = format("{tmp_dir}/start_metastore_script")
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
else:
hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
java64_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)
##### MYSQL
db_name = config['configurations']['hive-env']['hive_database_name']
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
#### Metastore
# initialize the schema only if not in an upgrade/downgrade
init_metastore_schema = upgrade_direction is None
########## HCAT
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
hcat_env_sh_template = config['configurations']['hcat-env']['content']
#hive-log4j.properties.template
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
log4j_props = config['configurations']['hive-log4j']['content']
else:
log4j_props = None
#webhcat-log4j.properties.template
if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
else:
log4j_webhcat_props = None
#hive-exec-log4j.properties.template
if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
else:
log4j_exec_props = None
daemon_name = status_params.daemon_name
process_name = status_params.process_name
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0755
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
# Tez-related properties
tez_user = config['configurations']['tez-env']['tez_user']
# Tez jars
tez_local_api_jars = '/usr/lib/tez/tez*.jar'
tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
if OSCheck.is_ubuntu_family():
mysql_configname = '/etc/mysql/my.cnf'
else:
mysql_configname = '/etc/my.cnf'
mysql_user = 'mysql'
# Hive security
hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
hive_site_config = dict(config['configurations']['hive-site'])
########################################################
############# AMS related params #####################
########################################################
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_host' in config['configurations']['cluster-env']:
metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
else:
metric_collector_host = ams_collector_hosts[0]
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
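  # Illustrative note (not in the original file): these values are typically rendered into the
  # hadoop-metrics2 sink properties, yielding a collector endpoint roughly like
  # (hypothetical host):
  #   http://metrics-collector.example.com:6188/ws/v1/timeline/metrics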
########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks
hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
if has_atlas_in_cluster():
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
#endregion
########################################################
########### WebHCat related params #####################
########################################################
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
templeton_pid_dir = status_params.hcat_pid_dir
webhcat_pid_file = status_params.webhcat_pid_file
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
hcat_hdfs_user_dir = format("/user/{hcat_user}")
hcat_hdfs_user_mode = 0755
webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
webhcat_hdfs_user_mode = 0755
#for create_hdfs_directory
security_param = "true" if security_enabled else "false"
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
#create partial functions with common arguments for every HdfsResource call
#to create an HDFS directory, call params.HdfsResource in the service code
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
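# Illustrative sketch (not in the original file): elsewhere in the service scripts the
# partial defined above is typically called with only the per-resource arguments, roughly:
#   params.HdfsResource(params.hive_hdfs_user_dir,
#                       type="directory",
#                       action="create_on_execute",
#                       owner=params.hive_user,
#                       mode=params.hive_hdfs_user_mode)
#   params.HdfsResource(None, action="execute")   # flush the queued operations
# The shared keyword arguments (user, keytab, hdfs-site, ...) come from the partial above.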
# Hive Interactive related
hive_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
has_hive_interactive = len(hive_interactive_hosts) > 0
if has_hive_interactive:
llap_daemon_log4j = config['configurations']['llap-daemon-log4j']['content']
llap_cli_log4j2 = config['configurations']['llap-cli-log4j2']['content']
hive_log4j2 = config['configurations']['hive-log4j2']['content']
hive_exec_log4j2 = config['configurations']['hive-exec-log4j2']['content']
beeline_log4j2 = config['configurations']['beeline-log4j2']['content']
hive_server_interactive_conf_dir = status_params.hive_server_interactive_conf_dir
execute_path_hive_interactive = os.path.join(os.environ['PATH'], hive_interactive_bin, hadoop_bin_dir)
start_hiveserver2_interactive_script = 'startHiveserver2Interactive.sh.j2'
start_hiveserver2_interactive_path = format("{tmp_dir}/start_hiveserver2_interactive_script")
hive_interactive_env_sh_template = config['configurations']['hive-interactive-env']['content']
hive_interactive_enabled = default('/configurations/hive-interactive-env/enable_hive_interactive', False)
llap_app_java_opts = default('/configurations/hive-interactive-env/llap_java_opts', '-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}')
# Service check related
if hive_transport_mode.lower() == "http":
hive_server_interactive_port = config['configurations']['hive-interactive-site']['hive.server2.thrift.http.port']
else:
hive_server_interactive_port = default('/configurations/hive-interactive-site/hive.server2.thrift.port',"10500")
# Tez for Hive interactive related
tez_interactive_config_dir = "/etc/tez_hive2/conf"
tez_interactive_user = config['configurations']['tez-env']['tez_user']
num_retries_for_checking_llap_status = default('/configurations/hive-interactive-env/num_retries_for_checking_llap_status', 10)
# Used in LLAP slider package creation
num_llap_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes']
llap_daemon_container_size = config['configurations']['hive-interactive-site']['hive.llap.daemon.yarn.container.mb']
llap_log_level = config['configurations']['hive-interactive-env']['llap_log_level']
hive_llap_io_mem_size = config['configurations']['hive-interactive-site']['hive.llap.io.memory.size']
llap_heap_size = config['configurations']['hive-interactive-env']['llap_heap_size']
llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
hive_llap_principal = None
if security_enabled:
hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']).replace('_HOST',hostname.lower())
pass
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
#ranger hive properties
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
xa_db_host = config['configurations']['admin-properties']['db_host']
repo_name = str(config['clusterName']) + '_hive'
jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
if security_enabled:
hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
#Used by the curl command in the Ranger plugin to download the DB connector
if has_ranger_admin:
enable_ranger_hive = (config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger')
repo_config_password = unicode(config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
ranger_previous_jdbc_jar_name = None
if stack_supports_ranger_audit_db:
if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
ranger_jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "com.mysql.jdbc.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
ranger_jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
colon_count = xa_db_host.count(':')
if colon_count == 2 or colon_count == 0:
audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
else:
audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
jdbc_driver = "oracle.jdbc.OracleDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
ranger_jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "org.postgresql.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
ranger_jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
ranger_jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
sql_connector_jar = ''
hive_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'jdbc.driverClassName': jdbc_driver_class_name,
'jdbc.url': format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url,
'commonNameForCertificate': common_name_for_certificate
}
hive_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hive_ranger_plugin_config),
'description': 'hive repo',
'name': repo_name,
'repositoryType': 'hive',
'assetType': '3'
}
if stack_supports_ranger_kerberos and security_enabled:
hive_ranger_plugin_config['policy.download.auth.users'] = hive_user
hive_ranger_plugin_config['tag.download.auth.users'] = hive_user
hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = hive_user
if stack_supports_ranger_kerberos:
hive_ranger_plugin_config['ambari.service.check.user'] = policy_user
hive_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': hive_ranger_plugin_config,
'description': 'hive repo',
'name': repo_name,
'type': 'hive'
}
xa_audit_db_is_enabled = False
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
ssl_keystore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
ssl_truststore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
#For SQLA explicitly disable audit to DB for Ranger
if xa_audit_db_flavor == 'sqla':
xa_audit_db_is_enabled = False
| {
"content_hash": "8c6d1b6bd377817eb460a0058c3fae8a",
"timestamp": "",
"source": "github",
"line_count": 733,
"max_line_length": 405,
"avg_line_length": 51.54297407912688,
"alnum_prop": 0.7378311849871628,
"repo_name": "arenadata/ambari",
"id": "e767bd09a2571933b3e46e893479aded22137eed",
"size": "37803",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.6",
"path": "contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/params_linux.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
"""
Device Manager monitor logger
"""
from sandesh_common.vns.ttypes import Module
from cfgm_common.vnc_logger import ConfigServiceLogger
from db import BgpRouterDM, PhysicalRouterDM
from sandesh.dm_introspect import ttypes as sandesh
class DeviceManagerLogger(ConfigServiceLogger):
def __init__(self, args=None, http_server_port=None):
module = Module.DEVICE_MANAGER
module_pkg = "device_manager"
self.context = "device_manager"
super(DeviceManagerLogger, self).__init__(
module, module_pkg, args, http_server_port)
def sandesh_init(self, http_server_port=None):
super(DeviceManagerLogger, self).sandesh_init(http_server_port)
self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
size=1000)
def redefine_sandesh_handles(self):
sandesh.BgpRouterList.handle_request = \
self.sandesh_bgp_handle_request
sandesh.PhysicalRouterList.handle_request = \
self.sandesh_pr_handle_request
def sandesh_bgp_build(self, bgp_router):
return sandesh.BgpRouter(name=bgp_router.name, uuid=bgp_router.uuid,
peers=bgp_router.bgp_routers,
physical_router=bgp_router.physical_router)
def sandesh_bgp_handle_request(self, req):
# Return the list of BGP routers
resp = sandesh.BgpRouterListResp(bgp_routers=[])
if req.name_or_uuid is None:
for router in BgpRouterDM:
sandesh_router = self.sandesh_bgp_build(router)
resp.bgp_routers.extend(sandesh_router)
else:
router = BgpRouterDM.find_by_name_or_uuid(req.name_or_uuid)
if router:
                sandesh_router = self.sandesh_bgp_build(router)
resp.bgp_routers.extend(sandesh_router)
resp.response(req.context())
# end sandesh_bgp_handle_request
def sandesh_pr_build(self, pr):
return sandesh.PhysicalRouter(name=pr.name, uuid=pr.uuid,
bgp_router=pr.bgp_router,
physical_interfaces=pr.physical_interfaces,
logical_interfaces=pr.logical_interfaces,
virtual_networks=pr.virtual_networks)
def sandesh_pr_handle_request(self, req):
# Return the list of PR routers
resp = sandesh.PhysicalRouterListResp(physical_routers=[])
if req.name_or_uuid is None:
for router in PhysicalRouterDM:
sandesh_router = self.sandesh_pr_build(router)
resp.physical_routers.extend(sandesh_router)
else:
router = PhysicalRouterDM.find_by_name_or_uuid(req.name_or_uuid)
if router:
                sandesh_router = self.sandesh_pr_build(router)
resp.physical_routers.extend(sandesh_router)
resp.response(req.context())
# end sandesh_pr_handle_request
# end DeviceManagerLogger
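# Illustrative sketch (not part of the original module), assuming parsed daemon args are
# available:
#
#   dm_logger = DeviceManagerLogger(args)
#
# Once redefine_sandesh_handles() has run (the base ConfigServiceLogger is assumed to call
# it during sandesh setup), introspect requests for BgpRouterList / PhysicalRouterList are
# served by sandesh_bgp_handle_request / sandesh_pr_handle_request above.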
| {
"content_hash": "1ee1f6c21aa1c625d61f7061219a92e2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 76,
"avg_line_length": 41.229729729729726,
"alnum_prop": 0.6181579809898394,
"repo_name": "nischalsheth/contrail-controller",
"id": "8c333d5745ce9930338bd02df562f635d6850f25",
"size": "3165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/device-manager/device_manager/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88437"
},
{
"name": "C++",
"bytes": "23392370"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "Go",
"bytes": "45352"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "20359"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Python",
"bytes": "7781013"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "63970"
},
{
"name": "Thrift",
"bytes": "5666"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
} |
import click
from arrow.cli import pass_context
from arrow.decorators import custom_exception, dict_output
@click.command('addKey')
@click.argument("key")
@click.option(
"--metadata",
help=""
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, key, metadata=""):
"""TODO: Undocumented
Output:
???
"""
return ctx.gi.cannedkeys.addKey(key, metadata=metadata)
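# Illustrative usage sketch (not part of the original file), based on the click
# declarations above; the command would be invoked roughly as:
#
#   $ arrow cannedkeys addKey "MyKey" --metadata "optional description"
#
# which calls ctx.gi.cannedkeys.addKey("MyKey", metadata="optional description") and the
# dict_output decorator prints the returned dict.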
| {
"content_hash": "5b6b01f99233d3bd51d2597c521bcf3e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 59,
"avg_line_length": 17.863636363636363,
"alnum_prop": 0.6895674300254453,
"repo_name": "erasche/python-apollo",
"id": "8d2844b11d3e77a754e18a929ea9f60ed42edaf8",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arrow/commands/cannedkeys/addKey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18804"
}
],
"symlink_target": ""
} |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import sentry_common_service.ttypes
import sentry_policy_service.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class TSentryGrantOption(object):
TRUE = 1
FALSE = 0
UNSET = -1
_VALUES_TO_NAMES = {
1: "TRUE",
0: "FALSE",
-1: "UNSET",
}
_NAMES_TO_VALUES = {
"TRUE": 1,
"FALSE": 0,
"UNSET": -1,
}
class TAuthorizable(object):
"""
Attributes:
- type
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'type', None, None, ), # 1
(2, TType.STRING, 'name', None, None, ), # 2
)
def __init__(self, type=None, name=None,):
self.type = type
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.type = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAuthorizable')
if self.type is not None:
oprot.writeFieldBegin('type', TType.STRING, 1)
oprot.writeString(self.type)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.type is None:
raise TProtocol.TProtocolException(message='Required field type is unset!')
if self.name is None:
raise TProtocol.TProtocolException(message='Required field name is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.type)
value = (value * 31) ^ hash(self.name)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TSentryPrivilege(object):
"""
Attributes:
- component
- serviceName
- authorizables
- action
- createTime
- grantorPrincipal
- grantOption
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'component', None, None, ), # 1
(2, TType.STRING, 'serviceName', None, None, ), # 2
(3, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 3
(4, TType.STRING, 'action', None, None, ), # 4
(5, TType.I64, 'createTime', None, None, ), # 5
(6, TType.STRING, 'grantorPrincipal', None, None, ), # 6
(7, TType.I32, 'grantOption', None, 0, ), # 7
)
def __init__(self, component=None, serviceName=None, authorizables=None, action=None, createTime=None, grantorPrincipal=None, grantOption=thrift_spec[7][4],):
self.component = component
self.serviceName = serviceName
self.authorizables = authorizables
self.action = action
self.createTime = createTime
self.grantorPrincipal = grantorPrincipal
self.grantOption = grantOption
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.serviceName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.authorizables = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = TAuthorizable()
_elem5.read(iprot)
self.authorizables.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.action = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.createTime = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.grantorPrincipal = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.grantOption = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TSentryPrivilege')
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 1)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.serviceName is not None:
oprot.writeFieldBegin('serviceName', TType.STRING, 2)
oprot.writeString(self.serviceName)
oprot.writeFieldEnd()
if self.authorizables is not None:
oprot.writeFieldBegin('authorizables', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.authorizables))
for iter6 in self.authorizables:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.action is not None:
oprot.writeFieldBegin('action', TType.STRING, 4)
oprot.writeString(self.action)
oprot.writeFieldEnd()
if self.createTime is not None:
oprot.writeFieldBegin('createTime', TType.I64, 5)
oprot.writeI64(self.createTime)
oprot.writeFieldEnd()
if self.grantorPrincipal is not None:
oprot.writeFieldBegin('grantorPrincipal', TType.STRING, 6)
oprot.writeString(self.grantorPrincipal)
oprot.writeFieldEnd()
if self.grantOption is not None:
oprot.writeFieldBegin('grantOption', TType.I32, 7)
oprot.writeI32(self.grantOption)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.serviceName is None:
raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
if self.authorizables is None:
raise TProtocol.TProtocolException(message='Required field authorizables is unset!')
if self.action is None:
raise TProtocol.TProtocolException(message='Required field action is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.serviceName)
value = (value * 31) ^ hash(frozenset(self.authorizables))
value = (value * 31) ^ hash(self.action)
value = (value * 31) ^ hash(self.createTime)
value = (value * 31) ^ hash(self.grantorPrincipal)
value = (value * 31) ^ hash(self.grantOption)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
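# Illustrative sketch (not part of the generated module): building and binary-serializing a
# privilege with the standard Thrift Python runtime already imported above (hypothetical values):
#
#   priv = TSentryPrivilege(component='sqoop', serviceName='server1',
#                           authorizables=[TAuthorizable(type='SERVER', name='server1')],
#                           action='ALL')
#   priv.validate()                                  # raises if a required field is unset
#   buf = TTransport.TMemoryBuffer()
#   priv.write(TBinaryProtocol.TBinaryProtocol(buf))
#   wire_bytes = buf.getvalue()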
class TCreateSentryRoleRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TCreateSentryRoleRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TCreateSentryRoleResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TCreateSentryRoleResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TDropSentryRoleRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TDropSentryRoleRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TDropSentryRoleResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TDropSentryRoleResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TAlterSentryRoleAddGroupsRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
- groups
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
(5, TType.SET, 'groups', (TType.STRING,None), None, ), # 5
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, groups=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
self.groups = groups
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.SET:
self.groups = set()
(_etype10, _size7) = iprot.readSetBegin()
for _i11 in xrange(_size7):
_elem12 = iprot.readString()
self.groups.add(_elem12)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleAddGroupsRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.groups is not None:
oprot.writeFieldBegin('groups', TType.SET, 5)
oprot.writeSetBegin(TType.STRING, len(self.groups))
for iter13 in self.groups:
oprot.writeString(iter13)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.groups is None:
raise TProtocol.TProtocolException(message='Required field groups is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(frozenset(self.groups or ()))  # plain sets are unhashable
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
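
# Illustrative usage sketch: building and validating an add-groups request.
# The requestor, role, component, and group names below are hypothetical.
def _example_add_groups_request():
    request = TAlterSentryRoleAddGroupsRequest(
        requestorUserName='admin',
        roleName='analyst_role',
        component='sqoop',
        groups=set(['analysts', 'etl_users']))
    # validate() raises TProtocolException if a required field is unset;
    # protocol_version already carries its default value of 2.
    request.validate()
    return request
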
class TAlterSentryRoleAddGroupsResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleAddGroupsResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TAlterSentryRoleDeleteGroupsRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
- groups
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
(5, TType.SET, 'groups', (TType.STRING,None), None, ), # 5
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, groups=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
self.groups = groups
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.SET:
self.groups = set()
(_etype17, _size14) = iprot.readSetBegin()
for _i18 in xrange(_size14):
_elem19 = iprot.readString()
self.groups.add(_elem19)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleDeleteGroupsRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.groups is not None:
oprot.writeFieldBegin('groups', TType.SET, 5)
oprot.writeSetBegin(TType.STRING, len(self.groups))
for iter20 in self.groups:
oprot.writeString(iter20)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.groups is None:
raise TProtocol.TProtocolException(message='Required field groups is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
    value = (value * 31) ^ hash(frozenset(self.groups or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TAlterSentryRoleDeleteGroupsResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleDeleteGroupsResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TAlterSentryRoleGrantPrivilegeRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
- privilege
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
(5, TType.STRUCT, 'privilege', (TSentryPrivilege, TSentryPrivilege.thrift_spec), None, ), # 5
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, privilege=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
self.privilege = privilege
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.privilege = TSentryPrivilege()
self.privilege.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleGrantPrivilegeRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.privilege is not None:
oprot.writeFieldBegin('privilege', TType.STRUCT, 5)
self.privilege.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.privilege is None:
raise TProtocol.TProtocolException(message='Required field privilege is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.privilege)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
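
# Illustrative usage sketch: granting a privilege to a role. TSentryPrivilege
# is defined earlier in this module; it is taken as a parameter here because
# its contents depend on the component's privilege model (hypothetical usage).
def _example_grant_privilege_request(privilege):
    # 'privilege' is expected to be a populated TSentryPrivilege instance.
    request = TAlterSentryRoleGrantPrivilegeRequest(
        requestorUserName='admin',
        roleName='analyst_role',
        component='sqoop',
        privilege=privilege)
    request.validate()
    return request
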
class TAlterSentryRoleGrantPrivilegeResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleGrantPrivilegeResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TAlterSentryRoleRevokePrivilegeRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
- privilege
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
(5, TType.STRUCT, 'privilege', (TSentryPrivilege, TSentryPrivilege.thrift_spec), None, ), # 5
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, privilege=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
self.privilege = privilege
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.privilege = TSentryPrivilege()
self.privilege.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleRevokePrivilegeRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.privilege is not None:
oprot.writeFieldBegin('privilege', TType.STRUCT, 5)
self.privilege.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.privilege is None:
raise TProtocol.TProtocolException(message='Required field privilege is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.privilege)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TAlterSentryRoleRevokePrivilegeResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TAlterSentryRoleRevokePrivilegeResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TListSentryRolesRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- groupName
- component
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'groupName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, groupName=None, component=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.groupName = groupName
self.component = component
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.groupName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryRolesRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.groupName is not None:
oprot.writeFieldBegin('groupName', TType.STRING, 3)
oprot.writeString(self.groupName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.groupName)
value = (value * 31) ^ hash(self.component)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
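
# Illustrative usage sketch: serializing a request with the plain binary
# protocol and reading it back through the Thrift runtime's in-memory
# transport. The request values are hypothetical.
def _example_roundtrip_list_roles_request():
    request = TListSentryRolesRequest(
        requestorUserName='admin',
        groupName='analysts',
        component='sqoop')
    write_buffer = TTransport.TMemoryBuffer()
    request.write(TBinaryProtocol.TBinaryProtocol(write_buffer))
    read_buffer = TTransport.TMemoryBuffer(write_buffer.getvalue())
    decoded = TListSentryRolesRequest()
    decoded.read(TBinaryProtocol.TBinaryProtocol(read_buffer))
    return decoded == request  # __eq__ compares the structs field by field
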
class TSentryRole(object):
"""
Attributes:
- roleName
- groups
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'roleName', None, None, ), # 1
(2, TType.SET, 'groups', (TType.STRING,None), None, ), # 2
)
def __init__(self, roleName=None, groups=None,):
self.roleName = roleName
self.groups = groups
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.groups = set()
(_etype24, _size21) = iprot.readSetBegin()
for _i25 in xrange(_size21):
_elem26 = iprot.readString()
self.groups.add(_elem26)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TSentryRole')
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 1)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.groups is not None:
oprot.writeFieldBegin('groups', TType.SET, 2)
oprot.writeSetBegin(TType.STRING, len(self.groups))
for iter27 in self.groups:
oprot.writeString(iter27)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.groups is None:
raise TProtocol.TProtocolException(message='Required field groups is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.roleName)
    value = (value * 31) ^ hash(frozenset(self.groups or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TListSentryRolesResponse(object):
"""
Attributes:
- status
- roles
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
(2, TType.SET, 'roles', (TType.STRUCT,(TSentryRole, TSentryRole.thrift_spec)), None, ), # 2
)
def __init__(self, status=None, roles=None,):
self.status = status
self.roles = roles
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.roles = set()
(_etype31, _size28) = iprot.readSetBegin()
for _i32 in xrange(_size28):
_elem33 = TSentryRole()
_elem33.read(iprot)
self.roles.add(_elem33)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryRolesResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
if self.roles is not None:
oprot.writeFieldBegin('roles', TType.SET, 2)
oprot.writeSetBegin(TType.STRUCT, len(self.roles))
for iter34 in self.roles:
iter34.write(oprot)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
    value = (value * 31) ^ hash(frozenset(self.roles or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
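
# Illustrative usage sketch: inspecting a list-roles response. The response is
# assumed to have been returned, already populated, by a Sentry client call.
def _example_groups_by_role(response):
    response.validate()
    # Map each role name to the set of groups the role is granted to.
    groups_by_role = {}
    for role in (response.roles or set()):
        groups_by_role[role.roleName] = set(role.groups or ())
    return groups_by_role
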
class TListSentryPrivilegesRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- roleName
- component
- serviceName
- authorizables
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'roleName', None, None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
(5, TType.STRING, 'serviceName', None, None, ), # 5
(6, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, roleName=None, component=None, serviceName=None, authorizables=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.roleName = roleName
self.component = component
self.serviceName = serviceName
self.authorizables = authorizables
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.roleName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.serviceName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.authorizables = []
(_etype38, _size35) = iprot.readListBegin()
for _i39 in xrange(_size35):
_elem40 = TAuthorizable()
_elem40.read(iprot)
self.authorizables.append(_elem40)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryPrivilegesRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.roleName is not None:
oprot.writeFieldBegin('roleName', TType.STRING, 3)
oprot.writeString(self.roleName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.serviceName is not None:
oprot.writeFieldBegin('serviceName', TType.STRING, 5)
oprot.writeString(self.serviceName)
oprot.writeFieldEnd()
if self.authorizables is not None:
oprot.writeFieldBegin('authorizables', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.authorizables))
for iter41 in self.authorizables:
iter41.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.roleName is None:
raise TProtocol.TProtocolException(message='Required field roleName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.serviceName is None:
raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.roleName)
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(tuple(self.authorizables or ()))  # lists are unhashable
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
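
# Illustrative usage sketch: listing the privileges granted to a role on a
# specific service. The optional authorizables filter is omitted; the names
# used here are hypothetical.
def _example_list_privileges_request():
    request = TListSentryPrivilegesRequest(
        requestorUserName='admin',
        roleName='analyst_role',
        component='sqoop',
        serviceName='sqoop_server1')
    request.validate()
    return request
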
class TListSentryPrivilegesResponse(object):
"""
Attributes:
- status
- privileges
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
(2, TType.SET, 'privileges', (TType.STRUCT,(TSentryPrivilege, TSentryPrivilege.thrift_spec)), None, ), # 2
)
def __init__(self, status=None, privileges=None,):
self.status = status
self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.privileges = set()
(_etype45, _size42) = iprot.readSetBegin()
for _i46 in xrange(_size42):
_elem47 = TSentryPrivilege()
_elem47.read(iprot)
self.privileges.add(_elem47)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryPrivilegesResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.SET, 2)
oprot.writeSetBegin(TType.STRUCT, len(self.privileges))
for iter48 in self.privileges:
iter48.write(oprot)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
    value = (value * 31) ^ hash(frozenset(self.privileges or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TDropPrivilegesRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- privilege
- component
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRUCT, 'privilege', (TSentryPrivilege, TSentryPrivilege.thrift_spec), None, ), # 3
(4, TType.STRING, 'component', None, None, ), # 4
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, privilege=None, component=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.privilege = privilege
self.component = component
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.privilege = TSentryPrivilege()
self.privilege.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TDropPrivilegesRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.privilege is not None:
oprot.writeFieldBegin('privilege', TType.STRUCT, 3)
self.privilege.write(oprot)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 4)
oprot.writeString(self.component)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.privilege is None:
raise TProtocol.TProtocolException(message='Required field privilege is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.privilege)
value = (value * 31) ^ hash(self.component)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TDropPrivilegesResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TDropPrivilegesResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRenamePrivilegesRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- component
- serviceName
- oldAuthorizables
- newAuthorizables
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'component', None, None, ), # 3
(4, TType.STRING, 'serviceName', None, None, ), # 4
(5, TType.LIST, 'oldAuthorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 5
(6, TType.LIST, 'newAuthorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, component=None, serviceName=None, oldAuthorizables=None, newAuthorizables=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.component = component
self.serviceName = serviceName
self.oldAuthorizables = oldAuthorizables
self.newAuthorizables = newAuthorizables
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.serviceName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.oldAuthorizables = []
(_etype52, _size49) = iprot.readListBegin()
for _i53 in xrange(_size49):
_elem54 = TAuthorizable()
_elem54.read(iprot)
self.oldAuthorizables.append(_elem54)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.newAuthorizables = []
(_etype58, _size55) = iprot.readListBegin()
for _i59 in xrange(_size55):
_elem60 = TAuthorizable()
_elem60.read(iprot)
self.newAuthorizables.append(_elem60)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TRenamePrivilegesRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 3)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.serviceName is not None:
oprot.writeFieldBegin('serviceName', TType.STRING, 4)
oprot.writeString(self.serviceName)
oprot.writeFieldEnd()
if self.oldAuthorizables is not None:
oprot.writeFieldBegin('oldAuthorizables', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.oldAuthorizables))
for iter61 in self.oldAuthorizables:
iter61.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.newAuthorizables is not None:
oprot.writeFieldBegin('newAuthorizables', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.newAuthorizables))
for iter62 in self.newAuthorizables:
iter62.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.serviceName is None:
raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
if self.oldAuthorizables is None:
raise TProtocol.TProtocolException(message='Required field oldAuthorizables is unset!')
if self.newAuthorizables is None:
raise TProtocol.TProtocolException(message='Required field newAuthorizables is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(tuple(self.oldAuthorizables or ()))
    value = (value * 31) ^ hash(tuple(self.newAuthorizables or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
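
# Illustrative usage sketch: renaming privileges from one authorizable
# hierarchy to another. TAuthorizable is defined earlier in this module; the
# two lists are taken as parameters because their contents are component
# specific (hypothetical usage).
def _example_rename_privileges_request(old_authorizables, new_authorizables):
    request = TRenamePrivilegesRequest(
        requestorUserName='admin',
        component='sqoop',
        serviceName='sqoop_server1',
        oldAuthorizables=old_authorizables,
        newAuthorizables=new_authorizables)
    request.validate()
    return request
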
class TRenamePrivilegesResponse(object):
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TRenamePrivilegesResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TSentryActiveRoleSet(object):
"""
Attributes:
- all
- roles
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'all', None, None, ), # 1
(2, TType.SET, 'roles', (TType.STRING,None), None, ), # 2
)
def __init__(self, all=None, roles=None,):
self.all = all
self.roles = roles
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.all = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.roles = set()
(_etype66, _size63) = iprot.readSetBegin()
for _i67 in xrange(_size63):
_elem68 = iprot.readString()
self.roles.add(_elem68)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TSentryActiveRoleSet')
if self.all is not None:
oprot.writeFieldBegin('all', TType.BOOL, 1)
oprot.writeBool(self.all)
oprot.writeFieldEnd()
if self.roles is not None:
oprot.writeFieldBegin('roles', TType.SET, 2)
oprot.writeSetBegin(TType.STRING, len(self.roles))
for iter69 in self.roles:
oprot.writeString(iter69)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.all is None:
raise TProtocol.TProtocolException(message='Required field all is unset!')
if self.roles is None:
raise TProtocol.TProtocolException(message='Required field roles is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.all)
    value = (value * 31) ^ hash(frozenset(self.roles or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TListSentryPrivilegesForProviderRequest(object):
"""
Attributes:
- protocol_version
- component
- serviceName
- groups
- roleSet
- authorizables
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'component', None, None, ), # 2
(3, TType.STRING, 'serviceName', None, None, ), # 3
(4, TType.SET, 'groups', (TType.STRING,None), None, ), # 4
(5, TType.STRUCT, 'roleSet', (TSentryActiveRoleSet, TSentryActiveRoleSet.thrift_spec), None, ), # 5
(6, TType.LIST, 'authorizables', (TType.STRUCT,(TAuthorizable, TAuthorizable.thrift_spec)), None, ), # 6
)
def __init__(self, protocol_version=thrift_spec[1][4], component=None, serviceName=None, groups=None, roleSet=None, authorizables=None,):
self.protocol_version = protocol_version
self.component = component
self.serviceName = serviceName
self.groups = groups
self.roleSet = roleSet
self.authorizables = authorizables
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.serviceName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.SET:
self.groups = set()
(_etype73, _size70) = iprot.readSetBegin()
for _i74 in xrange(_size70):
_elem75 = iprot.readString()
self.groups.add(_elem75)
iprot.readSetEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.roleSet = TSentryActiveRoleSet()
self.roleSet.read(iprot)
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.authorizables = []
(_etype79, _size76) = iprot.readListBegin()
for _i80 in xrange(_size76):
_elem81 = TAuthorizable()
_elem81.read(iprot)
self.authorizables.append(_elem81)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryPrivilegesForProviderRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 2)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.serviceName is not None:
oprot.writeFieldBegin('serviceName', TType.STRING, 3)
oprot.writeString(self.serviceName)
oprot.writeFieldEnd()
if self.groups is not None:
oprot.writeFieldBegin('groups', TType.SET, 4)
oprot.writeSetBegin(TType.STRING, len(self.groups))
for iter82 in self.groups:
oprot.writeString(iter82)
oprot.writeSetEnd()
oprot.writeFieldEnd()
if self.roleSet is not None:
oprot.writeFieldBegin('roleSet', TType.STRUCT, 5)
self.roleSet.write(oprot)
oprot.writeFieldEnd()
if self.authorizables is not None:
oprot.writeFieldBegin('authorizables', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.authorizables))
for iter83 in self.authorizables:
iter83.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.serviceName is None:
raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
if self.groups is None:
raise TProtocol.TProtocolException(message='Required field groups is unset!')
if self.roleSet is None:
raise TProtocol.TProtocolException(message='Required field roleSet is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.serviceName)
    value = (value * 31) ^ hash(frozenset(self.groups or ()))
    value = (value * 31) ^ hash(self.roleSet)
    value = (value * 31) ^ hash(tuple(self.authorizables or ()))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
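
# Illustrative usage sketch: asking for the privileges visible to a provider.
# The active role set below enables all roles; the group, component, and
# service names are hypothetical.
def _example_privileges_for_provider_request():
    role_set = TSentryActiveRoleSet(all=True, roles=set())
    request = TListSentryPrivilegesForProviderRequest(
        component='sqoop',
        serviceName='sqoop_server1',
        groups=set(['analysts']),
        roleSet=role_set)
    request.validate()
    return request
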
class TListSentryPrivilegesForProviderResponse(object):
"""
Attributes:
- status
- privileges
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
(2, TType.SET, 'privileges', (TType.STRING,None), None, ), # 2
)
def __init__(self, status=None, privileges=None,):
self.status = status
self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.SET:
self.privileges = set()
(_etype87, _size84) = iprot.readSetBegin()
for _i88 in xrange(_size84):
_elem89 = iprot.readString()
self.privileges.add(_elem89)
iprot.readSetEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryPrivilegesForProviderResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
if self.privileges is not None:
oprot.writeFieldBegin('privileges', TType.SET, 2)
oprot.writeSetBegin(TType.STRING, len(self.privileges))
for iter90 in self.privileges:
oprot.writeString(iter90)
oprot.writeSetEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
if self.privileges is None:
raise TProtocol.TProtocolException(message='Required field privileges is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.privileges)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TSentryPrivilegeMap(object):
"""
Attributes:
- privilegeMap
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'privilegeMap', (TType.STRING,None,TType.SET,(TType.STRUCT,(TSentryPrivilege, TSentryPrivilege.thrift_spec))), None, ), # 1
)
def __init__(self, privilegeMap=None,):
self.privilegeMap = privilegeMap
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.privilegeMap = {}
(_ktype92, _vtype93, _size91 ) = iprot.readMapBegin()
for _i95 in xrange(_size91):
_key96 = iprot.readString()
_val97 = set()
(_etype101, _size98) = iprot.readSetBegin()
for _i102 in xrange(_size98):
_elem103 = TSentryPrivilege()
_elem103.read(iprot)
_val97.add(_elem103)
iprot.readSetEnd()
self.privilegeMap[_key96] = _val97
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TSentryPrivilegeMap')
if self.privilegeMap is not None:
oprot.writeFieldBegin('privilegeMap', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.SET, len(self.privilegeMap))
for kiter104,viter105 in self.privilegeMap.items():
oprot.writeString(kiter104)
oprot.writeSetBegin(TType.STRUCT, len(viter105))
for iter106 in viter105:
iter106.write(oprot)
oprot.writeSetEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.privilegeMap is None:
raise TProtocol.TProtocolException(message='Required field privilegeMap is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.privilegeMap)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TListSentryPrivilegesByAuthRequest(object):
"""
Attributes:
- protocol_version
- requestorUserName
- component
- serviceName
- authorizablesSet
- groups
- roleSet
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 2, ), # 1
(2, TType.STRING, 'requestorUserName', None, None, ), # 2
(3, TType.STRING, 'component', None, None, ), # 3
(4, TType.STRING, 'serviceName', None, None, ), # 4
(5, TType.SET, 'authorizablesSet', (TType.STRING,None), None, ), # 5
(6, TType.SET, 'groups', (TType.STRING,None), None, ), # 6
(7, TType.STRUCT, 'roleSet', (TSentryActiveRoleSet, TSentryActiveRoleSet.thrift_spec), None, ), # 7
)
def __init__(self, protocol_version=thrift_spec[1][4], requestorUserName=None, component=None, serviceName=None, authorizablesSet=None, groups=None, roleSet=None,):
self.protocol_version = protocol_version
self.requestorUserName = requestorUserName
self.component = component
self.serviceName = serviceName
self.authorizablesSet = authorizablesSet
self.groups = groups
self.roleSet = roleSet
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.requestorUserName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.component = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.serviceName = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.SET:
self.authorizablesSet = set()
(_etype110, _size107) = iprot.readSetBegin()
for _i111 in xrange(_size107):
_elem112 = iprot.readString()
self.authorizablesSet.add(_elem112)
iprot.readSetEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.SET:
self.groups = set()
(_etype116, _size113) = iprot.readSetBegin()
for _i117 in xrange(_size113):
_elem118 = iprot.readString()
self.groups.add(_elem118)
iprot.readSetEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.roleSet = TSentryActiveRoleSet()
self.roleSet.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryPrivilegesByAuthRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.requestorUserName is not None:
oprot.writeFieldBegin('requestorUserName', TType.STRING, 2)
oprot.writeString(self.requestorUserName)
oprot.writeFieldEnd()
if self.component is not None:
oprot.writeFieldBegin('component', TType.STRING, 3)
oprot.writeString(self.component)
oprot.writeFieldEnd()
if self.serviceName is not None:
oprot.writeFieldBegin('serviceName', TType.STRING, 4)
oprot.writeString(self.serviceName)
oprot.writeFieldEnd()
if self.authorizablesSet is not None:
oprot.writeFieldBegin('authorizablesSet', TType.SET, 5)
oprot.writeSetBegin(TType.STRING, len(self.authorizablesSet))
for iter119 in self.authorizablesSet:
oprot.writeString(iter119)
oprot.writeSetEnd()
oprot.writeFieldEnd()
if self.groups is not None:
oprot.writeFieldBegin('groups', TType.SET, 6)
oprot.writeSetBegin(TType.STRING, len(self.groups))
for iter120 in self.groups:
oprot.writeString(iter120)
oprot.writeSetEnd()
oprot.writeFieldEnd()
if self.roleSet is not None:
oprot.writeFieldBegin('roleSet', TType.STRUCT, 7)
self.roleSet.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.protocol_version is None:
raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
if self.requestorUserName is None:
raise TProtocol.TProtocolException(message='Required field requestorUserName is unset!')
if self.component is None:
raise TProtocol.TProtocolException(message='Required field component is unset!')
if self.serviceName is None:
raise TProtocol.TProtocolException(message='Required field serviceName is unset!')
if self.authorizablesSet is None:
raise TProtocol.TProtocolException(message='Required field authorizablesSet is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protocol_version)
value = (value * 31) ^ hash(self.requestorUserName)
value = (value * 31) ^ hash(self.component)
value = (value * 31) ^ hash(self.serviceName)
value = (value * 31) ^ hash(self.authorizablesSet)
value = (value * 31) ^ hash(self.groups)
value = (value * 31) ^ hash(self.roleSet)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TListSentryPrivilegesByAuthResponse(object):
"""
Attributes:
- status
- privilegesMapByAuth
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'status', (sentry_common_service.ttypes.TSentryResponseStatus, sentry_common_service.ttypes.TSentryResponseStatus.thrift_spec), None, ), # 1
(2, TType.MAP, 'privilegesMapByAuth', (TType.STRING,None,TType.STRUCT,(TSentryPrivilegeMap, TSentryPrivilegeMap.thrift_spec)), None, ), # 2
)
def __init__(self, status=None, privilegesMapByAuth=None,):
self.status = status
self.privilegesMapByAuth = privilegesMapByAuth
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.status = sentry_common_service.ttypes.TSentryResponseStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.privilegesMapByAuth = {}
(_ktype122, _vtype123, _size121 ) = iprot.readMapBegin()
for _i125 in xrange(_size121):
_key126 = iprot.readString()
_val127 = TSentryPrivilegeMap()
_val127.read(iprot)
self.privilegesMapByAuth[_key126] = _val127
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TListSentryPrivilegesByAuthResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 1)
self.status.write(oprot)
oprot.writeFieldEnd()
if self.privilegesMapByAuth is not None:
oprot.writeFieldBegin('privilegesMapByAuth', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.privilegesMapByAuth))
for kiter128,viter129 in self.privilegesMapByAuth.items():
oprot.writeString(kiter128)
viter129.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.privilegesMapByAuth)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
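# Minimal serialization round-trip sketch for one of the structs above. It
# relies only on the thrift runtime modules this generated file already
# imports (TTransport, TBinaryProtocol); the field values are illustrative
# placeholders, not values taken from a real Sentry deployment.
if __name__ == '__main__':
  request = TListSentryPrivilegesByAuthRequest(
      requestorUserName='example_user',
      component='example_component',
      serviceName='example_service',
      authorizablesSet=set(['db=example_db->table=example_table']),
      groups=set(['example_group']))
  request.validate()
  # Encode with the generated write() method into an in-memory transport.
  write_buffer = TTransport.TMemoryBuffer()
  request.write(TBinaryProtocol.TBinaryProtocol(write_buffer))
  payload = write_buffer.getvalue()
  # Decode into a fresh struct and confirm the round trip preserved the data.
  decoded = TListSentryPrivilegesByAuthRequest()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))
  decoded.validate()
  assert decoded == request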
| {
"content_hash": "6bc0ab04e72d28677e705128a8798db0",
"timestamp": "",
"source": "github",
"line_count": 3055,
"max_line_length": 188,
"avg_line_length": 34.732896890343696,
"alnum_prop": 0.6424525723548427,
"repo_name": "jayceyxc/hue",
"id": "b728d50c16b461165b5fd50bdb3e6be013123761",
"size": "106260",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "desktop/libs/libsentry/gen-py/sentry_generic_policy_service/ttypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2716690"
},
{
"name": "C++",
"bytes": "200268"
},
{
"name": "CSS",
"bytes": "630891"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23982883"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5068327"
},
{
"name": "Lex",
"bytes": "36239"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146292"
},
{
"name": "Mako",
"bytes": "3334641"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45608023"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46700"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "517693"
},
{
"name": "Yacc",
"bytes": "381310"
}
],
"symlink_target": ""
} |
import json
from google.cloud import pubsub_v1
class PubsubApiService(object):
"""This class handles retrieving and verifying pubsub messages"""
project = ''
subscription = ''
messages = None
def __init__(self, credentials):
"""Construct a PubsubApiService instance.
Args:
credentials: A json string containing the project and
pubsub subscription names.
"""
    try:
      credentialsJson = json.loads(credentials)
    except json.JSONDecodeError:
      # Re-raise so a bad credentials string fails loudly here instead of
      # surfacing later as a KeyError on the missing keys.
      print('Decoding PubsubApiService credentials JSON has failed')
      raise
self.project = credentialsJson['project']
self.subscription = credentialsJson['subscription']
def doesEventExist(self, deviceId, eventName):
"""Verifies if a specific message was sent. Lazy loads messages
Args:
deviceId: A GUID device id that made the action.
eventName: A string containing the event name we're looking for
"""
if self.messages is None:
self.loadEvents()
for msg in self.messages:
msdData = msg
if deviceId in msdData and eventName in msdData:
return True
return False
def loadEvents(self):
with pubsub_v1.SubscriberClient() as subscriber:
subscription_path = subscriber.subscription_path(self.project,
self.subscription)
response = subscriber.pull(request={
"subscription": subscription_path,
"max_messages": 500,
})
print('Loaded messages :' + str(len(response.received_messages)))
ack_ids = [msg.ack_id for msg in response.received_messages]
self.messages = [
msg.message.data.decode() for msg in response.received_messages
]
if ack_ids:
subscriber.acknowledge(request={
"subscription": subscription_path,
"ack_ids": ack_ids,
})
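# Minimal usage sketch, assuming Application Default Credentials are available
# for pubsub_v1. The project, subscription, device id and event name below are
# illustrative placeholders rather than values used by the real tests.
if __name__ == '__main__':
  example_credentials = json.dumps({
      'project': 'example-project',
      'subscription': 'example-subscription',
  })
  service = PubsubApiService(example_credentials)
  print(service.doesEventExist('00000000-0000-0000-0000-000000000000',
                               'ExampleEvent'))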
| {
"content_hash": "6853b81e3ea5d918b35bd28828bbcf92",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 73,
"avg_line_length": 31.966101694915253,
"alnum_prop": 0.6484623541887593,
"repo_name": "chromium/chromium",
"id": "639071d37bd63be819616d7e596f9ef7e0cc6e03",
"size": "2049",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "chrome/test/enterprise/e2e/connector/reporting_connector_pubsub/pubsub_api_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import argparse
import zmq
def run(args):
    ctx = zmq.Context()
    dealer = ctx.socket(zmq.DEALER)
    router = ctx.socket(zmq.ROUTER)
    if args.sp == args.cp:
        print 'ERROR: service port must be different from client port'
        return
    dealer.bind('tcp://*:%s' % args.sp)
    router.bind('tcp://*:%s' % args.cp)
    try:
        # zmq.proxy blocks here and shuttles messages between the two sockets
        # until the context is terminated or an error occurs.
        zmq.proxy(router, dealer)
    except:
        print 'zmq.proxy terminated with an exception'
        raise
    return
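# Hedged sketch of the peers this broker expects; the socket types are
# assumptions based on the ROUTER/DEALER pairing, and the ports are just the
# defaults parsed below:
#   client:   sock = ctx.socket(zmq.REQ); sock.connect('tcp://broker-host:5555')
#   service:  sock = ctx.socket(zmq.REP); sock.connect('tcp://broker-host:5556')
# zmq.proxy forwards each client request from the ROUTER to the DEALER and
# routes the reply back to the originating client by its identity frame.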
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-sp', type=str, help='service port', default='5556')
parser.add_argument('-cp', type=str, help='client port', default='5555')
args = parser.parse_args()
run(args) | {
"content_hash": "8196dae93d87577444fd0e2288820615",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 24.310344827586206,
"alnum_prop": 0.5985815602836879,
"repo_name": "disenone/zsync",
"id": "3fdd1d45de5c7a2598baf8ffb5f473648a559cf5",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/zsync_broker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "937"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "Python",
"bytes": "62471"
}
],
"symlink_target": ""
} |