Dataset schema (one record per source file; string min/max are lengths, ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | - | - |
| lang | string (1 class) | - | - |
| max_stars_repo_path | string | 3 | 972 |
| max_stars_repo_name | string | 6 | 130 |
| max_stars_repo_head_hexsha | string | 40 | 78 |
| max_stars_repo_licenses | sequence | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | 24 | 24 |
| max_issues_repo_path | string | 3 | 972 |
| max_issues_repo_name | string | 6 | 130 |
| max_issues_repo_head_hexsha | string | 40 | 78 |
| max_issues_repo_licenses | sequence | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | 24 | 24 |
| max_forks_repo_path | string | 3 | 972 |
| max_forks_repo_name | string | 6 | 130 |
| max_forks_repo_head_hexsha | string | 40 | 78 |
| max_forks_repo_licenses | sequence | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | 24 | 24 |
| content | string | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
hexsha: 48d433d2eef1def8671d48845197a3a897af756f | size: 2,242 | ext: py | lang: Python
path: qiskit/test/mock/backends/boeblingen/fake_boeblingen.py | repo: romainfd/qiskit-terra | head: b5285ccc5cb1d17b7c73402833f2750b93652426 | licenses: ["Apache-2.0"]
stars: 2 (2020-12-26T21:12:30.000Z to 2021-05-18T12:53:42.000Z) | issues: 1 (2020-03-29T19:57:14.000Z to 2020-03-29T21:49:25.000Z) | forks: 1 (2020-07-13T17:56:46.000Z to 2020-07-13T17:56:46.000Z)

# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Fake Boeblingen device (20 qubit).
"""
import os
import json
from qiskit.providers.models import (PulseBackendConfiguration,
BackendProperties, PulseDefaults)
from qiskit.test.mock.fake_backend import FakeBackend
class FakeBoeblingen(FakeBackend):
"""A fake Boeblingen backend."""
def __init__(self):
"""
00 ↔ 01 ↔ 02 ↔ 03 ↔ 04
↕ ↕
05 ↔ 06 ↔ 07 ↔ 08 ↔ 09
↕ ↕ ↕
10 ↔ 11 ↔ 12 ↔ 13 ↔ 14
↕ ↕
15 ↔ 16 ↔ 17 ↔ 18 ↔ 19
"""
dirname = os.path.dirname(__file__)
filename = "conf_boeblingen.json"
with open(os.path.join(dirname, filename), "r") as f_conf:
conf = json.load(f_conf)
configuration = PulseBackendConfiguration.from_dict(conf)
configuration.backend_name = 'fake_boeblingen'
self._defaults = None
self._properties = None
super().__init__(configuration)
def properties(self):
"""Returns a snapshot of device properties"""
dirname = os.path.dirname(__file__)
filename = "props_boeblingen.json"
with open(os.path.join(dirname, filename), "r") as f_prop:
props = json.load(f_prop)
return BackendProperties.from_dict(props)
def defaults(self):
"""Returns a snapshot of device defaults"""
if not self._defaults:
dirname = os.path.dirname(__file__)
filename = "defs_boeblingen.json"
with open(os.path.join(dirname, filename), "r") as f_defs:
defs = json.load(f_defs)
self._defaults = PulseDefaults.from_dict(defs)
return self._defaults
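# A minimal usage sketch (hedged illustration; assumes a qiskit-terra
# checkout where this mock backend and its JSON snapshots are installed):
#
#     >>> backend = FakeBoeblingen()
#     >>> backend.configuration().backend_name
#     'fake_boeblingen'
#     >>> props = backend.properties()   # re-read from props_boeblingen.json
#     >>> defaults = backend.defaults()  # cached after the first call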
avg_line_length: 33.462687 | max_line_length: 77 | alphanum_fraction: 0.610169
hexsha: cd513aa3055be59ab3bd7838923516a9f8ad3f01 | size: 382 | ext: py | lang: Python
path: credentials.sample.py | repo: Lincest/Badminton-Due | head: 09be77c7ce2edba923bab9d39fea4e377377f777 | licenses: ["Apache-2.0"]
stars: 1 (2020-10-14T15:14:16.000Z to 2020-10-14T15:14:16.000Z) | issues: null | forks: null

# After filling this in, rename the file to credentials.py
class Credentials():
def __init__(self):
        self.IDS_USERNAME = ""  # student ID number
        self.IDS_PASSWORD = ""  # one-stop portal (IDS) login password
        self.phone = "13000000000"  # mobile phone number
        self.stu_name_1 = "张三"  # your name
        self.stu_id_1 = self.IDS_USERNAME  # your student ID
        self.stu_name_2 = "18000000000"  # your partner's name
        self.stu_id_2 = "18000000000"  # your partner's student ID
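# A minimal usage sketch (hypothetical consumer script; assumes the file
# has been filled in and renamed to credentials.py as noted above):
#
#     >>> from credentials import Credentials
#     >>> cred = Credentials()
#     >>> form = {"name": cred.stu_name_1, "id": cred.stu_id_1, "phone": cred.phone}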
avg_line_length: 31.833333 | max_line_length: 49 | alphanum_fraction: 0.604712
hexsha: cc23a7d6dd6d6440a0b18f1bbd0fa52f323f4e38 | size: 2,160 | ext: py | lang: Python
path: doc/source/TCP.py | repo: sunrise-cubesat/AIT-DSN | head: 6c600c2a585a373aa9e2290842a4f412469810f4 | licenses: ["MIT"]
stars: null | issues: null | forks: null

AIT SLE User Guide
==================
The TCP forwarding plugin facilitates forwarding pipeline data over TCP.
The plugin can be configured with an arbitrary number of servers or clients for each PUB/SUB topic.
Configuration
^^^^^^^^^^^^^
Customize the template within the config.yaml plugin block:
.. code-block:: none
- plugin:
name: ait.dsn.plugins.TCP.TCP_Manager
inputs:
- PUB_SUB_TOPIC_1
- PUB_SUB_TOPIC_2
subscriptions:
PUB_SUB_TOPIC_1:
Server_Name1:
port: 42401
timeout: 1
mode: TRANSMIT
Server_Name2:
port: 42401
hostname: someserver.xyz
mode: TRANSMIT
PUB_SUB_TOPIC_2_RECEIVE:
Server_Name3:
port: 12345
receive_size_bytes: 1024
mode: RECEIVE
Server_Name4:
port: 12346
host: localhost
receive_size_bytes: 512
mode: RECEIVE
* The value *PUB_SUB_TOPIC_1* corresponds to a PUB/SUB topic that should be subscribed to (i.e. another plugin).
* The value *Server_Name1* is an arbitrary nickname for the connection.
* The *port* option is mandatory for all connections.
* The *hostname* field is optional.
  + When defined, the plugin will attempt to establish a connection to this server (Client Mode).
  + When undefined, the plugin will start its own server from which clients can receive data (Server Mode).
* The *mode* field is mandatory.
+ *mode:TRANSMIT* specifies that the connection will forward data from the PUB/SUB topic to the specified TCP client/server.
+ *mode:RECEIVE* specifies that the connection will forward data from the specified TCP client/server to the specified PUB/SUB topic.
* The *timeout_seconds* field is mandatory and specifies how long a Server Mode connection should wait for a client before giving up and dropping the data.
* *receive_size_bytes* specifies how much data to receive from the Server/Client when operating in RECEIVE mode.
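
As a quick sanity check, the following sketch (plain standard-library Python;
the port matches ``Server_Name1`` above and the buffer size is illustrative)
connects to a Server Mode TRANSMIT port and prints whatever the plugin
forwards:

.. code-block:: python

    import socket

    # Connect to the plugin's Server Mode port and drain forwarded data.
    with socket.create_connection(("localhost", 42401)) as sock:
        while True:
            data = sock.recv(1024)
            if not data:  # the server closed the connection
                break
            print("received %d bytes" % len(data))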
avg_line_length: 39.272727 | max_line_length: 155 | alphanum_fraction: 0.642593
hexsha: b1323d6b4b1ac193f86107fe058b9879206da881 | size: 3,368 | ext: py | lang: Python
path: plugins/hello_world/hello_world_filter.py | repo: Kitware/VAIME | head: 47b24b9d8a208cf8c621e5bb1088c61fcf507af6 | licenses: ["BSD-3-Clause"]
stars: 127 (2019-05-23T10:05:25.000Z to 2022-03-28T05:14:11.000Z) | issues: 39 (2019-06-18T21:44:58.000Z to 2022-01-12T14:47:01.000Z) | forks: 40 (2016-08-23T21:44:17.000Z to 2019-04-20T23:39:53.000Z)

#ckwg +28
# Copyright 2017 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from kwiver.sprokit.processes.kwiver_process import KwiverProcess
from kwiver.sprokit.pipeline import process
from kwiver.vital.types import Image
from kwiver.vital.types import ImageContainer
class hello_world_filter(KwiverProcess):
"""
    This process gets an image as input, prints its configured text, and
    sends the image along (unmodified) to the output port.
"""
# ----------------------------------------------
def __init__(self, conf):
KwiverProcess.__init__(self, conf)
self.add_config_trait("text", "text", 'Hello World',
'Text to display to user.')
self.declare_config_using_trait('text')
self.add_port_trait('out_image', 'image', 'Processed image')
# set up required flags
optional = process.PortFlags()
required = process.PortFlags()
required.add(self.flag_required)
# declare our input port ( port-name,flags)
self.declare_input_port_using_trait('image', required)
self.declare_output_port_using_trait('out_image', optional )
# ----------------------------------------------
def _configure(self):
print( "[DEBUG] ----- configure" )
self.text = self.config_value('text')
self._base_configure()
# ----------------------------------------------
def _step(self):
print( "[DEBUG] ----- start step" )
# grab image container from port using traits
in_img_c = self.grab_input_using_trait('image')
        # Get python image from container (just for show)
in_img = in_img_c.get_image()
# Print out text to screen
print( "Text: " + str( self.text ) )
# push dummy image object (same as input) to output port
self.push_to_port_using_trait('out_image', ImageContainer(in_img))
self._base_step()
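# A minimal pipeline sketch (sprokit ``.pipe`` syntax, shown here as a
# comment; the registered process type name and the upstream ``reader``
# process are assumptions, not taken from this file):
#
#   process hello :: hello_world_filter
#     :text Hello World
#
#   connect from reader.image
#           to   hello.image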
avg_line_length: 40.095238 | max_line_length: 80 | alphanum_fraction: 0.683492
hexsha: 0df0a81ec8b1473fb270de74e66e58e87dabfece | size: 2,519 | ext: py | lang: Python
path: setup.py | repo: RhinosF1/google-auth-library-python | head: c556f6fcc131f496ac3ec2ea50f2f61cefc34b9f | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from setuptools import find_packages
from setuptools import setup
DEPENDENCIES = (
"cachetools>=2.0.0,<5.0",
"pyasn1-modules>=0.2.1",
# rsa==4.5 is the last version to support 2.7
# https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
'rsa<4.6; python_version < "3.6"',
'rsa>=3.1.4,<5; python_version >= "3.6"',
"setuptools>=40.3.0",
"six>=1.9.0",
)
extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"}
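# Example: the aiohttp transport is an opt-in extra declared above and is
# installed with e.g. ``pip install "google-auth[aiohttp]"``.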
with io.open("README.rst", "r") as fh:
long_description = fh.read()
version = "1.26.1"
setup(
name="google-auth",
version=version,
author="Google Cloud Platform",
author_email="googleapis-packages@google.com",
description="Google Authentication Library",
long_description=long_description,
url="https://github.com/googleapis/google-auth-library-python",
packages=find_packages(exclude=("tests*", "system_tests*")),
namespace_packages=("google",),
install_requires=DEPENDENCIES,
extras_require=extras,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
license="Apache 2.0",
keywords="google auth oauth client",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
],
)
avg_line_length: 34.986111 | max_line_length: 82 | alphanum_fraction: 0.650655
hexsha: 8e1c73196f7c76fb73088c3f1a2f86898018cf92 | size: 10,673 | ext: py | lang: Python
path: lvsfunc/util.py | repo: RivenSkaye/lvsfunc | head: 8e284b1a9c0fa670b614bcb7f42cb48144d1a67b | licenses: ["MIT"]
stars: null | issues: null | forks: null

"""
Helper functions for module functions and wrapper.
Some of these may also be useful for regular scripting or other modules.
"""
from __future__ import annotations
from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union
import vapoursynth as vs
from vsutil import (depth, disallow_variable_format,
disallow_variable_resolution, get_depth, get_subsampling)
from .types import Coefs, Range
core = vs.core
@disallow_variable_format
def quick_resample(clip: vs.VideoNode,
function: Callable[[vs.VideoNode], vs.VideoNode]
) -> vs.VideoNode:
"""
A function to quickly resample to 32/16/8 bit and back to the original depth in a one-liner.
Useful for filters that only work in 16 bit or lower when you're working in float.
:param clip: Input clip
:param function: Filter to run after resampling (accepts and returns clip)
:return: Filtered clip in original depth
"""
assert clip.format
try: # Excepts all generic because >plugin/script writers being consistent >_>
dither = depth(clip, 32)
filtered = function(dither)
except: # noqa: E722
try:
dither = depth(clip, 16)
filtered = function(dither)
except: # noqa: E722
dither = depth(clip, 8)
filtered = function(dither)
return depth(filtered, clip.format.bits_per_sample)
@disallow_variable_format
def pick_repair(clip: vs.VideoNode) -> Callable[..., vs.VideoNode]:
"""
Returns rgvs.Repair if the clip is 16 bit or lower, else rgsf.Repair.
This is done because rgvs doesn't work with float, but rgsf does for whatever reason.
Dependencies: rgsf
:param clip: Input clip
:return: Appropriate repair function for input clip's depth
"""
assert clip.format
return core.rgvs.Repair if clip.format.bits_per_sample < 32 else core.rgsf.Repair
@disallow_variable_format
@disallow_variable_resolution
def pick_removegrain(clip: vs.VideoNode) -> Callable[..., vs.VideoNode]:
"""
Returns rgvs.RemoveGrain if the clip is 16 bit or lower, else rgsf.RemoveGrain.
This is done because rgvs doesn't work with float, but rgsf does for whatever reason.
Dependencies:
* RGSF
:param clip: Input clip
:return: Appropriate RemoveGrain function for input clip's depth
"""
assert clip.format
return core.rgvs.RemoveGrain if clip.format.bits_per_sample < 32 else core.rgsf.RemoveGrain
VideoProp = Union[
int, Sequence[int],
float, Sequence[float],
str, Sequence[str],
vs.VideoNode, Sequence[vs.VideoNode],
vs.VideoFrame, Sequence[vs.VideoFrame],
Callable[..., Any], Sequence[Callable[..., Any]]
]
T = TypeVar("T", bound=VideoProp)
def get_prop(frame: vs.VideoFrame, key: str, t: Type[T]) -> T:
"""
Gets FrameProp ``prop`` from frame ``frame`` with expected type ``t``
to satisfy the type checker.
:param frame: Frame containing props
:param key: Prop to get
:param t: Type of prop
:return: frame.prop[key]
"""
try:
prop = frame.props[key]
except KeyError:
raise KeyError(f"get_prop: 'Key {key} not present in props'")
if not isinstance(prop, t):
raise ValueError(f"get_prop: 'Key {key} did not contain expected type: Expected {t} got {type(prop)}'")
return prop
def normalize_ranges(clip: vs.VideoNode, ranges: Range | List[Range]) -> List[Tuple[int, int]]:
"""
Normalize ``Range``\\(s) to a list of inclusive positive integer ranges.
:param clip: Reference clip used for length.
:param ranges: Single ``Range`` or list of ``Range``\\s.
:return: List of inclusive positive ranges.
"""
ranges = ranges if isinstance(ranges, list) else [ranges]
out = []
for r in ranges:
if isinstance(r, tuple):
start, end = r
if start is None:
start = 0
if end is None:
end = clip.num_frames - 1
elif r is None:
start = clip.num_frames - 1
end = clip.num_frames - 1
else:
start = r
end = r
if start < 0:
start = clip.num_frames - 1 + start
if end < 0:
end = clip.num_frames - 1 + end
out.append((start, end))
return out
def replace_ranges(clip_a: vs.VideoNode,
clip_b: vs.VideoNode,
ranges: Range | List[Range] | None) -> vs.VideoNode:
"""
A replacement for ReplaceFramesSimple that uses ints and tuples rather than a string.
Frame ranges are inclusive.
Examples with clips ``black`` and ``white`` of equal length:
* ``replace_ranges(black, white, [(0, 1)])``: replace frames 0 and 1 with ``white``
* ``replace_ranges(black, white, [(None, None)])``: replace the entire clip with ``white``
* ``replace_ranges(black, white, [(0, None)])``: same as previous
* ``replace_ranges(black, white, [(200, None)])``: replace 200 until the end with ``white``
* ``replace_ranges(black, white, [(200, -1)])``: replace 200 until the end with ``white``,
leaving 1 frame of ``black``
:param clip_a: Original clip
:param clip_b: Replacement clip
:param ranges: Ranges to replace clip_a (original clip) with clip_b (replacement clip).
Integer values in the list indicate single frames,
Tuple values indicate inclusive ranges.
Negative integer values will be wrapped around based on clip_b's length.
None values are context dependent:
* None provided as sole value to ranges: no-op
* Single None value in list: Last frame in clip_b
* None as first value of tuple: 0
* None as second value of tuple: Last frame in clip_b
:return: Clip with ranges from clip_a replaced with clip_b
"""
if ranges is None:
return clip_a
out = clip_a
nranges = normalize_ranges(clip_b, ranges)
for start, end in nranges:
tmp = clip_b[start:end + 1]
if start != 0:
tmp = out[: start] + tmp
if end < out.num_frames - 1:
tmp = tmp + out[end + 1:]
out = tmp
return out
def scale_thresh(thresh: float, clip: vs.VideoNode, assume: int | None = None) -> float:
"""
Scale binarization thresholds from float to int.
:param thresh: Threshold [0, 1]. If greater than 1, assumed to be in native clip range
:param clip: Clip to scale to
:param assume: Assume input is this depth when given input >1. If ``None``\\, assume ``clip``\\'s format.
(Default: None)
:return: Threshold scaled to [0, 2^clip.depth - 1] (if vs.INTEGER)
"""
if clip.format is None:
raise ValueError("scale_thresh: 'Variable-format clips not supported'")
if thresh < 0:
raise ValueError("scale_thresh: 'Thresholds must be positive.'")
if thresh > 1:
return thresh if not assume \
else round(thresh/((1 << assume) - 1) * ((1 << clip.format.bits_per_sample) - 1))
return thresh if clip.format.sample_type == vs.FLOAT or thresh > 1 \
else round(thresh * ((1 << clip.format.bits_per_sample) - 1))
def scale_peak(value: float, peak: float) -> float:
"""
Full-range scale function that scales a value from [0, 255] to [0, peak]
"""
return value * peak / 255
def force_mod(x: float, mod: int = 4) -> int:
"""
Force output to fit a specific MOD.
Minimum returned value will always be mod².
"""
return mod ** 2 if x < mod ** 2 else int(x / mod + 0.5) * mod
def clamp_values(x: float, max_val: float, min_val: float) -> float:
"""
Forcibly clamps the given value x to a max and/or min value.
"""
return min_val if x < min_val else max_val if x > max_val else x
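# Hedged sanity examples for the two pure-Python helpers above
# (illustrative values only):
#
#     >>> force_mod(3, mod=4)     # below mod**2, so clamped up to 16
#     16
#     >>> force_mod(1922, mod=4)  # rounded to the nearest multiple of 4
#     1924
#     >>> clamp_values(5.0, max_val=4.0, min_val=0.0)
#     4.0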
@disallow_variable_format
def get_neutral_value(clip: vs.VideoNode, chroma: bool = False) -> float:
"""
Taken from vsutil. This isn't in any new versions yet, so mypy complains.
Will remove once vsutil does another version bump.
Returns the neutral value for the combination
of the plane type and bit depth/type of the clip as float.
:param clip: Input clip.
:param chroma: Whether to get luma or chroma plane value
:return: Neutral value.
"""
assert clip.format
is_float = clip.format.sample_type == vs.FLOAT
return (0. if chroma else 0.5) if is_float else float(1 << (get_depth(clip) - 1))
@disallow_variable_format
@disallow_variable_resolution
def padder(clip: vs.VideoNode,
left: int = 32, right: int = 32,
top: int = 32, bottom: int = 32) -> vs.VideoNode:
"""
Pads out the pixels on the side by the given amount of pixels.
For a 4:2:0 clip, the output must be an even resolution.
:param clip: Input clip
:param left: Padding added to the left side of the clip
:param right: Padding added to the right side of the clip
:param top: Padding added to the top side of the clip
:param bottom: Padding added to the bottom side of the clip
:return: Padded clip
"""
width = clip.width+left+right
height = clip.height+top+bottom
if get_subsampling(clip) == '420' and ((width % 2 != 0) or (height % 2 != 0)):
raise ValueError("padder: 'Values must result in an even resolution when passing a YUV420 clip!'")
scaled = core.resize.Point(clip, width, height,
src_top=-1*top, src_left=-1*left,
src_width=width, src_height=height)
return core.fb.FillBorders(scaled, left=left, right=right, top=top, bottom=bottom)
def get_coefs(curve: vs.TransferCharacteristics) -> Coefs:
srgb = Coefs(0.04045, 12.92, 0.055, 2.4)
bt709 = Coefs(0.08145, 4.5, 0.0993, 2.22222)
smpte240m = Coefs(0.0912, 4.0, 0.1115, 2.22222)
bt2020 = Coefs(0.08145, 4.5, 0.0993, 2.22222)
gamma_linear_map = {
vs.TransferCharacteristics.TRANSFER_IEC_61966_2_1: srgb,
vs.TransferCharacteristics.TRANSFER_BT709: bt709,
vs.TransferCharacteristics.TRANSFER_BT601: bt709,
vs.TransferCharacteristics.TRANSFER_ST240_M: smpte240m,
vs.TransferCharacteristics.TRANSFER_BT2020_10: bt2020,
vs.TransferCharacteristics.TRANSFER_BT2020_12: bt2020
}
return gamma_linear_map[curve]
avg_line_length: 33.990446 | max_line_length: 111 | alphanum_fraction: 0.625597
hexsha: 28397bef3f7fe6bef8a42ce9f589b3990189d6aa | size: 1,795 | ext: py | lang: Python
path: src/backend/web/handlers/tests/conftest.py | repo: guineawheek/ftc-data-take-2 | head: 337bff2077eadb3bd6bbebd153cbb6181c99516f | licenses: ["MIT"]
stars: null | issues: null | forks: null

import pytest
@pytest.fixture
def setup_full_team(test_data_importer) -> None:
test_data_importer.import_team(__file__, "data/frc148.json")
test_data_importer.import_event_list(
__file__, "data/frc148_events_2019.json", "frc148"
)
test_data_importer.import_match_list(__file__, "data/frc148_matches_2019.json")
test_data_importer.import_media_list(
__file__, "data/frc148_media_2019.json", 2019, "frc148"
)
test_data_importer.import_media_list(
__file__, "data/frc148_social_media.json", team_key="frc148"
)
test_data_importer.import_award_list(__file__, "data/frc148_awards_2019.json")
test_data_importer.import_district_list(
__file__, "data/frc148_districts.json", "frc148"
)
test_data_importer.import_robot_list(__file__, "data/frc148_robots.json")
@pytest.fixture
def setup_full_event(test_data_importer):
# So we can import different event keys, return a function
def import_event(event_key) -> None:
test_data_importer.import_event(__file__, f"data/{event_key}.json")
test_data_importer.import_match_list(__file__, f"data/{event_key}_matches.json")
test_data_importer.import_event_alliances(
__file__, f"data/{event_key}_alliances.json", event_key
)
return import_event
@pytest.fixture
def setup_full_match(test_data_importer):
def import_match(match_key) -> None:
event_key = match_key.split("_")[0]
test_data_importer.import_event(__file__, f"data/{event_key}.json")
test_data_importer.import_match(__file__, f"data/{match_key}.json")
return import_match
@pytest.fixture
def setup_full_year_events(test_data_importer) -> None:
test_data_importer.import_event_list(__file__, "data/all_events_2019.json")
avg_line_length: 35.196078 | max_line_length: 88 | alphanum_fraction: 0.748189
hexsha: 002a0b5b44bf504da15814b44221ec339eedf3ba | size: 540 | ext: py | lang: Python
path: venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/raw/GL/EXT/rescale_normal.py | head: a8b8d4c4d2dcc9be28385600f56066cef92a38ad | licenses: ["MIT"]
stars: 1 (temelkirci/Motion_Editor, 2022-03-02T17:07:20.000Z to 2022-03-02T17:07:20.000Z) | issues: null (temelkirci/RealTime_6DOF_Motion_Editor) | forks: null (temelkirci/RealTime_6DOF_Motion_Editor)

'''OpenGL extension EXT.rescale_normal
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_rescale_normal'
_DEPRECATED = False
GL_RESCALE_NORMAL_EXT = constant.Constant( 'GL_RESCALE_NORMAL_EXT', 0x803A )
def glInitRescaleNormalEXT():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
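# A minimal usage sketch (requires a current OpenGL context; glEnable is
# the standard PyOpenGL entry point, and GL_RESCALE_NORMAL_EXT is the
# constant defined above):
#
#     from OpenGL.GL import glEnable
#     if glInitRescaleNormalEXT():
#         glEnable(GL_RESCALE_NORMAL_EXT)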
avg_line_length: 31.764706 | max_line_length: 76 | alphanum_fraction: 0.809259
hexsha: c0a7ff9f8d5c5cd2e35e9c1ee5a982027091e39b | size: 1,316 | ext: py | lang: Python
path: manager.py | repo: flaxandteal/dp-conceptual-search | head: 16c6383a61ba5b7069337c2626a0dc243bfe9d35 | licenses: ["MIT"]
stars: 3 (2018-05-10T16:49:27.000Z to 2022-03-29T15:23:04.000Z) | issues: 2 (2018-09-20T06:37:27.000Z to 2018-11-12T12:05:08.000Z) | forks: 3 (2018-06-25T10:48:43.000Z to 2021-04-11T08:01:27.000Z)

import os
import sys
from subprocess import check_output
from dp_conceptual_search.app.app import create_app
from dp4py_sanic.api.protocol.ons_http_protocol import ONSHttpProtocol
def test():
"""
Launches unit tests
:return:
"""
print(
check_output(['nosetests',
'-v',
'-s',
'unit/',
'--exclude-dir=./unit/integration',
'--exclude-dir=./unit/regression'])
)
def run(app_host: str='0.0.0.0', app_port: int=5000, app_workers: int=1):
"""
    Runs the Sanic API on the given host and port address.
    :param app_host: Host address to bind to
    :param app_port: Port to listen on
:param app_workers: Number of worker threads to use (defaults to 1)
:return:
"""
# Create the app
app = create_app()
# Run the api with our custom HttpProtocol (for more control over access log)
app.run(host=app_host, port=app_port, workers=app_workers, protocol=ONSHttpProtocol)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "test":
test()
else:
host = os.getenv("BIND_HOST", '0.0.0.0')
port = int(os.getenv("BIND_PORT", 5000))
workers = int(os.getenv("SANIC_WORKERS", 1))
run(app_host=host, app_port=port, app_workers=workers)
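# Example invocations (shell commands shown as comments; the environment
# variables default as read above):
#
#   $ python manager.py test                        # run the unit tests via nosetests
#   $ BIND_PORT=8000 SANIC_WORKERS=2 python manager.py   # serve the API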
avg_line_length: 28.608696 | max_line_length: 88 | alphanum_fraction: 0.600304
hexsha: 07977855604a71287d210d29d679498216906ef4 | size: 4,842 | ext: py | lang: Python
path: test/functional/wallet_reorgsrestore.py | repo: fpanettieri/bitcoin | head: 1189b6acab115a7fe7bd67f8b4c6e3f55e53274e | licenses: ["MIT"]
stars: 1 (2021-03-17T08:24:51.000Z to 2021-03-17T08:24:51.000Z) | issues: null | forks: 4 (2018-05-16T09:52:06.000Z to 2018-05-17T07:14:48.000Z)

#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test tx status in case of reorgs while wallet being shutdown.
Wallet txn status rely on block connection/disconnection for its
accuracy. In case of reorgs happening while wallet being shutdown
block updates are not going to be received. At wallet loading, we
check against chain if confirmed txn are still in chain and change
their status if block in which they have been included has been
disconnected.
"""
from decimal import Decimal
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
)
class ReorgsRestoreTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Send a tx from which to conflict outputs later
txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.nodes[0].generate(1)
self.sync_blocks()
# Disconnect node1 from others to reorg its chain later
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
# Send a tx to be unconfirmed later
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
tx = self.nodes[0].gettransaction(txid)
self.nodes[0].generate(4)
tx_before_reorg = self.nodes[0].gettransaction(txid)
assert_equal(tx_before_reorg["confirmations"], 4)
# Disconnect node0 from node2 to broadcast a conflict on their respective chains
disconnect_nodes(self.nodes[0], 2)
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
inputs.append({"txid": txid_conflict_from, "vout": nA})
outputs_1 = {}
outputs_2 = {}
# Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. Both spend from txid_conflict_from
outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")
outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")
conflicted = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_1))
conflicting = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_2))
conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])
self.nodes[0].generate(1)
conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])
self.nodes[2].generate(9)
# Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted
connect_nodes(self.nodes[0], 2)
self.sync_blocks([self.nodes[0], self.nodes[2]])
conflicted = self.nodes[0].gettransaction(conflicted_txid)
conflicting = self.nodes[0].gettransaction(conflicting_txid)
assert_equal(conflicted["confirmations"], -9)
assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])
# Node0 wallet is shutdown
self.stop_node(0)
self.start_node(0)
# The block chain re-orgs and the tx is included in a different block
self.nodes[1].generate(9)
self.nodes[1].sendrawtransaction(tx["hex"])
self.nodes[1].generate(1)
self.nodes[1].sendrawtransaction(conflicted["hex"])
self.nodes[1].generate(1)
# Node0 wallet file is loaded on longest sync'ed node1
self.stop_node(1)
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat'))
self.start_node(1)
tx_after_reorg = self.nodes[1].gettransaction(txid)
# Check that normal confirmed tx is confirmed again but with different blockhash
assert_equal(tx_after_reorg["confirmations"], 2)
        assert tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"]
conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid)
# Check that conflicted tx is confirmed again with blockhash different than previously conflicting tx
assert_equal(conflicted_after_reorg["confirmations"], 1)
        assert conflicting["blockhash"] != conflicted_after_reorg["blockhash"]
if __name__ == '__main__':
ReorgsRestoreTest().main()
avg_line_length: 45.252336 | max_line_length: 144 | alphanum_fraction: 0.699298
hexsha: 0004e3b4b968d2fd2baad670d2cd74ed2433f445 | size: 283,509 | ext: py | lang: Python
path: src/sage/combinat/partition.py | licenses: ["BSL-1.0"]
stars: null (swewers/mein_sage @ 0e4e2d14aab0a1a2e63292939a9baa997f0e986b) | issues: 1 (swewers/mein_sage @ 0e4e2d14aab0a1a2e63292939a9baa997f0e986b, 2020-04-18T16:30:43.000Z to 2020-04-18T16:30:43.000Z) | forks: null (dimpase/sage @ 468f23815ade42a2192b0a9cd378de8fdc594dcd)

# -*- coding: utf-8 -*-
r"""
Integer partitions
A partition `p` of a nonnegative integer `n` is a
non-increasing list of positive integers (the *parts* of the
partition) with total sum `n`.
A partition can be depicted by a diagram made of rows of cells,
where the number of cells in the `i^{th}` row starting from
the top is the `i^{th}` part of the partition.
The coordinate system related to a partition applies from the top
to the bottom and from left to right. So, the corners of the
partition `[5, 3, 1]` are `[[0,4], [1,2], [2,0]]`.
For display options, see :obj:`Partitions.options`.
.. NOTE::
- Boxes is a synonym for cells. All methods will use 'cell' and 'cells'
instead of 'box' and 'boxes'.
- Partitions are 0 based with coordinates in the form of (row-index,
column-index).
- If given coordinates of the form ``(r, c)``, then use Python's
\*-operator.
- Throughout this documentation, for a partition `\lambda` we will denote
its conjugate partition by `\lambda^{\prime}`. For more on conjugate
partitions, see :meth:`Partition.conjugate()`.
- The comparisons on partitions use lexicographic order.
.. NOTE::
We use the convention that lexicographic ordering is read from
left-to-right. That is to say `[1, 3, 7]` is smaller than `[2, 3, 4]`.
AUTHORS:
- Mike Hansen (2007): initial version
- Dan Drake (2009-03-28): deprecate RestrictedPartitions and implement
Partitions_parts_in
- Travis Scrimshaw (2012-01-12): Implemented latex function to Partition_class
- Travis Scrimshaw (2012-05-09): Fixed Partitions(-1).list() infinite recursion
loop by saying Partitions_n is the empty set.
- Travis Scrimshaw (2012-05-11): Fixed bug in inner where if the length was
longer than the length of the inner partition, it would include 0's.
- Andrew Mathas (2012-06-01): Removed deprecated functions and added
compatibility with the PartitionTuple classes. See :trac:`13072`
- Travis Scrimshaw (2012-10-12): Added options. Made
``Partition_class`` to the element ``Partition``. ``Partitions*`` are now
all in the category framework except ``PartitionsRestricted`` (which will
eventually be removed). Cleaned up documentation.
- Matthew Lancellotti (2018-09-14): Added a bunch of "k" methods to Partition.
EXAMPLES:
There are `5` partitions of the integer `4`::
sage: Partitions(4).cardinality()
5
sage: Partitions(4).list()
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
We can use the method ``.first()`` to get the 'first' partition of a
number::
sage: Partitions(4).first()
[4]
Using the method ``.next(p)``, we can calculate the 'next' partition
after `p`. When we are at the last partition, ``None`` will be returned::
sage: Partitions(4).next([4])
[3, 1]
sage: Partitions(4).next([1,1,1,1]) is None
True
We can use ``iter`` to get an object which iterates over the partitions
one by one to save memory. Note that when we do something like
``for part in Partitions(4)`` this iterator is used in the background::
sage: g = iter(Partitions(4))
sage: next(g)
[4]
sage: next(g)
[3, 1]
sage: next(g)
[2, 2]
sage: for p in Partitions(4): print(p)
[4]
[3, 1]
[2, 2]
[2, 1, 1]
[1, 1, 1, 1]
We can add constraints to the type of partitions we want. For
example, to get all of the partitions of `4` of length `2`, we'd do the
following::
sage: Partitions(4, length=2).list()
[[3, 1], [2, 2]]
Here is the list of partitions of length at least `2` and the list of
ones with length at most `2`::
sage: Partitions(4, min_length=2).list()
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: Partitions(4, max_length=2).list()
[[4], [3, 1], [2, 2]]
The options ``min_part`` and ``max_part`` can be used to set constraints
on the sizes of all parts. Using ``max_part``, we can select
partitions having only 'small' entries. The following is the list
of the partitions of `4` with parts at most `2`::
sage: Partitions(4, max_part=2).list()
[[2, 2], [2, 1, 1], [1, 1, 1, 1]]
The ``min_part`` options is complementary to ``max_part`` and selects
partitions having only 'large' parts. Here is the list of all
partitions of `4` with each part at least `2`::
sage: Partitions(4, min_part=2).list()
[[4], [2, 2]]
The options ``inner`` and ``outer`` can be used to set part-by-part
constraints. This is the list of partitions of `4` with ``[3, 1, 1]`` as
an outer bound (that is, partitions of `4` contained in the partition
``[3, 1, 1]``)::
sage: Partitions(4, outer=[3,1,1]).list()
[[3, 1], [2, 1, 1]]
``outer`` sets ``max_length`` to the length of its argument. Moreover, the
parts of ``outer`` may be infinite to clear constraints on specific
parts. Here is the list of the partitions of `4` of length at most `3`
such that the second and third part are `1` when they exist::
sage: Partitions(4, outer=[oo,1,1]).list()
[[4], [3, 1], [2, 1, 1]]
Finally, here are the partitions of `4` with ``[1,1,1]`` as an inner
bound (i. e., the partitions of `4` containing the partition ``[1,1,1]``).
Note that ``inner`` sets ``min_length`` to the length of its argument::
sage: Partitions(4, inner=[1,1,1]).list()
[[2, 1, 1], [1, 1, 1, 1]]
The options ``min_slope`` and ``max_slope`` can be used to set
constraints on the slope, that is on the difference ``p[i+1]-p[i]`` of
two consecutive parts. Here is the list of the strictly decreasing
partitions of `4`::
sage: Partitions(4, max_slope=-1).list()
[[4], [3, 1]]
The constraints can be combined together in all reasonable ways.
Here are all the partitions of `11` of length between `2` and `4` such
that the difference between two consecutive parts is between `-3` and
`-1`::
sage: Partitions(11,min_slope=-3,max_slope=-1,min_length=2,max_length=4).list()
[[7, 4], [6, 5], [6, 4, 1], [6, 3, 2], [5, 4, 2], [5, 3, 2, 1]]
Partition objects can also be created individually with :class:`Partition`::
sage: Partition([2,1])
[2, 1]
Once we have a partition object, then there are a variety of
methods that we can use. For example, we can get the conjugate of a
partition. Geometrically, the conjugate of a partition is the
reflection of that partition through its main diagonal. Of course,
this operation is an involution::
sage: Partition([4,1]).conjugate()
[2, 1, 1, 1]
sage: Partition([4,1]).conjugate().conjugate()
[4, 1]
If we create a partition with extra zeros at the end, they will be dropped::
sage: Partition([4,1,0,0])
[4, 1]
sage: Partition([0])
[]
sage: Partition([0,0])
[]
The idea of a partition being followed by infinitely many parts of size
`0` is consistent with the ``get_part`` method::
sage: p = Partition([5, 2])
sage: p.get_part(0)
5
sage: p.get_part(10)
0
We can go back and forth between the standard and the exponential
notations of a partition. The exponential notation can be padded with
extra zeros::
sage: Partition([6,4,4,2,1]).to_exp()
[1, 1, 0, 2, 0, 1]
sage: Partition(exp=[1,1,0,2,0,1])
[6, 4, 4, 2, 1]
sage: Partition([6,4,4,2,1]).to_exp(5)
[1, 1, 0, 2, 0, 1]
sage: Partition([6,4,4,2,1]).to_exp(7)
[1, 1, 0, 2, 0, 1, 0]
sage: Partition([6,4,4,2,1]).to_exp(10)
[1, 1, 0, 2, 0, 1, 0, 0, 0, 0]
We can get the (zero-based!) coordinates of the corners of a
partition::
sage: Partition([4,3,1]).corners()
[(0, 3), (1, 2), (2, 0)]
We can compute the core and quotient of a partition and build
the partition back up from them::
sage: Partition([6,3,2,2]).core(3)
[2, 1, 1]
sage: Partition([7,7,5,3,3,3,1]).quotient(3)
([2], [1], [2, 2, 2])
sage: p = Partition([11,5,5,3,2,2,2])
sage: p.core(3)
[]
sage: p.quotient(3)
([2, 1], [4], [1, 1, 1])
sage: Partition(core=[],quotient=([2, 1], [4], [1, 1, 1]))
[11, 5, 5, 3, 2, 2, 2]
We can compute the `0-1` sequence and go back and forth::
sage: Partitions().from_zero_one([1, 1, 1, 1, 0, 1, 0])
[5, 4]
sage: all(Partitions().from_zero_one(mu.zero_one_sequence())
....: == mu for n in range(5) for mu in Partitions(n))
True
We can compute the Frobenius coordinates and go back and forth::
sage: Partition([7,3,1]).frobenius_coordinates()
([6, 1], [2, 0])
sage: Partition(frobenius_coordinates=([6,1],[2,0]))
[7, 3, 1]
sage: all(mu == Partition(frobenius_coordinates=mu.frobenius_coordinates())
....: for n in range(12) for mu in Partitions(n))
True
We use the lexicographic ordering::
sage: pl = Partition([4,1,1])
sage: ql = Partitions()([3,3])
sage: pl > ql
True
sage: PL = Partitions()
sage: pl = PL([4,1,1])
sage: ql = PL([3,3])
sage: pl > ql
True
"""
# ****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function, absolute_import
from copy import copy
from sage.libs.all import pari
from sage.libs.flint.arith import number_of_partitions as flint_number_of_partitions
from sage.arith.misc import multinomial
from sage.structure.global_options import GlobalOptions
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.symbolic.ring import var
from sage.misc.lazy_import import lazy_import
lazy_import('sage.combinat.skew_partition', 'SkewPartition')
lazy_import('sage.combinat.partition_tuple', 'PartitionTuple')
from sage.misc.all import prod
from sage.misc.prandom import randrange
from sage.misc.cachefunc import cached_method, cached_function
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.sets.non_negative_integers import NonNegativeIntegers
from sage.rings.all import QQ, ZZ, NN, IntegerModRing
from sage.arith.all import factorial, gcd
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
from .combinat import CombinatorialElement
from . import tableau
from . import permutation
from . import composition
from sage.combinat.partitions import ZS1_iterator, ZS1_iterator_nk
from sage.combinat.integer_vector import IntegerVectors
from sage.combinat.integer_lists import IntegerListsLex
from sage.combinat.combinat_cython import conjugate
from sage.combinat.root_system.weyl_group import WeylGroup
from sage.combinat.combinatorial_map import combinatorial_map
from sage.groups.perm_gps.permgroup import PermutationGroup
from sage.graphs.dot2tex_utils import have_dot2tex
from sage.functions.other import binomial
class Partition(CombinatorialElement):
r"""
A partition `p` of a nonnegative integer `n` is a
non-increasing list of positive integers (the *parts* of the
partition) with total sum `n`.
A partition is often represented as a diagram consisting of **cells**,
or **boxes**, placed in rows on top of each other such that the number of
cells in the `i^{th}` row, reading from top to bottom, is the `i^{th}`
part of the partition. The rows are left-justified (and become shorter
and shorter the farther down one goes). This diagram is called the
**Young diagram** of the partition, or more precisely its Young diagram
in English notation. (French and Russian notations are variations on this
representation.)
The coordinate system related to a partition applies from the top
to the bottom and from left to right. So, the corners of the
partition ``[5, 3, 1]`` are ``[[0,4], [1,2], [2,0]]``.
For display options, see :meth:`Partitions.options`.
.. NOTE::
Partitions are 0 based with coordinates in the form of (row-index,
column-index). For example consider the partition
``mu=Partition([4,3,2,2])``, the first part is ``mu[0]`` (which is 4),
the second is ``mu[1]``, and so on, and the upper-left cell in English
convention is ``(0, 0)``.
A partition can be specified in one of the following ways:
- a list (the default)
- using exponential notation
- by Frobenius coordinates
- specifying its `0-1` sequence
- specifying the core and the quotient
See the examples below.
EXAMPLES:
Creating partitions though parents::
sage: mu = Partitions(8)([3,2,1,1,1]); mu
[3, 2, 1, 1, 1]
sage: nu = Partition([3,2,1,1,1]); nu
[3, 2, 1, 1, 1]
sage: mu == nu
True
sage: mu is nu
False
sage: mu in Partitions()
True
sage: mu.parent()
Partitions of the integer 8
sage: mu.size()
8
sage: mu.category()
Category of elements of Partitions of the integer 8
sage: nu.parent()
Partitions
sage: nu.category()
Category of elements of Partitions
sage: mu[0]
3
sage: mu[1]
2
sage: mu[2]
1
sage: mu.pp()
***
**
*
*
*
sage: mu.removable_cells()
[(0, 2), (1, 1), (4, 0)]
sage: mu.down_list()
[[2, 2, 1, 1, 1], [3, 1, 1, 1, 1], [3, 2, 1, 1]]
sage: mu.addable_cells()
[(0, 3), (1, 2), (2, 1), (5, 0)]
sage: mu.up_list()
[[4, 2, 1, 1, 1], [3, 3, 1, 1, 1], [3, 2, 2, 1, 1], [3, 2, 1, 1, 1, 1]]
sage: mu.conjugate()
[5, 2, 1]
sage: mu.dominates(nu)
True
sage: nu.dominates(mu)
True
Creating partitions using ``Partition``::
sage: Partition([3,2,1])
[3, 2, 1]
sage: Partition(exp=[2,1,1])
[3, 2, 1, 1]
sage: Partition(core=[2,1], quotient=[[2,1],[3],[1,1,1]])
[11, 5, 5, 3, 2, 2, 2]
sage: Partition(frobenius_coordinates=([3,2],[4,0]))
[4, 4, 1, 1, 1]
sage: Partitions().from_zero_one([1, 1, 1, 1, 0, 1, 0])
[5, 4]
sage: [2,1] in Partitions()
True
sage: [2,1,0] in Partitions()
True
sage: Partition([1,2,3])
Traceback (most recent call last):
...
ValueError: [1, 2, 3] is not an element of Partitions
Sage ignores trailing zeros at the end of partitions::
sage: Partition([3,2,1,0])
[3, 2, 1]
sage: Partitions()([3,2,1,0])
[3, 2, 1]
sage: Partitions(6)([3,2,1,0])
[3, 2, 1]
TESTS:
Check that only trailing zeros are stripped::
sage: TestSuite( Partition([]) ).run()
sage: TestSuite( Partition([4,3,2,2,2,1]) ).run()
sage: Partition([3,2,2,2,1,0,0,0])
[3, 2, 2, 2, 1]
sage: Partition([3,0,2,2,2,1,0])
Traceback (most recent call last):
...
ValueError: [3, 0, 2, 2, 2, 1, 0] is not an element of Partitions
sage: Partition([0,7,3])
Traceback (most recent call last):
...
ValueError: [0, 7, 3] is not an element of Partitions
"""
@staticmethod
def __classcall_private__(cls, mu=None, **keyword):
"""
This constructs a list from optional arguments and delegates the
construction of a :class:`Partition` to the ``element_class()`` call
of the appropriate parent.
EXAMPLES::
sage: Partition([3,2,1])
[3, 2, 1]
sage: Partition(exp=[2,1,1])
[3, 2, 1, 1]
sage: Partition(core=[2,1], quotient=[[2,1],[3],[1,1,1]])
[11, 5, 5, 3, 2, 2, 2]
"""
l = len(keyword)
if l == 0:
if mu is not None:
if isinstance(mu, Partition):
return mu
return _Partitions(list(mu))
if l == 1:
if 'beta_numbers' in keyword:
return _Partitions.from_beta_numbers(keyword['beta_numbers'])
elif 'exp' in keyword:
return _Partitions.from_exp(keyword['exp'])
elif 'frobenius_coordinates' in keyword:
return _Partitions.from_frobenius_coordinates(keyword['frobenius_coordinates'])
elif 'zero_one' in keyword:
return _Partitions.from_zero_one(keyword['zero_one'])
if l == 2 and 'core' in keyword and 'quotient' in keyword:
return _Partitions.from_core_and_quotient(keyword['core'], keyword['quotient'])
raise ValueError('incorrect syntax for Partition()')
def __setstate__(self, state):
r"""
In order to maintain backwards compatibility and be able to unpickle a
old pickle from ``Partition_class`` we have to override the default
``__setstate__``.
EXAMPLES::
sage: loads(b'x\x9ck`J.NLO\xd5K\xce\xcfM\xca\xccK,\xd1+H,*\xc9,\xc9\xcc\xcf\xe3\n\x80\xb1\xe2\x93s\x12\x8b\x8b\xb9\n\x195\x1b\x0b\x99j\x0b\x995BY\xe33\x12\x8b3\nY\xfc\x80\xac\x9c\xcc\xe2\x92B\xd6\xd8B6\r\x88IE\x99y\xe9\xc5z\x99y%\xa9\xe9\xa9E\\\xb9\x89\xd9\xa9\xf10N!{(\xa3qkP!G\x06\x90a\x04dp\x82\x18\x86@\x06Wji\x92\x1e\x00x0.\xb5')
[3, 2, 1]
sage: loads(dumps( Partition([3,2,1]) )) # indirect doctest
[3, 2, 1]
"""
if isinstance(state, dict): # for old pickles from Partition_class
self._set_parent(_Partitions)
self.__dict__ = state
else:
self._set_parent(state[0])
self.__dict__ = state[1]
def __init__(self, parent, mu):
"""
Initialize ``self``.
We assume that ``mu`` is a weakly decreasing list of
non-negative elements in ``ZZ``.
EXAMPLES::
sage: p = Partition([3,1])
sage: TestSuite(p).run()
TESTS:
Fix that tuples raise the correct error::
sage: Partition((3,1,7))
Traceback (most recent call last):
...
ValueError: [3, 1, 7] is not an element of Partitions
"""
if isinstance(mu, Partition):
# since we are (suppose to be) immutable, we can share the underlying data
CombinatorialElement.__init__(self, parent, mu._list)
else:
if mu and not mu[-1]:
# direct callers might assume that mu is not modified
mu = mu[:-1]
while mu and not mu[-1]:
mu.pop()
CombinatorialElement.__init__(self, parent, mu)
@cached_method
def __hash__(self):
r"""
Return the hash of ``self``.
TESTS::
sage: P = Partition([4,2,2,1])
sage: hash(P) == hash(P)
True
"""
return hash(tuple(self._list))
def _repr_(self):
r"""
Return a string representation of ``self`` depending on
:meth:`Partitions.options`.
EXAMPLES::
sage: mu=Partition([7,7,7,3,3,2,1,1,1,1,1,1,1]); mu # indirect doctest
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
sage: Partitions.options.display="diagram"; mu
*******
*******
*******
***
***
**
*
*
*
*
*
*
*
sage: Partitions.options.display="list"; mu
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
sage: Partitions.options.display="compact_low"; mu
1^7,2,3^2,7^3
sage: Partitions.options.display="compact_high"; mu
7^3,3^2,2,1^7
sage: Partitions.options.display="exp_low"; mu
1^7, 2, 3^2, 7^3
sage: Partitions.options.display="exp_high"; mu
7^3, 3^2, 2, 1^7
sage: Partitions.options.convention="French"
sage: mu = Partition([7,7,7,3,3,2,1,1,1,1,1,1,1]); mu # indirect doctest
7^3, 3^2, 2, 1^7
sage: Partitions.options.display="diagram"; mu
*
*
*
*
*
*
*
**
***
***
*******
*******
*******
sage: Partitions.options.display="list"; mu
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
sage: Partitions.options.display="compact_low"; mu
1^7,2,3^2,7^3
sage: Partitions.options.display="compact_high"; mu
7^3,3^2,2,1^7
sage: Partitions.options.display="exp_low"; mu
1^7, 2, 3^2, 7^3
sage: Partitions.options.display="exp_high"; mu
7^3, 3^2, 2, 1^7
sage: Partitions.options._reset()
"""
return self.parent().options._dispatch(self, '_repr_', 'display')
def _ascii_art_(self):
"""
TESTS::
sage: ascii_art(Partitions(5).list())
[ * ]
[ ** * ]
[ *** ** * * ]
[ **** *** * ** * * ]
[ *****, * , ** , * , * , * , * ]
"""
from sage.typeset.ascii_art import AsciiArt
return AsciiArt(self._repr_diagram().splitlines(), baseline=0)
def _unicode_art_(self):
"""
TESTS::
sage: unicode_art(Partitions(5).list())
⎡ ┌┐ ⎤
⎢ ┌┬┐ ├┤ ⎥
⎢ ┌┬┬┐ ┌┬┐ ├┼┘ ├┤ ⎥
⎢ ┌┬┬┬┐ ┌┬┬┐ ├┼┴┘ ├┼┤ ├┤ ├┤ ⎥
⎢ ┌┬┬┬┬┐ ├┼┴┴┘ ├┼┼┘ ├┤ ├┼┘ ├┤ ├┤ ⎥
⎣ └┴┴┴┴┘, └┘ , └┴┘ , └┘ , └┘ , └┘ , └┘ ⎦
sage: Partitions.options.convention = "French"
sage: unicode_art(Partitions(5).list())
⎡ ┌┐ ⎤
⎢ ┌┐ ├┤ ⎥
⎢ ┌┐ ┌┐ ├┤ ├┤ ⎥
⎢ ┌┐ ┌┬┐ ├┤ ├┼┐ ├┤ ├┤ ⎥
⎢ ┌┬┬┬┬┐ ├┼┬┬┐ ├┼┼┐ ├┼┬┐ ├┼┤ ├┼┐ ├┤ ⎥
⎣ └┴┴┴┴┘, └┴┴┴┘, └┴┴┘, └┴┴┘, └┴┘, └┴┘, └┘ ⎦
sage: Partitions.options._reset()
"""
from sage.typeset.unicode_art import UnicodeArt
if not self._list:
return UnicodeArt(u'∅', baseline=0)
if self.parent().options.convention == "English":
data = list(self)
else:
data = list(reversed(self))
txt = [u'┌' + u'┬' * (data[0] - 1) + u'┐']
for i in range(len(data) - 1):
p = data[i]
q = data[i + 1]
if p < q:
txt += [u'├' + u'┼' * p + u'┬' * (q - p - 1) + u'┐']
elif p == q:
txt += [u'├' + u'┼' * (p - 1) + u'┤']
else:
txt += [u'├' + u'┼' * q + u'┴' * (p - q - 1) + u'┘']
txt += [u'└' + u'┴' * (data[-1] - 1) + u'┘']
return UnicodeArt(txt, baseline=0)
def _repr_list(self):
"""
Return a string representation of ``self`` as a list.
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_list())
[7, 7, 7, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1]
"""
return '[%s]' % ', '.join('%s' % m for m in self)
def _repr_exp_low(self):
"""
Return a string representation of ``self`` in exponential form (lowest
first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_exp_low())
1^7, 2, 3^2, 7^3
sage: print(Partition([])._repr_exp_low())
-
"""
if not self._list:
return '-'
exp = self.to_exp()
return '%s' % ', '.join('%s%s' % (m+1, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e > 0)
def _repr_exp_high(self):
"""
Return a string representation of ``self`` in exponential form (highest
first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_exp_high())
7^3, 3^2, 2, 1^7
sage: print(Partition([])._repr_exp_high())
-
"""
if not self._list:
return '-'
exp = self.to_exp()[::-1] # reversed list of exponents
M=max(self)
return '%s' % ', '.join('%s%s' % (M-m, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e>0)
def _repr_compact_low(self):
"""
Return a string representation of ``self`` in compact form (exponential
form with lowest first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_compact_low())
1^7,2,3^2,7^3
sage: print(Partition([])._repr_compact_low())
-
"""
if not self._list:
return '-'
exp = self.to_exp()
return '%s' % ','.join('%s%s' % (m+1, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e > 0)
def _repr_compact_high(self):
"""
Return a string representation of ``self`` in compact form (exponential
form with highest first).
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_compact_high())
7^3,3^2,2,1^7
sage: print(Partition([])._repr_compact_low())
-
"""
if not self._list:
return '-'
exp = self.to_exp()[::-1] # reversed list of exponents
M=max(self)
return '%s' % ','.join('%s%s' % (M-m, '' if e==1 else '^%s'%e)
for (m,e) in enumerate(exp) if e>0)
def _repr_diagram(self):
r"""
Return a representation of ``self`` as a Ferrers diagram.
EXAMPLES::
sage: print(Partition([7,7,7,3,3,2,1,1,1,1,1,1,1])._repr_diagram())
*******
*******
*******
***
***
**
*
*
*
*
*
*
*
"""
return self.ferrers_diagram()
def level(self):
"""
Return the level of ``self``, which is always 1.
This method exists only for compatibility with
:class:`PartitionTuples`.
EXAMPLES::
sage: Partition([4,3,2]).level()
1
"""
return 1
def components(self):
"""
Return a list containing the shape of ``self``.
This method exists only for compatibility with
:class:`PartitionTuples`.
EXAMPLES::
sage: Partition([3,2]).components()
[[3, 2]]
"""
return [ self ]
def _latex_(self):
r"""
Return a LaTeX version of ``self``.
For more on the latex options, see :meth:`Partitions.options`.
EXAMPLES::
sage: mu = Partition([2, 1])
sage: Partitions.options.latex='diagram'; latex(mu) # indirect doctest
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\\
\lr{\ast}&\lr{\ast}\\
\lr{\ast}\\
\end{array}$}
}
sage: Partitions.options.latex='exp_high'; latex(mu) # indirect doctest
2,1
sage: Partitions.options.latex='exp_low'; latex(mu) # indirect doctest
1,2
sage: Partitions.options.latex='list'; latex(mu) # indirect doctest
[2, 1]
sage: Partitions.options.latex='young_diagram'; latex(mu) # indirect doctest
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{\phantom{x}}&\lr{\phantom{x}}\\\cline{1-2}
\lr{\phantom{x}}\\\cline{1-1}
\end{array}$}
}
sage: Partitions.options(latex="young_diagram", convention="french")
sage: Partitions.options.latex='exp_high'; latex(mu) # indirect doctest
2,1
sage: Partitions.options.latex='exp_low'; latex(mu) # indirect doctest
1,2
sage: Partitions.options.latex='list'; latex(mu) # indirect doctest
[2, 1]
sage: Partitions.options.latex='young_diagram'; latex(mu) # indirect doctest
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{2}c}\cline{1-1}
\lr{\phantom{x}}\\\cline{1-2}
\lr{\phantom{x}}&\lr{\phantom{x}}\\\cline{1-2}
\end{array}$}
}
sage: Partitions.options._reset()
"""
return self.parent().options._dispatch(self, '_latex_', 'latex')
def _latex_young_diagram(self):
r"""
LaTeX output as a Young diagram.
EXAMPLES::
sage: print(Partition([2, 1])._latex_young_diagram())
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\cline{1-2}
\lr{\phantom{x}}&\lr{\phantom{x}}\\\cline{1-2}
\lr{\phantom{x}}\\\cline{1-1}
\end{array}$}
}
sage: print(Partition([])._latex_young_diagram())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
from sage.combinat.output import tex_from_array
return tex_from_array([ ["\\phantom{x}"]*row_size for row_size in self._list ])
def _latex_diagram(self):
r"""
        LaTeX output as a Ferrers diagram.
EXAMPLES::
sage: print(Partition([2, 1])._latex_diagram())
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{2}c}\\
\lr{\ast}&\lr{\ast}\\
\lr{\ast}\\
\end{array}$}
}
sage: print(Partition([])._latex_diagram())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
entry = self.parent().options("latex_diagram_str")
from sage.combinat.output import tex_from_array
return tex_from_array([ [entry]*row_size for row_size in self._list ], False)
def _latex_list(self):
r"""
LaTeX output as a list.
EXAMPLES::
sage: print(Partition([2, 1])._latex_list())
[2, 1]
sage: print(Partition([])._latex_list())
[]
"""
return repr(self._list)
def _latex_exp_low(self):
r"""
LaTeX output in exponential notation (lowest first).
EXAMPLES::
sage: print(Partition([2,2,1])._latex_exp_low())
1,2^{2}
sage: print(Partition([])._latex_exp_low())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
exp = self.to_exp()
return '%s' % ','.join('%s%s' % (m+1, '' if e==1 else '^{%s}'%e)
for (m,e) in enumerate(exp) if e > 0)
def _latex_exp_high(self):
r"""
LaTeX output in exponential notation (highest first).
EXAMPLES::
sage: print(Partition([2,2,1])._latex_exp_high())
2^{2},1
sage: print(Partition([])._latex_exp_high())
{\emptyset}
"""
if not self._list:
return "{\\emptyset}"
exp = self.to_exp()[::-1] # reversed list of exponents
M = max(self)
return '%s' % ','.join('%s%s' % (M-m, '' if e==1 else '^{%s}'%e)
for (m,e) in enumerate(exp) if e>0)
def ferrers_diagram(self):
r"""
Return the Ferrers diagram of ``self``.
EXAMPLES::
sage: mu = Partition([5,5,2,1])
sage: Partitions.options(diagram_str='*', convention="english")
sage: print(mu.ferrers_diagram())
*****
*****
**
*
sage: Partitions.options(diagram_str='#')
sage: print(mu.ferrers_diagram())
#####
#####
##
#
sage: Partitions.options.convention="french"
sage: print(mu.ferrers_diagram())
#
##
#####
#####
sage: print(Partition([]).ferrers_diagram())
-
sage: Partitions.options(diagram_str='-')
sage: print(Partition([]).ferrers_diagram())
(/)
sage: Partitions.options._reset()
"""
diag_str = self.parent().options.diagram_str
if not self._list:
return '-' if diag_str != '-' else "(/)"
if self.parent().options.convention == "English":
return '\n'.join(diag_str * p for p in self)
else:
return '\n'.join(diag_str * p for p in reversed(self))
def pp(self):
r"""
Print the Ferrers diagram.
See :meth:`ferrers_diagram` for more on the Ferrers diagram.
EXAMPLES::
sage: Partition([5,5,2,1]).pp()
*****
*****
**
*
sage: Partitions.options.convention='French'
sage: Partition([5,5,2,1]).pp()
*
**
*****
*****
sage: Partitions.options._reset()
"""
print(self.ferrers_diagram())
def __truediv__(self, p):
"""
        Return the skew partition ``self / p``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p/[1,1]
[3, 2, 1] / [1, 1]
sage: p/[3,2,1]
[3, 2, 1] / [3, 2, 1]
sage: p/Partition([1,1])
[3, 2, 1] / [1, 1]
sage: p/[2,2,2]
Traceback (most recent call last):
...
ValueError: To form a skew partition p/q, q must be contained in p.
"""
if not self.contains(p):
raise ValueError("To form a skew partition p/q, q must be contained in p.")
return SkewPartition([self[:], p])
def power(self, k):
r"""
Return the cycle type of the `k`-th power of any permutation
        with cycle type ``self`` (thus describing the power map of
        symmetric groups).
Equivalent to GAP's ``PowerPartition``.
EXAMPLES::
sage: p = Partition([5,3])
sage: p.power(1)
[5, 3]
sage: p.power(2)
[5, 3]
sage: p.power(3)
[5, 1, 1, 1]
sage: p.power(4)
[5, 3]
Now let us compare this to the power map on `S_8`::
sage: G = SymmetricGroup(8)
sage: g = G([(1,2,3,4,5),(6,7,8)])
sage: g
(1,2,3,4,5)(6,7,8)
sage: g^2
(1,3,5,2,4)(6,8,7)
sage: g^3
(1,4,2,5,3)
sage: g^4
(1,5,4,3,2)(6,7,8)
::
sage: Partition([3,2,1]).power(3)
[2, 1, 1, 1, 1]
"""
res = []
for i in self:
g = gcd(i, k)
res.extend( [ZZ(i//g)]*int(g) )
res.sort(reverse=True)
return Partition(res)
def __next__(self):
"""
Return the partition that lexicographically follows ``self``, of the
same size. If ``self`` is the last partition, then return ``False``.
EXAMPLES::
sage: next(Partition([4]))
[3, 1]
sage: next(Partition([1,1,1,1]))
False
"""
p = self
n = 0
m = 0
for i in p:
n += i
m += 1
next_p = p[:] + [1]*(n - len(p))
        # Check to see if we are at the last (all ones) partition.
if p == [1]*n:
return False
        # If we are not, then run the ZS1 algorithm.
        # Let h be the number of non-one entries in the partition.
h = 0
for i in next_p:
if i != 1:
h += 1
if next_p[h-1] == 2:
m += 1
next_p[h-1] = 1
h -= 1
else:
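            # Decrease the last non-one part to r = (old value) - 1 and
            # redistribute the remaining t = m - h + 1 cells into parts of
            # size r, followed by at most one smaller part.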
r = next_p[h-1] - 1
t = m - h + 1
next_p[h-1] = r
while t >= r:
h += 1
next_p[h-1] = r
t -= r
if t == 0:
m = h
else:
m = h + 1
if t > 1:
h += 1
next_p[h-1] = t
return self.parent()(next_p[:m])
next = __next__
def size(self):
"""
Return the size of ``self``.
EXAMPLES::
sage: Partition([2,2]).size()
4
sage: Partition([3,2,1]).size()
6
"""
return sum(self)
def sign(self):
r"""
Return the sign of any permutation with cycle type ``self``.
This function corresponds to a homomorphism from the symmetric
group `S_n` into the cyclic group of order 2, whose kernel
is exactly the alternating group `A_n`. Partitions of sign
`1` are called even partitions while partitions of sign
`-1` are called odd.
EXAMPLES::
sage: Partition([5,3]).sign()
1
sage: Partition([5,2]).sign()
-1
Zolotarev's lemma states that the Legendre symbol
`\left(\frac{a}{p}\right)` for an integer
`a \pmod p` (`p` a prime number), can be computed
        as sign(`p_a`), where sign denotes the sign of a permutation and
        `p_a` denotes the permutation of the residue classes `\pmod p`
induced by modular multiplication by `a`, provided
`p` does not divide `a`.
We verify this in some examples.
::
sage: F = GF(11)
sage: a = F.multiplicative_generator();a
2
sage: plist = [int(a*F(x)) for x in range(1,11)]; plist
[2, 4, 6, 8, 10, 1, 3, 5, 7, 9]
This corresponds to the permutation (1, 2, 4, 8, 5, 10, 9, 7, 3, 6)
        (acting on the set `\{1,2,...,10\}`) and to the partition
[10].
::
sage: p = PermutationGroupElement('(1, 2, 4, 8, 5, 10, 9, 7, 3, 6)')
sage: p.sign()
-1
sage: Partition([10]).sign()
-1
sage: kronecker_symbol(11,2)
-1
Now replace `2` by `3`::
sage: plist = [int(F(3*x)) for x in range(1,11)]; plist
[3, 6, 9, 1, 4, 7, 10, 2, 5, 8]
sage: list(range(1, 11))
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sage: p = PermutationGroupElement('(3,4,8,7,9)')
sage: p.sign()
1
sage: kronecker_symbol(3,11)
1
sage: Partition([5,1,1,1,1,1]).sign()
1
In both cases, Zolotarev holds.
REFERENCES:
- :wikipedia:`Zolotarev%27s_lemma`
"""
return (-1)**(self.size()-self.length())
def k_size(self, k):
r"""
Given a partition ``self`` and a ``k``, return the size of the
`k`-boundary.
This is the same as the length method
:meth:`sage.combinat.core.Core.length` of the
:class:`sage.combinat.core.Core` object, with the exception that here we
don't require ``self`` to be a `k+1`-core.
EXAMPLES::
sage: Partition([2, 1, 1]).k_size(1)
2
sage: Partition([2, 1, 1]).k_size(2)
3
sage: Partition([2, 1, 1]).k_size(3)
3
sage: Partition([2, 1, 1]).k_size(4)
4
.. SEEALSO::
:meth:`k_boundary`, :meth:`SkewPartition.size`
"""
return self.k_boundary(k).size()
def boundary(self):
r"""
Return the integer coordinates of points on the boundary of ``self``.
        For the following description, picture the Ferrers diagram of ``self``
        using the French convention. Recall that the French convention puts
        the longest row on the bottom and the shortest row on the top. In
        addition, interpret the Ferrers diagram as 1 x 1 cells in the Euclidean
        plane. So if ``self`` were the partition [3, 1], the lower-left vertices
        of the 1 x 1 cells in the Ferrers diagram would be (0, 0), (1, 0),
        (2, 0), and (0, 1).
The boundary of a partition is the set `\{ \text{NE}(d) \mid \forall
d\:\text{diagonal} \}`. That is, for every diagonal line `y = x + b`
where `b \in \mathbb{Z}`, we find the northeasternmost (NE) point on
        that diagonal which is also in the Ferrers diagram.
The boundary will go from bottom-right to top-left.
EXAMPLES:
Consider the partition (1) depicted as a square on a cartesian plane
with vertices (0, 0), (1, 0), (1, 1), and (0, 1). Three of those
vertices in the appropriate order form the boundary::
sage: Partition([1]).boundary()
[(1, 0), (1, 1), (0, 1)]
The partition (3, 1) can be visualized as three squares on a cartesian
plane. The coordinates of the appropriate vertices form the boundary::
sage: Partition([3, 1]).boundary()
[(3, 0), (3, 1), (2, 1), (1, 1), (1, 2), (0, 2)]
TESTS::
sage: Partition([1]).boundary()
[(1, 0), (1, 1), (0, 1)]
sage: Partition([2, 1]).boundary()
[(2, 0), (2, 1), (1, 1), (1, 2), (0, 2)]
sage: Partition([3, 1]).boundary()
[(3, 0), (3, 1), (2, 1), (1, 1), (1, 2), (0, 2)]
sage: Partition([2, 1, 1]).boundary()
[(2, 0), (2, 1), (1, 1), (1, 2), (1, 3), (0, 3)]
.. SEEALSO::
:meth:`k_rim`. You might have been looking for :meth:`k_boundary`
instead.
"""
def horizontal_piece(xy, bdy):
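            # Return the horizontal run of boundary points at height start_y,
            # walking west from just inside the previously found boundary
            # point to (start_x, start_y); with an empty boundary, just
            # return the start point itself.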
(start_x, start_y) = xy
if not bdy:
h_piece = [(start_x, start_y)]
else:
stop_x = bdy[-1][0]
y = start_y # y never changes
h_piece = [(x, y) for x in range(start_x, stop_x)]
h_piece = list(reversed(h_piece))
return h_piece
bdy = []
for i, part in enumerate(self):
(cell_x, cell_y) = (part - 1, i)
(x, y) = (cell_x + 1, cell_y + 1)
bdy += horizontal_piece((x, y - 1), bdy)
bdy.append((x, y))
# add final "top-left" horizontal piece
(top_left_x, top_left_y) = (0, len(self))
bdy += horizontal_piece((top_left_x, top_left_y), bdy)
return bdy
def k_rim(self, k):
r"""
Return the ``k``-rim of ``self`` as a list of integer coordinates.
The `k`-rim of a partition is the "line between" (or "intersection of")
the `k`-boundary and the `k`-interior. (Section 2.3 of [HM2011]_)
It will be output as an ordered list of integer coordinates, where the
origin is `(0, 0)`. It will start at the top-left of the `k`-rim (using
French convention) and end at the bottom-right.
EXAMPLES:
Consider the partition (3, 1) split up into its 1-interior and
1-boundary:
.. image:: ../../media/k-rim.JPG
:height: 180px
:align: center
The line shown in bold is the 1-rim, and that information is equivalent
to the integer coordinates of the points that occur along that line::
sage: Partition([3, 1]).k_rim(1)
[(3, 0), (2, 0), (2, 1), (1, 1), (0, 1), (0, 2)]
TESTS::
sage: Partition([1]).k_rim(0)
[(1, 0), (1, 1), (0, 1)]
sage: Partition([3, 1]).k_rim(0)
[(3, 0), (3, 1), (2, 1), (1, 1), (1, 2), (0, 2)]
sage: Partition([3, 1]).k_rim(1)
[(3, 0), (2, 0), (2, 1), (1, 1), (0, 1), (0, 2)]
sage: Partition([3, 1]).k_rim(2)
[(3, 0), (2, 0), (1, 0), (1, 1), (0, 1), (0, 2)]
sage: Partition([3, 1]).k_rim(3)
[(3, 0), (2, 0), (1, 0), (1, 1), (0, 1), (0, 2)]
.. SEEALSO::
:meth:`k_interior`, :meth:`k_boundary`, :meth:`boundary`
"""
interior_rim = self.k_interior(k).boundary()
# get leftmost vertical line
interior_top_left_y = interior_rim[-1][1]
v_piece = [(0, y) for y in range(interior_top_left_y+1, len(self)+1)]
# get bottommost horizontal line
interior_bottom_right_x = interior_rim[0][0]
if self:
ptn_bottom_right_x = self[0]
else:
ptn_bottom_right_x = 0
h_piece = [(x, 0) for x in
range(ptn_bottom_right_x, interior_bottom_right_x, -1)]
# glue together with boundary
rim = h_piece + interior_rim + v_piece
return rim
def k_row_lengths(self, k):
r"""
Return the ``k``-row-shape of the partition ``self``.
This is equivalent to taking the `k`-boundary of the partition and then
returning the row-shape of that. We do *not* discard rows of length 0.
(Section 2.2 of [LLMS2013]_)
EXAMPLES::
sage: Partition([6, 1]).k_row_lengths(2)
[2, 1]
sage: Partition([4, 4, 4, 3, 2]).k_row_lengths(2)
[0, 1, 1, 1, 2]
.. SEEALSO::
:meth:`k_column_lengths`, :meth:`k_boundary`,
:meth:`SkewPartition.row_lengths`,
:meth:`SkewPartition.column_lengths`
"""
return self.k_boundary(k).row_lengths()
def k_column_lengths(self, k):
r"""
Return the ``k``-column-shape of the partition ``self``.
This is the 'column' analog of :meth:`k_row_lengths`.
EXAMPLES::
sage: Partition([6, 1]).k_column_lengths(2)
[1, 0, 0, 0, 1, 1]
sage: Partition([4, 4, 4, 3, 2]).k_column_lengths(2)
[1, 1, 1, 2]
.. SEEALSO::
:meth:`k_row_lengths`, :meth:`k_boundary`,
:meth:`SkewPartition.row_lengths`,
:meth:`SkewPartition.column_lengths`
"""
return self.k_boundary(k).column_lengths()
def has_rectangle(self, h, w):
r"""
        Return ``True`` if the Ferrers diagram of ``self`` has ``h``
(*or more*) rows of length ``w`` (*exactly*).
INPUT:
- ``h`` -- An integer `h \geq 1`. The (*minimum*) height of the
rectangle.
- ``w`` -- An integer `w \geq 1`. The width of the rectangle.
EXAMPLES::
sage: Partition([3, 3, 3, 3]).has_rectangle(2, 3)
True
sage: Partition([3, 3]).has_rectangle(2, 3)
True
sage: Partition([4, 3]).has_rectangle(2, 3)
False
sage: Partition([3]).has_rectangle(2, 3)
False
TESTS::
sage: Partition([1, 1, 1]).has_rectangle(4, 1)
False
sage: Partition([1, 1, 1]).has_rectangle(3, 1)
True
sage: Partition([1, 1, 1]).has_rectangle(2, 1)
True
sage: Partition([1, 1, 1]).has_rectangle(1, 2)
False
sage: Partition([3]).has_rectangle(1, 3)
True
sage: Partition([3]).has_rectangle(1, 2)
False
sage: Partition([3]).has_rectangle(2, 3)
False
.. SEEALSO::
:meth:`has_k_rectangle`
"""
assert h >= 1
assert w >= 1
num_rows_of_len_w = self.to_exp(w)[w - 1]
return num_rows_of_len_w >= h
def has_k_rectangle(self, k):
r"""
        Return ``True`` if the Ferrers diagram of ``self`` contains `k-i+1`
rows (*or more*) of length `i` (*exactly*) for any `i` in `[1, k]`.
This is mainly a helper function for :meth:`is_k_reducible` and
:meth:`is_k_irreducible`, the only difference between this function and
:meth:`is_k_reducible` being that this function allows any partition as
input while :meth:`is_k_reducible` requires the input to be `k`-bounded.
EXAMPLES:
The partition [1, 1, 1] has at least 2 rows of length 1::
sage: Partition([1, 1, 1]).has_k_rectangle(2)
True
The partition [1, 1, 1] does *not* have 4 rows of length 1, 3 rows of
length 2, 2 rows of length 3, nor 1 row of length 4::
sage: Partition([1, 1, 1]).has_k_rectangle(4)
False
TESTS::
sage: Partition([1]).has_k_rectangle(1)
True
sage: Partition([1]).has_k_rectangle(2)
False
sage: Partition([1, 1, 1]).has_k_rectangle(3)
True
sage: Partition([1, 1, 1]).has_k_rectangle(2)
True
sage: Partition([1, 1, 1]).has_k_rectangle(4)
False
sage: Partition([3]).has_k_rectangle(3)
True
sage: Partition([3]).has_k_rectangle(2)
False
sage: Partition([3]).has_k_rectangle(4)
False
.. SEEALSO::
:meth:`is_k_irreducible`, :meth:`is_k_reducible`,
:meth:`has_rectangle`
"""
return any(self.has_rectangle(a, b) for (a, b) in
[(k-i+1, i) for i in range(1, k+1)])
def is_k_bounded(self, k):
r"""
Return ``True`` if the partition ``self`` is bounded by ``k``.
EXAMPLES::
sage: Partition([4, 3, 1]).is_k_bounded(4)
True
sage: Partition([4, 3, 1]).is_k_bounded(7)
True
sage: Partition([4, 3, 1]).is_k_bounded(3)
False
"""
assert k >= 0
if self.is_empty():
return True
else:
return self[0] <= k
def is_k_reducible(self, k):
r"""
Return ``True`` if the partition ``self`` is ``k``-reducible.
        A `k`-bounded partition is `k`-*reducible* if its Ferrers diagram
contains `k-i+1` rows (or more) of length `i` (exactly) for some
`i \in [1, k]`.
(Also, a `k`-bounded partition is `k`-reducible if and only if it is not `k`-irreducible.)
EXAMPLES:
The partition [1, 1, 1] has at least 2 rows of length 1::
sage: Partition([1, 1, 1]).is_k_reducible(2)
True
The partition [1, 1, 1] does *not* have 4 rows of length 1, 3 rows of
length 2, 2 rows of length 3, nor 1 row of length 4::
sage: Partition([1, 1, 1]).is_k_reducible(4)
False
.. SEEALSO::
:meth:`is_k_irreducible`, :meth:`has_k_rectangle`
"""
if not self.is_k_bounded(k):
raise ValueError('we only talk about k-reducible / k-irreducible for k-bounded partitions')
return self.has_k_rectangle(k)
def is_k_irreducible(self, k):
r"""
Return ``True`` if the partition ``self`` is ``k``-irreducible.
        A `k`-bounded partition is `k`-*irreducible* if its Ferrers diagram
does *not* contain `k-i+1` rows (or more) of length `i` (exactly) for
every `i \in [1, k]`.
(Also, a `k`-bounded partition is `k`-irreducible if and only if it is
not `k`-reducible.)
EXAMPLES:
The partition [1, 1, 1] has at least 2 rows of length 1::
sage: Partition([1, 1, 1]).is_k_irreducible(2)
False
The partition [1, 1, 1] does *not* have 4 rows of length 1, 3 rows of
length 2, 2 rows of length 3, nor 1 row of length 4::
sage: Partition([1, 1, 1]).is_k_irreducible(4)
True
.. SEEALSO::
:meth:`is_k_reducible`, :meth:`has_k_rectangle`
"""
return not self.is_k_reducible(k)
def is_symmetric(self):
r"""
Return ``True`` if the partition ``self`` equals its own transpose.
EXAMPLES::
sage: Partition([2, 1]).is_symmetric()
True
sage: Partition([3, 1]).is_symmetric()
False
"""
return self == self.conjugate()
def next_within_bounds(self, min=[], max=None, partition_type=None):
r"""
Get the next partition lexicographically that contains ``min`` and is
contained in ``max``.
INPUT:
- ``min`` -- (default ``[]``, the empty partition) The
'minimum partition' that ``next_within_bounds(self)`` must contain.
- ``max`` -- (default ``None``) The 'maximum partition' that
``next_within_bounds(self)`` must be contained in. If set to ``None``,
then there is no restriction.
- ``partition_type`` -- (default ``None``) The type of partitions
allowed. For example, 'strict' for strictly decreasing partitions, or
``None`` to allow any valid partition.
EXAMPLES::
sage: m = [1, 1]
sage: M = [3, 2, 1]
sage: Partition([1, 1]).next_within_bounds(min=m, max=M)
[1, 1, 1]
sage: Partition([1, 1, 1]).next_within_bounds(min=m, max=M)
[2, 1]
sage: Partition([2, 1]).next_within_bounds(min=m, max=M)
[2, 1, 1]
sage: Partition([2, 1, 1]).next_within_bounds(min=m, max=M)
[2, 2]
sage: Partition([2, 2]).next_within_bounds(min=m, max=M)
[2, 2, 1]
sage: Partition([2, 2, 1]).next_within_bounds(min=m, max=M)
[3, 1]
sage: Partition([3, 1]).next_within_bounds(min=m, max=M)
[3, 1, 1]
sage: Partition([3, 1, 1]).next_within_bounds(min=m, max=M)
[3, 2]
sage: Partition([3, 2]).next_within_bounds(min=m, max=M)
[3, 2, 1]
sage: Partition([3, 2, 1]).next_within_bounds(min=m, max=M) == None
True
.. SEEALSO::
:meth:`next`
"""
# make sure min <= self <= max
if max is not None:
assert _Partitions(max).contains(_Partitions(self))
assert _Partitions(self).contains(_Partitions(min))
# check for empty max
if max is not None and _Partitions(max).is_empty():
return None
# convert partitions to lists to make them mutable
p = list(self)
min = list(min)
# if there is no max, the next partition just tacks a '1' on to the end!
if max is None:
return _Partitions(p + [1])
# extend p and min to include 0's at the end
p = p + [0] * (len(max) - len(p))
min = min + [0] * (len(max) - len(min))
# finally, run the algo to find next_p
next_p = copy(p)
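        # Scan the rows from bottom to top for the first part that can be
        # increased (subject to max and to the decrease condition below);
        # every row passed over on the way is reset to its minimum.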
def condition(a, b):
if partition_type in ('strict', 'strictly decreasing'):
return a < b - 1
elif partition_type in (None, 'weak', 'weakly decreasing'):
return a < b
else:
raise ValueError('unrecognized partition type')
for r in range(len(p) - 1, -1, -1):
if r == 0:
if (max is None or p[r] < max[r]):
next_p[r] += 1
break
else:
return None
else:
if (max is None or p[r] < max[r]) and condition(p[r], p[r-1]):
next_p[r] += 1
break
else:
next_p[r] = min[r]
continue
return _Partitions(next_p)
def row_standard_tableaux(self):
"""
Return the :class:`row standard tableaux
<sage.combinat.tableau.RowStandardTableaux>` of shape ``self``.
EXAMPLES::
sage: Partition([3,2,2,1]).row_standard_tableaux()
Row standard tableaux of shape [3, 2, 2, 1]
"""
return tableau.RowStandardTableaux(self)
def standard_tableaux(self):
"""
Return the :class:`standard tableaux<StandardTableaux>`
of shape ``self``.
EXAMPLES::
sage: Partition([3,2,2,1]).standard_tableaux()
Standard tableaux of shape [3, 2, 2, 1]
"""
return tableau.StandardTableaux(self)
def up(self):
r"""
Return a generator for partitions that can be obtained from ``self``
by adding a cell.
EXAMPLES::
sage: list(Partition([2,1,1]).up())
[[3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: list(Partition([3,2]).up())
[[4, 2], [3, 3], [3, 2, 1]]
sage: [p for p in Partition([]).up()]
[[1]]
"""
p = self
previous = p.get_part(0) + 1
for i, current in enumerate(p):
if current < previous:
yield Partition(p[:i] + [current + 1] + p[i + 1:])
previous = current
yield Partition(p + [1])
def up_list(self):
"""
Return a list of the partitions that can be formed from ``self`` by
adding a cell.
EXAMPLES::
sage: Partition([2,1,1]).up_list()
[[3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: Partition([3,2]).up_list()
[[4, 2], [3, 3], [3, 2, 1]]
sage: Partition([]).up_list()
[[1]]
"""
return list(self.up())
def down(self):
r"""
Return a generator for partitions that can be obtained from ``self``
by removing a cell.
EXAMPLES::
sage: [p for p in Partition([2,1,1]).down()]
[[1, 1, 1], [2, 1]]
sage: [p for p in Partition([3,2]).down()]
[[2, 2], [3, 1]]
sage: [p for p in Partition([3,2,1]).down()]
[[2, 2, 1], [3, 1, 1], [3, 2]]
TESTS:
We check that :trac:`11435` is fixed::
sage: Partition([]).down_list() #indirect doctest
[]
"""
p = self
l = len(p)
for i in range(l-1):
if p[i] > p[i+1]:
yield Partition(p[:i] + [ p[i]-1 ] + p[i+1:])
if l >= 1:
last = p[-1]
if last == 1:
yield Partition(p[:-1])
else:
yield Partition(p[:-1] + [ p[-1] - 1 ])
def down_list(self):
"""
Return a list of the partitions that can be obtained from ``self``
by removing a cell.
EXAMPLES::
sage: Partition([2,1,1]).down_list()
[[1, 1, 1], [2, 1]]
sage: Partition([3,2]).down_list()
[[2, 2], [3, 1]]
sage: Partition([3,2,1]).down_list()
[[2, 2, 1], [3, 1, 1], [3, 2]]
sage: Partition([]).down_list() #checks :trac:`11435`
[]
"""
return [p for p in self.down()]
@combinatorial_map(name="cell poset")
def cell_poset(self, orientation="SE"):
"""
Return the Young diagram of ``self`` as a poset. The optional
keyword variable ``orientation`` determines the order relation
of the poset.
The poset always uses the set of cells of the Young diagram
of ``self`` as its ground set. The order relation of the poset
depends on the ``orientation`` variable (which defaults to
``"SE"``). Concretely, ``orientation`` has to be specified to
one of the strings ``"NW"``, ``"NE"``, ``"SW"``, and ``"SE"``,
standing for "northwest", "northeast", "southwest" and
"southeast", respectively. If ``orientation`` is ``"SE"``, then
the order relation of the poset is such that a cell `u` is
greater or equal to a cell `v` in the poset if and only if `u`
lies weakly southeast of `v` (this means that `u` can be
reached from `v` by a sequence of south and east steps; the
sequence is allowed to consist of south steps only, or of east
steps only, or even be empty). Similarly the order relation is
defined for the other three orientations. The Young diagram is
supposed to be drawn in English notation.
The elements of the poset are the cells of the Young diagram
of ``self``, written as tuples of zero-based coordinates (so
that `(3, 7)` stands for the `8`-th cell of the `4`-th row,
etc.).
EXAMPLES::
sage: p = Partition([3,3,1])
sage: Q = p.cell_poset(); Q
Finite poset containing 7 elements
sage: sorted(Q)
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0)]
sage: sorted(Q.maximal_elements())
[(1, 2), (2, 0)]
sage: Q.minimal_elements()
[(0, 0)]
sage: sorted(Q.upper_covers((1, 0)))
[(1, 1), (2, 0)]
sage: Q.upper_covers((1, 1))
[(1, 2)]
sage: P = p.cell_poset(orientation="NW"); P
Finite poset containing 7 elements
sage: sorted(P)
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0)]
sage: sorted(P.minimal_elements())
[(1, 2), (2, 0)]
sage: P.maximal_elements()
[(0, 0)]
sage: P.upper_covers((2, 0))
[(1, 0)]
sage: sorted(P.upper_covers((1, 2)))
[(0, 2), (1, 1)]
sage: sorted(P.upper_covers((1, 1)))
[(0, 1), (1, 0)]
sage: sorted([len(P.upper_covers(v)) for v in P])
[0, 1, 1, 1, 1, 2, 2]
sage: R = p.cell_poset(orientation="NE"); R
Finite poset containing 7 elements
sage: sorted(R)
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0)]
sage: R.maximal_elements()
[(0, 2)]
sage: R.minimal_elements()
[(2, 0)]
sage: sorted([len(R.upper_covers(v)) for v in R])
[0, 1, 1, 1, 1, 2, 2]
sage: R.is_isomorphic(P)
False
sage: R.is_isomorphic(P.dual())
False
Linear extensions of ``p.cell_poset()`` are in 1-to-1 correspondence
with standard Young tableaux of shape `p`::
sage: all( len(p.cell_poset().linear_extensions())
....: == len(p.standard_tableaux())
....: for n in range(8) for p in Partitions(n) )
True
This is not the case for northeast orientation::
sage: q = Partition([3, 1])
sage: q.cell_poset(orientation="NE").is_chain()
True
TESTS:
We check that the posets are really what they should be for size
up to `7`::
sage: def check_NW(n):
....: for p in Partitions(n):
....: P = p.cell_poset(orientation="NW")
....: for c in p.cells():
....: for d in p.cells():
....: if P.le(c, d) != (c[0] >= d[0]
....: and c[1] >= d[1]):
....: return False
....: return True
sage: all( check_NW(n) for n in range(8) )
True
sage: def check_NE(n):
....: for p in Partitions(n):
....: P = p.cell_poset(orientation="NE")
....: for c in p.cells():
....: for d in p.cells():
....: if P.le(c, d) != (c[0] >= d[0]
....: and c[1] <= d[1]):
....: return False
....: return True
sage: all( check_NE(n) for n in range(8) )
True
sage: def test_duality(n, ori1, ori2):
....: for p in Partitions(n):
....: P = p.cell_poset(orientation=ori1)
....: Q = p.cell_poset(orientation=ori2)
....: for c in p.cells():
....: for d in p.cells():
....: if P.lt(c, d) != Q.lt(d, c):
....: return False
....: return True
sage: all( test_duality(n, "NW", "SE") for n in range(8) )
True
sage: all( test_duality(n, "NE", "SW") for n in range(8) )
True
sage: all( test_duality(n, "NE", "SE") for n in range(4) )
False
"""
from sage.combinat.posets.posets import Poset
covers = {}
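        # covers maps each cell to the list of cells that cover it, i.e.
        # its immediate successors in the chosen orientation.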
if orientation == "NW":
for i, row in enumerate(self):
if i == 0:
covers[(0, 0)] = []
for j in range(1, row):
covers[(0, j)] = [(0, j - 1)]
else:
covers[(i, 0)] = [(i - 1, 0)]
for j in range(1, row):
covers[(i, j)] = [(i - 1, j), (i, j - 1)]
elif orientation == "NE":
for i, row in enumerate(self):
if i == 0:
covers[(0, row - 1)] = []
for j in range(row - 1):
covers[(0, j)] = [(0, j + 1)]
else:
covers[(i, row - 1)] = [(i - 1, row - 1)]
for j in range(row - 1):
covers[(i, j)] = [(i - 1, j), (i, j + 1)]
elif orientation == "SE":
l = len(self) - 1
for i, row in enumerate(self):
if i == l:
covers[(i, row - 1)] = []
for j in range(row - 1):
covers[(i, j)] = [(i, j + 1)]
else:
next_row = self[i + 1]
if row == next_row:
covers[(i, row - 1)] = [(i + 1, row - 1)]
for j in range(row - 1):
covers[(i, j)] = [(i + 1, j), (i, j + 1)]
else:
covers[(i, row - 1)] = []
for j in range(next_row):
covers[(i, j)] = [(i + 1, j), (i, j + 1)]
for j in range(next_row, row - 1):
covers[(i, j)] = [(i, j + 1)]
elif orientation == "SW":
l = len(self) - 1
for i, row in enumerate(self):
if i == l:
covers[(i, 0)] = []
for j in range(1, row):
covers[(i, j)] = [(i, j - 1)]
else:
covers[(i, 0)] = [(i + 1, 0)]
next_row = self[i + 1]
for j in range(1, next_row):
covers[(i, j)] = [(i + 1, j), (i, j - 1)]
for j in range(next_row, row):
covers[(i, j)] = [(i, j - 1)]
return Poset(covers)
def frobenius_coordinates(self):
"""
Return a pair of sequences of Frobenius coordinates aka beta numbers
of the partition.
These are two strictly decreasing sequences of nonnegative integers
of the same length.
EXAMPLES::
sage: Partition([]).frobenius_coordinates()
([], [])
sage: Partition([1]).frobenius_coordinates()
([0], [0])
sage: Partition([3,3,3]).frobenius_coordinates()
([2, 1, 0], [2, 1, 0])
sage: Partition([9,1,1,1,1,1,1]).frobenius_coordinates()
([8], [6])
"""
mu = self
muconj = mu.conjugate() # Naive implementation
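        # The Frobenius coordinates are the arm lengths (a) and the leg
        # lengths (b) of the cells on the main diagonal of the diagram.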
if len(mu) <= len(muconj):
a = [x for x in (val-i-1 for i, val in enumerate(mu)) if x>=0]
b = [x for x in (muconj[i]-i-1 for i in range(len(a))) if x>=0]
else:
b = [x for x in (val-i-1 for i, val in enumerate(muconj)) if x>=0]
a = [x for x in (mu[i]-i-1 for i in range(len(b))) if x>=0]
return (a,b)
def frobenius_rank(self):
r"""
Return the Frobenius rank of the partition ``self``.
The Frobenius rank of a partition
`\lambda = (\lambda_1, \lambda_2, \lambda_3, \cdots)` is
defined to be the largest `i` such that `\lambda_i \geq i`.
In other words, it is the number of cells on the main diagonal
of `\lambda`. In yet other words, it is the size of the largest
square fitting into the Young diagram of `\lambda`.
EXAMPLES::
sage: Partition([]).frobenius_rank()
0
sage: Partition([1]).frobenius_rank()
1
sage: Partition([3,3,3]).frobenius_rank()
3
sage: Partition([9,1,1,1,1,1]).frobenius_rank()
1
sage: Partition([2,1,1,1,1,1]).frobenius_rank()
1
sage: Partition([2,2,1,1,1,1]).frobenius_rank()
2
sage: Partition([3,2]).frobenius_rank()
2
sage: Partition([3,2,2]).frobenius_rank()
2
sage: Partition([8,4,4,4,4]).frobenius_rank()
4
sage: Partition([8,4,1]).frobenius_rank()
2
sage: Partition([3,3,1]).frobenius_rank()
2
"""
for i, x in enumerate(self):
if x <= i:
return i
return len(self)
def beta_numbers(self, length=None):
"""
Return the set of beta numbers corresponding to ``self``.
The optional argument ``length`` specifies the length of the beta set
(which must be at least the length of ``self``).
For more on beta numbers, see :meth:`frobenius_coordinates`.
EXAMPLES::
sage: Partition([4,3,2]).beta_numbers()
[6, 4, 2]
sage: Partition([4,3,2]).beta_numbers(5)
[8, 6, 4, 1, 0]
sage: Partition([]).beta_numbers()
[]
sage: Partition([]).beta_numbers(3)
[2, 1, 0]
sage: Partition([6,4,1,1]).beta_numbers()
[9, 6, 2, 1]
sage: Partition([6,4,1,1]).beta_numbers(6)
[11, 8, 4, 3, 1, 0]
sage: Partition([1,1,1]).beta_numbers()
[3, 2, 1]
sage: Partition([1,1,1]).beta_numbers(4)
[4, 3, 2, 0]
"""
true_length = len(self)
if length is None:
length = true_length
elif length < true_length:
raise ValueError("length must be at least the length of the partition")
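        # The beta numbers are the first-column hook lengths of the diagram,
        # padded with zero parts up to the requested length.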
beta = [l + length - i - 1 for (i, l) in enumerate(self)]
if length > true_length:
beta.extend(list(range(length-true_length-1,-1,-1)))
return beta
def crank(self):
r"""
Return the Dyson crank of ``self``.
The Dyson crank of a partition `\lambda` is defined as follows:
If `\lambda` contains at least one `1`, then the crank is
`\mu(\lambda) - \omega(\lambda)`, where `\omega(\lambda)` is the
number of `1`s in `\lambda`, and `\mu(\lambda)` is the number of
parts of `\lambda` larger than `\omega(\lambda)`. If `\lambda`
contains no `1`, then the crank is simply the largest part of
`\lambda`.
REFERENCES:
- [AG1988]_
EXAMPLES::
sage: Partition([]).crank()
0
sage: Partition([3,2,2]).crank()
3
sage: Partition([5,4,2,1,1]).crank()
0
sage: Partition([1,1,1]).crank()
-3
sage: Partition([6,4,4,3]).crank()
6
sage: Partition([6,3,3,1,1]).crank()
1
sage: Partition([6]).crank()
6
sage: Partition([5,1]).crank()
0
sage: Partition([4,2]).crank()
4
sage: Partition([4,1,1]).crank()
-1
sage: Partition([3,3]).crank()
3
sage: Partition([3,2,1]).crank()
1
sage: Partition([3,1,1,1]).crank()
-3
sage: Partition([2,2,2]).crank()
2
sage: Partition([2,2,1,1]).crank()
-2
sage: Partition([2,1,1,1,1]).crank()
-4
sage: Partition([1,1,1,1,1,1]).crank()
-6
"""
l = len(self)
if l == 0:
return 0
if self[-1] > 1:
return self[0]
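        # self contains at least one 1: w counts the 1's and m counts the
        # parts larger than w.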
ind_1 = self.index(1)
w = l - ind_1 # w is omega(self).
m = len([x for x in self if x > w])
return m - w
def t_completion(self, t):
r"""
Return the ``t``-completion of the partition ``self``.
If `\lambda = (\lambda_1, \lambda_2, \lambda_3, \ldots)` is a
partition and `t` is an integer greater or equal to
`\left\lvert \lambda \right\rvert + \lambda_1`, then the
`t`-*completion of* `\lambda` is defined as the partition
`(t - \left\lvert \lambda \right\rvert, \lambda_1, \lambda_2,
\lambda_3, \ldots)` of `t`. This partition is denoted by `\lambda[t]`
in [BOR2009]_, by `\lambda_{[t]}` in [BdVO2012]_, and by `\lambda(t)`
in [CO2010]_.
EXAMPLES::
sage: Partition([]).t_completion(0)
[]
sage: Partition([]).t_completion(1)
[1]
sage: Partition([]).t_completion(2)
[2]
sage: Partition([]).t_completion(3)
[3]
sage: Partition([2, 1]).t_completion(5)
[2, 2, 1]
sage: Partition([2, 1]).t_completion(6)
[3, 2, 1]
sage: Partition([4, 2, 2, 1]).t_completion(13)
[4, 4, 2, 2, 1]
sage: Partition([4, 2, 2, 1]).t_completion(19)
[10, 4, 2, 2, 1]
sage: Partition([4, 2, 2, 1]).t_completion(10)
Traceback (most recent call last):
...
ValueError: 10-completion is not defined
sage: Partition([4, 2, 2, 1]).t_completion(5)
Traceback (most recent call last):
...
ValueError: 5-completion is not defined
"""
if self._list and t < self.size() + self._list[0]:
raise ValueError("{}-completion is not defined".format(t))
return Partition([t - self.size()] + self._list)
def larger_lex(self, rhs):
"""
Return ``True`` if ``self`` is larger than ``rhs`` in lexicographic
order. Otherwise return ``False``.
EXAMPLES::
sage: p = Partition([3,2])
sage: p.larger_lex([3,1])
True
sage: p.larger_lex([1,4])
True
sage: p.larger_lex([3,2,1])
False
sage: p.larger_lex([3])
True
sage: p.larger_lex([5])
False
sage: p.larger_lex([3,1,1,1,1,1,1,1])
True
"""
return CombinatorialElement.__gt__(self, rhs)
def dominates(self, p2):
r"""
        Return ``True`` if ``self`` dominates the partition ``p2``; otherwise
        return ``False``.
EXAMPLES::
sage: p = Partition([3,2])
sage: p.dominates([3,1])
True
sage: p.dominates([2,2])
True
sage: p.dominates([2,1,1])
True
sage: p.dominates([3,3])
False
sage: p.dominates([4])
False
sage: Partition([4]).dominates(p)
False
sage: Partition([]).dominates([1])
False
sage: Partition([]).dominates([])
True
sage: Partition([1]).dominates([])
True
"""
p1 = self
sum1 = 0
sum2 = 0
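        # In dominance order, p1 >= p2 exactly when every partial sum
        # p1[0] + ... + p1[i] weakly exceeds the corresponding sum for p2.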
min_length = min(len(p1), len(p2))
        if min_length == 0:
            return not p2  # if either is empty, p1 dominates p2 iff p2 is empty
for i in range(min_length):
sum1 += p1[i]
sum2 += p2[i]
if sum2 > sum1:
return False
return sum(p1) >= sum(p2)
def cells(self):
"""
Return the coordinates of the cells of ``self``.
EXAMPLES::
sage: Partition([2,2]).cells()
[(0, 0), (0, 1), (1, 0), (1, 1)]
sage: Partition([3,2]).cells()
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]
"""
        res = []
        for i in range(len(self)):
            for j in range(self[i]):
                res.append((i, j))
        return res
def generalized_pochhammer_symbol(self, a, alpha):
r"""
Return the generalized Pochhammer symbol
`(a)_{self}^{(\alpha)}`. This is the product over all
cells `(i,j)` in ``self`` of `a - (i-1) / \alpha + j - 1`.
EXAMPLES::
sage: Partition([2,2]).generalized_pochhammer_symbol(2,1)
12
"""
res = 1
for (i,j) in self.cells():
res *= (a - (i-1)/alpha + j-1)
return res
def get_part(self, i, default=Integer(0)):
r"""
Return the `i^{th}` part of ``self``, or ``default`` if it does
not exist.
EXAMPLES::
sage: p = Partition([2,1])
sage: p.get_part(0), p.get_part(1), p.get_part(2)
(2, 1, 0)
sage: p.get_part(10,-1)
-1
sage: Partition([]).get_part(0)
0
"""
if i < len(self._list):
return self._list[i]
else:
return default
@combinatorial_map(name="partition to minimal Dyck word")
def to_dyck_word(self, n=None):
r"""
Return the ``n``-Dyck word whose corresponding partition is
``self`` (or, if ``n`` is not specified, the `n`-Dyck word with
smallest `n` to satisfy this property).
If `w` is an `n`-Dyck word (that is, a Dyck word with `n` open
symbols and `n` close symbols), then the Dyck path corresponding
to `w` can be regarded as a lattice path in the northeastern
half of an `n \times n`-square. The region to the northeast of
this Dyck path can be regarded as a partition. It is called the
partition corresponding to the Dyck word `w`. (See
:meth:`~sage.combinat.dyck_word.DyckWord.to_partition`.)
For every partition `\lambda` and every nonnegative integer `n`,
there exists at most one `n`-Dyck word `w` such that the
partition corresponding to `w` is `\lambda` (in fact, such `w`
exists if and only if `\lambda_i + i \leq n` for every `i`,
where `\lambda` is written in the form
`(\lambda_1, \lambda_2, \ldots, \lambda_k)` with `\lambda_k > 0`).
This method computes this `w` for a given `\lambda` and `n`.
If `n` is not specified, this method computes the `w` for the
smallest possible `n` for which such an `w` exists.
(The minimality of `n` means that the partition demarcated by the
Dyck path touches the diagonal.)
EXAMPLES::
sage: Partition([2,2]).to_dyck_word()
[1, 1, 0, 0, 1, 1, 0, 0]
sage: Partition([2,2]).to_dyck_word(4)
[1, 1, 0, 0, 1, 1, 0, 0]
sage: Partition([2,2]).to_dyck_word(5)
[1, 1, 1, 0, 0, 1, 1, 0, 0, 0]
sage: Partition([6,3,1]).to_dyck_word()
[1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0]
sage: Partition([]).to_dyck_word()
[]
sage: Partition([]).to_dyck_word(3)
[1, 1, 1, 0, 0, 0]
        The partition corresponding to ``self.to_dyck_word()`` is indeed
        ``self``::
sage: all( p.to_dyck_word().to_partition() == p
....: for p in Partitions(5) )
True
"""
from sage.combinat.dyck_word import DyckWord
if not self._list:
if n is None:
return DyckWord([])
return DyckWord([1]*n + [0]*n)
list_of_word = []
if n is None:
n = max(i + l + 1 for (i, l) in enumerate(self))
# This n is also max(i+j for (i,j) in self.cells()) + 2.
list_of_word.extend([1]*(n-self.length()))
copy_part = list(self)
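        # Peel off the smallest remaining part c: emit c close-steps (0's),
        # shrink all remaining parts by c, and emit one open step (1) for
        # the row boundary; trailing 0's then complete the path.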
while copy_part:
c = copy_part.pop()
list_of_word.extend([0]*c)
for i in range(len(copy_part)):
copy_part[i] -= c
list_of_word.append(1)
list_of_word.extend([0]*(n-self[0]))
return DyckWord(list_of_word)
@combinatorial_map(order=2, name="conjugate partition")
def conjugate(self):
"""
Return the conjugate partition of the partition ``self``. This
is also called the associated partition or the transpose in the
literature.
EXAMPLES::
sage: Partition([2,2]).conjugate()
[2, 2]
sage: Partition([6,3,1]).conjugate()
[3, 2, 2, 1, 1, 1]
The conjugate partition is obtained by transposing the Ferrers
diagram of the partition (see :meth:`.ferrers_diagram`)::
sage: print(Partition([6,3,1]).ferrers_diagram())
******
***
*
sage: print(Partition([6,3,1]).conjugate().ferrers_diagram())
***
**
**
*
*
*
"""
if not self:
par = Partitions_n(0)
return par.element_class(par, [])
par = Partitions_n(sum(self))
return par.element_class(par, conjugate(self))
def suter_diagonal_slide(self, n, exp=1):
r"""
Return the image of ``self`` in `Y_n` under Suter's diagonal slide
`\sigma_n`, where the notations used are those defined in [Sut2002]_.
The set `Y_n` is defined as the set of all partitions
`\lambda` such that the hook length of the `(0, 0)`-cell (i.e. the
northwestern most cell in English notation) of `\lambda` is less
than `n`, including the empty partition.
The map `\sigma_n` sends a partition (with non-zero entries)
`(\lambda_1, \lambda_2, \ldots, \lambda_m) \in Y_n` to the partition
`(\lambda_2 + 1, \lambda_3 + 1, \ldots, \lambda_m + 1,
\underbrace{1, 1, \ldots, 1}_{n - m - \lambda_1\text{ ones}})`.
In other words, it pads the partition with trailing zeroes
until it has length `n - \lambda_1`, then removes its first
part, and finally adds `1` to each part.
By Theorem 2.1 of [Sut2002]_, the dihedral group `D_n` with
`2n` elements acts on `Y_n` by letting the primitive rotation
act as `\sigma_n` and the reflection act as conjugation of
partitions (:meth:`conjugate()`). This action is faithful if
`n \geq 3`.
INPUT:
- ``n`` -- nonnegative integer
- ``exp`` -- (default: 1) how many times `\sigma_n` should be applied
OUTPUT:
The result of applying Suter's diagonal slide `\sigma_n` to
``self``, assuming that ``self`` lies in `Y_n`. If the
optional argument ``exp`` is set, then the slide
`\sigma_n` is applied not just once, but ``exp`` times
(note that ``exp`` is allowed to be negative, since
the slide has finite order).
EXAMPLES::
sage: Partition([5,4,1]).suter_diagonal_slide(8)
[5, 2]
sage: Partition([5,4,1]).suter_diagonal_slide(9)
[5, 2, 1]
sage: Partition([]).suter_diagonal_slide(7)
[1, 1, 1, 1, 1, 1]
sage: Partition([]).suter_diagonal_slide(1)
[]
sage: Partition([]).suter_diagonal_slide(7, exp=-1)
[6]
sage: Partition([]).suter_diagonal_slide(1, exp=-1)
[]
sage: P7 = Partitions(7)
sage: all( p == p.suter_diagonal_slide(9, exp=-1).suter_diagonal_slide(9)
....: for p in P7 )
True
sage: all( p == p.suter_diagonal_slide(9, exp=3)
....: .suter_diagonal_slide(9, exp=3)
....: .suter_diagonal_slide(9, exp=3)
....: for p in P7 )
True
sage: all( p == p.suter_diagonal_slide(9, exp=6)
....: .suter_diagonal_slide(9, exp=6)
....: .suter_diagonal_slide(9, exp=6)
....: for p in P7 )
True
sage: all( p == p.suter_diagonal_slide(9, exp=-1)
....: .suter_diagonal_slide(9, exp=1)
....: for p in P7 )
True
Check of the assertion in [Sut2002]_ that `\sigma_n\bigl( \sigma_n(
\lambda^{\prime})^{\prime} \bigr) = \lambda`::
sage: all( p.suter_diagonal_slide(8).conjugate()
....: == p.conjugate().suter_diagonal_slide(8, exp=-1)
....: for p in P7 )
True
Check of Claim 1 in [Sut2002]_::
sage: P5 = Partitions(5)
sage: all( all( (p.suter_diagonal_slide(6) in q.suter_diagonal_slide(6).down())
....: or (q.suter_diagonal_slide(6) in p.suter_diagonal_slide(6).down())
....: for p in q.down() )
....: for q in P5 )
True
TESTS:
Check for ``exp = 0``::
sage: P = Partitions(4)
sage: all(p == p.suter_diagonal_slide(7, 0) for p in P)
True
Check for invalid input::
sage: p = Partition([2,1])
sage: p.hook_length(0, 0)
3
sage: p.suter_diagonal_slide(2)
Traceback (most recent call last):
...
ValueError: the hook length must be less than n
"""
# Check for valid input
if len(self) > 0 and len(self) + self._list[0] > n: # >, not >=, since we double count the (0,0) cell
raise ValueError("the hook length must be less than n")
ret = self
# Arbitrary exp
exp = exp % n # It is at most order n
if exp > n / 2:
exp -= n
while exp != 0:
leng = len(ret)
if exp > 0:
# Suter's map \sigma_n
if leng == 0: # Taking extra care about the empty partition.
ret = Partition([1] * (n - 1))
exp -= 1
continue
res = [i + 1 for i in ret._list[1:]]
res += [1] * (n - leng - ret._list[0])
ret = Partition(res)
exp -= 1
else: # exp < 0 since if exp == 0, we would exit the while loop
# inverse map \sigma_n^{-1}
if leng == 0: # Taking extra care about the empty partition.
ret = Partition([n - 1])
exp += 1
continue
res = [n - leng - 1]
res.extend([i - 1 for i in ret._list if i > 1])
ret = Partition(res)
exp += 1
return ret
@combinatorial_map(name="reading tableau")
def reading_tableau(self):
r"""
        Return the RSK recording tableau of the reading word of the
        (standard) tableau `T` obtained by entering `1, 2, \ldots, n` down
        the columns of the shape of ``self``, from left to right (in English
        convention).
For an example of the tableau `T`, consider the partition
`\lambda = (3,2,1)`, then we have::
1 4 6
2 5
3
For more, see :func:`~sage.combinat.rsk.RSK()`.
EXAMPLES::
sage: Partition([3,2,1]).reading_tableau()
[[1, 3, 6], [2, 5], [4]]
"""
st = tableau.StandardTableaux(self).first()
return st.reading_word_permutation().right_tableau()
@combinatorial_map(name="initial tableau")
def initial_tableau(self):
r"""
Return the :class:`standard tableau<StandardTableau>` which has the
numbers `1, 2, \ldots, n` where `n` is the :meth:`size` of ``self``
entered in order from left to right along the rows of each component,
where the components are ordered from left to right.
EXAMPLES::
sage: Partition([3,2,2]).initial_tableau()
[[1, 2, 3], [4, 5], [6, 7]]
"""
mu = self._list
# In Python 3, improve this using itertools.accumulate
tab = [list(range(1+sum(mu[:i]), 1+sum(mu[:(i+1)])))
for i in range(len(mu))]
return tableau.StandardTableau(tab)
def initial_column_tableau(self):
r"""
Return the initial column tableau of shape ``self``.
        The initial column tableau of shape ``self`` is the standard tableau
that has the numbers `1` to `n`, where `n` is the :meth:`size` of ``self``,
entered in order from top to bottom and then left to right down the
columns of ``self``.
EXAMPLES::
sage: Partition([3,2]).initial_column_tableau()
[[1, 3, 5], [2, 4]]
"""
return self.conjugate().initial_tableau().conjugate()
def garnir_tableau(self, *cell):
r"""
Return the Garnir tableau of shape ``self`` corresponding to the cell
``cell``. If ``cell`` `= (a,c)` then `(a+1,c)` must belong to the
diagram of ``self``.
The Garnir tableaux play an important role in integral and
non-semisimple representation theory because they determine the
"straightening" rules for the Specht modules over an arbitrary ring.
The Garnir tableaux are the "first" non-standard tableaux which arise
when you act by simple transpositions. If `(a,c)` is a cell in the
Young diagram of a partition, which is not at the bottom of its
column, then the corresponding Garnir tableau has the integers
`1, 2, \ldots, n` entered in order from left to right along the rows
of the diagram up to the cell `(a,c-1)`, then along the cells
`(a+1,1)` to `(a+1,c)`, then `(a,c)` until the end of row `a` and
then continuing from left to right in the remaining positions. The
examples below probably make this clearer!
.. NOTE::
The function also sets ``g._garnir_cell``, where ``g`` is the
resulting Garnir tableau, equal to ``cell`` which is used by
some other functions.
EXAMPLES::
sage: g = Partition([5,3,3,2]).garnir_tableau((0,2)); g.pp()
1 2 6 7 8
3 4 5
9 10 11
12 13
sage: g.is_row_strict(); g.is_column_strict()
True
False
sage: Partition([5,3,3,2]).garnir_tableau(0,2).pp()
1 2 6 7 8
3 4 5
9 10 11
12 13
sage: Partition([5,3,3,2]).garnir_tableau(2,1).pp()
1 2 3 4 5
6 7 8
9 12 13
10 11
sage: Partition([5,3,3,2]).garnir_tableau(2,2).pp()
Traceback (most recent call last):
...
ValueError: (row+1, col) must be inside the diagram
.. SEEALSO::
- :meth:`top_garnir_tableau`
"""
try:
(row, col) = cell
except ValueError:
(row, col) = cell[0]
if row + 1 >= len(self) or col >= self[row+1]:
raise ValueError('(row+1, col) must be inside the diagram')
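        # Start from the initial row tableau and redistribute the entries in
        # the tail of row `row` and the head of row `row+1`, moving the
        # smaller entries into row `row+1`; this produces the Garnir tableau.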
        g = self.initial_tableau().to_list()
        a = g[row][col]
        g[row][col:] = list(range(a + col + 1, g[row+1][col] + 1))
        g[row+1][:col+1] = list(range(a, a + col + 1))
        g = tableau.Tableau(g)
        g._garnir_cell = (row, col)
        return g
    def top_garnir_tableau(self, e, cell):
r"""
Return the most dominant *standard* tableau which dominates the
corresponding Garnir tableau and has the same ``e``-residue.
The Garnir tableau play an important role in integral and non-semisimple
representation theory because they determine the "straightening" rules
for the Specht modules. The *top Garnir tableaux* arise in the graded
representation theory of the symmetric groups and higher level Hecke
algebras. They were introduced in [KMR2012]_.
If the Garnir node is ``cell=(r,c)`` and `m` and `M` are the entries
in the cells ``(r,c)`` and ``(r+1,c)``, respectively, in the initial
tableau then the top ``e``-Garnir tableau is obtained by inserting the
numbers `m, m+1, \ldots, M` in order from left to right first in the
cells in row ``r+1`` which are not in the ``e``-Garnir belt, then in
the cell in rows ``r`` and ``r+1`` which are in the Garnir belt and
then, finally, in the remaining cells in row ``r`` which are not in
the Garnir belt. All other entries in the tableau remain unchanged.
If ``e = 0``, or if there are no ``e``-bricks in either row ``r``
or ``r+1``, then the top Garnir tableau is the corresponding Garnir
tableau.
EXAMPLES::
sage: Partition([5,4,3,2]).top_garnir_tableau(2,(0,2)).pp()
1 2 4 5 8
3 6 7 9
10 11 12
13 14
sage: Partition([5,4,3,2]).top_garnir_tableau(3,(0,2)).pp()
1 2 3 4 5
6 7 8 9
10 11 12
13 14
sage: Partition([5,4,3,2]).top_garnir_tableau(4,(0,2)).pp()
1 2 6 7 8
3 4 5 9
10 11 12
13 14
sage: Partition([5,4,3,2]).top_garnir_tableau(0,(0,2)).pp()
1 2 6 7 8
3 4 5 9
10 11 12
13 14
TESTS::
sage: Partition([5,4,3,2]).top_garnir_tableau(0,(3,2)).pp()
Traceback (most recent call last):
...
ValueError: (4,2)=(row+1,col) must be inside the diagram
REFERENCES:
- [KMR2012]_
"""
        (row, col) = cell
        if row + 1 >= len(self) or col >= self[row+1]:
            raise ValueError('(%s,%s)=(row+1,col) must be inside the diagram' % (row+1, col))
        g = self.garnir_tableau(cell)  # start with the Garnir tableau and modify
        if e == 0:
            return g  # no more dominant tableau of the same residue
        a = e * int((self[row] - col) / e)  # number of cells in the e-bricks in row `row`
        b = e * int((col + 1) / e)  # number of cells in the e-bricks in row `row+1`
        if a == 0 or b == 0:
            return g
        t = g.to_list()
        m = g[row+1][0]  # smallest number in the e-Garnir belt
        # now put the numbers m, m+1, ..., t[row+1][col] in order into t
        t[row][col:a+col] = [m + col - b + 1 + i for i in range(a)]
        t[row+1][col-b+1:col+1] = [m + a + col - b + 1 + i for i in range(b)]
return tableau.StandardTableau(t)
@cached_method
def young_subgroup(self):
r"""
Return the corresponding Young, or parabolic, subgroup of the symmetric
group.
The Young subgroup of a partition
`\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_{\ell})` of `n` is
the group:
.. MATH::
S_{\lambda_1} \times S_{\lambda_2} \times \cdots \times
S_{\lambda_{\ell}}
embedded into `S_n` in the standard way (i.e.,
the `S_{\lambda_i}` factor acts on the numbers from
`\lambda_1 + \lambda_2 + \cdots + \lambda_{i-1} + 1` to
`\lambda_1 + \lambda_2 + \cdots + \lambda_i`).
EXAMPLES::
sage: Partition([4,2]).young_subgroup()
Permutation Group with generators [(), (5,6), (3,4), (2,3), (1,2)]
"""
        gens = []
        m = 0
        for row in self:
            gens.extend([(c, c+1) for c in range(m+1, m+row)])
            m += row
        gens.append(list(range(1, self.size() + 1)))  # to ensure we get a subgroup of Sym_n
        return PermutationGroup(gens)
def young_subgroup_generators(self):
r"""
Return an indexing set for the generators of the corresponding Young
subgroup. Here the generators correspond to the simple adjacent
transpositions `s_i = (i \; i+1)`.
EXAMPLES::
sage: Partition([4,2]).young_subgroup_generators()
[1, 2, 3, 5]
sage: Partition([1,1,1]).young_subgroup_generators()
[]
sage: Partition([2,2]).young_subgroup_generators()
[1, 3]
.. SEEALSO::
:meth:`young_subgroup`
"""
gens = []
m = 0
for row in self:
gens.extend(list(range(m + 1, m + row)))
m += row
return gens
@cached_method
def _initial_degree(self, e, multicharge=(0,)):
r"""
Return the Brundan-Kleshchev-Wang degree of the initial row tableau
of shape ``self``.
This degree depends only the shape of the tableau and it is
used as the base case for computing the degrees of all tableau
of shape ``self``, which is why this method is cached. See
:meth:`sage.combinat.tableau.Tableau.degree` for more information.
EXAMPLES::
sage: Partition([5,3,2])._initial_degree(0)
0
sage: Partition([5,3,2])._initial_degree(2)
4
sage: Partition([5,3,2])._initial_degree(3)
2
sage: Partition([5,3,2])._initial_degree(4)
1
"""
if e == 0:
return ZZ.zero()
else:
return sum(m // e for m in self)
def degree(self, e):
r"""
Return the ``e``-th degree of ``self``.
The `e`-th degree of a partition `\lambda` is the sum of the `e`-th
degrees of the standard tableaux of shape `\lambda`. The `e`-th degree
is the exponent of `\Phi_e(q)` in the Gram determinant of the Specht
module for a semisimple Iwahori-Hecke algebra of type `A` with
parameter `q`.
INPUT:
- ``e`` -- an integer `e > 1`
OUTPUT:
A non-negative integer.
EXAMPLES::
sage: Partition([4,3]).degree(2)
28
sage: Partition([4,3]).degree(3)
15
sage: Partition([4,3]).degree(4)
8
sage: Partition([4,3]).degree(5)
13
sage: Partition([4,3]).degree(6)
0
sage: Partition([4,3]).degree(7)
0
Therefore, the Gram determinant of `S(5,3)` when the Hecke parameter
`q` is "generic" is
.. MATH::
q^N \Phi_2(q)^{28} \Phi_3(q)^{15} \Phi_4(q)^8 \Phi_5(q)^{13}
for some integer `N`. Compare with :meth:`prime_degree`.
"""
return sum(t.degree(e) for t in self.standard_tableaux())
def prime_degree(self, p):
r"""
        Return the prime degree of ``self`` for the prime integer ``p``.
INPUT:
- ``p`` -- a prime integer
OUTPUT:
A non-negative integer
        The prime degree of a partition `\lambda` is the sum of the
        `e`-:meth:`degree` of the standard tableaux of shape `\lambda`, for
        `e` a power of the prime `p`. The prime degree gives the exponent of
`p` in the Gram determinant of the integral Specht module of the
symmetric group.
EXAMPLES::
sage: Partition([4,3]).prime_degree(2)
36
sage: Partition([4,3]).prime_degree(3)
15
sage: Partition([4,3]).prime_degree(5)
13
sage: Partition([4,3]).prime_degree(7)
0
Therefore, the Gram determinant of `S(5,3)` when `q = 1` is
`2^{36} 3^{15} 5^{13}`. Compare with :meth:`degree`.
"""
ps = [p]
while ps[-1] * p < self.size():
ps.append(ps[-1] * p)
return sum(t.degree(pk) for pk in ps for t in self.standard_tableaux())
def arm_length(self, i, j):
r"""
Return the length of the arm of cell `(i,j)` in ``self``.
        The arm of cell `(i,j)` consists of the cells that appear to the
        right of cell `(i,j)`.
        The cell coordinates are zero-based, i.e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
        An integer; a ``ValueError`` is raised if the cell is not in the diagram.
EXAMPLES::
sage: p = Partition([2,2,1])
sage: p.arm_length(0, 0)
1
sage: p.arm_length(0, 1)
0
sage: p.arm_length(2, 0)
0
sage: Partition([3,3]).arm_length(0, 0)
2
sage: Partition([3,3]).arm_length(*[0,0])
2
"""
p = self
if i < len(p) and j < p[i]:
return p[i]-(j+1)
else:
raise ValueError("The cell is not in the diagram")
def arm_lengths(self, flat=False):
"""
Return a tableau of shape ``self`` where each cell is filled with
its arm length. The optional boolean parameter ``flat`` provides
the option of returning a flat list.
EXAMPLES::
sage: Partition([2,2,1]).arm_lengths()
[[1, 0], [1, 0], [0]]
sage: Partition([2,2,1]).arm_lengths(flat=True)
[1, 0, 1, 0, 0]
sage: Partition([3,3]).arm_lengths()
[[2, 1, 0], [2, 1, 0]]
sage: Partition([3,3]).arm_lengths(flat=True)
[2, 1, 0, 2, 1, 0]
"""
p = self
if not flat:
return [[pi - (j + 1) for j in range(pi)] for pi in p]
return [pi - (j + 1) for pi in p for j in range(pi)]
def arm_cells(self, i, j):
r"""
Return the list of the cells of the arm of cell `(i,j)` in ``self``.
        The arm of cell `c = (i,j)` consists of the cells that appear to the
        right of `c`.
        The cell coordinates are zero-based, i.e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
A list of pairs of integers
EXAMPLES::
sage: Partition([4,4,3,1]).arm_cells(1,1)
[(1, 2), (1, 3)]
sage: Partition([]).arm_cells(0,0)
Traceback (most recent call last):
...
ValueError: The cell is not in the diagram
"""
p = self
if i < len(p) and j < p[i]:
return [ (i, x) for x in range(j+1, p[i]) ]
else:
raise ValueError("The cell is not in the diagram")
def leg_length(self, i, j):
"""
Return the length of the leg of cell `(i,j)` in ``self``.
        The leg of cell `c = (i,j)` consists of the cells below `c`
        (in English convention).
        The cell coordinates are zero-based, i.e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
        An integer; a ``ValueError`` is raised if the cell is not in the diagram.
EXAMPLES::
sage: p = Partition([2,2,1])
sage: p.leg_length(0, 0)
2
sage: p.leg_length(0,1)
1
sage: p.leg_length(2,0)
0
sage: Partition([3,3]).leg_length(0, 0)
1
sage: cell = [0,0]; Partition([3,3]).leg_length(*cell)
1
"""
conj = self.conjugate()
if j < len(conj) and i < conj[j]:
return conj[j]-(i+1)
else:
raise ValueError("The cell is not in the diagram")
def leg_lengths(self, flat=False):
"""
Return a tableau of shape ``self`` with each cell filled in with
its leg length. The optional boolean parameter ``flat`` provides
the option of returning a flat list.
EXAMPLES::
sage: Partition([2,2,1]).leg_lengths()
[[2, 1], [1, 0], [0]]
sage: Partition([2,2,1]).leg_lengths(flat=True)
[2, 1, 1, 0, 0]
sage: Partition([3,3]).leg_lengths()
[[1, 1, 1], [0, 0, 0]]
sage: Partition([3,3]).leg_lengths(flat=True)
[1, 1, 1, 0, 0, 0]
"""
p = self
conj = p.conjugate()
if not flat:
return [[conj[j] - (i + 1) for j in range(pi)]
for i, pi in enumerate(p)]
return [conj[j] - (i + 1) for i, pi in enumerate(p)
for j in range(pi)]
def leg_cells(self, i, j):
r"""
Return the list of the cells of the leg of cell `(i,j)` in ``self``.
        The leg of cell `c = (i,j)` consists of the cells below `c` (in
        English convention).
        The cell coordinates are zero-based, i.e., the northwesternmost
cell is `(0,0)`.
INPUT:
- ``i, j`` -- two integers
OUTPUT:
A list of pairs of integers
EXAMPLES::
sage: Partition([4,4,3,1]).leg_cells(1,1)
[(2, 1)]
sage: Partition([4,4,3,1]).leg_cells(0,1)
[(1, 1), (2, 1)]
sage: Partition([]).leg_cells(0,0)
Traceback (most recent call last):
...
ValueError: The cell is not in the diagram
"""
l = self.leg_length(i, j)
return [(x, j) for x in range(i+1, i+l+1)]
def attacking_pairs(self):
"""
Return a list of the attacking pairs of the Young diagram of
``self``.
A pair of cells `(c, d)` of a Young diagram (in English notation) is
said to be attacking if one of the following conditions holds:
1. `c` and `d` lie in the same row with `c` strictly to the west
of `d`.
2. `c` is in the row immediately to the south of `d`, and `c`
lies strictly east of `d`.
This particular method returns each pair `(c, d)` as a tuple,
where each of `c` and `d` is given as a tuple `(i, j)` with
`i` and `j` zero-based (so `i = 0` means that the cell lies
in the topmost row).
EXAMPLES::
sage: p = Partition([3, 2])
sage: p.attacking_pairs()
[((0, 0), (0, 1)),
((0, 0), (0, 2)),
((0, 1), (0, 2)),
((1, 0), (1, 1)),
((1, 1), (0, 0))]
sage: Partition([]).attacking_pairs()
[]
"""
attacking_pairs = []
for i, r in enumerate(self):
for j in range(r):
#c is in position (i,j)
#Find the d that satisfy condition 1
for k in range(j+1, r):
attacking_pairs.append( ((i,j),(i,k)) )
#Find the d that satisfy condition 2
if i == 0:
continue
for k in range(j):
attacking_pairs.append( ((i,j),(i-1,k)) )
return attacking_pairs
def dominated_partitions(self, rows=None):
"""
        Return a list of the partitions dominated by ``self``. If ``rows`` is
specified, then it only returns the ones whose number of rows
is at most ``rows``.
EXAMPLES::
sage: Partition([3,2,1]).dominated_partitions()
[[3, 2, 1], [3, 1, 1, 1], [2, 2, 2], [2, 2, 1, 1], [2, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]
sage: Partition([3,2,1]).dominated_partitions(rows=3)
[[3, 2, 1], [2, 2, 2]]
"""
#Naive implementation because iteration is so fast
n = sum(self)
P = Partitions_n(n)
if rows:
return [P(x) for x in ZS1_iterator_nk(n, rows) if self.dominates(x)]
else:
return [P(x) for x in ZS1_iterator(n) if self.dominates(x)]
def contains(self, x):
"""
Return ``True`` if ``x`` is a partition whose Ferrers diagram is
contained in the Ferrers diagram of ``self``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.contains([2,1])
True
sage: all(p.contains(mu) for mu in Partitions(3))
True
sage: all(p.contains(mu) for mu in Partitions(4))
False
"""
return len(self) >= len(x) and all(self[i] >= x[i] for i in range(len(x)))
def hook_product(self, a):
"""
Return the Jack hook-product.
EXAMPLES::
sage: Partition([3,2,1]).hook_product(x)
(2*x + 3)*(x + 2)^2
sage: Partition([2,2]).hook_product(x)
2*(x + 2)*(x + 1)
"""
nu = self.conjugate()
res = 1
for i in range(len(self)):
for j in range(self[i]):
res *= a*(self[i]-j-1)+nu[j]-i
return res
def hook_polynomial(self, q, t):
"""
Return the two-variable hook polynomial.
EXAMPLES::
sage: R.<q,t> = PolynomialRing(QQ)
sage: a = Partition([2,2]).hook_polynomial(q,t)
sage: a == (1 - t)*(1 - q*t)*(1 - t^2)*(1 - q*t^2)
True
sage: a = Partition([3,2,1]).hook_polynomial(q,t)
sage: a == (1 - t)^3*(1 - q*t^2)^2*(1 - q^2*t^3)
True
"""
nu = self.conjugate()
res = 1
for i in range(len(self)):
for j in range(self[i]):
res *= 1-q**(self[i]-j-1)*t**(nu[j]-i)
return res
def hook_length(self, i, j):
r"""
Return the length of the hook of cell `(i,j)` in ``self``.
The (length of the) hook of cell `(i,j)` of a partition `\lambda`
is
.. MATH::
\lambda_i + \lambda^{\prime}_j - i - j + 1
        where `\lambda^{\prime}` is the conjugate partition, and where the
        indices `i` and `j` in this formula are `1`-based (with the
        `0`-based arguments of this method, the formula reads
        `\lambda_i + \lambda^{\prime}_j - i - j - 1`). In English
        convention, the hook length is the number of cells horizontally
        to the right and vertically below the cell `(i,j)` (including
        that cell).
EXAMPLES::
sage: p = Partition([2,2,1])
sage: p.hook_length(0, 0)
4
sage: p.hook_length(0, 1)
2
sage: p.hook_length(2, 0)
1
sage: Partition([3,3]).hook_length(0, 0)
4
sage: cell = [0,0]; Partition([3,3]).hook_length(*cell)
4
"""
return self.leg_length(i,j)+self.arm_length(i,j)+1
def hooks(self):
"""
Return a sorted list of the hook lengths in ``self``.
EXAMPLES::
sage: Partition([3,2,1]).hooks()
[5, 3, 3, 1, 1, 1]
"""
res = []
for row in self.hook_lengths():
res += row
res.sort(reverse=True)
return res
def hook_lengths(self):
r"""
Return a tableau of shape ``self`` with the cells filled in with the
hook lengths.
In each cell, put the sum of one plus the number of cells
horizontally to the right and vertically below the cell (the
hook length).
For example, consider the partition ``[3,2,1]`` of 6 with Ferrers
diagram::
# # #
# #
#
When we fill in the cells with the hook lengths, we obtain::
5 3 1
3 1
1
EXAMPLES::
sage: Partition([2,2,1]).hook_lengths()
[[4, 2], [3, 1], [1]]
sage: Partition([3,3]).hook_lengths()
[[4, 3, 2], [3, 2, 1]]
sage: Partition([3,2,1]).hook_lengths()
[[5, 3, 1], [3, 1], [1]]
sage: Partition([2,2]).hook_lengths()
[[3, 2], [2, 1]]
sage: Partition([5]).hook_lengths()
[[5, 4, 3, 2, 1]]
REFERENCES:
- http://mathworld.wolfram.com/HookLengthFormula.html
"""
p = self
conj = p.conjugate()
return [[p[i]-(i+1)+conj[j]-(j+1)+1 for j in range(p[i])] for i in range(len(p))]
def upper_hook(self, i, j, alpha):
r"""
Return the upper hook length of the cell `(i,j)` in ``self``.
When ``alpha = 1``, this is just the normal hook length.
The upper hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h^*_\kappa(i,j) = \kappa^\prime_j - i + \alpha(\kappa_i - j + 1).
EXAMPLES::
sage: p = Partition([2,1])
sage: p.upper_hook(0,0,1)
3
sage: p.hook_length(0,0)
3
sage: [ p.upper_hook(i,j,x) for i,j in p.cells() ]
[2*x + 1, x, x]
"""
p = self
conj = self.conjugate()
return conj[j] - (i+1) + alpha*(p[i]-j)
def upper_hook_lengths(self, alpha):
r"""
Return a tableau of shape ``self`` with the cells filled in with the
upper hook lengths. When ``alpha = 1``, these are just the normal hook
lengths.
The upper hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h^*_\kappa(i,j) = \kappa^\prime_j - i + \alpha(\kappa_i - j + 1).
EXAMPLES::
sage: Partition([3,2,1]).upper_hook_lengths(x)
[[3*x + 2, 2*x + 1, x], [2*x + 1, x], [x]]
sage: Partition([3,2,1]).upper_hook_lengths(1)
[[5, 3, 1], [3, 1], [1]]
sage: Partition([3,2,1]).hook_lengths()
[[5, 3, 1], [3, 1], [1]]
"""
p = self
conj = p.conjugate()
return [[conj[j] - (i+1) + alpha*(p[i]-j) for j in range(p[i])] for i in range(len(p))]
def lower_hook(self, i, j, alpha):
r"""
Return the lower hook length of the cell `(i,j)` in ``self``.
When ``alpha = 1``, this is just the normal hook length.
The lower hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h_*^\kappa(i,j) = \kappa^\prime_j - i + 1 + \alpha(\kappa_i - j).
EXAMPLES::
sage: p = Partition([2,1])
sage: p.lower_hook(0,0,1)
3
sage: p.hook_length(0,0)
3
sage: [ p.lower_hook(i,j,x) for i,j in p.cells() ]
[x + 2, 1, 1]
"""
p = self
conj = self.conjugate()
return conj[j] - i + alpha*(p[i] - (j+1))
def lower_hook_lengths(self, alpha):
r"""
Return a tableau of shape ``self`` with the cells filled in with the
lower hook lengths. When ``alpha = 1``, these are just the normal hook
lengths.
The lower hook length of a cell `(i,j)` in a partition
`\kappa` is defined by
.. MATH::
h_*^\kappa(i,j) = \kappa^\prime_j - i + 1 + \alpha(\kappa_i - j).
EXAMPLES::
sage: Partition([3,2,1]).lower_hook_lengths(x)
[[2*x + 3, x + 2, 1], [x + 2, 1], [1]]
sage: Partition([3,2,1]).lower_hook_lengths(1)
[[5, 3, 1], [3, 1], [1]]
sage: Partition([3,2,1]).hook_lengths()
[[5, 3, 1], [3, 1], [1]]
"""
p = self
conj = p.conjugate()
return [[conj[j] - i + alpha*(p[i]-(j+1)) for j in range(p[i])] for i in range(len(p))]
def weighted_size(self):
r"""
Return the weighted size of ``self``.
The weighted size of a partition `\lambda` is
.. MATH::
\sum_i i \cdot \lambda_i,
where `\lambda = (\lambda_0, \lambda_1, \lambda_2, \cdots )`.
        This is also the sum of the leg lengths of all cells in `\lambda`, or
.. MATH::
\sum_i \binom{\lambda^{\prime}_i}{2}
where `\lambda^{\prime}` is the conjugate partition of `\lambda`.
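        For instance, for `\lambda = (3,3,3)` the first formula gives
        `0 \cdot 3 + 1 \cdot 3 + 2 \cdot 3 = 9`, and the second gives
        `\binom{3}{2} + \binom{3}{2} + \binom{3}{2} = 9` as well.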
EXAMPLES::
sage: Partition([2,2]).weighted_size()
2
sage: Partition([3,3,3]).weighted_size()
9
sage: Partition([5,2]).weighted_size()
2
sage: Partition([]).weighted_size()
0
"""
p = self
return sum([i*p[i] for i in range(len(p))])
def is_empty(self):
"""
Return ``True`` if ``self`` is the empty partition.
EXAMPLES::
sage: Partition([]).is_empty()
True
sage: Partition([2,1,1]).is_empty()
False
"""
return len(self) == 0
def length(self):
"""
Return the number of parts in ``self``.
EXAMPLES::
sage: Partition([3,2]).length()
2
sage: Partition([2,2,1]).length()
3
sage: Partition([]).length()
0
"""
return len(self)
def to_exp(self, k=0):
"""
Return a list of the multiplicities of the parts of a partition.
Use the optional parameter ``k`` to get a return list of length at
least ``k``.
EXAMPLES::
sage: Partition([3,2,2,1]).to_exp()
[1, 2, 1]
sage: Partition([3,2,2,1]).to_exp(5)
[1, 2, 1, 0, 0]
TESTS::
sage: [parent(x) for x in Partition([3,2,2,1]).to_exp(5)]
[Integer Ring, Integer Ring, Integer Ring, Integer Ring, Integer Ring]
"""
p = self
if len(p) > 0:
k = max(k, p[0])
a = [ZZ.zero()] * k
for i in p:
a[i-1] += 1
return a
def evaluation(self):
r"""
Return the evaluation of ``self``.
The **commutative evaluation**, often shortened to **evaluation**, of
a word (we think of a partition as a word in `\{1, 2, 3, \ldots\}`)
is its image in the free commutative monoid. In other words,
this counts how many occurrences there are of each letter.
        This is also known as the **Parikh vector** or the
        **abelianization**, and it has the same output as :meth:`to_exp()`.
EXAMPLES::
sage: Partition([4,3,1,1]).evaluation()
[2, 0, 1, 1]
"""
return self.to_exp()
def to_exp_dict(self):
"""
Return a dictionary containing the multiplicities of the parts of
``self``.
EXAMPLES::
sage: p = Partition([4,2,2,1])
sage: d = p.to_exp_dict()
sage: d[4]
1
sage: d[2]
2
sage: d[1]
1
sage: 5 in d
False
"""
d = {}
for part in self:
d[part] = d.get(part, 0) + 1
return d
def centralizer_size(self, t=0, q=0):
r"""
Return the size of the centralizer of any permutation of cycle type
``self``.
        If `m_i` is the multiplicity of `i` as a part of the partition `p`
        (i.e. of ``self``), this is given by
.. MATH::
\prod_i m_i! i^{m_i}.
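        For instance, for `p = (2, 2, 1)` we have `m_1 = 1` and `m_2 = 2`,
        so the product is `1! \cdot 1^1 \cdot 2! \cdot 2^2 = 8`, matching
        the first example below.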
Including the optional parameters `t` and `q` gives the `q,t` analog,
which is the former product times
.. MATH::
\prod_{i=1}^{\mathrm{length}(p)} \frac{1 - q^{p_i}}{1 - t^{p_i}}.
See Section 1.3, p. 24, in [Ke1991]_.
EXAMPLES::
sage: Partition([2,2,1]).centralizer_size()
8
sage: Partition([2,2,2]).centralizer_size()
48
sage: Partition([2,2,1]).centralizer_size(q=2, t=3)
9/16
sage: Partition([]).centralizer_size()
1
sage: Partition([]).centralizer_size(q=2, t=4)
1
TESTS::
sage: Partition([2,2,2]).aut()
48
"""
size = prod(i**mi * factorial(mi)
for i, mi in self.to_exp_dict().items())
if t or q:
size *= prod((ZZ.one() - q ** j) / (ZZ.one() - t ** j)
for j in self)
return size
aut = centralizer_size
def content(self, r, c, multicharge=(0,)):
r"""
Return the content of the cell at row `r` and column `c`.
The content of a cell is `c - r`.
For consistency with partition tuples there is also an optional
``multicharge`` argument which is an offset to the usual content. By
setting the ``multicharge`` equal to the 0-element of the ring
`\ZZ/e\ZZ`, the corresponding `e`-residue will be returned. This is
the content modulo `e`.
        The content (and residue) do not strictly depend on the partition;
        this method is nevertheless included because it is often useful in
        the context of partitions.
EXAMPLES::
sage: Partition([2,1]).content(1,0)
-1
sage: p = Partition([3,2])
sage: sum([p.content(*c) for c in p.cells()])
2
and now we return the 3-residue of a cell::
sage: Partition([2,1]).content(1,0, multicharge=[IntegerModRing(3)(0)])
2
"""
return c - r + multicharge[0]
def residue(self, r, c, l):
r"""
Return the ``l``-residue of the cell at row ``r`` and column ``c``.
The `\ell`-residue of a cell is `c - r` modulo `\ell`.
        This does not strictly depend upon the partition; this method is
        nevertheless included because it is often useful in the context of
        partitions.
EXAMPLES::
sage: Partition([2,1]).residue(1, 0, 3)
2
"""
return (c - r) % l
@cached_method
def block(self, e, multicharge=(0,)):
r"""
Return a dictionary `\beta` that determines the block associated to
the partition ``self`` and the
:meth:`~sage.combinat.tableau_residues.ResidueSequence.quantum_characteristic` ``e``.
INPUT:
- ``e`` -- the quantum characteristic
- ``multicharge`` -- the multicharge (default `(0,)`)
OUTPUT:
- A dictionary giving the multiplicities of the residues in the
partition tuple ``self``
In more detail, the value ``beta[i]`` is equal to the
number of nodes of residue ``i``. This corresponds to
the positive root
.. MATH::
\sum_{i\in I} \beta_i \alpha_i \in Q^+,
        an element of the positive root lattice of the corresponding
        Kac-Moody algebra. See [DJM1998]_ and [BK2009]_ for more details.
        This is a useful statistic because two Specht modules for a
        Hecke algebra of type `A` belong to the same block if and only if
        they correspond to the same element `\beta` of the root lattice
        given above.
We return a dictionary because when the quantum characteristic is `0`,
the Cartan type is `A_{\infty}`, in which case the simple roots are
indexed by the integers.
EXAMPLES::
sage: Partition([4,3,2]).block(0)
{-2: 1, -1: 2, 0: 2, 1: 2, 2: 1, 3: 1}
sage: Partition([4,3,2]).block(2)
{0: 4, 1: 5}
sage: Partition([4,3,2]).block(2, multicharge=(1,))
{0: 5, 1: 4}
sage: Partition([4,3,2]).block(3)
{0: 3, 1: 3, 2: 3}
sage: Partition([4,3,2]).block(4)
{0: 2, 1: 2, 2: 2, 3: 3}
"""
block = {}
Ie = IntegerModRing(e)
for (r,c) in self.cells():
i = Ie(multicharge[0] + c - r)
block[i] = block.get(i, 0) + 1
return block
def defect(self, e, multicharge=(0,)):
r"""
Return the ``e``-defect or the ``e``-weight of ``self``.
The `e`-defect is the number of (connected) `e`-rim hooks that
can be removed from the partition.
The defect of a partition is given by
.. MATH::
\text{defect}(\beta) = (\Lambda, \beta) - \tfrac12(\beta, \beta),
where `\Lambda = \sum_r \Lambda_{\kappa_r}` for the multicharge
`(\kappa_1, \ldots, \kappa_{\ell})` and
`\beta = \sum_{(r,c)} \alpha_{(c-r) \pmod e}`, with the sum
being over the cells in the partition.
INPUT:
- ``e`` -- the quantum characteristic
- ``multicharge`` -- the multicharge (default `(0,)`)
OUTPUT:
- a non-negative integer, which is the defect of the block
containing the partition ``self``
EXAMPLES::
sage: Partition([4,3,2]).defect(2)
3
sage: Partition([0]).defect(2)
0
sage: Partition([3]).defect(2)
1
sage: Partition([6]).defect(2)
3
sage: Partition([9]).defect(2)
4
sage: Partition([12]).defect(2)
6
sage: Partition([4,3,2]).defect(3)
3
sage: Partition([0]).defect(3)
0
sage: Partition([3]).defect(3)
1
sage: Partition([6]).defect(3)
2
sage: Partition([9]).defect(3)
3
sage: Partition([12]).defect(3)
4
TESTS::
sage: all(mu.core(e).size() + e * mu.defect(e) == 9
....: for mu in Partitions(9) for e in [2,3,4])
True
"""
beta = self.block(e, multicharge)
Ie = IntegerModRing(e)
return beta.get(multicharge[0], 0) - sum(beta[r]**2 - beta[r] * beta.get(Ie(r+1), 0)
for r in beta)
def contents_tableau(self, multicharge=(0,)):
"""
        Return the tableau of shape ``self`` whose cell in row ``r`` and
        column ``c`` is filled with the content ``multicharge[0] - r + c``
        of that cell.
EXAMPLES::
sage: Partition([2,1]).contents_tableau()
[[0, 1], [-1]]
sage: Partition([3,2,1,1]).contents_tableau().pp()
0 1 2
-1 0
-2
-3
sage: Partition([3,2,1,1]).contents_tableau([ IntegerModRing(3)(0)] ).pp()
0 1 2
2 0
1
0
"""
return tableau.Tableau([[multicharge[0]-r+c for c in range(self[r])]
for r in range(len(self))])
def is_restricted(self, e, multicharge=(0,)):
"""
        Return ``True`` if ``self`` is an ``e``-restricted partition.
        An `e`-restricted partition is a partition such that the
        difference of consecutive parts is always strictly less
        than `e`, where partitions are considered to have an infinite
        number of trailing `0` parts. In particular, the last nonzero
        part must be strictly less than `e`.
EXAMPLES::
sage: Partition([4,3,3,2]).is_restricted(2)
False
sage: Partition([4,3,3,2]).is_restricted(3)
True
sage: Partition([4,3,3,2]).is_restricted(4)
True
sage: Partition([4]).is_restricted(4)
False
"""
return (not self
or ( self[-1] < e and all(self[r]-self[r+1] < e for r in range(len(self)-1)) ))
def is_regular(self, e, multicharge=(0,)):
"""
        Return ``True`` if ``self`` is an ``e``-regular partition.
        A partition is `e`-regular if it does not have `e` equal
        non-zero parts, i.e., if no part is repeated `e` or more times.
EXAMPLES::
sage: Partition([4,3,3,3]).is_regular(2)
False
sage: Partition([4,3,3,3]).is_regular(3)
False
sage: Partition([4,3,3,3]).is_regular(4)
True
"""
return all(self[r] > self[r+e-1] for r in range(len(self)-e+1))
def conjugacy_class_size(self):
"""
Return the size of the conjugacy class of the symmetric group
indexed by ``self``.
EXAMPLES::
sage: Partition([2,2,2]).conjugacy_class_size()
15
sage: Partition([2,2,1]).conjugacy_class_size()
15
sage: Partition([2,1,1]).conjugacy_class_size()
6
"""
return factorial(sum(self))/self.centralizer_size()
def corners(self):
r"""
Return a list of the corners of the partition ``self``.
A corner of a partition `\lambda` is a cell of the Young diagram
of `\lambda` which can be removed from the Young diagram while
still leaving a straight shape behind.
The entries of the list returned are pairs of the form `(i,j)`,
where `i` and `j` are the coordinates of the respective corner.
The coordinates are counted from `0`.
EXAMPLES::
sage: Partition([3,2,1]).corners()
[(0, 2), (1, 1), (2, 0)]
sage: Partition([3,3,1]).corners()
[(1, 2), (2, 0)]
sage: Partition([]).corners()
[]
"""
p = self
if p.is_empty():
return []
lcors = [[0,p[0]-1]]
nn = len(p)
if nn == 1:
return [tuple(_) for _ in lcors]
lcors_index = 0
for i in range(1, nn):
if p[i] == p[i-1]:
lcors[lcors_index][0] += 1
else:
lcors.append([i,p[i]-1])
lcors_index += 1
return [tuple(_) for _ in lcors]
inside_corners = corners
removable_cells = corners # for compatibility with partition tuples
def corners_residue(self, i, l):
r"""
Return a list of the corners of the partition ``self`` having
``l``-residue ``i``.
A corner of a partition `\lambda` is a cell of the Young diagram
of `\lambda` which can be removed from the Young diagram while
still leaving a straight shape behind. See :meth:`residue` for
the definition of the ``l``-residue.
The entries of the list returned are pairs of the form `(i,j)`,
where `i` and `j` are the coordinates of the respective corner.
The coordinates are counted from `0`.
EXAMPLES::
sage: Partition([3,2,1]).corners_residue(0, 3)
[(1, 1)]
sage: Partition([3,2,1]).corners_residue(1, 3)
[(2, 0)]
sage: Partition([3,2,1]).corners_residue(2, 3)
[(0, 2)]
"""
return [x for x in self.corners() if self.residue(*x, l=l) == i]
inside_corners_residue = corners_residue
removable_cells_residue = corners_residue
def outside_corners(self):
r"""
Return a list of the outside corners of the partition ``self``.
An outside corner (also called a cocorner) of a partition
`\lambda` is a cell on `\ZZ^2` which does not belong to
the Young diagram of `\lambda` but can be added to this Young
diagram to still form a straight-shape Young diagram.
The entries of the list returned are pairs of the form `(i,j)`,
where `i` and `j` are the coordinates of the respective corner.
The coordinates are counted from `0`.
EXAMPLES::
sage: Partition([2,2,1]).outside_corners()
[(0, 2), (2, 1), (3, 0)]
sage: Partition([2,2]).outside_corners()
[(0, 2), (2, 0)]
sage: Partition([6,3,3,1,1,1]).outside_corners()
[(0, 6), (1, 3), (3, 1), (6, 0)]
sage: Partition([]).outside_corners()
[(0, 0)]
"""
p = self
if p.is_empty():
return [(0,0)]
res = [ (0, p[0]) ]
for i in range(1, len(p)):
if p[i-1] != p[i]:
res.append((i,p[i]))
res.append((len(p), 0))
return res
addable_cells = outside_corners # for compatibility with partition tuples
def outside_corners_residue(self, i, l):
r"""
Return a list of the outside corners of the partition ``self``
having ``l``-residue ``i``.
An outside corner (also called a cocorner) of a partition
`\lambda` is a cell on `\ZZ^2` which does not belong to
the Young diagram of `\lambda` but can be added to this Young
diagram to still form a straight-shape Young diagram. See
:meth:`residue` for the definition of the ``l``-residue.
The entries of the list returned are pairs of the form `(i,j)`,
where `i` and `j` are the coordinates of the respective corner.
The coordinates are counted from `0`.
EXAMPLES::
sage: Partition([3,2,1]).outside_corners_residue(0, 3)
[(0, 3), (3, 0)]
sage: Partition([3,2,1]).outside_corners_residue(1, 3)
[(1, 2)]
sage: Partition([3,2,1]).outside_corners_residue(2, 3)
[(2, 1)]
"""
return [x for x in self.outside_corners() if self.residue(*x, l=l) == i]
addable_cells_residue = outside_corners_residue
def rim(self):
r"""
Return the rim of ``self``.
The rim of a partition `\lambda` is defined as the cells which belong
to `\lambda` and which are adjacent to cells not in `\lambda`.
EXAMPLES:
The rim of the partition `[5,5,2,1]` consists of the cells marked with
``#`` below::
****#
*####
##
#
sage: Partition([5,5,2,1]).rim()
[(3, 0), (2, 0), (2, 1), (1, 1), (1, 2), (1, 3), (1, 4), (0, 4)]
sage: Partition([2,2,1]).rim()
[(2, 0), (1, 0), (1, 1), (0, 1)]
sage: Partition([2,2]).rim()
[(1, 0), (1, 1), (0, 1)]
sage: Partition([6,3,3,1,1]).rim()
[(4, 0), (3, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 3), (0, 4), (0, 5)]
sage: Partition([]).rim()
[]
"""
p = self
res = []
prevLen = 1
for i in range(len(p)-1, -1, -1):
for c in range(prevLen-1, p[i]):
res.append((i,c))
prevLen = p[i]
return res
def outer_rim(self):
r"""
Return the outer rim of ``self``.
The outer rim of a partition `\lambda` is defined as the cells which do
not belong to `\lambda` and which are adjacent to cells in `\lambda`.
EXAMPLES:
The outer rim of the partition `[4,1]` consists of the cells marked
with ``#`` below::
****#
*####
##
::
sage: Partition([4,1]).outer_rim()
[(2, 0), (2, 1), (1, 1), (1, 2), (1, 3), (1, 4), (0, 4)]
sage: Partition([2,2,1]).outer_rim()
[(3, 0), (3, 1), (2, 1), (2, 2), (1, 2), (0, 2)]
sage: Partition([2,2]).outer_rim()
[(2, 0), (2, 1), (2, 2), (1, 2), (0, 2)]
sage: Partition([6,3,3,1,1]).outer_rim()
[(5, 0), (5, 1), (4, 1), (3, 1), (3, 2), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6)]
sage: Partition([]).outer_rim()
[(0, 0)]
"""
p = self
res = []
prevLen = 0
for i in range(len(p)-1, -1, -1):
for c in range(prevLen, p[i]+1):
res.append((i+1,c))
prevLen = p[i]
res.append((0, prevLen))
return res
def zero_one_sequence(self):
r"""
Compute the finite `0-1` sequence of the partition.
        The full `0-1` sequence is the sequence (infinite in both
        directions) indicating the steps taken when following the
        outer rim of the diagram of the partition: in English
        convention, a 1 corresponds to an East step, and a 0
        corresponds to a North step.
Note that every full `0-1` sequence starts with infinitely many 0's and
ends with infinitely many 1's.
One place where these arise is in the affine symmetric group where
one takes an affine permutation `w` and every `i` such that
`w(i) \leq 0` corresponds to a 1 and `w(i) > 0` corresponds to a 0.
See pages 24-25 of [LLMSSZ2013]_ for connections to affine Grassmannian
elements (note there they use the French convention for their
partitions).
These are also known as **path sequences**, **Maya diagrams**,
**plus-minus diagrams**, **Comet code** [Sta-EC2]_, among others.
OUTPUT:
        The finite `0-1` sequence is obtained from the full `0-1`
        sequence by omitting all leading 0's and trailing 1's. The
        output sequence is finite, starts with a 1 and ends with a
        0 (unless it is empty, for the empty partition). Its length
        is the largest part of the partition plus the length of the
        partition.
EXAMPLES::
sage: Partition([5,4]).zero_one_sequence()
[1, 1, 1, 1, 0, 1, 0]
sage: Partition([]).zero_one_sequence()
[]
sage: Partition([2]).zero_one_sequence()
[1, 1, 0]
TESTS::
sage: all(Partitions().from_zero_one(mu.zero_one_sequence()) == mu for n in range(10) for mu in Partitions(n))
True
"""
tmp = [self[i]-i for i in range(len(self))]
return ([Integer(not (i in tmp)) for i in range(-len(self)+1,self.get_part(0)+1)])
def core(self, length):
r"""
Return the ``length``-core of the partition -- in the literature
the core is commonly referred to as the `k`-core, `p`-core,
`r`-core, ... .
The `r`-core of a partition `\lambda` can be obtained by
repeatedly removing rim hooks of size `r` from (the Young diagram
of) `\lambda` until this is no longer possible. The remaining
partition is the core.
EXAMPLES::
sage: Partition([6,3,2,2]).core(3)
[2, 1, 1]
sage: Partition([]).core(3)
[]
sage: Partition([8,7,7,4,1,1,1,1,1]).core(3)
[2, 1, 1]
TESTS::
sage: Partition([3,3,3,2,1]).core(3)
[]
sage: Partition([10,8,7,7]).core(4)
[]
sage: Partition([21,15,15,9,6,6,6,3,3]).core(3)
[]
"""
p = self
#Normalize the length
remainder = len(p) % length
part = p[:] + [0]*remainder
#Add the canonical vector to the partition
part = [part[i-1] + len(part)-i for i in range(1, len(part)+1)]
for e in range(length):
k = e
for i in reversed(range(1,len(part)+1)):
if part[i-1] % length == e:
part[i-1] = k
k += length
part.sort()
part.reverse()
#Remove the canonical vector
part = [part[i-1]-len(part)+i for i in range(1, len(part)+1)]
#Select the r-core
return Partition([x for x in part if x != 0])
def quotient(self, length):
r"""
Return the quotient of the partition -- in the literature the
quotient is commonly referred to as the `k`-quotient, `p`-quotient,
`r`-quotient, ... .
The `r`-quotient of a partition `\lambda` is a list of `r`
partitions (labelled from `0` to `r-1`), constructed in the following
way. Label each cell in the Young diagram of `\lambda` with its
content modulo `r`. Let `R_i` be the set of rows ending in a cell
labelled `i`, and `C_i` be the set of columns ending in a cell
labelled `i`. Then the `j`-th component of the quotient of
`\lambda` is the partition defined by intersecting `R_j` with
`C_{j+1}`. (See Theorem 2.7.37 in [JK1981]_.)
EXAMPLES::
sage: Partition([7,7,5,3,3,3,1]).quotient(3)
([2], [1], [2, 2, 2])
TESTS::
sage: Partition([8,7,7,4,1,1,1,1,1]).quotient(3)
([2, 1], [2, 2], [2])
sage: Partition([10,8,7,7]).quotient(4)
([2], [3], [2], [1])
sage: Partition([6,3,3]).quotient(3)
([1], [1], [2])
sage: Partition([3,3,3,2,1]).quotient(3)
([1], [1, 1], [1])
sage: Partition([6,6,6,3,3,3]).quotient(3)
([2, 1], [2, 1], [2, 1])
sage: Partition([21,15,15,9,6,6,6,3,3]).quotient(3)
([5, 2, 1], [5, 2, 1], [7, 3, 2])
sage: Partition([21,15,15,9,6,6,3,3]).quotient(3)
([5, 2], [5, 2, 1], [7, 3, 1])
sage: Partition([14,12,11,10,10,10,10,9,6,4,3,3,2,1]).quotient(5)
([3, 3], [2, 2, 1], [], [3, 3, 3], [1])
sage: all(p == Partition(core=p.core(k), quotient=p.quotient(k))
....: for i in range(10) for p in Partitions(i)
....: for k in range(1,6))
True
"""
p = self
#Normalize the length
remainder = len(p) % length
part = p[:] + [0]*(length-remainder)
#Add the canonical vector to the partition
part = [part[i-1] + len(part)-i for i in range(1, len(part)+1)]
result = [None]*length
#Reducing vector
for e in range(length):
k = e
tmp = []
for i in reversed(range(len(part))):
if part[i] % length == e:
tmp.append(ZZ((part[i]-k)//length))
k += length
a = [i for i in tmp if i != 0]
a.reverse()
result[e] = a
from .partition_tuple import PartitionTuple
return PartitionTuple(result) #tuple(map(Partition, result))
def is_core(self, k):
r"""
Return ``True`` if the Partition ``self`` is a ``k``-core.
A partition is said to be a *`k`-core* if it has no hooks of length
`k`. Equivalently, a partition is said to be a `k`-core if it is its
own `k`-core (where the latter is defined as in :meth:`core`).
Visually, this can be checked by trying to remove border strips of size
`k` from ``self``. If this is not possible, then ``self`` is a
`k`-core.
EXAMPLES:
In the partition (2, 1), a hook length of 2 does not occur, but a hook
length of 3 does::
sage: p = Partition([2, 1])
sage: p.is_core(2)
True
sage: p.is_core(3)
False
sage: q = Partition([12, 8, 5, 5, 2, 2, 1])
sage: q.is_core(4)
False
sage: q.is_core(5)
True
sage: q.is_core(0)
True
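        An illustrative consequence of the equivalence stated above is
        that a `k`-core equals its own `k`-core::
            sage: q.core(5) == q
            True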
.. SEEALSO::
:meth:`core`, :class:`Core`
"""
return k not in self.hooks()
def k_interior(self, k):
r"""
Return the partition consisting of the cells of ``self`` whose hook
lengths are greater than ``k``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.hook_lengths()
[[5, 3, 1], [3, 1], [1]]
sage: p.k_interior(2)
[2, 1]
sage: p.k_interior(3)
[1]
sage: p = Partition([])
sage: p.k_interior(3)
[]
"""
return Partition([len([i for i in row if i > k])
for row in self.hook_lengths()])
def k_boundary(self, k):
r"""
Return the skew partition formed by removing the cells of the
``k``-interior, see :meth:`k_interior`.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.k_boundary(2)
[3, 2, 1] / [2, 1]
sage: p.k_boundary(3)
[3, 2, 1] / [1]
sage: p = Partition([12,8,5,5,2,2,1])
sage: p.k_boundary(4)
[12, 8, 5, 5, 2, 2, 1] / [8, 5, 2, 2]
"""
return SkewPartition([self, self.k_interior(k)])
def add_cell(self, i, j = None):
r"""
Return a partition corresponding to ``self`` with a cell added in
row ``i``. (This does not change ``self``.)
EXAMPLES::
sage: Partition([3, 2, 1, 1]).add_cell(0)
[4, 2, 1, 1]
sage: cell = [4, 0]; Partition([3, 2, 1, 1]).add_cell(*cell)
[3, 2, 1, 1, 1]
"""
if j is None:
if i >= len(self):
j = 0
else:
j = self[i]
if (i,j) in self.outside_corners():
pl = self.to_list()
if i == len(pl):
pl.append(1)
else:
pl[i] += 1
return Partition(pl)
raise ValueError("[%s, %s] is not an addable cell"%(i,j))
def remove_cell(self, i, j = None):
"""
Return the partition obtained by removing a cell at the end of row
``i`` of ``self``.
EXAMPLES::
sage: Partition([2,2]).remove_cell(1)
[2, 1]
sage: Partition([2,2,1]).remove_cell(2)
[2, 2]
sage: #Partition([2,2]).remove_cell(0)
::
sage: Partition([2,2]).remove_cell(1,1)
[2, 1]
sage: #Partition([2,2]).remove_cell(1,0)
"""
if i >= len(self):
raise ValueError("i must be less than the length of the partition")
if j is None:
j = self[i] - 1
if (i,j) not in self.corners():
raise ValueError("[%d,%d] is not a corner of the partition" % (i,j))
if self[i] == 1:
return Partition(self[:-1])
else:
return Partition(self[:i] + [ self[i:i+1][0] - 1 ] + self[i+1:])
def k_irreducible(self, k):
r"""
Return the partition with all `r \times (k+1-r)` rectangles removed.
If ``self`` is a `k`-bounded partition, then this method will return the partition
where all rectangles of dimension `r \times (k+1-r)` for `1 \leq r \leq k`
have been deleted.
If ``self`` is not a `k`-bounded partition then the method will raise an error.
INPUT:
- ``k`` -- a non-negative integer
OUTPUT:
- a partition
EXAMPLES::
sage: Partition([3,2,2,1,1,1]).k_irreducible(4)
[3, 2, 2, 1, 1, 1]
sage: Partition([3,2,2,1,1,1]).k_irreducible(3)
[]
sage: Partition([3,3,3,2,2,2,2,2,1,1,1,1]).k_irreducible(3)
[2, 1]
"""
pexp = self.to_exp()
return Partition(sum(([r+1] for r in range(len(pexp)-1,-1,-1) for m in range(pexp[r] % (k-r))),[]))
def k_skew(self, k):
r"""
Return the `k`-skew partition.
The `k`-skew diagram of a `k`-bounded partition is the skew diagram
denoted `\lambda/^k` satisfying the conditions:
1. row `i` of `\lambda/^k` has length `\lambda_i`,
2. no cell in `\lambda/^k` has hook-length exceeding `k`,
3. every square above the diagram of `\lambda/^k` has hook
length exceeding `k`.
REFERENCES:
- [LM2004]_
EXAMPLES::
sage: p = Partition([4,3,2,2,1,1])
sage: p.k_skew(4)
[9, 5, 3, 2, 1, 1] / [5, 2, 1]
"""
if len(self) == 0:
return SkewPartition([[],[]])
if self[0] > k:
raise ValueError("the partition must be %d-bounded" % k)
#Find the k-skew diagram of the partition formed
#by removing the first row
s = Partition(self[1:]).k_skew(k)
s_inner = list(s.inner())
s_outer = list(s.outer())
s_conj_rl = s.conjugate().row_lengths()
#Find the leftmost column with less than
# or equal to kdiff cells
kdiff = k - self[0]
if s_outer == []:
spot = 0
else:
spot = s_outer[0]
for i in range(len(s_conj_rl)):
if s_conj_rl[i] <= kdiff:
spot = i
break
outer = [ self[0] + spot ] + s_outer[:]
if spot > 0:
inner = [ spot ] + s_inner[:]
else:
inner = s_inner[:]
return SkewPartition([outer, inner])
def to_core(self, k):
r"""
        Maps the `k`-bounded partition ``self`` to its corresponding
        `(k+1)`-core.
See also :meth:`k_skew`.
EXAMPLES::
sage: p = Partition([4,3,2,2,1,1])
sage: c = p.to_core(4); c
[9, 5, 3, 2, 1, 1]
sage: type(c)
<class 'sage.combinat.core.Cores_length_with_category.element_class'>
sage: c.to_bounded_partition() == p
True
"""
from sage.combinat.core import Core
return Core(self.k_skew(k)[0],k+1)
def from_kbounded_to_reduced_word(self, k):
r"""
Maps a `k`-bounded partition to a reduced word for an element in
the affine permutation group.
This uses the fact that there is a bijection between `k`-bounded
partitions and `(k+1)`-cores and an action of the affine nilCoxeter
algebra of type `A_k^{(1)}` on `(k+1)`-cores as described in [LM2006b]_.
EXAMPLES::
sage: p=Partition([2,1,1])
sage: p.from_kbounded_to_reduced_word(2)
[2, 1, 2, 0]
sage: p=Partition([3,1])
sage: p.from_kbounded_to_reduced_word(3)
[3, 2, 1, 0]
sage: p.from_kbounded_to_reduced_word(2)
Traceback (most recent call last):
...
ValueError: the partition must be 2-bounded
sage: p=Partition([])
sage: p.from_kbounded_to_reduced_word(2)
[]
"""
p=self.k_skew(k)[0]
result = []
while not p.is_empty():
corners = p.corners()
c = p.content(corners[0][0],corners[0][1])%(k+1)
result.append(Integer(c))
            cells = [x for x in corners if p.content(x[0], x[1]) % (k+1) == c]
            for x in cells:
                p = p.remove_cell(x[0])
return result
def from_kbounded_to_grassmannian(self, k):
r"""
Maps a `k`-bounded partition to a Grassmannian element in
the affine Weyl group of type `A_k^{(1)}`.
For details, see the documentation of the method
:meth:`from_kbounded_to_reduced_word` .
EXAMPLES::
sage: p=Partition([2,1,1])
sage: p.from_kbounded_to_grassmannian(2)
[-1 1 1]
[-2 2 1]
[-2 1 2]
sage: p=Partition([])
sage: p.from_kbounded_to_grassmannian(2)
[1 0 0]
[0 1 0]
[0 0 1]
"""
return WeylGroup(['A',k,1]).from_reduced_word(self.from_kbounded_to_reduced_word(k))
def to_list(self):
r"""
Return ``self`` as a list.
EXAMPLES::
sage: p = Partition([2,1]).to_list(); p
[2, 1]
sage: type(p)
<... 'list'>
TESTS::
sage: p = Partition([2,1])
sage: pl = p.to_list()
sage: pl[0] = 0; p
[2, 1]
"""
return self._list[:]
def add_vertical_border_strip(self, k):
"""
Return a list of all the partitions that can be obtained by adding
a vertical border strip of length ``k`` to ``self``.
EXAMPLES::
sage: Partition([]).add_vertical_border_strip(0)
[[]]
sage: Partition([]).add_vertical_border_strip(2)
[[1, 1]]
sage: Partition([2,2]).add_vertical_border_strip(2)
[[3, 3], [3, 2, 1], [2, 2, 1, 1]]
sage: Partition([3,2,2]).add_vertical_border_strip(2)
[[4, 3, 2], [4, 2, 2, 1], [3, 3, 3], [3, 3, 2, 1], [3, 2, 2, 1, 1]]
"""
return [p.conjugate() for p in self.conjugate().add_horizontal_border_strip(k)]
def add_horizontal_border_strip(self, k):
"""
Return a list of all the partitions that can be obtained by adding
a horizontal border strip of length ``k`` to ``self``.
EXAMPLES::
sage: Partition([]).add_horizontal_border_strip(0)
[[]]
sage: Partition([]).add_horizontal_border_strip(2)
[[2]]
sage: Partition([2,2]).add_horizontal_border_strip(2)
[[2, 2, 2], [3, 2, 1], [4, 2]]
sage: Partition([3,2,2]).add_horizontal_border_strip(2)
[[3, 2, 2, 2], [3, 3, 2, 1], [4, 2, 2, 1], [4, 3, 2], [5, 2, 2]]
.. TODO::
Reimplement like ``remove_horizontal_border_strip`` using
:class:`IntegerListsLex`
"""
conj = self.conjugate().to_list()
shelf = []
res = []
i = 0
while i < len(conj):
tmp = 1
while i+1 < len(conj) and conj[i] == conj[i+1]:
tmp += 1
i += 1
if i == len(conj)-1 and i > 0 and conj[i] != conj[i-1]:
tmp = 1
shelf.append(tmp)
i += 1
        # add a final shelf of capacity k at the right end of the first row
        shelf.append(k)
        # list all of the positions for cells,
        # filling each shelf from the left to the right
for iv in IntegerVectors(k, len(shelf), outer=shelf):
iv = list(iv) # Make a mutable list
tmp = conj + [0]*k
j = 0
for t in range(len(iv)):
while iv[t] > 0:
tmp[j] += 1
iv[t] -= 1
j += 1
j = sum(shelf[:t+1])
res.append(Partition([u for u in tmp if u != 0]).conjugate())
return res
def remove_horizontal_border_strip(self, k):
"""
        Return the partitions obtained from ``self`` by removing a
        horizontal border strip of length ``k``.
EXAMPLES::
sage: Partition([5,3,1]).remove_horizontal_border_strip(0).list()
[[5, 3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(1).list()
[[5, 3], [5, 2, 1], [4, 3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(2).list()
[[5, 2], [5, 1, 1], [4, 3], [4, 2, 1], [3, 3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(3).list()
[[5, 1], [4, 2], [4, 1, 1], [3, 3], [3, 2, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(4).list()
[[4, 1], [3, 2], [3, 1, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(5).list()
[[3, 1]]
sage: Partition([5,3,1]).remove_horizontal_border_strip(6).list()
[]
The result is returned as an instance of
:class:`Partitions_with_constraints`::
sage: Partition([5,3,1]).remove_horizontal_border_strip(5)
The subpartitions of [5, 3, 1] obtained by removing an horizontal border strip of length 5
TESTS::
sage: Partition([3,2,2]).remove_horizontal_border_strip(2).list()
[[3, 2], [2, 2, 1]]
sage: Partition([3,2,2]).remove_horizontal_border_strip(2).first().parent()
The subpartitions of [3, 2, 2] obtained by removing an horizontal border strip of length 2
sage: Partition([]).remove_horizontal_border_strip(0).list()
[[]]
sage: Partition([]).remove_horizontal_border_strip(6).list()
[]
"""
return Partitions_with_constraints(n = self.size()-k,
min_length = len(self)-1,
max_length = len(self),
floor = self[1:]+[0],
ceiling = self[:],
max_slope = 0,
name = "The subpartitions of {} obtained by removing an horizontal border strip of length {}".format(self,k))
def k_conjugate(self, k):
r"""
Return the ``k``-conjugate of ``self``.
The `k`-conjugate is the partition that is given by the columns of
the `k`-skew diagram of the partition.
We can also define the `k`-conjugate in the following way. Let `P`
denote the bijection from `(k+1)`-cores to `k`-bounded partitions. The
`k`-conjugate of a `(k+1)`-core `\lambda` is
.. MATH::
\lambda^{(k)} = P^{-1}\left( (P(\lambda))^{\prime} \right).
EXAMPLES::
sage: p = Partition([4,3,2,2,1,1])
sage: p.k_conjugate(4)
[3, 2, 2, 1, 1, 1, 1, 1, 1]
"""
return Partition(self.k_skew(k).conjugate().row_lengths())
def arms_legs_coeff(self, i, j):
r"""
This is a statistic on a cell `c = (i,j)` in the diagram of partition
`p` given by
.. MATH::
\frac{ 1 - q^a \cdot t^{\ell + 1} }{ 1 - q^{a + 1} \cdot t^{\ell} }
where `a` is the arm length of `c` and `\ell` is the leg length of `c`.
The coordinates ``i`` and ``j`` of the cell are understood to be
`0`-based, so that ``(0, 0)`` is the northwesternmost cell (in
English notation).
EXAMPLES::
sage: Partition([3,2,1]).arms_legs_coeff(1,1)
(-t + 1)/(-q + 1)
sage: Partition([3,2,1]).arms_legs_coeff(0,0)
(-q^2*t^3 + 1)/(-q^3*t^2 + 1)
sage: Partition([3,2,1]).arms_legs_coeff(*[0,0])
(-q^2*t^3 + 1)/(-q^3*t^2 + 1)
"""
QQqt = PolynomialRing(QQ, ['q', 't'])
(q, t) = QQqt.gens()
if i < len(self) and j < self[i]:
res = (1-q**self.arm_length(i,j) * t**(self.leg_length(i,j)+1))
res /= (1-q**(self.arm_length(i,j)+1) * t**self.leg_length(i,j))
return res
return ZZ.one()
def atom(self):
"""
Return a list of the standard tableaux of size ``self.size()`` whose
atom is equal to ``self``.
EXAMPLES::
sage: Partition([2,1]).atom()
[[[1, 2], [3]]]
sage: Partition([3,2,1]).atom()
[[[1, 2, 3, 6], [4, 5]], [[1, 2, 3], [4, 5], [6]]]
"""
res = []
for tab in tableau.StandardTableaux_size(self.size()):
if tab.atom() == self:
res.append(tab)
return res
def k_atom(self, k):
"""
Return a list of the standard tableaux of size ``self.size()`` whose
``k``-atom is equal to ``self``.
EXAMPLES::
sage: p = Partition([3,2,1])
sage: p.k_atom(1)
[]
sage: p.k_atom(3)
[[[1, 1, 1], [2, 2], [3]],
[[1, 1, 1, 2], [2], [3]],
[[1, 1, 1, 3], [2, 2]],
[[1, 1, 1, 2, 3], [2]]]
sage: Partition([3,2,1]).k_atom(4)
[[[1, 1, 1], [2, 2], [3]], [[1, 1, 1, 3], [2, 2]]]
TESTS::
sage: Partition([1]).k_atom(1)
[[[1]]]
sage: Partition([1]).k_atom(2)
[[[1]]]
sage: Partition([]).k_atom(1)
[[]]
"""
res = [ tableau.Tableau([]) ]
for i in range(len(self)):
res = [ x.promotion_operator( self[-i-1] ) for x in res]
res = sum(res, [])
res = [ y.catabolism_projector(Partition(self[-i-1:]).k_split(k)) for y in res]
res = [ i for i in res if i !=0 and i != [] ]
return res
def k_split(self, k):
"""
Return the ``k``-split of ``self``.
EXAMPLES::
sage: Partition([4,3,2,1]).k_split(3)
[]
sage: Partition([4,3,2,1]).k_split(4)
[[4], [3, 2], [1]]
sage: Partition([4,3,2,1]).k_split(5)
[[4, 3], [2, 1]]
sage: Partition([4,3,2,1]).k_split(6)
[[4, 3, 2], [1]]
sage: Partition([4,3,2,1]).k_split(7)
[[4, 3, 2, 1]]
sage: Partition([4,3,2,1]).k_split(8)
[[4, 3, 2, 1]]
"""
if self == []:
return []
elif k < self[0]:
return []
else:
res = []
part = list(self)
while part != [] and part[0]+len(part)-1 >= k:
p = k - part[0]
res.append( part[:p+1] )
part = part[p+1:]
if part != []:
res.append(part)
return res
def jacobi_trudi(self):
"""
Return the Jacobi-Trudi matrix of ``self`` thought of as a skew
partition. See :meth:`SkewPartition.jacobi_trudi()
<sage.combinat.skew_partition.SkewPartition.jacobi_trudi>`.
EXAMPLES::
sage: part = Partition([3,2,1])
sage: jt = part.jacobi_trudi(); jt
[h[3] h[1] 0]
[h[4] h[2] h[]]
[h[5] h[3] h[1]]
sage: s = SymmetricFunctions(QQ).schur()
sage: h = SymmetricFunctions(QQ).homogeneous()
sage: h( s(part) )
h[3, 2, 1] - h[3, 3] - h[4, 1, 1] + h[5, 1]
sage: jt.det()
h[3, 2, 1] - h[3, 3] - h[4, 1, 1] + h[5, 1]
"""
return SkewPartition([ self, [] ]).jacobi_trudi()
def character_polynomial(self):
r"""
Return the character polynomial associated to the partition
``self``.
The character polynomial `q_\mu` associated to a partition `\mu`
is defined by
.. MATH::
q_\mu(x_1, x_2, \ldots, x_k) = \downarrow \sum_{\alpha \vdash k}
\frac{ \chi^\mu_\alpha }{1^{a_1}2^{a_2}\cdots k^{a_k}a_1!a_2!\cdots
a_k!} \prod_{i=1}^{k} (ix_i-1)^{a_i}
where `k` is the size of `\mu`, and `a_i` is the multiplicity of
`i` in `\alpha`.
It is computed in the following manner:
1. Expand the Schur function `s_\mu` in the power-sum basis,
2. Replace each `p_i` with `ix_i-1`,
3. Apply the umbral operator `\downarrow` to the resulting polynomial.
EXAMPLES::
sage: Partition([1]).character_polynomial()
x - 1
sage: Partition([1,1]).character_polynomial()
1/2*x0^2 - 3/2*x0 - x1 + 1
sage: Partition([2,1]).character_polynomial()
1/3*x0^3 - 2*x0^2 + 8/3*x0 - x2
"""
#Create the polynomial ring we will use
k = self.size()
P = PolynomialRing(QQ, k, 'x')
x = P.gens()
#Expand s_mu in the power sum basis
from sage.combinat.sf.sf import SymmetricFunctions
Sym = SymmetricFunctions(QQ)
s = Sym.schur()
p = Sym.power()
ps_mu = p(s(self))
#Replace each p_i by i*x_i-1
items = ps_mu.monomial_coefficients().items() #items contains a list of (partition, coeff) pairs
partition_to_monomial = lambda part: prod([ (i*x[i-1]-1) for i in part ])
res = [ [partition_to_monomial(mc[0]), mc[1]] for mc in items ]
#Write things in the monomial basis
res = [ prod(pair) for pair in res ]
res = sum( res )
#Apply the umbral operator and return the result
from sage.combinat.misc import umbral_operation
return umbral_operation(res)
def dimension(self, smaller = [], k = 1):
r"""
Return the number of paths from the ``smaller`` partition to
the partition ``self``, where each step consists of adding a
`k`-ribbon while keeping a partition.
Note that a 1-ribbon is just a single cell, so this counts paths
in the Young graph when `k = 1`.
Note also that the default case (`k = 1` and ``smaller = []``)
gives the dimension of the irreducible representation of the
symmetric group corresponding to ``self``.
INPUT:
- ``smaller`` -- a partition (default: an empty list ``[]``)
        - ``k`` -- a positive integer (default: 1)
OUTPUT:
The number of such paths
EXAMPLES:
Looks at the number of ways of getting from ``[5,4]`` to the empty
partition, removing one cell at a time::
sage: mu = Partition([5,4])
sage: mu.dimension()
42
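        This agrees with the hook length formula: the hook lengths of
        ``mu`` are ``[[6, 5, 4, 3, 1], [4, 3, 2, 1]]``, and
        `9! / (6 \cdot 5 \cdot 4 \cdot 3 \cdot 1 \cdot 4 \cdot 3 \cdot 2 \cdot 1)
        = 362880 / 8640 = 42`.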
Same, but removing one 3-ribbon at a time. Note that the 3-core of
``mu`` is empty::
sage: mu.dimension(k=3)
3
The 2-core of ``mu`` is not the empty partition::
sage: mu.dimension(k=2)
0
Indeed, the 2-core of ``mu`` is ``[1]``::
sage: mu.dimension(Partition([1]),k=2)
2
TESTS:
Checks that the sum of squares of dimensions of characters of the
symmetric group is the order of the group::
sage: all(sum(mu.dimension()^2 for mu in Partitions(i))==factorial(i) for i in range(10))
True
A check coming from the theory of `k`-differentiable posets::
sage: k=2; core = Partition([2,1])
sage: all(sum(mu.dimension(core,k=2)^2
....: for mu in Partitions(3+i*2) if mu.core(2) == core)
....: == 2^i*factorial(i) for i in range(10))
True
Checks that the dimension satisfies the obvious recursion relation::
sage: test = lambda larger, smaller: larger.dimension(smaller) == sum(mu.dimension(smaller) for mu in larger.down())
sage: all(test(larger,smaller) for l in range(1,10) for s in range(0,10)
....: for larger in Partitions(l) for smaller in Partitions(s) if smaller != larger)
True
ALGORITHM:
Depending on the parameters given, different simplifications
occur. When `k=1` and ``smaller`` is empty, this function uses
the hook formula. When `k=1` and ``smaller`` is not empty, it
uses a formula from [ORV]_.
When `k \neq 1`, we first check that both ``self`` and
``smaller`` have the same `k`-core, then use the `k`-quotients
and the same algorithm on each of the `k`-quotients.
AUTHORS:
- Paul-Olivier Dehaye (2011-06-07)
"""
larger = self
if smaller == []:
smaller = Partition([])
if k == 1:
if smaller == Partition([]): # In this case, use the hook dimension formula
return factorial(larger.size())/prod(larger.hooks())
else:
if not larger.contains(smaller): # easy case
return 0
else:
# relative dimension
# Uses a formula of Olshanski, Regev, Vershik (see reference)
def inv_factorial(i):
if i < 0:
return 0
else:
return 1/factorial(i)
len_range = list(range(larger.length()))
from sage.matrix.constructor import matrix
M = matrix(QQ,[[inv_factorial(larger.get_part(i)-smaller.get_part(j)-i+j) for i in len_range] for j in len_range])
return factorial(larger.size()-smaller.size())*M.determinant()
else:
larger_core = larger.core(k)
smaller_core = smaller.core(k)
if smaller_core != larger_core: # easy case
return 0
larger_quotients = larger.quotient(k)
smaller_quotients = smaller.quotient(k)
def multinomial_with_partitions(sizes,path_counts):
                # Count the number of ways of performing the k paths in
                # parallel, given the total length allotted to each path
                # (sizes) and the number of paths for each component. A
                # multinomial coefficient chooses the ordering of the
                # components in which each step is taken.
return prod(path_counts) * multinomial(sizes)
sizes = [larger_quotients[i].size()-smaller_quotients[i].size() for i in range(k)]
path_counts = [larger_quotients[i].dimension(smaller_quotients[i]) for i in range(k)]
return multinomial_with_partitions(sizes,path_counts)
def plancherel_measure(self):
r"""
Return the probability of ``self`` under the Plancherel probability
measure on partitions of the same size.
This probability distribution comes from the uniform distribution
on permutations via the Robinson-Schensted correspondence.
See :wikipedia:`Plancherel\_measure`
and :meth:`Partitions_n.random_element_plancherel`.
EXAMPLES::
sage: Partition([]).plancherel_measure()
1
sage: Partition([1]).plancherel_measure()
1
sage: Partition([2]).plancherel_measure()
1/2
sage: [mu.plancherel_measure() for mu in Partitions(3)]
[1/6, 2/3, 1/6]
sage: Partition([5,4]).plancherel_measure()
7/1440
TESTS::
sage: all(sum(mu.plancherel_measure() for mu in Partitions(n))==1 for n in range(10))
True
"""
return self.dimension()**2/factorial(self.size())
def outline(self, variable=None):
r"""
Return the outline of the partition ``self``.
This is a piecewise linear function, normalized so that the area
under the partition ``[1]`` is 2.
INPUT:
        - ``variable`` -- a variable (default: ``'x'`` in the symbolic ring)
EXAMPLES::
sage: [Partition([5,4]).outline()(x=i) for i in range(-10,11)]
[10, 9, 8, 7, 6, 5, 6, 5, 6, 5, 4, 3, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sage: Partition([]).outline()
abs(x)
sage: Partition([1]).outline()
abs(x + 1) + abs(x - 1) - abs(x)
sage: y=sage.symbolic.ring.var("y")
sage: Partition([6,5,1]).outline(variable=y)
abs(y + 6) - abs(y + 5) + abs(y + 4) - abs(y + 3) + abs(y - 1) - abs(y - 2) + abs(y - 3)
TESTS::
sage: integrate(Partition([1]).outline()-abs(x),(x,-10,10))
2
"""
if variable is None:
variable = var('x')
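        # The outline is a superposition of absolute-value kinks: each
        # outside corner contributes +|x + content| and each inside corner
        # contributes -|x + content|.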
outside_contents = [self.content(*c) for c in self.outside_corners()]
inside_contents = [self.content(*c) for c in self.corners()]
return sum(abs(variable+c) for c in outside_contents)\
-sum(abs(variable+c) for c in inside_contents)
def dual_equivalence_graph(self, directed=False, coloring=None):
r"""
Return the dual equivalence graph of ``self``.
Two permutations `p` and `q` in the symmetric group `S_n`
differ by an `i`-*elementary dual equivalence (or dual Knuth)
relation* (where `i` is an integer with `1 < i < n`) when the
following two conditions are satisfied:
        - In the one-line notation of the permutation `p`, the letter
          `i` does not appear in between `i-1` and `i+1`.
- The permutation `q` is obtained from `p` by switching two
of the three letters `i-1, i, i+1` (in its one-line
notation) -- namely, the leftmost and the rightmost one
in order of their appearance in `p`.
Notice that this is equivalent to the statement that the
permutations `p^{-1}` and `q^{-1}` differ by an elementary
Knuth equivalence at positions `i-1, i, i+1`.
Two standard Young tableaux of shape `\lambda` differ by an
`i`-elementary dual equivalence relation (of color `i`), if
their reading words differ by an `i`-elementary dual
equivalence relation.
The *dual equivalence graph* of the partition `\lambda` is the
edge-colored graph whose vertices are the standard Young
tableaux of shape `\lambda`, and whose edges colored by `i` are
given by the `i`-elementary dual equivalences.
INPUT:
- ``directed`` -- (default: ``False``) whether to have the dual
equivalence graph be directed (where we have a directed edge
`S \to T` if `i` appears to the left of `i+1` in the
reading word of `T`; otherwise we have the directed edge
`T \to S`)
- ``coloring`` -- (optional) a function which sends each
integer `i > 1` to a color (as a string, e.g., ``'red'`` or
``'black'``) to be used when visually representing the
resulting graph using dot2tex; the default choice is
``2 -> 'red', 3 -> 'blue', 4 -> 'green', 5 -> 'purple',
6 -> 'brown', 7 -> 'orange', 8 -> 'yellow', anything greater
than 8 -> 'black'``.
REFERENCES:
- [As2008b]_
EXAMPLES::
sage: P = Partition([3,1,1])
sage: G = P.dual_equivalence_graph()
sage: sorted(G.edges())
[([[1, 2, 3], [4], [5]], [[1, 2, 4], [3], [5]], 3),
([[1, 2, 4], [3], [5]], [[1, 2, 5], [3], [4]], 4),
([[1, 2, 4], [3], [5]], [[1, 3, 4], [2], [5]], 2),
([[1, 2, 5], [3], [4]], [[1, 3, 5], [2], [4]], 2),
([[1, 3, 4], [2], [5]], [[1, 3, 5], [2], [4]], 4),
([[1, 3, 5], [2], [4]], [[1, 4, 5], [2], [3]], 3)]
sage: G = P.dual_equivalence_graph(directed=True)
sage: sorted(G.edges())
[([[1, 2, 4], [3], [5]], [[1, 2, 3], [4], [5]], 3),
([[1, 2, 5], [3], [4]], [[1, 2, 4], [3], [5]], 4),
([[1, 3, 4], [2], [5]], [[1, 2, 4], [3], [5]], 2),
([[1, 3, 5], [2], [4]], [[1, 2, 5], [3], [4]], 2),
([[1, 3, 5], [2], [4]], [[1, 3, 4], [2], [5]], 4),
([[1, 4, 5], [2], [3]], [[1, 3, 5], [2], [4]], 3)]
TESTS::
sage: G = Partition([1]).dual_equivalence_graph()
sage: G.vertices()
[[[1]]]
sage: G = Partition([]).dual_equivalence_graph()
sage: G.vertices()
[[]]
sage: P = Partition([3,1,1])
sage: G = P.dual_equivalence_graph(coloring=lambda x: 'red')
sage: G2 = P.dual_equivalence_graph(coloring={2: 'black', 3: 'blue', 4: 'cyan', 5: 'grey'})
sage: G is G2
False
sage: G == G2
True
"""
# We do some custom caching to not recreate the graph, but to make
# copies with the desired coloring (i.e., act like a factory).
try:
if directed:
G = self._DDEG.copy(immutable=False)
else:
G = self._DEG.copy(immutable=False)
if have_dot2tex():
if coloring is None:
d = {2: 'red', 3: 'blue', 4: 'green', 5: 'purple',
6: 'brown', 7: 'orange', 8: 'yellow'}
def coloring(i):
if i in d:
return d[i]
return 'black'
elif isinstance(coloring, dict):
d = coloring
coloring = lambda x: d[x]
G.set_latex_options(format="dot2tex",
edge_labels=True,
color_by_label=coloring)
return G
except AttributeError:
pass
T = list(tableau.StandardTableaux(self))
n = sum(self)
edges = []
to_perms = {t: t.reading_word_permutation() for t in T}
to_tab = {to_perms[k]: k for k in to_perms}
Perm = permutation.Permutations()
for t in T:
pt = list(to_perms[t])
for i in range(2, n):
ii = pt.index(i)
iip = pt.index(i+1)
iim = pt.index(i-1)
l = sorted([iim, ii, iip])
if l[0] != ii:
continue
x = pt[:]
x[l[0]], x[l[2]] = x[l[2]], x[l[0]]
if ii < iip:
e = [t, to_tab[Perm(x)], i]
edges.append(e)
else:
e = [to_tab[Perm(x)], t, i]
edges.append(e)
if directed:
from sage.graphs.digraph import DiGraph
self._DDEG = DiGraph([T, edges], format="vertices_and_edges",
immutable=True, multiedges=True)
else:
from sage.graphs.graph import Graph
self._DEG = Graph([T, edges], format="vertices_and_edges",
immutable=True, multiedges=True)
return self.dual_equivalence_graph(directed, coloring)
##############
# Partitions #
##############
class Partitions(UniqueRepresentation, Parent):
r"""
``Partitions(n, **kwargs)`` returns the combinatorial class of
integer partitions of `n` subject to the constraints given by the
keywords.
Valid keywords are: ``starting``, ``ending``, ``min_part``,
``max_part``, ``max_length``, ``min_length``, ``length``,
``max_slope``, ``min_slope``, ``inner``, ``outer``, ``parts_in``,
``regular``, and ``restricted``. They have the following meanings:
- ``starting=p`` specifies that the partitions should all be less
than or equal to `p` in lex order. This argument cannot be combined
with any other (see :trac:`15467`).
- ``ending=p`` specifies that the partitions should all be greater than
or equal to `p` in lex order. This argument cannot be combined with any
other (see :trac:`15467`).
- ``length=k`` specifies that the partitions have
exactly `k` parts.
- ``min_length=k`` specifies that the partitions have
at least `k` parts.
- ``min_part=k`` specifies that all parts of the
partitions are at least `k`.
- ``inner=p`` specifies that the partitions must contain the
partition `p`.
- ``outer=p`` specifies that the partitions
be contained inside the partition `p`.
- ``min_slope=k`` specifies that the partitions have slope at least
`k`; the slope at position `i` is the difference between the
`(i+1)`-th part and the `i`-th part.
- ``parts_in=S`` specifies that the partitions have parts in the set
`S`, which can be any sequence of pairwise distinct positive
integers. This argument cannot be combined with any other
(see :trac:`15467`).
- ``regular=ell`` specifies that the partitions are `\ell`-regular,
and can only be combined with the ``max_length`` or ``max_part``, but
not both, keywords if `n` is not specified
- ``restricted=ell`` specifies that the partitions are `\ell`-restricted,
and cannot be combined with any other keywords
The ``max_*`` versions, along with ``inner`` and ``ending``, work
analogously.
    Right now, the ``parts_in``, ``starting``, ``ending``, ``regular``, and
    ``restricted`` keyword arguments are mutually exclusive, both with each
    other and with all other keyword arguments. If you specify, say,
    ``parts_in``, all other keyword arguments will be ignored; ``starting``,
    ``ending``, ``regular``, and ``restricted`` work the same way.
EXAMPLES:
If no arguments are passed, then the combinatorial class
of all integer partitions is returned::
sage: Partitions()
Partitions
sage: [2,1] in Partitions()
True
If an integer `n` is passed, then the combinatorial class of integer
partitions of `n` is returned::
sage: Partitions(3)
Partitions of the integer 3
sage: Partitions(3).list()
[[3], [2, 1], [1, 1, 1]]
If ``starting=p`` is passed, then the combinatorial class of partitions
greater than or equal to `p` in lexicographic order is returned::
sage: Partitions(3, starting=[2,1])
Partitions of the integer 3 starting with [2, 1]
sage: Partitions(3, starting=[2,1]).list()
[[2, 1], [1, 1, 1]]
If ``ending=p`` is passed, then the combinatorial class of
partitions at most `p` in lexicographic order is returned::
sage: Partitions(3, ending=[2,1])
Partitions of the integer 3 ending with [2, 1]
sage: Partitions(3, ending=[2,1]).list()
[[3], [2, 1]]
Using ``max_slope=-1`` yields partitions into distinct parts -- each
part differs from the next by at least 1. Use a different
``max_slope`` to get parts that differ by, say, 2::
sage: Partitions(7, max_slope=-1).list()
[[7], [6, 1], [5, 2], [4, 3], [4, 2, 1]]
sage: Partitions(15, max_slope=-1).cardinality()
27
The number of partitions of `n` into odd parts equals the number of
partitions into distinct parts. Let's test that for `n` from 10 to 20::
sage: test = lambda n: Partitions(n, max_slope=-1).cardinality() == Partitions(n, parts_in=[1,3..n]).cardinality()
sage: all(test(n) for n in [10..20])
True
The number of partitions of `n` into distinct parts that differ by
at least 2 equals the number of partitions into parts that equal 1
or 4 modulo 5; this is one of the Rogers-Ramanujan identities::
sage: test = lambda n: Partitions(n, max_slope=-2).cardinality() == Partitions(n, parts_in=([1,6..n] + [4,9..n])).cardinality()
sage: all(test(n) for n in [10..20])
True
Here are some more examples illustrating ``min_part``, ``max_part``,
and ``length``::
sage: Partitions(5,min_part=2)
Partitions of the integer 5 satisfying constraints min_part=2
sage: Partitions(5,min_part=2).list()
[[5], [3, 2]]
::
sage: Partitions(3,max_length=2).list()
[[3], [2, 1]]
::
sage: Partitions(10, min_part=2, length=3).list()
[[6, 2, 2], [5, 3, 2], [4, 4, 2], [4, 3, 3]]
Some examples using the ``regular`` keyword::
sage: Partitions(regular=4)
4-Regular Partitions
sage: Partitions(regular=4, max_length=3)
4-Regular Partitions with max length 3
sage: Partitions(regular=4, max_part=3)
4-Regular 3-Bounded Partitions
sage: Partitions(3, regular=4)
4-Regular Partitions of the integer 3
Some examples using the ``restricted`` keyword::
sage: Partitions(restricted=4)
4-Restricted Partitions
sage: Partitions(3, restricted=4)
4-Restricted Partitions of the integer 3
Here are some further examples using various constraints::
sage: [x for x in Partitions(4)]
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, length=2)]
[[3, 1], [2, 2]]
sage: [x for x in Partitions(4, min_length=2)]
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, max_length=2)]
[[4], [3, 1], [2, 2]]
sage: [x for x in Partitions(4, min_length=2, max_length=2)]
[[3, 1], [2, 2]]
sage: [x for x in Partitions(4, max_part=2)]
[[2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, min_part=2)]
[[4], [2, 2]]
sage: [x for x in Partitions(4, outer=[3,1,1])]
[[3, 1], [2, 1, 1]]
sage: [x for x in Partitions(4, outer=[infinity, 1, 1])]
[[4], [3, 1], [2, 1, 1]]
sage: [x for x in Partitions(4, inner=[1,1,1])]
[[2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(4, max_slope=-1)]
[[4], [3, 1]]
sage: [x for x in Partitions(4, min_slope=-1)]
[[4], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: [x for x in Partitions(11, max_slope=-1, min_slope=-3, min_length=2, max_length=4)]
[[7, 4], [6, 5], [6, 4, 1], [6, 3, 2], [5, 4, 2], [5, 3, 2, 1]]
sage: [x for x in Partitions(11, max_slope=-1, min_slope=-3, min_length=2, max_length=4, outer=[6,5,2])]
[[6, 5], [6, 4, 1], [6, 3, 2], [5, 4, 2]]
Note that if you specify ``min_part=0``, then it will treat the minimum
part as being 1 (see :trac:`13605`)::
sage: [x for x in Partitions(4, length=3, min_part=0)]
[[2, 1, 1]]
sage: [x for x in Partitions(4, min_length=3, min_part=0)]
[[2, 1, 1], [1, 1, 1, 1]]
Except for very special cases, counting is done by brute force iteration
through all the partitions. However the iteration itself has a reasonable
complexity (see :class:`IntegerListsLex`), which allows for
manipulating large partitions::
sage: Partitions(1000, max_length=1).list()
[[1000]]
In particular, getting the first element is also constant time::
sage: Partitions(30, max_part=29).first()
[29, 1]
TESTS::
sage: TestSuite(Partitions(0)).run()
sage: TestSuite(Partitions(5)).run()
sage: TestSuite(Partitions(5, min_part=2)).run()
sage: repr( Partitions(5, min_part=2) )
'Partitions of the integer 5 satisfying constraints min_part=2'
sage: P = Partitions(5, min_part=2)
sage: P.first().parent()
Partitions...
sage: [2,1] in P
False
sage: [2,2,1] in P
False
sage: [3,2] in P
True
sage: Partitions(5, inner=[2,1], min_length=3).list()
[[3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: Partitions(5, inner=Partition([2,2]), min_length=3).list()
[[2, 2, 1]]
sage: Partitions(7, inner=(2, 2), min_length=3).list()
[[4, 2, 1], [3, 3, 1], [3, 2, 2], [3, 2, 1, 1], [2, 2, 2, 1], [2, 2, 1, 1, 1]]
sage: Partitions(5, inner=[2,0,0,0,0,0]).list()
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1]]
sage: Partitions(6, length=2, max_slope=-1).list()
[[5, 1], [4, 2]]
sage: Partitions(length=2, max_slope=-1).list()
Traceback (most recent call last):
...
ValueError: the size must be specified with any keyword argument
sage: Partitions(max_part = 3)
3-Bounded Partitions
Check that :trac:`14145` has been fixed::
sage: 1 in Partitions()
False
Check :trac:`15467`::
sage: Partitions(5,parts_in=[1,2,3,4], length=4)
Traceback (most recent call last):
...
ValueError: The parameters 'parts_in', 'starting' and 'ending' cannot be combined with anything else.
sage: Partitions(5,starting=[3,2], length=2)
Traceback (most recent call last):
...
ValueError: The parameters 'parts_in', 'starting' and 'ending' cannot be combined with anything else.
sage: Partitions(5,ending=[3,2], length=2)
Traceback (most recent call last):
...
ValueError: The parameters 'parts_in', 'starting' and 'ending' cannot be combined with anything else.
sage: Partitions(NN, length=2)
Traceback (most recent call last):
...
ValueError: the size must be specified with any keyword argument
sage: Partitions(('la','la','laaaa'), max_part=8)
Traceback (most recent call last):
...
ValueError: n must be an integer or be equal to one of None, NN, NonNegativeIntegers()
Check that calling ``Partitions`` with ``outer=a`` no longer
mutates ``a`` (:trac:`16234`)::
sage: a = [4,3,2,1,1,1,1]
sage: for p in Partitions(8, outer=a, min_slope=-1):
....: print(p)
[3, 3, 2]
[3, 2, 2, 1]
[3, 2, 1, 1, 1]
[2, 2, 2, 1, 1]
[2, 2, 1, 1, 1, 1]
[2, 1, 1, 1, 1, 1, 1]
sage: a
[4, 3, 2, 1, 1, 1, 1]
Check that ``inner`` and ``outer`` indeed accept a partition as
argument (:trac:`18423`)::
sage: P = Partitions(5, inner=Partition([2,1]), outer=Partition([3,2])); P
Partitions of the integer 5 satisfying constraints inner=[2, 1], outer=[3, 2]
sage: P.list()
[[3, 2]]
"""
@staticmethod
def __classcall_private__(cls, n=None, **kwargs):
"""
Return the correct parent based upon the input.
TESTS::
sage: P = Partitions()
sage: P2 = Partitions(NN)
sage: P is P2
True
sage: P2 = Partitions(NonNegativeIntegers())
sage: P is P2
True
sage: P = Partitions(4)
sage: P2 = Partitions(int(4))
sage: P is P2
True
Check that :trac:`17898` is fixed::
sage: P = Partitions(5, min_slope=0)
sage: list(P)
[[5], [1, 1, 1, 1, 1]]
"""
if n == infinity:
raise ValueError("n cannot be infinite")
if n is None or n is NN or n is NonNegativeIntegers():
if len(kwargs) > 0:
if len(kwargs) == 1:
if 'max_part' in kwargs:
return Partitions_all_bounded(kwargs['max_part'])
if 'regular' in kwargs:
return RegularPartitions_all(kwargs['regular'])
if 'restricted' in kwargs:
return RestrictedPartitions_all(kwargs['restricted'])
elif len(kwargs) == 2:
if 'regular' in kwargs:
if kwargs['regular'] < 1 or kwargs['regular'] not in ZZ:
raise ValueError("the regularity must be a positive integer")
if 'max_part' in kwargs:
return RegularPartitions_bounded(kwargs['regular'], kwargs['max_part'])
if 'max_length' in kwargs:
return RegularPartitions_truncated(kwargs['regular'], kwargs['max_length'])
raise ValueError("the size must be specified with any keyword argument")
return Partitions_all()
elif isinstance(n, (int,Integer)):
if len(kwargs) == 0:
return Partitions_n(n)
if len(kwargs) == 1:
if 'max_part' in kwargs:
return PartitionsGreatestLE(n, kwargs['max_part'])
if 'length' in kwargs:
return Partitions_nk(n, kwargs['length'])
if (len(kwargs) > 1 and
('parts_in' in kwargs or
'starting' in kwargs or
'ending' in kwargs)):
raise ValueError("The parameters 'parts_in', 'starting' and "+
"'ending' cannot be combined with anything else.")
if 'parts_in' in kwargs:
return Partitions_parts_in(n, kwargs['parts_in'])
elif 'starting' in kwargs:
return Partitions_starting(n, kwargs['starting'])
elif 'ending' in kwargs:
return Partitions_ending(n, kwargs['ending'])
elif 'regular' in kwargs:
return RegularPartitions_n(n, kwargs['regular'])
elif 'restricted' in kwargs:
return RestrictedPartitions_n(n, kwargs['restricted'])
# FIXME: should inherit from IntegerListLex, and implement repr, or _name as a lazy attribute
kwargs['name'] = "Partitions of the integer %s satisfying constraints %s"%(n, ", ".join( ["%s=%s"%(key, kwargs[key]) for key in sorted(kwargs)] ))
# min_part is at least 1, and it is 1 by default
kwargs['min_part'] = max(1,kwargs.get('min_part',1))
# max_slope is at most 0, and it is 0 by default
kwargs['max_slope'] = min(0,kwargs.get('max_slope',0))
if kwargs.get('min_slope', -float('inf')) > 0:
raise ValueError("the minimum slope must be non-negative")
if 'outer' in kwargs:
kwargs['max_length'] = min(len(kwargs['outer']),
kwargs.get('max_length', infinity))
kwargs['ceiling'] = tuple(kwargs['outer'])
del kwargs['outer']
if 'inner' in kwargs:
inner = [x for x in kwargs['inner'] if x > 0]
kwargs['floor'] = inner
kwargs['min_length'] = max(len(inner),
kwargs.get('min_length',0))
del kwargs['inner']
return Partitions_with_constraints(n, **kwargs)
raise ValueError("n must be an integer or be equal to one of "
"None, NN, NonNegativeIntegers()")
def __init__(self, is_infinite=False):
"""
Initialize ``self``.
INPUT:
- ``is_infinite`` -- (Default: ``False``) If ``True``, then the number
of partitions in this set is infinite.
EXAMPLES::
sage: Partitions()
Partitions
sage: Partitions(2)
Partitions of the integer 2
"""
if is_infinite:
Parent.__init__(self, category=InfiniteEnumeratedSets())
else:
Parent.__init__(self, category=FiniteEnumeratedSets())
Element = Partition
# add options to class
class options(GlobalOptions):
r"""
Sets and displays the global options for elements of the partition,
skew partition, and partition tuple classes. If no parameters are
set, then the function returns a copy of the options dictionary.
The ``options`` to partitions can be accessed as the method
:obj:`Partitions.options` of :class:`Partitions` and
related parent classes.
@OPTIONS@
EXAMPLES::
sage: P = Partition([4,2,2,1])
sage: P
[4, 2, 2, 1]
sage: Partitions.options.display="exp"
sage: P
1, 2^2, 4
sage: Partitions.options.display="exp_high"
sage: P
4, 2^2, 1
It is also possible to use user-defined functions for the ``display`` and
``latex`` options::
sage: Partitions.options(display=lambda mu: '<%s>' % ','.join('%s'%m for m in mu._list)); P
<4,2,2,1>
sage: Partitions.options(latex=lambda mu: '\\Diagram{%s}' % ','.join('%s'%m for m in mu._list)); latex(P)
\Diagram{4,2,2,1}
sage: Partitions.options(display="diagram", diagram_str="#")
sage: P
####
##
##
#
sage: Partitions.options(diagram_str="*", convention="french")
sage: print(P.ferrers_diagram())
*
**
**
****
Changing the ``convention`` for partitions also changes the ``convention``
option for tableaux and vice versa::
sage: T = Tableau([[1,2,3],[4,5]])
sage: T.pp()
4 5
1 2 3
sage: Tableaux.options.convention="english"
sage: print(P.ferrers_diagram())
****
**
**
*
sage: T.pp()
1 2 3
4 5
sage: Partitions.options._reset()
"""
NAME = 'Partitions'
module = 'sage.combinat.partition'
display = dict(default="list",
description='Specifies how partitions should be printed',
values=dict(list='displayed as a list',
exp_low='in exponential form (lowest first)',
exp_high='in exponential form (highest first)',
diagram='as a Ferrers diagram',
compact_low='compact form of ``exp_low``',
compact_high='compact form of ``exp_high``'),
alias=dict(exp="exp_low", compact="compact_low", array="diagram",
ferrers_diagram="diagram", young_diagram="diagram"),
case_sensitive=False)
latex = dict(default="young_diagram",
description='Specifies how partitions should be latexed',
values=dict(diagram='latex as a Ferrers diagram',
young_diagram='latex as a Young diagram',
list='latex as a list',
exp_high='latex as a list in exponential notation (highest first)',
exp_low='latex as a list in exponential notation (lowest first)'),
alias=dict(exp="exp_low", array="diagram", ferrers_diagram="diagram"),
case_sensitive=False)
diagram_str = dict(default="*",
description='The character used for the cells when printing Ferrers diagrams',
checker=lambda char: isinstance(char,str))
latex_diagram_str = dict(default="\\ast",
description='The character used for the cells when latexing Ferrers diagrams',
checker=lambda char: isinstance(char,str))
convention = dict(link_to=(tableau.Tableaux.options,'convention'))
notation = dict(alt_name='convention')
def __reversed__(self):
"""
A reversed iterator.
EXAMPLES::
sage: [x for x in reversed(Partitions(4))]
[[1, 1, 1, 1], [2, 1, 1], [2, 2], [3, 1], [4]]
"""
if not self.is_finite():
raise NotImplementedError("The set is infinite. This needs a custom reverse iterator")
for i in reversed(range(self.cardinality())):
yield self[i]
def _element_constructor_(self, lst):
"""
Construct an element with ``self`` as parent.
EXAMPLES::
sage: P = Partitions()
sage: p = P([3,3,1]); p
[3, 3, 1]
sage: P(p) is p
True
sage: P([3, 2, 1, 0])
[3, 2, 1]
sage: PT = PartitionTuples()
sage: elt = PT([[4,4,2,2,1]]); elt
([4, 4, 2, 2, 1])
sage: P(elt)
[4, 4, 2, 2, 1]
TESTS::
sage: Partition([3/2])
Traceback (most recent call last):
...
ValueError: all parts of [3/2] should be nonnegative integers
"""
if isinstance(lst, PartitionTuple):
if lst.level() != 1:
raise ValueError('%s is not an element of %s' % (lst, self))
lst = lst[0]
if lst.parent() is self:
return lst
try:
lst = list(map(ZZ, lst))
except TypeError:
raise ValueError('all parts of %s should be nonnegative integers' % repr(lst))
if lst in self:
# trailing zeros are removed in Partition.__init__
return self.element_class(self, lst)
raise ValueError('%s is not an element of %s' % (lst, self))
def __contains__(self, x):
"""
Check if ``x`` is contained in ``self``.
TESTS::
sage: P = Partitions()
sage: Partition([2,1]) in P
True
sage: [2,1] in P
True
sage: [3,2,1] in P
True
sage: [1,2] in P
False
sage: [] in P
True
sage: [0] in P
True
Check that types that represent integers are not excluded::
sage: P = Partitions()
sage: [3/1, 2/2] in P
True
sage: Partition([3/1, 2]) in P
True
Check that non-integers and non-lists are excluded::
sage: P = Partitions()
sage: [2,1.5] in P
False
sage: 0 in P
False
"""
if isinstance(x, Partition):
return True
if isinstance(x, (list, tuple)):
return not x or (all((a in ZZ) and (a >= b) for a, b in zip(x, x[1:]))
and (x[-1] in ZZ) and (x[-1] >= 0))
return False
def subset(self, *args, **kwargs):
r"""
Return ``self`` if no arguments are given, otherwise raises a
``ValueError``.
EXAMPLES::
sage: P = Partitions(5, starting=[3,1]); P
Partitions of the integer 5 starting with [3, 1]
sage: P.subset()
Partitions of the integer 5 starting with [3, 1]
sage: P.subset(ending=[3,1])
Traceback (most recent call last):
...
ValueError: Invalid combination of arguments
"""
if len(args) != 0 or len(kwargs) != 0:
raise ValueError("Invalid combination of arguments")
return self
class Partitions_all(Partitions):
"""
Class of all partitions.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_all() ).run()
"""
def __init__(self):
"""
Initialize ``self``.
TESTS::
sage: P = Partitions()
sage: P.category()
Category of infinite enumerated sets
sage: Partitions().cardinality()
+Infinity
sage: TestSuite(P).run()
"""
Partitions.__init__(self, is_infinite=True)
def subset(self, size=None, **kwargs):
"""
Return the subset of partitions of a given size satisfying the
additional keyword arguments.
EXAMPLES::
sage: P = Partitions()
sage: P.subset(4)
Partitions of the integer 4
"""
if size is None:
return self
return Partitions(size, **kwargs)
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: Partitions() # indirect doctest
Partitions
"""
return "Partitions"
def __iter__(self):
"""
An iterator for all partitions.
EXAMPLES::
sage: p = Partitions()
sage: it = p.__iter__()
sage: [next(it) for i in range(10)]
[[], [1], [2], [1, 1], [3], [2, 1], [1, 1, 1], [4], [3, 1], [2, 2]]
"""
n = 0
while True:
for p in ZS1_iterator(n):
yield self.element_class(self, p)
n += 1
def __reversed__(self):
"""
A reversed iterator for all partitions.
This reverse iterates through partitions of fixed `n` and incrementing
`n` after reaching the end.
EXAMPLES::
sage: p = Partitions()
sage: revit = p.__reversed__()
sage: [next(revit) for i in range(10)]
[[], [1], [1, 1], [2], [1, 1, 1], [2, 1], [3], [1, 1, 1, 1], [2, 1, 1], [2, 2]]
"""
n = 0
while True:
for p in reversed(list(ZS1_iterator(n))):
yield self.element_class(self, p)
n += 1
def from_frobenius_coordinates(self, frobenius_coordinates):
"""
Returns a partition from a pair of sequences of Frobenius coordinates.
EXAMPLES::
sage: Partitions().from_frobenius_coordinates(([],[]))
[]
sage: Partitions().from_frobenius_coordinates(([0],[0]))
[1]
sage: Partitions().from_frobenius_coordinates(([1],[1]))
[2, 1]
sage: Partitions().from_frobenius_coordinates(([6,3,2],[4,1,0]))
[7, 5, 5, 1, 1]
"""
if len(frobenius_coordinates) != 2:
raise ValueError('%s is not a valid partition, two sequences of coordinates are needed'%str(frobenius_coordinates))
else:
a = frobenius_coordinates[0]
b = frobenius_coordinates[1]
if len(a) != len(b):
raise ValueError('%s is not a valid partition, the sequences of coordinates need to be the same length'%str(frobenius_coordinates))
# should add tests to see if a and b are sorted down, nonnegative and strictly decreasing
r = len(a)
if r == 0:
return self.element_class(self, [])
tmp = [a[i]+i+1 for i in range(r)]
# should check that a is strictly decreasing
if a[-1] < 0:
raise ValueError('%s is not a partition, no coordinate can be negative'%str(frobenius_coordinates))
if b[-1] >= 0:
tmp.extend([r]*b[r-1])
else:
raise ValueError('%s is not a partition, no coordinate can be negative'%str(frobenius_coordinates))
for i in range(r-1,0,-1):
if b[i-1]-b[i] > 0:
tmp.extend([i]*(b[i-1]-b[i]-1))
else:
raise ValueError('%s is not a partition, the coordinates need to be strictly decreasing'%str(frobenius_coordinates))
return self.element_class(self, tmp)
def from_beta_numbers(self, beta):
r"""
Return a partition corresponding to a sequence of beta numbers.
A sequence of beta numbers is a strictly increasing sequence
`0 \leq b_1 < \cdots < b_k` of non-negative integers. The
corresponding partition `\mu = (\mu_k, \ldots, \mu_1)` is
given by `\mu_i = b_i - (i-1)`. This gives
a bijection from the set of partitions with at most `k` non-zero parts
to the set of strictly increasing sequences of non-negative integers
of length `k`.
EXAMPLES::
sage: Partitions().from_beta_numbers([0,1,2,4,5,8])
[3, 1, 1]
sage: Partitions().from_beta_numbers([0,2,3,6])
[3, 1, 1]
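Following the formula above, the beta numbers `(2, 3, 6)` give the
parts `2 - 0`, `3 - 1`, `6 - 2`, read in reverse (a further check)::
sage: Partitions().from_beta_numbers([2,3,6])
[4, 2, 2]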
"""
beta.sort() # put them into increasing order just in case
offset = 0
while offset < len(beta)-1 and beta[offset] == offset:
offset += 1
beta = beta[offset:]
mu = [beta[i]-offset-i for i in range(len(beta))]
return self.element_class(self, list(reversed(mu)))
def from_exp(self, exp):
"""
Returns a partition from its list of multiplicities.
EXAMPLES::
sage: Partitions().from_exp([2,2,1])
[3, 2, 2, 1, 1]
"""
p = []
for i in reversed(range(len(exp))):
p += [i+1]*exp[i]
return self.element_class(self, p)
def from_zero_one(self, seq):
r"""
Return a partition from its `0-1` sequence.
The full `0-1` sequence is the sequence (infinite in both
directions) indicating the steps taken when following the
outer rim of the diagram of the partition. We use the convention
that in English convention, a 1 corresponds to an East step, and
a 0 corresponds to a North step.
Note that every full `0-1` sequence starts with infinitely many 0's and
ends with infinitely many 1's.
.. SEEALSO::
:meth:`Partition.zero_one_sequence()`
INPUT:
The input should be a finite sequence of 0's and 1's. The
leading 0's and trailing 1's will be discarded.
EXAMPLES::
sage: Partitions().from_zero_one([])
[]
sage: Partitions().from_zero_one([1,0])
[1]
sage: Partitions().from_zero_one([1, 1, 1, 1, 0, 1, 0])
[5, 4]
Leading 0's and trailing 1's are correctly handled::
sage: Partitions().from_zero_one([0,0,1,1,1,1,0,1,0,1,1,1])
[5, 4]
TESTS::
sage: all(Partitions().from_zero_one(mu.zero_one_sequence()) == mu for n in range(10) for mu in Partitions(n))
True
"""
tmp = [i for i in range(len(seq)) if seq[i] == 0]
return self.element_class(self,[tmp[i]-i for i in range(len(tmp)-1,-1,-1)])
def from_core_and_quotient(self, core, quotient):
"""
Returns a partition from its core and quotient.
Algorithm from mupad-combinat.
EXAMPLES::
sage: Partitions().from_core_and_quotient([2,1], [[2,1],[3],[1,1,1]])
[11, 5, 5, 3, 2, 2, 2]
TESTS::
sage: Partitions().from_core_and_quotient([2,1], [[2,1],[2,3,1],[1,1,1]])
Traceback (most recent call last):
...
ValueError: the quotient [[2, 1], [2, 3, 1], [1, 1, 1]] must be a tuple of partitions
We check that :trac:`11412` is actually fixed::
sage: test = lambda x, k: x == Partition(core=x.core(k),
....: quotient=x.quotient(k))
sage: all(test(mu,k) for k in range(1,5)
....: for n in range(10) for mu in Partitions(n))
True
sage: test2 = lambda core, mus: (
....: Partition(core=core, quotient=mus).core(mus.level()) == core
....: and
....: Partition(core=core, quotient=mus).quotient(mus.level()) == mus)
sage: all(test2(core,mus) # long time (5s on sage.math, 2011)
....: for k in range(1,10)
....: for n_core in range(10-k)
....: for core in Partitions(n_core)
....: if core.core(k) == core
....: for n_mus in range(10-k)
....: for mus in PartitionTuples(k,n_mus))
True
"""
from .partition_tuple import PartitionTuple, PartitionTuples
if quotient not in PartitionTuples():
raise ValueError('the quotient %s must be a tuple of partitions'%quotient)
components = PartitionTuple(quotient).components()
length = len(components)
k = length*max(len(q) for q in components) + len(core)
# k needs to be large enough. this seems to me like the smallest it can be
v = [core[i]-i for i in range(len(core))] + [ -i for i in range(len(core),k) ]
w = [ [x for x in v if (x-i) % length == 0] for i in range(1, length+1) ]
new_w = []
for i in range(length):
lw = len(w[i])
lq = len(components[i])
# k needs to be chosen so lw >= lq
new_w += [ w[i][j] + length*components[i][j] for j in range(lq)]
new_w += [ w[i][j] for j in range(lq,lw)]
new_w.sort(reverse=True)
return self.element_class(self, [new_w[i]+i for i in range(len(new_w))])
class Partitions_all_bounded(Partitions):
def __init__(self, k):
"""
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_all_bounded(3) ).run() # long time
"""
self.k = k
Partitions.__init__(self, is_infinite=True)
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(max_part=3)
sage: Partition([2,1]) in P
True
sage: [2,1] in P
True
sage: [3,2,1] in P
True
sage: [1,2] in P
False
sage: [5,1] in P
False
sage: [0] in P
True
sage: [] in P
True
"""
return not x or (x[0] <= self.k and x in _Partitions)
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import Partitions_all_bounded
sage: Partitions_all_bounded(3)
3-Bounded Partitions
"""
return "%d-Bounded Partitions"%self.k
def __iter__(self):
"""
An iterator for all `k`-bounded partitions.
EXAMPLES::
sage: p = Partitions(max_part=3)
sage: it = p.__iter__()
sage: [next(it) for i in range(10)]
[[], [1], [2], [1, 1], [3], [2, 1], [1, 1, 1], [3, 1], [2, 2], [2, 1, 1]]
"""
n = 0
while True:
for p in Partitions(n, max_part=self.k):
yield self.element_class(self, p)
n += 1
class Partitions_n(Partitions):
"""
Partitions of the integer `n`.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_n(0) ).run()
"""
def __init__(self, n):
"""
Initialize ``self``.
TESTS::
sage: TestSuite( Partitions(5) ).run()
"""
Partitions.__init__(self)
self.n = n
def __contains__(self, x):
"""
Check if ``x`` is contained in ``self``.
TESTS::
sage: p = Partitions(5)
sage: [2,1] in p
False
sage: [2,2,1] in p
True
sage: [3,2] in p
True
sage: [2,3] in p
False
"""
return x in _Partitions and sum(x) == self.n
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: Partitions(5) # indirect doctest
Partitions of the integer 5
"""
return "Partitions of the integer %s"%self.n
def _an_element_(self):
"""
Returns a partition in ``self``.
EXAMPLES::
sage: Partitions(4).an_element() # indirect doctest
[3, 1]
sage: Partitions(0).an_element()
[]
sage: Partitions(1).an_element()
[1]
"""
if self.n == 0:
lst = []
elif self.n == 1:
lst = [1]
else:
lst = [self.n-1, 1]
return self.element_class(self, lst)
def cardinality(self, algorithm='flint'):
r"""
Return the number of partitions of the specified size.
INPUT:
- ``algorithm`` -- (default: ``'flint'``)
- ``'flint'`` -- use FLINT (currently the fastest)
- ``'gap'`` -- use GAP (VERY *slow*)
- ``'pari'`` -- use PARI. Speed seems the same as GAP until
`n` is in the thousands, in which case PARI is faster.
It is possible to associate with every partition of the integer `n` a
conjugacy class of permutations in the symmetric group on `n` points
and vice versa. Therefore the number of partitions `p_n` is the number
of conjugacy classes of the symmetric group on `n` points.
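For instance, the symmetric group on 5 points has as many conjugacy
classes as there are partitions of `5` (a quick illustration, assuming
the permutation-group interface)::
sage: len(SymmetricGroup(5).conjugacy_classes())
7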
EXAMPLES::
sage: v = Partitions(5).list(); v
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
sage: len(v)
7
sage: Partitions(5).cardinality(algorithm='gap')
7
sage: Partitions(5).cardinality(algorithm='pari')
7
sage: number_of_partitions(5, algorithm='flint')
7
The input must be a nonnegative integer; otherwise a ``ValueError`` is raised.
::
sage: Partitions(10).cardinality()
42
sage: Partitions(3).cardinality()
3
sage: Partitions(10).cardinality()
42
sage: Partitions(3).cardinality(algorithm='pari')
3
sage: Partitions(10).cardinality(algorithm='pari')
42
sage: Partitions(40).cardinality()
37338
sage: Partitions(100).cardinality()
190569292
A generating function for `p_n` is given by the reciprocal of
Euler's function:
.. MATH::
\sum_{n=0}^{\infty} p_n x^n = \prod_{k=1}^{\infty} \frac{1}{1-x^k}.
We use Sage to verify that the first several coefficients do
indeed agree::
sage: q = PowerSeriesRing(QQ, 'q', default_prec=9).gen()
sage: prod([(1-q^k)^(-1) for k in range(1,9)]) ## partial product of the reciprocal of Euler's function
1 + q + 2*q^2 + 3*q^3 + 5*q^4 + 7*q^5 + 11*q^6 + 15*q^7 + 22*q^8 + O(q^9)
sage: [Partitions(k).cardinality() for k in range(2,10)]
[2, 3, 5, 7, 11, 15, 22, 30]
Another consistency test for ``n`` up to 500::
sage: len([n for n in [1..500] if Partitions(n).cardinality() != Partitions(n).cardinality(algorithm='pari')])
0
REFERENCES:
- :wikipedia:`Partition\_(number\_theory)`
"""
if algorithm == 'flint':
return cached_number_of_partitions(self.n)
elif algorithm == 'gap':
from sage.libs.gap.libgap import libgap
return ZZ(libgap.NrPartitions(ZZ(self.n)))
elif algorithm == 'pari':
return ZZ(pari(ZZ(self.n)).numbpart())
raise ValueError("unknown algorithm '%s'" % algorithm)
def random_element(self, measure='uniform'):
"""
Return a random partition of `n` for the specified measure.
INPUT:
- ``measure`` -- ``'uniform'`` or ``'Plancherel'``
(default: ``'uniform'``)
.. SEEALSO::
- :meth:`random_element_uniform`
- :meth:`random_element_plancherel`
EXAMPLES::
sage: Partitions(5).random_element() # random
[2, 1, 1, 1]
sage: Partitions(5).random_element(measure='Plancherel') # random
[2, 1, 1, 1]
"""
if measure == 'uniform':
return self.random_element_uniform()
elif measure == 'Plancherel':
return self.random_element_plancherel()
else:
raise ValueError("Unkown measure: %s" % (measure))
def random_element_uniform(self):
"""
Return a random partition of `n` with uniform probability.
EXAMPLES::
sage: Partitions(5).random_element_uniform() # random
[2, 1, 1, 1]
sage: Partitions(20).random_element_uniform() # random
[9, 3, 3, 2, 2, 1]
TESTS::
sage: all(Part.random_element_uniform() in Part
....: for Part in map(Partitions, range(10)))
True
Check that :trac:`18752` is fixed::
sage: P = Partitions(5)
sage: la = P.random_element_uniform()
sage: la.parent() is P
True
ALGORITHM:
- It is a Python implementation of RANDPAR; see [NW1978]_. The
complexity is unknown; there may be better algorithms.
.. TODO::
Check in Knuth AOCP4.
- There is also certainly a lot of room for optimizations, see
comments in the code.
AUTHOR:
- Florent Hivert (2009-11-23)
"""
n = self.n
res = [] # A dictionary of multiplicities could be faster.
while n > 0:
# Choose a pair d,j = 1,2..., with d*j <= n with probability
# d*numpart(n-d*j) / n / numpart(n)
# and add d^j to the result partition. The resulting partitions is
# equiprobable.
# The following could be made faster by a clever use of floats
rand = randrange(0, n*cached_number_of_partitions(n)) # cached number_of_partitions
# It is better to start by the j = 1 pairs because they are the
# most probable. Maybe there is an even more clever order.
for j in range(1, n+1):
d = 1
r = n-j # n - d*j
while r >= 0:
rand -= d * cached_number_of_partitions(r)
if rand < 0:
break
d += 1
r -= j
else:
continue
break
res.extend([d]*j)
n = r
res.sort(reverse=True)
return self.element_class(self, res)
def random_element_plancherel(self):
r"""
Return a random partition of `n` (for the Plancherel measure).
This probability distribution comes from the uniform distribution
on permutations via the Robinson-Schensted correspondence.
See :wikipedia:`Plancherel\_measure`
and :meth:`Partition.plancherel_measure`.
EXAMPLES::
sage: Partitions(5).random_element_plancherel() # random
[2, 1, 1, 1]
sage: Partitions(20).random_element_plancherel() # random
[9, 3, 3, 2, 2, 1]
TESTS::
sage: all(Part.random_element_plancherel() in Part
....: for Part in map(Partitions, range(10)))
True
Check that :trac:`18752` is fixed::
sage: P = Partitions(5)
sage: la = P.random_element_plancherel()
sage: la.parent() is P
True
ALGORITHM:
- Insert a uniformly random permutation of `n` points via
Robinson-Schensted and return the shape of the resulting tableau.
The complexity is `O(n\ln(n))`, which is likely optimal; however,
the implementation could be optimized.
AUTHOR:
- Florent Hivert (2009-11-23)
"""
T = permutation.Permutations(self.n).random_element().left_tableau()
return self.element_class(self, [len(row) for row in T])
def first(self):
"""
Returns the lexicographically first partition of a positive integer
`n`. This is the partition ``[n]``.
EXAMPLES::
sage: Partitions(4).first()
[4]
"""
return self.element_class(self, [self.n])
def next(self, p):
"""
Return the lexicographically next partition after the partition ``p``.
EXAMPLES::
sage: Partitions(4).next([4])
[3, 1]
sage: Partitions(4).next([1,1,1,1]) is None
True
"""
found = False
for i in self:
if found:
return i
if i == p:
found = True
return None
def last(self):
"""
Return the lexicographically last partition of the positive
integer `n`. This is the all-ones partition.
EXAMPLES::
sage: Partitions(4).last()
[1, 1, 1, 1]
"""
return self.element_class(self, [1]*self.n)
def __iter__(self):
"""
An iterator for the partitions of `n`.
EXAMPLES::
sage: [x for x in Partitions(4)]
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
"""
for p in ZS1_iterator(self.n):
yield self.element_class(self, p)
def subset(self, **kwargs):
r"""
Return a subset of ``self`` with the additional optional arguments.
EXAMPLES::
sage: P = Partitions(5); P
Partitions of the integer 5
sage: P.subset(starting=[3,1])
Partitions of the integer 5 starting with [3, 1]
"""
return Partitions(self.n, **kwargs)
class Partitions_nk(Partitions):
"""
Partitions of the integer `n` of length equal to `k`.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_nk(0,0) ).run()
"""
def __init__(self, n, k):
"""
Initialize ``self``.
TESTS::
sage: TestSuite( Partitions(5, length=2) ).run()
"""
Partitions.__init__(self)
self.n = n
self.k = k
def __contains__(self, x):
"""
Check if ``x`` is contained in ``self``.
TESTS::
sage: p = Partitions(5, length=2)
sage: [2,1] in p
False
sage: [2,2,1] in p
False
sage: [3,2] in p
True
sage: [2,3] in p
False
sage: [4,1] in p
True
sage: [1,1,1,1,1] in p
False
sage: [5] in p
False
"""
return x in _Partitions and sum(x) == self.n and len(x) == self.k
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: Partitions(5, length=2) # indirect doctest
Partitions of the integer 5 of length 2
"""
return "Partitions of the integer {} of length {}".format(self.n, self.k)
def _an_element_(self):
"""
Returns a partition in ``self``.
EXAMPLES::
sage: Partitions(4, length=1).an_element() # indirect doctest
[4]
sage: Partitions(4, length=2).an_element()
[3, 1]
sage: Partitions(4, length=3).an_element()
[2, 1, 1]
sage: Partitions(4, length=4).an_element()
[1, 1, 1, 1]
sage: Partitions(1, length=1).an_element()
[1]
sage: Partitions(0, length=0).an_element()
[]
"""
if self.n == 0:
if self.k == 0:
lst = []
else:
from sage.categories.sets_cat import EmptySetError
raise EmptySetError
elif self.n >= self.k > 0:
lst = [self.n - self.k + 1] + [1] * (self.k-1)
else:
from sage.categories.sets_cat import EmptySetError
raise EmptySetError
return self.element_class(self, lst)
def __iter__(self):
"""
An iterator for all partitions of `n` of length `k`.
EXAMPLES::
sage: p = Partitions(9, length=3)
sage: it = p.__iter__()
sage: list(it)
[[7, 1, 1], [6, 2, 1], [5, 3, 1], [5, 2, 2], [4, 4, 1], [4, 3, 2], [3, 3, 3]]
sage: p = Partitions(9, length=10)
sage: list(p.__iter__())
[]
sage: p = Partitions(0, length=0)
sage: list(p.__iter__())
[[]]
sage: from sage.combinat.partition import number_of_partitions_length
sage: all( len(Partitions(n, length=k).list())
....: == number_of_partitions_length(n, k)
....: for n in range(9) for k in range(n+2) )
True
"""
for p in ZS1_iterator_nk(self.n - self.k, self.k):
v = [i + 1 for i in p]
adds = [1] * (self.k - len(v))
yield self.element_class(self, v + adds)
def cardinality(self, algorithm='hybrid'):
r"""
Return the number of partitions of the specified size with the
specified length.
INPUT:
- ``algorithm`` -- (default: ``'hybrid'``) the algorithm to compute
the cardinality and can be one of the following:
* ``'hybrid'`` - use a hybrid algorithm which uses heuristics to
reduce the complexity
* ``'gap'`` - use GAP
EXAMPLES::
sage: v = Partitions(5, length=2).list(); v
[[4, 1], [3, 2]]
sage: len(v)
2
sage: Partitions(5, length=2).cardinality()
2
More generally, the number of partitions of `n` of length `2`
is `\left\lfloor \frac{n}{2} \right\rfloor`::
sage: all( Partitions(n, length=2).cardinality()
....: == n // 2 for n in range(10) )
True
The number of partitions of `n` of length `1` is `1` for `n`
positive::
sage: all( Partitions(n, length=1).cardinality() == 1
....: for n in range(1, 10) )
True
Further examples::
sage: Partitions(5, length=3).cardinality()
2
sage: Partitions(6, length=3).cardinality()
3
sage: Partitions(8, length=4).cardinality()
5
sage: Partitions(8, length=5).cardinality()
3
sage: Partitions(15, length=6).cardinality()
26
sage: Partitions(0, length=0).cardinality()
1
sage: Partitions(0, length=1).cardinality()
0
sage: Partitions(1, length=0).cardinality()
0
sage: Partitions(1, length=4).cardinality()
0
TESTS:
We check the hybrid approach gives the same results as GAP::
sage: N = [0, 1, 2, 3, 5, 10, 20, 500, 850]
sage: K = [0, 1, 2, 3, 5, 10, 11, 20, 21, 250, 499, 500]
sage: all(Partitions(n,length=k).cardinality() == Partitions(n,length=k).cardinality('gap')
....: for n in N for k in K)
True
sage: P = Partitions(4562, length=2800)
sage: P.cardinality() == P.cardinality('gap')
True
"""
return number_of_partitions_length(self.n, self.k, algorithm)
def subset(self, **kwargs):
r"""
Return a subset of ``self`` with the additional optional arguments.
EXAMPLES::
sage: P = Partitions(5, length=2); P
Partitions of the integer 5 of length 2
sage: P.subset(max_part=3)
Partitions of the integer 5 satisfying constraints length=2, max_part=3
"""
return Partitions(self.n, length=self.k, **kwargs)
class Partitions_parts_in(Partitions):
"""
Partitions of `n` with parts in a given set `S`.
This is invoked indirectly when calling
``Partitions(n, parts_in=parts)``, where ``parts`` is a list of
pairwise distinct integers.
TESTS::
sage: TestSuite( sage.combinat.partition.Partitions_parts_in(6, parts=[2,1]) ).run()
"""
@staticmethod
def __classcall_private__(cls, n, parts):
"""
Normalize the input to ensure a unique representation.
TESTS::
sage: P = Partitions(4, parts_in=[2,1])
sage: P2 = Partitions(4, parts_in=(1,2))
sage: P is P2
True
"""
parts = tuple(sorted(parts))
return super(Partitions_parts_in, cls).__classcall__(cls, Integer(n), parts)
def __init__(self, n, parts):
"""
Initialize ``self``.
TESTS::
sage: TestSuite(Partitions(5, parts_in=[1,2,3])).run()
"""
Partitions.__init__(self)
self.n = n
self.parts = list(parts)
def __contains__(self, x):
"""
TESTS::
sage: p = Partitions(5, parts_in=[1,2])
sage: [2,1,1,1] in p
True
sage: [4,1] in p
False
"""
return (x in _Partitions and sum(x) == self.n and
all(p in self.parts for p in x))
def _repr_(self):
"""
TESTS::
sage: Partitions(5, parts_in=[1,2,3]) # indirect doctest
Partitions of the integer 5 with parts in [1, 2, 3]
"""
return "Partitions of the integer %s with parts in %s" % (self.n, self.parts)
def cardinality(self):
r"""
Return the number of partitions of `n` with parts in the allowed
set. Wraps GAP's ``NrRestrictedPartitions``.
EXAMPLES::
sage: Partitions(15, parts_in=[2,3,7]).cardinality()
5
If you can use all parts 1 through `n`, we'd better get `p(n)`::
sage: Partitions(20, parts_in=[1..20]).cardinality() == Partitions(20).cardinality()
True
TESTS:
Let's check the consistency of GAP's function and our own
algorithm that actually generates the partitions::
sage: ps = Partitions(15, parts_in=[1,2,3])
sage: ps.cardinality() == len(ps.list())
True
sage: ps = Partitions(15, parts_in=[])
sage: ps.cardinality() == len(ps.list())
True
sage: ps = Partitions(3000, parts_in=[50,100,500,1000])
sage: ps.cardinality() == len(ps.list())
True
sage: ps = Partitions(10, parts_in=[3,6,9])
sage: ps.cardinality() == len(ps.list())
True
sage: ps = Partitions(0, parts_in=[1,2])
sage: ps.cardinality() == len(ps.list())
True
"""
# GAP complains if you give it an empty list
if self.parts:
from sage.libs.gap.libgap import libgap
return ZZ(libgap.NrRestrictedPartitions(ZZ(self.n), self.parts))
return Integer(self.n == 0)
def first(self):
"""
Return the lexicographically first partition of a positive
integer `n` with the specified parts, or ``None`` if no such
partition exists.
EXAMPLES::
sage: Partitions(9, parts_in=[3,4]).first()
[3, 3, 3]
sage: Partitions(6, parts_in=[1..6]).first()
[6]
sage: Partitions(30, parts_in=[4,7,8,10,11]).first()
[11, 11, 8]
"""
try:
return self.element_class(self, self._findfirst(self.n, self.parts[:]))
except TypeError:
return None
def _findfirst(self, n, parts):
"""
TESTS::
sage: p = Partitions(9, parts_in=[3,4])
sage: p._findfirst(p.n, p.parts[:])
[3, 3, 3]
sage: p._findfirst(0, p.parts[:])
[]
sage: p._findfirst(p.n, [10])
"""
if n == 0:
return []
else:
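# Greedy search: take as many copies as possible of the largest
# remaining part, then recurse on the remainder. Backtracking is
# driven by the TypeError raised when the recursive call returns
# None (no partition of the remainder exists).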
while parts:
p = parts.pop()
for k in range(n.quo_rem(p)[0], 0, -1):
try:
return k * [p] + self._findfirst(n - k * p, parts[:])
except TypeError:
pass
def last(self):
"""
Return the lexicographically last partition of the positive
integer `n` with the specified parts, or ``None`` if no such
partition exists.
EXAMPLES::
sage: Partitions(15, parts_in=[2,3]).last()
[3, 2, 2, 2, 2, 2, 2]
sage: Partitions(30, parts_in=[4,7,8,10,11]).last()
[7, 7, 4, 4, 4, 4]
sage: Partitions(10, parts_in=[3,6]).last() is None
True
sage: Partitions(50, parts_in=[11,12,13]).last()
[13, 13, 12, 12]
sage: Partitions(30, parts_in=[4,7,8,10,11]).last()
[7, 7, 4, 4, 4, 4]
TESTS::
sage: Partitions(6, parts_in=[1..6]).last()
[1, 1, 1, 1, 1, 1]
sage: Partitions(0, parts_in=[]).last()
[]
sage: Partitions(50, parts_in=[11,12]).last() is None
True
"""
try:
return self.element_class(self, self._findlast(self.n, self.parts))
except TypeError:
return None
def _findlast(self, n, parts):
"""
Return the lexicographically largest partition of `n` using the
given parts, or ``None`` if no such partition exists. This function
is not intended to be called directly.
INPUT:
- ``n`` -- nonnegative integer
- ``parts`` -- a sorted list of positive integers.
OUTPUT:
A list of integers in weakly decreasing order, or ``None``. The
output is just a list, not a partition object.
EXAMPLES::
sage: ps = Partitions(1, parts_in=[1])
sage: ps._findlast(15, [2,3])
[3, 2, 2, 2, 2, 2, 2]
sage: ps._findlast(9, [2,4]) is None
True
sage: ps._findlast(0, [])
[]
sage: ps._findlast(100, [9,17,31])
[31, 17, 17, 17, 9, 9]
"""
if n < 0:
return None
elif n == 0:
return []
elif parts != []:
p = parts[0]
q, r = n.quo_rem(p)
if r == 0:
return [p] * q
# If the smallest part doesn't divide n, try using the next
# largest part
else:
for i, p in enumerate(parts[1:]):
rest = self._findlast(n - p, parts[:i+2])
if rest is not None:
return [p] + rest
# If we get to here, nothing ever worked, so there's no such
# partitions, and we return None.
return None
def __iter__(self):
"""
An iterator through the partitions of `n` with all parts belonging
to a particular set.
EXAMPLES::
sage: [x for x in Partitions(4)]
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
"""
for p in self._fast_iterator(self.n, self.parts[:]):
yield self.element_class(self, p)
def _fast_iterator(self, n, parts):
"""
A fast iterator for the partitions of ``n`` which returns lists and
not partition types. This function is not intended to be called
directly.
INPUT:
- ``n`` -- nonnegative integer.
- ``parts`` -- a list of parts to use. This list will be
destroyed, so pass things here with ``foo[:]`` (or something
equivalent) if you want to preserve your list. In particular,
the ``__iter__`` method needs to use ``self.parts[:]``, or else we
forget which parts we're using!
OUTPUT:
A generator object for partitions of `n` with parts in
``parts``.
If the parts in ``parts`` are sorted in increasing order, this
function returns weakly decreasing lists. If ``parts`` is not
sorted, your lists won't be, either.
EXAMPLES::
sage: P = Partitions(4, parts_in=[2,4])
sage: it = P._fast_iterator(4, [2,4])
sage: next(it)
[4]
sage: type(_)
<... 'list'>
"""
if n == 0:
yield []
else:
while parts:
p = parts.pop()
for k in range(n.quo_rem(p)[0], 0, -1):
for q in self._fast_iterator(n - k * p, parts[:]):
yield k * [p] + q
class Partitions_starting(Partitions):
"""
All partitions with a given start.
"""
@staticmethod
def __classcall_private__(cls, n, starting_partition):
"""
Normalize the input to ensure a unique representation.
TESTS::
sage: P = Partitions(4, starting=[2,1])
sage: P2 = Partitions(4, starting=[2,1])
sage: P is P2
True
"""
starting_partition = Partition(starting_partition)
return super(Partitions_starting, cls).__classcall__(cls, Integer(n),
starting_partition)
def __init__(self, n, starting_partition):
"""
Initialize ``self``.
EXAMPLES::
sage: Partitions(3, starting=[2,1])
Partitions of the integer 3 starting with [2, 1]
sage: Partitions(3, starting=[2,1]).list()
[[2, 1], [1, 1, 1]]
TESTS::
sage: p = Partitions(3, starting=[2,1])
sage: TestSuite(p).run()
"""
Partitions.__init__(self)
self.n = n
self._starting = starting_partition
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: Partitions(3, starting=[2,1]) # indirect doctest
Partitions of the integer 3 starting with [2, 1]
"""
return "Partitions of the integer %s starting with %s"%(self.n, self._starting)
def __contains__(self, x):
"""
Checks if ``x`` is contained in ``self``.
EXAMPLES::
sage: p = Partitions(3, starting=[2,1])
sage: [1,1] in p
False
sage: [2,1] in p
True
sage: [1,1,1] in p
True
sage: [3] in p
False
"""
return x in Partitions_n(self.n) and x <= self._starting
def first(self):
"""
Return the first partition in ``self``.
EXAMPLES::
sage: Partitions(3, starting=[2,1]).first()
[2, 1]
"""
return self._starting
def next(self, part):
"""
Return the next partition after ``part`` in ``self``.
EXAMPLES::
sage: Partitions(3, starting=[2,1]).next(Partition([2,1]))
[1, 1, 1]
"""
return part.next()
class Partitions_ending(Partitions):
"""
All partitions with a given ending.
"""
@staticmethod
def __classcall_private__(cls, n, ending_partition):
"""
Normalize the input to ensure a unique representation.
TESTS::
sage: P = Partitions(4)
sage: P2 = Partitions(4)
sage: P is P2
True
"""
ending_partition = Partition(ending_partition)
return super(Partitions_ending, cls).__classcall__(cls, Integer(n),
ending_partition)
def __init__(self, n, ending_partition):
"""
Initializes ``self``.
EXAMPLES::
sage: Partitions(4, ending=[1,1,1,1]).list()
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
sage: Partitions(4, ending=[2,2]).list()
[[4], [3, 1], [2, 2]]
sage: Partitions(4, ending=[4]).list()
[[4]]
TESTS::
sage: p = Partitions(4, ending=[1,1,1,1])
sage: TestSuite(p).run()
"""
Partitions.__init__(self)
self.n = n
self._ending = ending_partition
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: Partitions(4, ending=[1,1,1,1]) # indirect doctest
Partitions of the integer 4 ending with [1, 1, 1, 1]
"""
return "Partitions of the integer %s ending with %s"%(self.n, self._ending)
def __contains__(self, x):
"""
Checks if ``x`` is contained in ``self``.
EXAMPLES::
sage: p = Partitions(4, ending=[2,2])
sage: [4] in p
True
sage: [2,1,1] in p
False
sage: [2,1] in p
False
"""
return x in Partitions_n(self.n) and x >= self._ending
def first(self):
"""
Return the first partition in ``self``.
EXAMPLES::
sage: Partitions(4, ending=[1,1,1,1]).first()
[4]
"""
return self.element_class(self, [self.n])
def next(self, part):
"""
Return the next partition after ``part`` in ``self``.
EXAMPLES::
sage: Partitions(4, ending=[1,1,1,1]).next(Partition([4]))
[3, 1]
sage: Partitions(4, ending=[1,1,1,1]).next(Partition([1,1,1,1])) is None
True
"""
if part == self._ending:
return None
else:
return part.next()
class PartitionsInBox(Partitions):
r"""
All partitions which fit in an `h \times w` box.
EXAMPLES::
sage: PartitionsInBox(2,2)
Integer partitions which fit in a 2 x 2 box
sage: PartitionsInBox(2,2).list()
[[], [1], [1, 1], [2], [2, 1], [2, 2]]
"""
def __init__(self, h, w):
"""
Initialize ``self``.
TESTS::
sage: p = PartitionsInBox(2,2)
sage: TestSuite(p).run()
"""
Partitions.__init__(self)
self.h = h
self.w = w
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: PartitionsInBox(2,2) # indirect doctest
Integer partitions which fit in a 2 x 2 box
"""
return "Integer partitions which fit in a %s x %s box" % (self.h, self.w)
def __contains__(self, x):
"""
Checks if ``x`` is contained in ``self``.
EXAMPLES::
sage: [] in PartitionsInBox(2,2)
True
sage: [2,1] in PartitionsInBox(2,2)
True
sage: [3,1] in PartitionsInBox(2,2)
False
sage: [2,1,1] in PartitionsInBox(2,2)
False
sage: [3,1] in PartitionsInBox(3, 2)
False
sage: [3,1] in PartitionsInBox(2, 3)
True
"""
return x in _Partitions and len(x) <= self.h \
and (len(x) == 0 or x[0] <= self.w)
def list(self):
"""
Return a list of all the partitions inside a box of height `h` and
width `w`.
EXAMPLES::
sage: PartitionsInBox(2,2).list()
[[], [1], [1, 1], [2], [2, 1], [2, 2]]
sage: PartitionsInBox(2,3).list()
[[], [1], [1, 1], [2], [2, 1], [2, 2], [3], [3, 1], [3, 2], [3, 3]]
TESTS:
Check :trac:`10890`::
sage: type(PartitionsInBox(0,0)[0])
<class 'sage.combinat.partition.PartitionsInBox_with_category.element_class'>
"""
h = self.h
w = self.w
if h == 0:
return [self.element_class(self, [])]
else:
l = [[i] for i in range(0, w+1)]
add = lambda x: [ x+[i] for i in range(0, x[-1]+1)]
for i in range(h-1):
new_list = []
for element in l:
new_list += add(element)
l = new_list
return [self.element_class(self, [x for x in p if x != 0]) for p in l]
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: PartitionsInBox(2, 3).cardinality()
10
TESTS:
Check the corner case::
sage: PartitionsInBox(0, 0).cardinality()
1
sage: PartitionsInBox(0, 1).cardinality()
1
sage: all(PartitionsInBox(a, b).cardinality() ==
....: len(PartitionsInBox(a, b).list())
....: for a in range(6) for b in range(6))
True
"""
return binomial(self.h + self.w, self.w)
class Partitions_constraints(IntegerListsLex):
"""
For unpickling old constrained ``Partitions_constraints`` objects created
with sage <= 3.4.1. See :class:`Partitions`.
"""
def __setstate__(self, data):
r"""
TESTS::
sage: dmp = b'x\x9ck`J.NLO\xd5K\xce\xcfM\xca\xccK,\xd1+H,*\xc9,\xc9\xcc\xcf\xe3\n\x80\xb1\x8a\xe3\x93\x81DIQbf^I1W!\xa3fc!Sm!\xb3F(7\x92x!Km!k(GnbE<\xc8\x88B6\x88\xb9E\x99y\xe9\xc5z@\x05\xa9\xe9\xa9E\\\xb9\x89\xd9\xa9\xf10N!{(\xa3QkP!Gq(c^\x06\x90c\x0c\xe4p\x96&\xe9\x01\x00\xc2\xe53\xfd'
sage: sp = loads(dmp); sp
Integer lists of sum 3 satisfying certain constraints
sage: sp.list()
[[2, 1], [1, 1, 1]]
"""
n = data['n']
self.__class__ = Partitions_with_constraints
constraints = {'max_slope' : 0,
'min_part' : 1}
constraints.update(data['constraints'])
self.__init__(n, **constraints)
class Partitions_with_constraints(IntegerListsLex):
"""
Partitions which satisfy a set of constraints.
EXAMPLES::
sage: P = Partitions(6, inner=[1,1], max_slope=-1)
sage: list(P)
[[5, 1], [4, 2], [3, 2, 1]]
TESTS::
sage: P = Partitions(6, min_part=2, max_slope=-1)
sage: TestSuite(P).run()
Test that :trac:`15525` is fixed::
sage: loads(dumps(P)) == P
True
"""
# def __init__(self, n, **kwargs):
# """
# Initialize ``self``.
# """
# IntegerListsLex.__init__(self, n, **kwargs)
Element = Partition
options = Partitions.options
######################
# Regular Partitions #
######################
class RegularPartitions(Partitions):
r"""
Base class for `\ell`-regular partitions.
Let `\ell` be a positive integer. A partition `\lambda` is
`\ell`-*regular* if `m_i < \ell` for all `i`, where `m_i` is the
multiplicity of `i` in `\lambda`.
.. NOTE::
This is conjugate to the notion of `\ell`-*restricted* partitions,
where the difference between any two consecutive
parts is `< \ell`.
INPUT:
- ``ell`` -- the positive integer `\ell`
- ``is_infinite`` -- boolean; if the subset of `\ell`-regular
partitions is infinite
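EXAMPLES:
The definition in action (each multiplicity in `[4, 2, 2, 1]` is
`< 3`, while the part `2` has multiplicity `3` in `[4, 2, 2, 2]`;
these checks mirror the containment tests below)::
sage: [4, 2, 2, 1] in Partitions(regular=3)
True
sage: [4, 2, 2, 2] in Partitions(regular=3)
False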
"""
def __init__(self, ell, is_infinite=False):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(regular=2)
sage: TestSuite(P).run()
"""
self._ell = ell
Partitions.__init__(self, is_infinite)
def ell(self):
r"""
Return the value `\ell`.
EXAMPLES::
sage: P = Partitions(regular=2)
sage: P.ell()
2
"""
return self._ell
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(regular=3)
sage: [5] in P
True
sage: [] in P
True
sage: [3, 3, 2, 2] in P
True
sage: [3, 3, 3, 1] in P
False
sage: [4, 0, 0, 0, 0, 0] in P
True
sage: Partition([4,2,2,1]) in P
True
sage: Partition([4,2,2,2]) in P
False
sage: Partition([10,1]) in P
True
"""
if not Partitions.__contains__(self, x):
return False
if isinstance(x, Partition):
return max(x.to_exp() + [0]) < self._ell
return all(x.count(i) < self._ell for i in set(x) if i > 0)
def _fast_iterator(self, n, max_part):
"""
A fast (recursive) iterator which returns a list.
EXAMPLES::
sage: P = Partitions(regular=3)
sage: list(P._fast_iterator(5, 5))
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1]]
sage: list(P._fast_iterator(5, 3))
[[3, 2], [3, 1, 1], [2, 2, 1]]
sage: list(P._fast_iterator(5, 6))
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1]]
"""
if n == 0:
yield []
return
if n < max_part:
max_part = n
bdry = self._ell - 1
for i in reversed(range(1, max_part+1)):
for p in self._fast_iterator(n-i, i):
if p.count(i) < bdry:
yield [i] + p
class RegularPartitions_all(RegularPartitions):
r"""
The class of all `\ell`-regular partitions.
INPUT:
- ``ell`` -- the positive integer `\ell`
.. SEEALSO::
:class:`~sage.combinat.partition.RegularPartitions`
"""
def __init__(self, ell):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(regular=4)
sage: TestSuite(P).run()
1-regular partitions::
sage: P = Partitions(regular=1)
sage: P in FiniteEnumeratedSets()
True
sage: TestSuite(P).run()
"""
RegularPartitions.__init__(self, ell, bool(ell > 1))
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import RegularPartitions_all
sage: RegularPartitions_all(3)
3-Regular Partitions
"""
return "{}-Regular Partitions".format(self._ell)
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: P = Partitions(regular=3)
sage: it = P.__iter__()
sage: [next(it) for x in range(10)]
[[], [1], [2], [1, 1], [3], [2, 1], [4], [3, 1], [2, 2], [2, 1, 1]]
Check that 1-regular partitions works (:trac:`20584`)::
sage: P = Partitions(regular=1)
sage: list(P)
[[]]
"""
if self._ell == 1:
yield self.element_class(self, [])
return
n = 0
while True:
for p in self._fast_iterator(n, n):
yield self.element_class(self, p)
n += 1
class RegularPartitions_truncated(RegularPartitions):
r"""
The class of `\ell`-regular partitions with max length `k`.
INPUT:
- ``ell`` -- the integer `\ell`
- ``max_len`` -- integer; the maximum length
.. SEEALSO::
:class:`~sage.combinat.partition.RegularPartitions`
"""
def __init__(self, ell, max_len):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(regular=4, max_length=3)
sage: TestSuite(P).run()
"""
self._max_len = max_len
RegularPartitions.__init__(self, ell, bool(ell > 1))
def max_length(self):
"""
Return the maximum length of the partitions of ``self``.
EXAMPLES::
sage: P = Partitions(regular=4, max_length=3)
sage: P.max_length()
3
"""
return self._max_len
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(regular=4, max_length=3)
sage: [3, 3, 3] in P
True
sage: [] in P
True
sage: [4, 2, 1, 1] in P
False
"""
return len(x) <= self._max_len and RegularPartitions.__contains__(self, x)
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import RegularPartitions_truncated
sage: RegularPartitions_truncated(4, 3)
4-Regular Partitions with max length 3
"""
return "{}-Regular Partitions with max length {}".format(self._ell, self._max_len)
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: P = Partitions(regular=3, max_length=2)
sage: it = P.__iter__()
sage: [next(it) for x in range(10)]
[[], [1], [2], [1, 1], [3], [2, 1], [4], [3, 1], [2, 2], [5]]
Check that 1-regular partitions works (:trac:`20584`)::
sage: P = Partitions(regular=1, max_length=2)
sage: list(P)
[[]]
"""
if self._ell == 1:
yield self.element_class(self, [])
return
n = 0
while True:
for p in self._fast_iterator(n, n):
yield self.element_class(self, p)
n += 1
def _fast_iterator(self, n, max_part, depth=0):
"""
A fast (recursive) iterator which returns a list.
EXAMPLES::
sage: P = Partitions(regular=2, max_length=2)
sage: list(P._fast_iterator(5, 5))
[[5], [4, 1], [3, 2]]
sage: list(P._fast_iterator(5, 3))
[[3, 2]]
sage: list(P._fast_iterator(5, 6))
[[5], [4, 1], [3, 2]]
"""
if n == 0 or depth >= self._max_len:
yield []
return
# Special case
if depth + 1 == self._max_len:
if max_part >= n:
yield [n]
return
if n < max_part:
max_part = n
bdry = self._ell - 1
for i in reversed(range(1, max_part+1)):
for p in self._fast_iterator(n-i, i, depth+1):
if p.count(i) < bdry:
yield [i] + p
class RegularPartitions_bounded(RegularPartitions):
r"""
The class of `\ell`-regular `k`-bounded partitions.
INPUT:
- ``ell`` -- the integer `\ell`
- ``k`` -- integer; the value `k`
.. SEEALSO::
:class:`~sage.combinat.partition.RegularPartitions`
"""
def __init__(self, ell, k):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(regular=4, max_part=3)
sage: TestSuite(P).run()
1-regular partitions::
sage: P = Partitions(regular=1, max_part=3)
sage: P in FiniteEnumeratedSets()
True
sage: TestSuite(P).run()
"""
self.k = k
RegularPartitions.__init__(self, ell, False)
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(regular=4, max_part=3)
sage: [3, 3, 3] in P
True
sage: [] in P
True
sage: [4, 2, 1] in P
False
"""
return len(x) == 0 or (x[0] <= self.k and RegularPartitions.__contains__(self, x))
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import RegularPartitions_bounded
sage: RegularPartitions_bounded(4, 3)
4-Regular 3-Bounded Partitions
"""
return "{}-Regular {}-Bounded Partitions".format(self._ell, self.k)
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: P = Partitions(regular=2, max_part=3)
sage: list(P)
[[3, 2, 1], [3, 2], [3, 1], [3], [2, 1], [2], [1], []]
Check that 1-regular partitions works (:trac:`20584`)::
sage: P = Partitions(regular=1, max_part=3)
sage: list(P)
[[]]
"""
k = self.k
for n in reversed(range(k*(k+1)/2 * self._ell)):
for p in self._fast_iterator(n, k):
yield self.element_class(self, p)
class RegularPartitions_n(RegularPartitions, Partitions_n):
r"""
The class of `\ell`-regular partitions of `n`.
INPUT:
- ``n`` -- the integer `n` to partition
- ``ell`` -- the integer `\ell`
.. SEEALSO::
:class:`~sage.combinat.partition.RegularPartitions`
"""
def __init__(self, n, ell):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(5, regular=3)
sage: TestSuite(P).run()
1-regular partitions::
sage: P = Partitions(5, regular=1)
sage: TestSuite(P).run()
"""
RegularPartitions.__init__(self, ell)
Partitions_n.__init__(self, n)
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import RegularPartitions_n
sage: RegularPartitions_n(3, 5)
5-Regular Partitions of the integer 3
"""
return "{}-Regular Partitions of the integer {}".format(self._ell, self.n)
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(5, regular=3)
sage: [3, 1, 1] in P
True
sage: [3, 2, 1] in P
False
"""
return RegularPartitions.__contains__(self, x) and sum(x) == self.n
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: P = Partitions(5, regular=3)
sage: list(P)
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1]]
"""
for p in self._fast_iterator(self.n, self.n):
yield self.element_class(self, p)
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: P = Partitions(5, regular=3)
sage: P.cardinality()
5
sage: P = Partitions(5, regular=6)
sage: P.cardinality()
7
sage: P.cardinality() == Partitions(5).cardinality()
True
TESTS:
Check the corner case::
sage: P = Partitions(0, regular=3)
sage: P.cardinality()
1
Check for 1-regular partitions::
sage: P = Partitions(0, regular=1)
sage: P.cardinality()
1
sage: P = Partitions(5, regular=1)
sage: P.cardinality()
0
"""
if self._ell > self.n:
return Partitions_n.cardinality(self)
return ZZ.sum(1 for x in self)
def _an_element_(self):
"""
        Return a partition in ``self``.
EXAMPLES::
sage: P = Partitions(5, regular=2)
sage: P._an_element_()
[4, 1]
sage: P = Partitions(0, regular=1)
sage: P._an_element_()
[]
sage: P = Partitions(5, regular=1)
sage: P._an_element_()
Traceback (most recent call last):
...
EmptySetError
"""
if self._ell == 1 and self.n > 0:
from sage.categories.sets_cat import EmptySetError
raise EmptySetError
return Partitions_n._an_element_(self)
######################
# Ordered Partitions #
######################
class OrderedPartitions(Partitions):
"""
The class of ordered partitions of `n`. If `k` is specified, then this
contains only the ordered partitions of length `k`.
An *ordered partition* of a nonnegative integer `n` means a list of
positive integers whose sum is `n`. This is the same as a composition
of `n`.
.. NOTE::
        It is recommended that you use :class:`Compositions` instead as
        :class:`OrderedPartitions` wraps GAP.
EXAMPLES::
sage: OrderedPartitions(3)
Ordered partitions of 3
sage: OrderedPartitions(3).list()
[[3], [2, 1], [1, 2], [1, 1, 1]]
sage: OrderedPartitions(3,2)
Ordered partitions of 3 of length 2
sage: OrderedPartitions(3,2).list()
[[2, 1], [1, 2]]
sage: OrderedPartitions(10,k=2).list()
[[9, 1], [8, 2], [7, 3], [6, 4], [5, 5], [4, 6], [3, 7], [2, 8], [1, 9]]
sage: OrderedPartitions(4).list()
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 3], [1, 2, 1], [1, 1, 2], [1, 1, 1, 1]]
"""
@staticmethod
def __classcall_private__(cls, n, k=None):
"""
Normalize the input to ensure a unique representation.
TESTS::
sage: P = OrderedPartitions(3,2)
sage: P2 = OrderedPartitions(3,2)
sage: P is P2
True
"""
if k is not None:
k = Integer(k)
return super(OrderedPartitions, cls).__classcall__(cls, Integer(n), k)
def __init__(self, n, k):
"""
Initialize ``self``.
EXAMPLES::
sage: o = OrderedPartitions(4,2)
TESTS::
sage: TestSuite( OrderedPartitions(5,3) ).run()
"""
Partitions.__init__(self)
self.n = n
self.k = k
def __contains__(self, x):
"""
Check to see if ``x`` is an element of ``self``.
EXAMPLES::
sage: o = OrderedPartitions(4,2)
sage: [2,1] in o
False
sage: [2,2] in o
True
sage: [1,2,1] in o
False
"""
        C = composition.Compositions(self.n, length=self.k)
        return x in C
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: OrderedPartitions(3) # indirect doctest
Ordered partitions of 3
sage: OrderedPartitions(3,2) # indirect doctest
Ordered partitions of 3 of length 2
"""
string = "Ordered partitions of %s" % self.n
if self.k is not None:
string += " of length %s" % self.k
return string
def list(self):
"""
Return a list of partitions in ``self``.
EXAMPLES::
sage: OrderedPartitions(3).list()
[[3], [2, 1], [1, 2], [1, 1, 1]]
sage: OrderedPartitions(3,2).list()
[[2, 1], [1, 2]]
"""
from sage.libs.gap.libgap import libgap
n = self.n
k = self.k
if k is None:
ans = libgap.OrderedPartitions(ZZ(n))
else:
ans = libgap.OrderedPartitions(ZZ(n), ZZ(k))
result = ans.sage()
result.reverse()
return result
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: OrderedPartitions(3).cardinality()
4
sage: OrderedPartitions(3,2).cardinality()
2
sage: OrderedPartitions(10,2).cardinality()
9
sage: OrderedPartitions(15).cardinality()
16384
"""
from sage.libs.gap.libgap import libgap
n = self.n
k = self.k
if k is None:
ans = libgap.NrOrderedPartitions(n)
else:
ans = libgap.NrOrderedPartitions(n, k)
return ZZ(ans)
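# NOTE (illustrative sketch, not from the Sage sources; names are hypothetical):
# the doctest values above agree with the classical closed forms -- there are
# 2^(n-1) ordered partitions (compositions) of n, and binomial(n-1, k-1) of
# length k, e.g. 2^14 == 16384 and binomial(9, 1) == 9:
def _sketch_nr_ordered(n, k=None):
    """Count compositions of ``n``, optionally of length ``k``, without GAP."""
    if k is None:
        return 2 ** (n - 1)
    out = 1
    for i in range(k - 1):  # build binomial(n - 1, k - 1) incrementally
        out = out * (n - 1 - i) // (i + 1)
    return out
assert _sketch_nr_ordered(15) == 16384
assert _sketch_nr_ordered(3) == 4 and _sketch_nr_ordered(10, 2) == 9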
##########################
# Partitions Greatest LE #
##########################
class PartitionsGreatestLE(UniqueRepresentation, IntegerListsLex):
"""
The class of all (unordered) "restricted" partitions of the integer `n`
having parts less than or equal to the integer `k`.
EXAMPLES::
sage: PartitionsGreatestLE(10, 2)
Partitions of 10 having parts less than or equal to 2
sage: PartitionsGreatestLE(10, 2).list()
[[2, 2, 2, 2, 2],
[2, 2, 2, 2, 1, 1],
[2, 2, 2, 1, 1, 1, 1],
[2, 2, 1, 1, 1, 1, 1, 1],
[2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
sage: [4,3,2,1] in PartitionsGreatestLE(10, 2)
False
sage: [2,2,2,2,2] in PartitionsGreatestLE(10, 2)
True
sage: PartitionsGreatestLE(10, 2).first().parent()
Partitions...
"""
def __init__(self, n, k):
"""
Initialize ``self``.
TESTS::
sage: p = PartitionsGreatestLE(10, 2)
sage: p.n, p.k
(10, 2)
sage: TestSuite(p).run()
"""
IntegerListsLex.__init__(self, n, max_slope=0, min_part=1, max_part=k)
self.n = n
self.k = k
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: PartitionsGreatestLE(10, 2) # indirect doctest
Partitions of 10 having parts less than or equal to 2
"""
return "Partitions of %s having parts less than or equal to %s" % (self.n, self.k)
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: PartitionsGreatestLE(9, 5).cardinality()
23
TESTS::
sage: all(PartitionsGreatestLE(n, a).cardinality() ==
....: len(PartitionsGreatestLE(n, a).list())
....: for n in range(20) for a in range(6))
True
"""
return sum(number_of_partitions_length(self.n, i) for i in range(self.k+1))
Element = Partition
options = Partitions.options
##########################
# Partitions Greatest EQ #
##########################
class PartitionsGreatestEQ(UniqueRepresentation, IntegerListsLex):
"""
The class of all (unordered) "restricted" partitions of the integer `n`
    whose greatest part is equal to the integer `k`.
EXAMPLES::
sage: PartitionsGreatestEQ(10, 2)
Partitions of 10 having greatest part equal to 2
sage: PartitionsGreatestEQ(10, 2).list()
[[2, 2, 2, 2, 2],
[2, 2, 2, 2, 1, 1],
[2, 2, 2, 1, 1, 1, 1],
[2, 2, 1, 1, 1, 1, 1, 1],
[2, 1, 1, 1, 1, 1, 1, 1, 1]]
sage: [4,3,2,1] in PartitionsGreatestEQ(10, 2)
False
sage: [2,2,2,2,2] in PartitionsGreatestEQ(10, 2)
True
The empty partition has no maximal part, but it is contained in
the set of partitions with any specified maximal part::
sage: PartitionsGreatestEQ(0, 2).list()
[[]]
TESTS::
sage: [1]*10 in PartitionsGreatestEQ(10, 2)
False
sage: PartitionsGreatestEQ(10, 2).first().parent()
Partitions...
"""
def __init__(self, n, k):
"""
Initialize ``self``.
TESTS::
sage: p = PartitionsGreatestEQ(10, 2)
sage: p.n, p.k
(10, 2)
sage: TestSuite(p).run()
"""
IntegerListsLex.__init__(self, n, max_slope=0, max_part=k, floor=[k])
self.n = n
self.k = k
def _repr_(self):
"""
Return a string representation of ``self``.
TESTS::
sage: PartitionsGreatestEQ(10, 2) # indirect doctest
Partitions of 10 having greatest part equal to 2
"""
return "Partitions of %s having greatest part equal to %s" % (self.n, self.k)
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: PartitionsGreatestEQ(10, 2).cardinality()
5
TESTS::
sage: all(PartitionsGreatestEQ(n, a).cardinality() ==
....: len(PartitionsGreatestEQ(n, a).list())
....: for n in range(20) for a in range(6))
True
"""
if not self.n:
return 1
return number_of_partitions_length(self.n, self.k)
Element = Partition
options = Partitions.options
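# NOTE (illustrative sketch, not from the Sage sources; names are hypothetical):
# ``cardinality`` above calls number_of_partitions_length(n, k) because
# conjugation exchanges "greatest part equal to k" with "exactly k parts".
# A brute-force standalone check (the class special-cases n == 0 to 1):
def _sketch_count_greatest_eq(n, k):
    """Count partitions of ``n`` whose greatest part equals ``k``."""
    def parts(m, mx):
        if m == 0:
            yield ()
            return
        for i in range(min(m, mx), 0, -1):
            for rest in parts(m - i, i):
                yield (i,) + rest
    return sum(1 for p in parts(n, n) if p and p[0] == k)
assert _sketch_count_greatest_eq(10, 2) == 5  # matches the doctest above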
#########################
# Restricted Partitions #
#########################
class RestrictedPartitions_generic(Partitions):
r"""
Base class for `\ell`-restricted partitions.
Let `\ell` be a positive integer. A partition `\lambda` is
`\ell`-*restricted* if `\lambda_i - \lambda_{i+1} < \ell` for all `i`,
including rows of length 0.
.. NOTE::
        This is conjugate to the notion of `\ell`-*regular* partitions,
        where the multiplicity of any part is less than `\ell`.
INPUT:
- ``ell`` -- the positive integer `\ell`
- ``is_infinite`` -- boolean; if the subset of `\ell`-restricted
partitions is infinite
"""
def __init__(self, ell, is_infinite=False):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(restricted=2)
sage: TestSuite(P).run()
"""
self._ell = ell
Partitions.__init__(self, is_infinite)
def ell(self):
r"""
Return the value `\ell`.
EXAMPLES::
sage: P = Partitions(restricted=2)
sage: P.ell()
2
"""
return self._ell
def __contains__(self, x):
"""
TESTS::
sage: P = Partitions(restricted=3)
sage: [5] in P
False
sage: [2] in P
True
sage: [] in P
True
sage: [3, 3, 3, 3, 2, 2] in P
True
sage: [3, 3, 3, 1] in P
True
sage: [8, 3, 3, 1] in P
False
sage: [2, 0, 0, 0, 0, 0] in P
True
sage: Partition([4,2,2,1]) in P
True
sage: Partition([4,2,2,2]) in P
True
sage: Partition([6,6,6,6,4,3,2]) in P
True
sage: Partition([7,6,6,2]) in P
False
sage: Partition([6,5]) in P
False
sage: Partition([10,1]) in P
False
sage: Partition([3,3] + [1]*10) in P
True
"""
if not Partitions.__contains__(self, x):
return False
if x == []:
return True
return (all(x[i] - x[i+1] < self._ell for i in range(len(x)-1))
and x[-1] < self._ell)
def _fast_iterator(self, n, max_part):
"""
A fast (recursive) iterator which returns a list.
EXAMPLES::
sage: P = Partitions(restricted=3)
sage: list(P._fast_iterator(5, 5))
[[3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
sage: list(P._fast_iterator(5, 2))
[[2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
TESTS::
sage: for n in range(10):
....: for ell in range(2, n):
....: Pres = Partitions(n, restricted=ell)
....: Preg = Partitions(n, regular=ell)
....: assert set(Pres) == set(p.conjugate() for p in Preg)
"""
if n == 0:
yield []
return
if n < max_part:
max_part = n
for i in range(max_part, 0, -1):
for p in self._fast_iterator(n-i, i):
if (p and i - p[0] >= self._ell) or (not p and i >= self._ell):
break
yield [i] + p
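# NOTE (editorial comment, not from the Sage sources): the ``break`` above is
# sound because the outer loop runs ``i`` downward, so the recursive call
# yields partitions whose first part weakly decreases; once ``i - p[0]`` (or
# ``i`` itself for the empty tail) reaches ``self._ell``, every later tail
# violates the restricted condition as well.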
class RestrictedPartitions_all(RestrictedPartitions_generic):
r"""
The class of all `\ell`-restricted partitions.
INPUT:
- ``ell`` -- the positive integer `\ell`
.. SEEALSO::
:class:`~sage.combinat.partition.RestrictedPartitions_generic`
"""
def __init__(self, ell):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(restricted=4)
sage: TestSuite(P).run()
"""
RestrictedPartitions_generic.__init__(self, ell, True)
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import RestrictedPartitions_all
sage: RestrictedPartitions_all(3)
3-Restricted Partitions
"""
return "{}-Restricted Partitions".format(self._ell)
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: P = Partitions(restricted=3)
sage: it = P.__iter__()
sage: [next(it) for x in range(10)]
[[], [1], [2], [1, 1], [2, 1], [1, 1, 1],
[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
"""
n = 0
while True:
for p in self._fast_iterator(n, n):
yield self.element_class(self, p)
n += 1
class RestrictedPartitions_n(RestrictedPartitions_generic, Partitions_n):
r"""
The class of `\ell`-restricted partitions of `n`.
INPUT:
- ``n`` -- the integer `n` to partition
- ``ell`` -- the integer `\ell`
.. SEEALSO::
:class:`~sage.combinat.partition.RestrictedPartitions_generic`
"""
def __init__(self, n, ell):
"""
Initialize ``self``.
EXAMPLES::
sage: P = Partitions(5, restricted=3)
sage: TestSuite(P).run()
"""
RestrictedPartitions_generic.__init__(self, ell)
Partitions_n.__init__(self, n)
def _repr_(self):
"""
TESTS::
sage: from sage.combinat.partition import RestrictedPartitions_n
sage: RestrictedPartitions_n(3, 5)
5-Restricted Partitions of the integer 3
"""
return "{}-Restricted Partitions of the integer {}".format(self._ell, self.n)
def __contains__(self, x):
"""
TESTS::
            sage: P = Partitions(5, restricted=3)
sage: [3, 1, 1] in P
True
sage: [3, 2, 1] in P
False
"""
return RestrictedPartitions_generic.__contains__(self, x) and sum(x) == self.n
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: P = Partitions(5, restricted=3)
sage: list(P)
[[3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
"""
for p in self._fast_iterator(self.n, self.n):
yield self.element_class(self, p)
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: P = Partitions(5, restricted=3)
sage: P.cardinality()
5
sage: P = Partitions(5, restricted=6)
sage: P.cardinality()
7
sage: P.cardinality() == Partitions(5).cardinality()
True
"""
if self._ell > self.n:
return Partitions_n.cardinality(self)
return ZZ.sum(ZZ.one() for x in self)
def _an_element_(self):
"""
Return an element of ``self``.
EXAMPLES::
sage: P = Partitions(5, restricted=3)
sage: P.an_element()
[2, 1, 1, 1]
sage: Partitions(0, restricted=3).an_element()
[]
sage: Partitions(1, restricted=3).an_element()
[1]
"""
return self.element_class(self, Partitions_n._an_element_(self).conjugate())
#########################################################################
#### partitions
def number_of_partitions(n, algorithm='default'):
r"""
    Return the number of partitions of `n`.
The options of :meth:`number_of_partitions()` are being deprecated
:trac:`13072` in favour of :meth:`Partitions_n.cardinality()` so that
:meth:`number_of_partitions()` can become a stripped down version of
the fastest algorithm available (currently this is using FLINT).
INPUT:
- ``n`` -- an integer
    - ``algorithm`` -- (default: ``'default'``)
       [Will be deprecated except in ``Partitions_n.cardinality()``]
       - ``'default'`` -- use FLINT (there is no longer a ``k`` argument)
       - ``'flint'`` -- use FLINT
EXAMPLES::
sage: v = Partitions(5).list(); v
[[5], [4, 1], [3, 2], [3, 1, 1], [2, 2, 1], [2, 1, 1, 1], [1, 1, 1, 1, 1]]
sage: len(v)
7
The input must be a nonnegative integer or a ``ValueError`` is raised.
::
sage: number_of_partitions(-5)
Traceback (most recent call last):
...
ValueError: n (=-5) must be a nonnegative integer
::
sage: number_of_partitions(10)
42
sage: number_of_partitions(3)
3
sage: number_of_partitions(10)
42
sage: number_of_partitions(40)
37338
sage: number_of_partitions(100)
190569292
sage: number_of_partitions(100000)
27493510569775696512677516320986352688173429315980054758203125984302147328114964173055050741660736621590157844774296248940493063070200461792764493033510116079342457190155718943509725312466108452006369558934464248716828789832182345009262853831404597021307130674510624419227311238999702284408609370935531629697851569569892196108480158600569421098519
A generating function for the number of partitions `p_n` is given by the
reciprocal of Euler's function:
.. MATH::
\sum_{n=0}^{\infty} p_n x^n = \prod_{k=1}^{\infty} \left(
\frac{1}{1-x^k} \right).
    We use Sage to verify that the first several coefficients do
    indeed agree::
sage: q = PowerSeriesRing(QQ, 'q', default_prec=9).gen()
    sage: prod([(1-q^k)^(-1) for k in range(1,9)]) ## partial product of the generating function
1 + q + 2*q^2 + 3*q^3 + 5*q^4 + 7*q^5 + 11*q^6 + 15*q^7 + 22*q^8 + O(q^9)
sage: [number_of_partitions(k) for k in range(2,10)]
[2, 3, 5, 7, 11, 15, 22, 30]
REFERENCES:
- :wikipedia:`Partition\_(number\_theory)`
TESTS::
sage: n = 500 + randint(0,500)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1500 + randint(0,1500)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1000000 + randint(0,1000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1000000 + randint(0,1000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1000000 + randint(0,1000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1000000 + randint(0,1000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1000000 + randint(0,1000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 1000000 + randint(0,1000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0
True
sage: n = 100000000 + randint(0,100000000)
sage: number_of_partitions( n - (n % 385) + 369) % 385 == 0 # long time (4s on sage.math, 2011)
True
"""
n = ZZ(n)
if n < 0:
raise ValueError("n (=%s) must be a nonnegative integer"%n)
elif n == 0:
return ZZ.one()
if algorithm == 'default':
algorithm = 'flint'
if algorithm == 'flint':
return cached_number_of_partitions(n)
raise ValueError("unknown algorithm '%s'"%algorithm)
def number_of_partitions_length(n, k, algorithm='hybrid'):
r"""
Return the number of partitions of `n` with length `k`.
This is a wrapper for GAP's ``NrPartitions`` function.
EXAMPLES::
sage: from sage.combinat.partition import number_of_partitions_length
sage: number_of_partitions_length(5, 2)
2
sage: number_of_partitions_length(10, 2)
5
sage: number_of_partitions_length(10, 4)
9
sage: number_of_partitions_length(10, 0)
0
sage: number_of_partitions_length(10, 1)
1
sage: number_of_partitions_length(0, 0)
1
sage: number_of_partitions_length(0, 1)
0
"""
if algorithm == 'hybrid':
# Do the hybrid algorithm
# Special relations between n and k
if n < k:
return ZZ.zero()
if n == k and n >= 0:
return ZZ.one()
# Special case of n
if n <= 0:
# Note: we've already checked the case when n == k == 0
return ZZ.zero()
# Small values of k
if k <= 0:
return ZZ.zero()
if k == 1:
return ZZ.one()
if k == 2:
return n // 2
# We have one column of length `k` and all (inner) partitions of
# size `n-k` can't have length more than `k`
if n <= k*2:
return number_of_partitions(n - k)
# Fall back to GAP
from sage.libs.gap.libgap import libgap
return ZZ(libgap.NrPartitions(ZZ(n), ZZ(k)))
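# NOTE (editorial comment, not from the Sage sources): the ``n <= k*2`` branch
# above uses the "delete the first column" bijection -- partitions of n with
# exactly k parts correspond to partitions of n - k with at most k parts, and
# when n - k <= k that length bound is automatic, so the count is just p(n - k).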
##########
# trac 14225: Partitions() is frequently used, but only weakly cached. Hence,
# establish a strong reference to it.
_Partitions = Partitions()
# Rather than caching an under-used function I have cached the default
# number_of_partitions functions which is currently using FLINT.
# AM trac #13072
cached_number_of_partitions = cached_function( flint_number_of_partitions )
# October 2012: fixing outdated pickles which use classes being deprecated
from sage.misc.persist import register_unpickle_override
from sage.combinat.partition_tuple import PartitionTuples_level_size
register_unpickle_override('sage.combinat.partition', 'PartitionTuples_nk', PartitionTuples_level_size)
register_unpickle_override('sage.combinat.partition', 'Partition_class', Partition)
register_unpickle_override('sage.combinat.partition', 'OrderedPartitions_nk', OrderedPartitions)
register_unpickle_override('sage.combinat.partition', 'PartitionsInBox_hw', PartitionsInBox)
register_unpickle_override('sage.combinat.partition', 'PartitionsGreatestLE_nk', PartitionsGreatestLE)
register_unpickle_override('sage.combinat.partition', 'PartitionsGreatestEQ_nk', PartitionsGreatestEQ)
| 32.194981 | 355 | 0.506121 |
7c213e6cb69d098604bf0835e3ea1c318af1588e | 10,546 | py | Python | hoomd/integrate.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | hoomd/integrate.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | hoomd/integrate.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2016 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: joaander / All Developers are free to add commands for new features
## \package hoomd.integrate
# \brief Commands that integrate the equations of motion
#
# To integrate the system forward in time, an integration mode must be set. Only one integration mode can be active at
# a time, and the last \c integrate.mode_* command before the run() command is the one that will take effect. It is
# possible to set one mode, run() for a certain number of steps and then switch to another mode before the next run()
# command.
#
# The most commonly used mode is integrate.mode_standard . It specifies a standard mode where, at each time
# step, all of the specified forces are evaluated and used in moving the system forward to the next step.
# integrate.mode_standard doesn't integrate any particles by itself, one or more compatible integration methods must
# be specified before the run() command. Like commands that specify forces, integration methods are \b persistent and
# remain set until they are disabled (this differs greatly from HOOMD-blue behavior in all versions prior to 0.9.0).
# The benefit and reason for this change is that now multiple integration methods can be specified on different particle
# groups, allowing portions of the system to be fixed, integrated at a different temperature, etc...
#
# To clarify, the following series of commands will run for 1000 time steps in the NVT ensemble and then switch to
# NVE for another 1000 steps.
#
# \code
# all = group.all()
# integrate.mode_standard(dt=0.005)
# nvt = integrate.nvt(group=all, T=1.2, tau=0.5)
# run(1000)
# nvt.disable()
# integrate.nve(group=all)
# run(1000)
# \endcode
#
# For more detailed information on the interaction of integration methods and integration modes, see
# integrate.mode_standard.
#
# Some integrators provide parameters that can be changed between runs.
# In order to access the integrator to change it, it needs to be saved
# in a variable. For example:
# \code
# integrator = integrate.nvt(group=all, T=1.2, tau=0.5)
# run(100)
# integrator.set_params(T=1.0)
# run(100)
# \endcode
# This code snippet runs the first 100 time steps with T=1.2 and the next 100 with T=1.0
from hoomd import _hoomd;
import hoomd;
import copy;
import sys;
## \internal
# \brief Base class for integrators
#
# An integrator in hoomd_script reflects an Integrator in c++. It is responsible
# for all high-level management that happens behind the scenes for hoomd_script
# writers. 1) The instance of the c++ integrator itself is tracked 2) All
# forces created so far in the simulation are updated in the cpp_integrator
# whenever run() is called.
class _integrator(hoomd.meta._metadata):
## \internal
# \brief Constructs the integrator
#
    # This doesn't really do much but set some member variables to None
def __init__(self):
        # check if initialization has occurred
if not hoomd.init.is_initialized():
hoomd.context.msg.error("Cannot create integrator before initialization\n");
raise RuntimeError('Error creating integrator');
# by default, integrators do not support methods
self.cpp_integrator = None;
self.supports_methods = False;
# save ourselves in the global variable
hoomd.context.current.integrator = self;
# base class constructor
hoomd.meta._metadata.__init__(self)
## \var cpp_integrator
# \internal
# \brief Stores the C++ side Integrator managed by this class
## \var supports_methods
# \internal
# \brief True if this integrator supports integration methods
# \note If hoomd ever needs to support multiple TYPES of methods, we could just change this to a string naming the
# type that is supported and add a type string to each of the integration_methods.
## \internal
# \brief Checks that proper initialization has completed
def check_initialization(self):
# check that we have been initialized properly
if self.cpp_integrator is None:
hoomd.context.msg.error('Bug in hoomd_script: cpp_integrator not set, please report\n');
raise RuntimeError();
## \internal
# \brief Updates the integrators in the reflected c++ class
def update_forces(self):
self.check_initialization();
# set the forces
self.cpp_integrator.removeForceComputes();
for f in hoomd.context.current.forces:
if f.cpp_force is None:
hoomd.context.msg.error('Bug in hoomd_script: cpp_force not set, please report\n');
raise RuntimeError('Error updating forces');
if f.log or f.enabled:
f.update_coeffs();
if f.enabled:
self.cpp_integrator.addForceCompute(f.cpp_force);
# set the constraint forces
for f in hoomd.context.current.constraint_forces:
if f.cpp_force is None:
hoomd.context.msg.error('Bug in hoomd_script: cpp_force not set, please report\n');
raise RuntimeError('Error updating forces');
if f.enabled:
self.cpp_integrator.addForceConstraint(f.cpp_force);
# register any composite body forces
if f.composite:
self.cpp_integrator.addForceComposite(f.cpp_force);
f.update_coeffs();
## \internal
# \brief Updates the integration methods in the reflected c++ class
def update_methods(self):
self.check_initialization();
# if we support methods, add them all to the list
if self.supports_methods:
self.cpp_integrator.removeAllIntegrationMethods();
if len(hoomd.context.current.integration_methods) == 0:
hoomd.context.msg.error('This integrator requires that one or more integration methods be specified.\n');
raise RuntimeError('Error initializing integrator methods');
for m in hoomd.context.current.integration_methods:
self.cpp_integrator.addIntegrationMethod(m.cpp_method);
else:
if len(hoomd.context.current.integration_methods) > 0:
hoomd.context.msg.error("This integrator does not support the use of integration methods,\n");
hoomd.context.msg.error("but some have been specified in the script. Remove them or use\n");
hoomd.context.msg.error("a different integrator.\n");
raise RuntimeError('Error initializing integrator methods');
## \internal
# \brief Counts the number of degrees of freedom and updates each hoomd.compute.thermo specified
def update_thermos(self):
self.check_initialization();
for t in hoomd.context.current.thermos:
ndof = self.cpp_integrator.getNDOF(t.group.cpp_group);
t.cpp_compute.setNDOF(ndof);
ndof_rot = self.cpp_integrator.getRotationalNDOF(t.group.cpp_group);
t.cpp_compute.setRotationalNDOF(ndof_rot);
## \internal
# \brief Base class for integration methods
#
# An integration_method in hoomd_script reflects an IntegrationMethod in c++. It is responsible for all high-level
# management that happens behind the scenes for hoomd_script writers. 1) The instance of the c++ integration method
# itself is tracked and added to the integrator and 2) methods are provided for disabling the integration method from
# being active for the next run()
#
# The design of integration_method exactly mirrors that of _force for consistency
class _integration_method(hoomd.meta._metadata):
## \internal
# \brief Constructs the integration_method
#
# Initializes the cpp_method to None.
def __init__(self):
        # check if initialization has occurred
if not hoomd.init.is_initialized():
hoomd.context.msg.error("Cannot create an integration method before initialization\n");
raise RuntimeError('Error creating integration method');
self.cpp_method = None;
self.enabled = True;
hoomd.context.current.integration_methods.append(self);
# base class constructor
hoomd.meta._metadata.__init__(self)
## \var enabled
# \internal
# \brief True if the integration method is enabled
## \var cpp_method
# \internal
# \brief Stores the C++ side IntegrationMethod managed by this class
## \internal
# \brief Checks that proper initialization has completed
def check_initialization(self):
# check that we have been initialized properly
if self.cpp_method is None:
hoomd.context.msg.error('Bug in hoomd_script: cpp_method not set, please report\n');
raise RuntimeError();
def disable(self):
R""" Disables the integration method.
Examples::
method.disable()
Executing the disable command will remove the integration method from the simulation.
Any :py:func:`hoomd.run()` command executed after disabling an integration method will
not apply the integration method to the particles during the
simulation. A disabled integration method can be re-enabled with :py:meth:`enable()`.
"""
hoomd.util.print_status_line();
self.check_initialization()
# check if we are already disabled
if not self.enabled:
hoomd.context.msg.warning("Ignoring command to disable an integration method that is already disabled");
return;
self.enabled = False;
hoomd.context.current.integration_methods.remove(self);
def enable(self):
R""" Enables the integration method.
Examples::
method.enable()
See Also:
:py:meth:`disable()`.
"""
hoomd.util.print_status_line();
self.check_initialization();
# check if we are already disabled
if self.enabled:
hoomd.context.msg.warning("Ignoring command to enable an integration method that is already enabled");
return;
self.enabled = True;
hoomd.context.current.integration_methods.append(self);
## \internal
# \brief Override get_metadata() to add 'enabled' field
def get_metadata(self):
data = hoomd.meta._metadata.get_metadata(self)
data['enabled'] = self.enabled
return data
| 39.796226 | 121 | 0.687085 |
e38ad8a4ab24f7ce8ef7cfca494f5c5602e44bba | 392 | py | Python | config/wsgi.py | jackxu-eb/hello-umi | 7ad6482cf2e130474ee0330d2581cee423c7b7aa | [
"MIT"
] | null | null | null | config/wsgi.py | jackxu-eb/hello-umi | 7ad6482cf2e130474ee0330d2581cee423c7b7aa | [
"MIT"
] | null | null | null | config/wsgi.py | jackxu-eb/hello-umi | 7ad6482cf2e130474ee0330d2581cee423c7b7aa | [
"MIT"
] | null | null | null | """
WSGI config for hello_umi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
application = get_wsgi_application()
| 23.058824 | 78 | 0.785714 |
e482d58e2bdaa536968b34194d4de9c11e965a00 | 17,574 | py | Python | main.py | thanassi360/grimoire-guide | b777fddf1a285c04af48e22ba165e43fc74b37cb | [
"MIT"
] | 1 | 2016-04-12T23:00:13.000Z | 2016-04-12T23:00:13.000Z | main.py | thanassi360/grimoire-guide | b777fddf1a285c04af48e22ba165e43fc74b37cb | [
"MIT"
] | null | null | null | main.py | thanassi360/grimoire-guide | b777fddf1a285c04af48e22ba165e43fc74b37cb | [
"MIT"
] | null | null | null | import webapp2
import urllib2
import json
import re
import datetime
from uuid import uuid4
from api_data import *
from google.appengine.ext import ndb
from google.appengine.api import users
class TimeEncoder(json.JSONEncoder): # Defines time format
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%d/%m/%Y %H:%M')
        elif isinstance(obj, datetime.date):
            return obj.strftime('%d/%m/%Y')
        # fall back so unsupported types still raise TypeError as usual
        return json.JSONEncoder.default(self, obj)
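# NOTE (editorial comment, not part of the original app): a custom JSONEncoder
# is activated through the ``cls`` keyword of json.dumps, e.g.
#     json.dumps({'joined': datetime.date(2016, 4, 1)}, cls=TimeEncoder)
# would yield '{"joined": "01/04/2016"}' using the formats above.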
class Manifest(ndb.Model): # Defines Manifest entities
version = ndb.StringProperty(required=True)
checked = ndb.DateTimeProperty(auto_now=True, required=True)
class Collection(ndb.Model): # Defines Collection entities
name = ndb.StringProperty()
img = ndb.StringProperty()
x = ndb.IntegerProperty()
y = ndb.IntegerProperty()
order = ndb.IntegerProperty()
class Set(ndb.Model): # Defines Set entities
collection = ndb.StringProperty()
shortname = ndb.StringProperty()
name = ndb.StringProperty()
img = ndb.StringProperty()
x = ndb.IntegerProperty()
y = ndb.IntegerProperty()
order = ndb.IntegerProperty()
class Card(ndb.Model): # Defines Card entities
set = ndb.StringProperty()
cardid = ndb.IntegerProperty()
id = ndb.IntegerProperty()
name = ndb.StringProperty()
quote = ndb.StringProperty()
body = ndb.TextProperty()
icon = ndb.StringProperty()
iconx = ndb.IntegerProperty()
icony = ndb.IntegerProperty()
img = ndb.StringProperty()
x = ndb.IntegerProperty()
y = ndb.IntegerProperty()
order = ndb.IntegerProperty()
class User(ndb.Model): # Defines User entities
userid = ndb.StringProperty(required=True)
name = ndb.StringProperty(required=True)
email = ndb.StringProperty(required=True)
index = ndb.StringProperty(required=True)
gamertag = ndb.StringProperty()
platform = ndb.StringProperty()
joined = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
def tojson(self):
return '{"user": "%s",' \
'"name": "%s",' \
'"email": "%s",' \
'"gamertag": "%s",' \
'"platform": "%s",' \
'"joined": "%s"}' % (self.userid,
self.name,
self.email,
self.gamertag,
self.platform,
self.joined.strftime("%d/%m/%Y"))
class Guide(ndb.Model): # Defines Guide entities
userid = ndb.KeyProperty(kind=User, required=True)
cardid = ndb.IntegerProperty(required=True)
content = ndb.StringProperty()
gid = ndb.StringProperty()
youtube = ndb.StringProperty()
created = ndb.DateTimeProperty(auto_now_add=True, required=True)
class CollectionHandler(webapp2.RequestHandler): # Checks user and gets collection data
def get(self):
collections = Collection.query().fetch()
user = users.get_current_user()
if not user:
current = "NotLoggedIn"
else:
currentuser = User.get_by_id(user.user_id())
current = currentuser.name
query = [i.to_dict() for i in collections]
query = sorted(query, key=lambda k: k.get('order', 0))
collections_json = json.dumps({"type": "collectionfeed", "name": current, "data": query})
self.response.write(collections_json)
class SetHandler(webapp2.RequestHandler): # Gets set data
def post(self):
data = self.request.body
query = [i.to_dict() for i in Set.query(Set.collection == data).fetch()]
query = sorted(query, key=lambda k: k.get('order', 0))
set_json = json.dumps({"type": "setfeed", "set": data, "data": query})
self.response.write(set_json)
class CardHandler(webapp2.RequestHandler): # Gets card data
def post(self):
data = self.request.body
query = [i.to_dict() for i in Card.query(Card.set == data).fetch()]
query = sorted(query, key=lambda k: k.get('order', 0))
card_json = json.dumps({"type": "cardfeed", "card": data, "data": query})
self.response.write(card_json)
class CardViewHandler(webapp2.RequestHandler): # Gets guide data for a card
def post(self):
data = int(self.request.body)
guides = Guide.query(Guide.cardid == data).fetch()
guidelist = []
currentuser = users.get_current_user()
if currentuser:
username = users.get_current_user().user_id()
else:
username = "NotAUser"
for guide in guides:
if guide.userid.get().userid == username:
current = "true"
else:
current = "false"
name = guide.userid.get().name
response = {
'user': name,
'content': guide.content,
'youtube': guide.youtube,
'created': guide.created.strftime("%H:%M %d/%m/%Y"),
'id': guide.gid,
'current': current
}
guidelist.append(response)
self.response.write(json.dumps(guidelist))
class LoginHandler(webapp2.RequestHandler): # Logs a user in or registers new user
def get(self):
currentuser = users.get_current_user()
if currentuser:
find = User.query(User.userid == currentuser.user_id()).fetch()
if find:
user = users.get_current_user().user_id()
callback = self.request.get("callback")
self.request.headers["Content-Type"] = 'application/json'
current = User.query(User.userid == user)
sresponse = ""
jsonop = None
for user in current:
sresponse += user.tojson()
            if not callback:  # no JSONP callback supplied
jsonop = sresponse
else:
jsonop = callback+"("+sresponse+")"
self.response.write(jsonop)
else:
guid = str(uuid4())
user = User(userid=currentuser.user_id(),
email=currentuser.email(),
name=guid,
index=guid.lower())
user.key = ndb.Key(User, currentuser.user_id())
user.put()
else:
self.redirect(users.create_login_url('/loggedin'))
class LogoutHandler(webapp2.RequestHandler): # Logs user out
def post(self):
self.redirect(users.create_logout_url('/'))
class UpdateHandler(webapp2.RequestHandler): # Updates users details
def post(self):
data = json.loads(self.request.body)
name = data["name"]
gamertag = data["gamertag"]
platform = data["platform"]
min_len = 4
max_len = 15
pattern = r"^(?i)[a-z0-9_-]{%s,%s}$" % (min_len, max_len)
user = users.get_current_user().user_id()
currentuser = User.get_by_id(user)
checkavail = User.query(User.index == name.lower()).fetch()
if checkavail:
currentuser.gamertag = gamertag
currentuser.platform = platform
currentuser.put()
stat = "userexists"
msg = "Profile updated, except Username (already in use)."
usr = currentuser.name
else:
if re.match(pattern, name): # regular expression to ensure that the username entered is valid
currentuser.name = name
currentuser.index = name.lower()
currentuser.platform = platform
currentuser.gamertag = gamertag
currentuser.put()
stat = "updated"
msg = "Profile updated successfully."
usr = name
else:
stat = "failed"
msg = "Please check the details you inputted."
usr = currentuser.name
status = json.dumps({"status": stat, "msg": msg, "user": usr})
self.response.write(status)
class GuideHandler(webapp2.RedirectHandler): # Saves users guide for a card
def post(self):
data = json.loads(self.request.body)
cardid = data["card"]
guide = data["body"]
url = data["link"]
user = users.get_current_user().user_id()
userid = ndb.Key(User, user)
ident = "G"+str(cardid) + str(user)
guideobj = Guide(gid=ident, userid=userid, cardid=cardid, content=guide, youtube=url)
guideobj.key = ndb.Key(Guide, ident)
guideobj.put()
stat = "Guide Saved"
status = json.dumps({"status": stat})
self.response.write(status)
class ManifestCollectionHandler(webapp2.RequestHandler): # Cron jobs is set to check Bungie.net DB version for Collections
def get(self):
request = urllib2.Request(mani, headers={"x-api-key": api_key})
doc = urllib2.urlopen(request).read()
json_object = json.loads(doc)
server = json_object['Response']['version'] # 1 finds the latest version online
database = Manifest.get_by_id(101) # 2 looks for the version entry
manifest_data = Manifest(id=101, version=server)
        if database:                                  # 3 if local version exists,
            local = database.version                  # 5 get local version from entry
        else:                                         # 4 if local version doesn't exist,
            manifest_data.put()                       #   write entry with current server version
            populatecollections(define)               #   and trigger definition storage
            local = server                            #   nothing stored yet; treat fresh data as current
        if server == local:                           # 6 if local and server match, moves on
            pass
        else:                                         # 7 if local and server don't match,
            populatecollections(define)               #   trigger definition storage
        manifest_data.put()                           # 8 writes version (updates checked date)
class ManifestSetHandler(webapp2.RequestHandler): # Cron jobs is set to check Bungie.net DB version for Sets
def get(self):
request = urllib2.Request(mani, headers={"x-api-key": api_key})
doc = urllib2.urlopen(request).read()
json_object = json.loads(doc)
server = json_object['Response']['version'] # 1 finds the latest version online
database = Manifest.get_by_id(102) # 2 looks for the version entry
manifest_data = Manifest(id=102, version=server)
        if database:                                  # 3 if local version exists,
            local = database.version                  # 5 get local version from entry
        else:                                         # 4 if local version doesn't exist,
            manifest_data.put()                       #   write entry with current server version
            populatesets(define)                      #   and trigger definition storage
            local = server                            #   nothing stored yet; treat fresh data as current
        if server == local:                           # 6 if local and server match, moves on
            pass
        else:                                         # 7 if local and server don't match,
            populatesets(define)                      #   trigger definition storage
        manifest_data.put()                           # 8 writes version (updates checked date)
class ManifestCardHandler(webapp2.RequestHandler): # Cron jobs is set to check Bungie.net DB version for Cards
def get(self):
request = urllib2.Request(mani, headers={"x-api-key": api_key})
doc = urllib2.urlopen(request).read()
json_object = json.loads(doc)
server = json_object['Response']['version'] # 1 finds the latest version online
database = Manifest.get_by_id(103) # 2 looks for the version entry
manifest_data = Manifest(id=103, version=server)
        if database:                                  # 3 if local version exists,
            local = database.version                  # 5 get local version from entry
        else:                                         # 4 if local version doesn't exist,
            manifest_data.put()                       #   write entry with current server version
            populatecards(define)                     #   and trigger definition storage
            local = server                            #   nothing stored yet; treat fresh data as current
        if server == local:                           # 6 if local and server match, moves on
            pass
        else:                                         # 7 if local and server don't match,
            populatecards(define)                     #   trigger definition storage
        manifest_data.put()                           # 8 writes version (updates checked date)
def populatecollections(self): # Populates Collections
request = urllib2.Request(self, headers={"x-api-key": api_key})
doc = urllib2.urlopen(request).read()
json_object = json.loads(doc)
order = 1
for c in json_object['Response']['themeCollection']:
cname = c['themeName']
cid = c['themeId']
cimg = c['normalResolution']['smallImage']['sheetPath']
cx = c['normalResolution']['smallImage']['rect']['x']
cy = c['normalResolution']['smallImage']['rect']['y']
col1 = Collection(name=cname, img=cimg, x=cx, y=cy, order=order)
col1.key = ndb.Key(Collection, cid)
col1.put()
order += 1
def populatesets(self): # Populates Sets
request = urllib2.Request(self, headers={"x-api-key": api_key})
doc = urllib2.urlopen(request).read()
json_object = json.loads(doc)
for c in json_object['Response']['themeCollection']:
cid = c['themeId']
order = 1
for s in c['pageCollection']:
colid = cid
sname = s['pageName']
sid = s['pageId']
simg = s['normalResolution']['smallImage']['sheetPath']
sx = s['normalResolution']['smallImage']['rect']['x']
sy = s['normalResolution']['smallImage']['rect']['y']
set1 = Set(collection=colid, name=sname, shortname=sid, img=simg, x=sx, y=sy, order=order)
set1.key = ndb.Key(Set, sid)
set1.put()
order += 1
def populatecards(self): # Populates Cards
request = urllib2.Request(self, headers={"x-api-key": api_key})
doc = urllib2.urlopen(request).read()
json_object = json.loads(doc)
for c in json_object['Response']['themeCollection']:
for s in c['pageCollection']:
sid = s['pageId']
order = 1
for ca in s['cardCollection']:
setid = sid
caid = ca['cardId']
caname = ca['cardName']
cabody = ca['cardDescription']
caicon = ca['normalResolution']['smallImage']['sheetPath']
caix = ca['normalResolution']['smallImage']['rect']['x']
caiy = ca['normalResolution']['smallImage']['rect']['y']
caimg = ca['normalResolution']['image']['sheetPath']
cax = ca['normalResolution']['image']['rect']['x']
cay = ca['normalResolution']['image']['rect']['y']
if 'cardIntro' not in ca:
caquote = ' '
else:
caquote = ca['cardIntro']
card1 = Card(set=setid, id=caid, cardid=caid, name=caname, quote=caquote, body=cabody, icon=caicon,
iconx=caix, icony=caiy, img=caimg, x=cax, y=cay, order=order)
card1.key = ndb.Key(Card, str(caid))
card1.put()
order += 1
app = webapp2.WSGIApplication([
('/grim', CollectionHandler),
('/set', SetHandler),
('/card', CardHandler),
('/getcollections', ManifestCollectionHandler),
('/getsets', ManifestSetHandler),
('/getcards', ManifestCardHandler),
('/cardview', CardViewHandler),
('/login', LoginHandler),
('/logout', LogoutHandler),
('/guide', GuideHandler),
('/update', UpdateHandler)
], debug=True)
| 44.267003 | 123 | 0.53141 |
9cfd2ea0e6f55b474e2a5939cee65dca74cc8453 | 1,258 | py | Python | DataValidation.py | Minituff/SSH-Tools | 0998db5b462b09779a0f02c886caca95989e0dee | [
"MIT"
] | null | null | null | DataValidation.py | Minituff/SSH-Tools | 0998db5b462b09779a0f02c886caca95989e0dee | [
"MIT"
] | null | null | null | DataValidation.py | Minituff/SSH-Tools | 0998db5b462b09779a0f02c886caca95989e0dee | [
"MIT"
] | null | null | null | import re
def is_valid_ipv4(address): # Validates IPv4 addresses.
pattern = re.compile(r"""
(?:
# Dotted variants:
(?:
# Decimal 1-255 (no leading 0's)
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)
|
0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
)
(?: # Repeat 0-3 times, separated by a dot
\.
(?:
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2}
|
0+[1-3]?[0-7]{0,2}
)
){0,3}
|
0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
|
0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
|
# Decimal notation, 1-4294967295:
429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
)
$
""", re.VERBOSE | re.IGNORECASE)
return pattern.match(address) is not None | 33.105263 | 79 | 0.399046 |
5d4628242bf76a5c65726daeb094e5c313d13c58 | 2,671 | py | Python | zhaquirks/xiaomi/mija/motion.py | peterVorman/zha-device-handlers | a2ab1ffd866d2b3b640e2b4b2a8b93bef98d6747 | [
"Apache-2.0"
] | null | null | null | zhaquirks/xiaomi/mija/motion.py | peterVorman/zha-device-handlers | a2ab1ffd866d2b3b640e2b4b2a8b93bef98d6747 | [
"Apache-2.0"
] | null | null | null | zhaquirks/xiaomi/mija/motion.py | peterVorman/zha-device-handlers | a2ab1ffd866d2b3b640e2b4b2a8b93bef98d6747 | [
"Apache-2.0"
] | null | null | null | """Xiaomi mija body sensor."""
import logging
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
)
from .. import (
LUMI,
XIAOMI_NODE_DESC,
BasicCluster,
MotionCluster,
OccupancyCluster,
PowerConfigurationCluster,
XiaomiQuickInitDevice,
)
from ... import Bus
from ...const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
NODE_DESCRIPTOR,
OUTPUT_CLUSTERS,
PROFILE_ID,
SKIP_CONFIGURATION,
)
XIAOMI_CLUSTER_ID = 0xFFFF
_LOGGER = logging.getLogger(__name__)
class Motion(XiaomiQuickInitDevice):
"""Custom device representing mija body sensors."""
def __init__(self, *args, **kwargs):
"""Init."""
self.battery_size = 9
self.motion_bus = Bus()
super().__init__(*args, **kwargs)
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=263
# device_version=1
# input_clusters=[0, 65535, 3, 25]
# output_clusters=[0, 3, 4, 5, 6, 8, 25]>
MODELS_INFO: [(LUMI, "lumi.sensor_motion")],
NODE_DESCRIPTOR: XIAOMI_NODE_DESC,
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
XIAOMI_CLUSTER_ID,
Ota.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Ota.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
],
}
},
}
replacement = {
SKIP_CONFIGURATION: True,
ENDPOINTS: {
1: {
DEVICE_TYPE: zha.DeviceType.OCCUPANCY_SENSOR,
INPUT_CLUSTERS: [
BasicCluster,
PowerConfigurationCluster,
Identify.cluster_id,
OccupancyCluster,
MotionCluster,
XIAOMI_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Ota.cluster_id,
],
}
},
}
| 25.932039 | 67 | 0.508798 |
60b31eccfd38be1a07c6707b24c55231d11de9d8 | 2,364 | py | Python | src/features/pca_latent_space_feature.py | hyzhak/zalando-research-fashionmnist-analyze | 5dfff74f80982769c7ffae746abc58fc7113113b | [
"MIT"
] | 1 | 2020-05-29T22:04:52.000Z | 2020-05-29T22:04:52.000Z | src/features/pca_latent_space_feature.py | hyzhak/zalando-research-fashionmnist-analyze | 5dfff74f80982769c7ffae746abc58fc7113113b | [
"MIT"
] | null | null | null | src/features/pca_latent_space_feature.py | hyzhak/zalando-research-fashionmnist-analyze | 5dfff74f80982769c7ffae746abc58fc7113113b | [
"MIT"
] | null | null | null | import luigi
import os
import pandas as pd
from sklearn.decomposition import PCA
import numpy as np
from src.features.latent_space_features import LatentSpaceFeature
from src.utils.params_to_filename import get_task_path
# should I use @inherits instead?
# https://luigi.readthedocs.io/en/stable/api/luigi.util.html
class PCALatentSpaceFeature(luigi.Task):
model = luigi.Parameter(
default='vgg16'
)
random_seed = luigi.IntParameter(
default=12345
)
def requires(self):
return LatentSpaceFeature(model=self.model)
def output(self):
task_dir = os.path.join(
'data', 'interim', get_task_path(self)
)
return {
'test': {
'features': luigi.LocalTarget(
os.path.join(task_dir, 'test.features.parquet.gzip')
),
'explained_variance_ratio': luigi.LocalTarget(
os.path.join(task_dir, 'test.evr.npy'),
format=luigi.format.Nop
),
},
'train': {
'features': luigi.LocalTarget(
os.path.join(task_dir, 'train.features.parquet.gzip')
),
'explained_variance_ratio': luigi.LocalTarget(
os.path.join(task_dir, 'train.evr.npy'),
format=luigi.format.Nop
),
},
}
def run(self):
# TODO: could be done in parallel
self._process(self.input()['train'], self.output()['train'])
self._process(self.input()['test'], self.output()['test'])
def _process(self, input_file, output_file):
X = pd.read_parquet(input_file.open('r'))
pca = PCA(random_state=self.random_seed)
pca.fit(X)
np.save(
output_file['explained_variance_ratio'].open('w'),
pca.explained_variance_ratio_.cumsum()
)
pca = PCA(random_state=self.random_seed, n_components=2)
embedded = pca.fit_transform(X)
df = pd.DataFrame(embedded,
columns=['x1', 'x2'],
dtype=np.float32)
output_file['features'].makedirs()
df.to_parquet(output_file['features'].path,
compression='brotli')
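# NOTE (editorial comment, not part of the original task): _process fits PCA
# twice -- once over all components just to record the cumulative explained-
# variance curve, and once with n_components=2 to produce the (x1, x2)
# embedding written out as parquet.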
if __name__ == '__main__':
luigi.run()
| 29.185185 | 73 | 0.56176 |
1bea4fc2ce5b14ef5515ab89a63ba2ae3a08bad5 | 188 | py | Python | inbm/dispatcher-agent/dispatcher/aota/aota_error.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 5 | 2021-12-13T21:19:31.000Z | 2022-01-18T18:29:43.000Z | inbm/dispatcher-agent/dispatcher/aota/aota_error.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 45 | 2021-12-30T17:21:09.000Z | 2022-03-29T22:47:32.000Z | inbm/dispatcher-agent/dispatcher/aota/aota_error.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 4 | 2022-01-26T17:42:54.000Z | 2022-03-30T04:48:04.000Z | """
AOTA update tool
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
class AotaError(Exception):
"""Class exception Module"""
pass
| 15.666667 | 45 | 0.659574 |
e62bf9c1c48ac3d4453239297c66b473411e5568 | 3,685 | py | Python | as11fixity.py | joshuatj/IFIscripts | 2817d53f82a447bb4cbb84bdbc7a87b9b2506de5 | [
"MIT"
] | null | null | null | as11fixity.py | joshuatj/IFIscripts | 2817d53f82a447bb4cbb84bdbc7a87b9b2506de5 | [
"MIT"
] | null | null | null | as11fixity.py | joshuatj/IFIscripts | 2817d53f82a447bb4cbb84bdbc7a87b9b2506de5 | [
"MIT"
] | 1 | 2020-07-09T06:14:31.000Z | 2020-07-09T06:14:31.000Z | '''
WORK IN PROGRESS WORKSHOP SCRIPT!!!
'''
import sys
import subprocess
import os
from glob import glob
import csv
from lxml import etree
import hashlib
#1
starting_dir = sys.argv[1]
#2
csv_report_filename = os.path.basename(starting_dir) + "_report"
csv_report = os.path.expanduser("~/Desktop/%s.csv") % csv_report_filename
print csv_report
print os.path.isfile(csv_report)
#5
checkfile = os.path.isfile(csv_report)
#3
def create_csv(csv_file, *args):
f = open(csv_file, 'wb')
try:
writer = csv.writer(f)
writer.writerow(*args)
finally:
f.close()
#6
create_csv(csv_report, ('Filename' , 'Title' , 'Episode_Number' , 'Md5_From_Xml' , 'Md5_from_Mxf' , 'Checksum_Result'))
#6
if checkfile == True:
print "CSV file already exists."
if checkfile == False:
print "No CSV file exists."
#3
for dirpath, dirnames, filenames in os.walk(starting_dir):
for filename in [f for f in filenames if f.endswith(".mxf")]:
full_path = os.path.join(dirpath, filename)
print dirpath + ' ---DIR PATH'
print filename + ' ---THIS IS FILEAME'
print full_path + ' ---THIS IS THE FULL_PATH'
#7
file_no_path = os.path.basename(full_path)
print file_no_path + ' ---THIS IS FILE_NO_PATH'
#8.1
file_no_extension = os.path.splitext(os.path.basename(file_no_path))[0]
print file_no_extension + ' ---THIS IS FILE_NO_EXTENSION'
#8.2
xml_file = file_no_extension + '.xml'
print xml_file + ' ---This is xml file'
full_xml_path = os.path.join(dirpath,xml_file)
        print full_xml_path + ' ---This is the full xml path'
checkfile = os.path.isfile(os.path.join(dirpath,xml_file))
if checkfile == True:
print 'XML file already exists.'
if checkfile == False:
print 'No XML file exists.'
#8.3
dpp_xml_parse = etree.parse(full_xml_path)
print dpp_xml_parse, 'hahaha'
dpp_xml_namespace = dpp_xml_parse.xpath('namespace-uri(.)')
print dpp_xml_namespace, 'lol'
checksum = dpp_xml_parse.findtext('//ns:MediaChecksumValue', namespaces={'ns':dpp_xml_namespace })
print checksum
#12
def md5(full_path):
hash_md5 = hashlib.md5()
with open(full_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
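        # reading in 4 KiB chunks keeps memory use flat even for large MXF files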
print md5(full_path)
#13
if md5(full_path)==checksum:
print 'Checksum matches!'
else:
print 'CHECKSUM DONT MATCH'
#14
"""
As-11 Fixity
1. Import all relevant modules x
2. Define path to starting folder x
3. Check if CSV report already exists x
4. Allow option to overwrite ?
5. Define name for CSv file x
6. Create CSV file with headings x
7. Search through subfolders to verify mxf file exists x
8.1 harvest filename eg GAA.MXF, remove extension eg MXF x
8.2 store new filename eg GAA x
8.3 make new filename variable x
8.4 check if GAA with .XML extension exists
9. Check if same filename
10. Check if folder is empty, note in CSV report
11. If only MXF, note in CSV report. MXF FILENAME + "No sidecar"
12. Extract MD5 from xml file and store as variable x
13. Generate MD5 checksum on MXF file x
14. Compare the 2 MD5s x
15. Write to CSV report
15.1 xml parse title
15.2 xml parse episode title/number
15.3 xml parse programme title
15.4 append list with all findings
8.1 harvest filename eg GAA.MXF, - file_no_path =
8.2 remove extension eg MXF store new filename eg GAA filename_no_extension
8.3 check if GAA with .XML extension exists - xml_filename
"""
| 27.296296 | 119 | 0.664858 |
25a12ec62f574552fae1f71a467f458e2e9aaf5c | 345 | py | Python | py/cn/config.py | sonya/eea | b09502354eb6ce7975a21aa97dd3c812852b0626 | [
"Apache-2.0"
] | 1 | 2020-08-12T19:42:45.000Z | 2020-08-12T19:42:45.000Z | py/cn/config.py | sonya/eea | b09502354eb6ce7975a21aa97dd3c812852b0626 | [
"Apache-2.0"
] | null | null | null | py/cn/config.py | sonya/eea | b09502354eb6ce7975a21aa97dd3c812852b0626 | [
"Apache-2.0"
] | null | null | null | #
# 33 sector io tables exist for 1987, 1990, 1992, 1995
# 40 sectors for 1997
# 17 sectors for 2000
# 42 and 122 sectors for 2002
# 42 sectors for 2005
# for 2007:
# 42 sectors (010* - io, 020* - intermediate only, 030* - supply/use)
# 135 sectors (040*)
#
# env tables: 1999, 2003-2009 inclusive
SCHEMA = "cn"
STUDY_YEARS = [2005, 2007]
| 20.294118 | 70 | 0.669565 |
dad119814ce9591a60c63dabefdc6e0f9a132154 | 482 | py | Python | 2020/day03/one.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | 2020/day03/one.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | 2020/day03/one.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | tree_count = 0
with open("input.txt") as file_handler:
for n, line in enumerate(file_handler):
if n == 0:
continue
if len(line.strip()) == 0:
continue
charPos = n*3
if charPos > len(line.strip()):
multiples = int(charPos / len(line.strip()))
charPos = charPos - (multiples * len(line.strip()))
char = line[charPos]
if char == "#":
tree_count += 1
print(tree_count)
| 22.952381 | 63 | 0.518672 |
ee4ba67a3b6574de943abf4f79dcd139404de52e | 5,984 | py | Python | src/python/pants/backend/jvm/tasks/bundle_create.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/tasks/bundle_create.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/tasks/bundle_create.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jvm_binary_task import JvmBinaryTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.fs import archive
from pants.util.dirutil import safe_mkdir
class BundleCreate(JvmBinaryTask):
@classmethod
def register_options(cls, register):
super(BundleCreate, cls).register_options(register)
register('--deployjar', action='store_true', default=False,
fingerprint=True,
help="Expand 3rdparty jars into loose classfiles in the bundle's root dir. "
"If unset, the root will contain internal classfiles only, and 3rdparty jars "
"will go into the bundle's libs dir.")
register('--archive', choices=list(archive.TYPE_NAMES),
fingerprint=True,
help='Create an archive of this type from the bundle.')
register('--archive-prefix', action='store_true', default=False,
fingerprint=True,
help='If --archive is specified, use the target basename as the path prefix.')
@classmethod
def product_types(cls):
return ['jvm_bundles']
def __init__(self, *args, **kwargs):
super(BundleCreate, self).__init__(*args, **kwargs)
self._outdir = self.get_options().pants_distdir
self._prefix = self.get_options().archive_prefix
self._archiver_type = self.get_options().archive
self._create_deployjar = self.get_options().deployjar
class App(object):
"""A uniform interface to an app."""
@staticmethod
def is_app(target):
return isinstance(target, (JvmApp, JvmBinary))
def __init__(self, target):
assert self.is_app(target), '{} is not a valid app target'.format(target)
self.address = target.address
self.binary = target if isinstance(target, JvmBinary) else target.binary
self.bundles = [] if isinstance(target, JvmBinary) else target.payload.bundles
self.basename = target.basename
def execute(self):
archiver = archive.archiver(self._archiver_type) if self._archiver_type else None
for target in self.context.target_roots:
for app in map(self.App, filter(self.App.is_app, [target])):
basedir = self.bundle(app)
# NB(Eric Ayers): Note that this product is not housed/controlled under .pants.d/ Since
# the bundle is re-created every time, this shouldn't cause a problem, but if we ever
# expect the product to be cached, a user running an 'rm' on the dist/ directory could
# cause inconsistencies.
jvm_bundles_product = self.context.products.get('jvm_bundles')
jvm_bundles_product.add(target, os.path.dirname(basedir)).append(os.path.basename(basedir))
if archiver:
archivepath = archiver.create(
basedir,
self._outdir,
app.basename,
prefix=app.basename if self._prefix else None
)
self.context.log.info('created {}'.format(os.path.relpath(archivepath, get_buildroot())))
def bundle(self, app):
"""Create a self-contained application bundle.
The bundle will contain the target classes, dependencies and resources.
"""
assert(isinstance(app, BundleCreate.App))
def verbose_symlink(src, dst):
try:
os.symlink(src, dst)
except OSError as e:
self.context.log.error("Unable to create symlink: {0} -> {1}".format(src, dst))
raise e
bundle_dir = os.path.join(self._outdir, '{}-bundle'.format(app.basename))
self.context.log.info('creating {}'.format(os.path.relpath(bundle_dir, get_buildroot())))
safe_mkdir(bundle_dir, clean=True)
classpath = OrderedSet()
# If creating a deployjar, we add the external dependencies to the bundle as
# loose classes, and have no classpath. Otherwise we add the external dependencies
# to the bundle as jars in a libs directory.
if not self._create_deployjar:
lib_dir = os.path.join(bundle_dir, 'libs')
os.mkdir(lib_dir)
jarmap = self.context.products.get('jars')
def add_jars(target):
generated = jarmap.get(target)
if generated:
for base_dir, internal_jars in generated.items():
for internal_jar in internal_jars:
verbose_symlink(os.path.join(base_dir, internal_jar), os.path.join(lib_dir, internal_jar))
classpath.add(internal_jar)
app.binary.walk(add_jars, lambda t: t != app.binary)
# Add external dependencies to the bundle.
for basedir, external_jar in self.list_external_jar_dependencies(app.binary):
path = os.path.join(basedir, external_jar)
verbose_symlink(path, os.path.join(lib_dir, external_jar))
classpath.add(external_jar)
bundle_jar = os.path.join(bundle_dir, '{}.jar'.format(app.binary.basename))
with self.monolithic_jar(app.binary, bundle_jar,
with_external_deps=self._create_deployjar) as jar:
self.add_main_manifest_entry(jar, app.binary)
if classpath:
jar.classpath([os.path.join('libs', jar) for jar in classpath])
for bundle in app.bundles:
for path, relpath in bundle.filemap.items():
bundle_path = os.path.join(bundle_dir, relpath)
if not os.path.exists(path):
raise TaskError('Given path: {} does not exist in target {}'.format(
path, app.address.spec))
safe_mkdir(os.path.dirname(bundle_path))
verbose_symlink(path, bundle_path)
return bundle_dir
| 40.707483 | 104 | 0.68516 |
b474a116d7251fa2ed16be8606e871bba1c8f205 | 9,156 | py | Python | transformer/model_concat.py | muqiaoy/dl_signal | 3a30d14982016644bfc96a7d1ca0109b441f17fd | [
"MIT"
] | 54 | 2019-10-24T06:32:07.000Z | 2022-03-23T01:56:10.000Z | transformer/model_concat.py | muqiaoy/dl_signal | 3a30d14982016644bfc96a7d1ca0109b441f17fd | [
"MIT"
] | null | null | null | transformer/model_concat.py | muqiaoy/dl_signal | 3a30d14982016644bfc96a7d1ca0109b441f17fd | [
"MIT"
] | 6 | 2019-10-24T06:17:54.000Z | 2021-07-12T01:54:48.000Z | import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from modules.transformer import TransformerConcatEncoder, TransformerConcatDecoder
from models import *
from utils import count_parameters
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class TransformerModel(nn.Module):
def __init__(self, time_step, input_dims, hidden_size, embed_dim, output_dim, num_heads, attn_dropout, relu_dropout, res_dropout, out_dropout, layers, attn_mask=False):
"""
        Construct a basic Transformer model.
:param input_dims: The input dimensions of the various modalities.
:param hidden_size: The hidden dimensions of the fc layer.
:param embed_dim: The dimensions of the embedding layer.
:param output_dim: The dimensions of the output (128 in MuiscNet).
:param num_heads: The number of heads to use in the multi-headed attention.
:param attn_dropout: The dropout following self-attention sm((QK)^T/d)V.
:param relu_droput: The dropout for ReLU in residual block.
:param res_dropout: The dropout of each residual block.
:param out_dropout: The dropout of output layer.
:param layers: The number of transformer blocks.
:param attn_mask: A boolean indicating whether to use attention mask (for transformer decoder).
"""
super(TransformerModel, self).__init__()
self.conv = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=16, kernel_size=6, stride=1),
nn.BatchNorm1d(16),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3, stride=1),
nn.BatchNorm1d(32),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, stride=1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, stride=1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
)
[self.orig_d_a, self.orig_d_b] = input_dims
assert self.orig_d_a == self.orig_d_b
        # output length of the conv stack above: five (conv, maxpool) pairs
        # applied to a 1-D sequence of length orig_d_a + orig_d_b
        channels = ((((((((((self.orig_d_a + self.orig_d_b -6)//1+1 -2)//2+1 -3)//1+1 -2)//2+1
            -3)//1+1 -2)//2+1 -3)//1+1 -2)//2+1 -3)//1+1 -2)//2+1
self.d_x = 128*channels
final_out = embed_dim
h_out = hidden_size
self.num_heads = num_heads
self.layers = layers
self.attn_dropout = attn_dropout
self.relu_dropout = relu_dropout
self.res_dropout = res_dropout
self.attn_mask = attn_mask
self.embed_dim = embed_dim
# Transformer networks
self.trans = self.get_network()
print("Encoder Model size: {0}".format(count_parameters(self.trans)))
# Projection layers
self.proj = nn.Linear(self.d_x, self.embed_dim)
self.out_fc1 = nn.Linear(final_out, h_out)
self.out_fc2 = nn.Linear(h_out, output_dim)
self.out_dropout = nn.Dropout(out_dropout)
def get_network(self):
return TransformerConcatEncoder(embed_dim=self.embed_dim, num_heads=self.num_heads, layers=self.layers, attn_dropout=self.attn_dropout,
relu_dropout=self.relu_dropout, res_dropout=self.res_dropout, attn_mask=self.attn_mask)
def forward(self, x):
"""
x should have dimension [seq_len, batch_size, n_features] (i.e., L, N, C).
"""
time_step, batch_size, n_features = x.shape
x = x.view(-1, 1, n_features)
x = self.conv(x)
x = x.reshape(time_step, batch_size, self.d_x)
x = self.proj(x)
# Pass the input through individual transformers
h_x = self.trans(x)
h_concat = torch.cat([h_x], dim=-1)
output = self.out_fc2(self.out_dropout(F.relu(self.out_fc1(h_concat))))
        # No sigmoid here: we train with BCEWithLogitsLoss, which applies the sigmoid internally and is more numerically stable
return output
class TransformerGenerationModel(nn.Module):
def __init__(self, input_dims, hidden_size, embed_dim, output_dim, num_heads, attn_dropout, relu_dropout, res_dropout, out_dropout, layers, attn_mask=False, src_mask=False, tgt_mask=False):
super(TransformerGenerationModel, self).__init__()
[orig_d_a, orig_d_b] = input_dims
self.orig_d_x = orig_d_a + orig_d_b
self.conv = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=16, kernel_size=6, stride=1),
nn.BatchNorm1d(16),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3, stride=1),
nn.BatchNorm1d(32),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3, stride=1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, stride=1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(2, stride=2),
)
channels = ((((((((((self.orig_d_x -6)//1+1 -2)//2+1 -3)//1+1 -2)//2+1 -3)//1+1 -2)//2+1 -3)//1+1 -2)//2+1 -3)//1+1 -2)//2+1
self.d_x = 128*channels
final_out = embed_dim
h_out = hidden_size
self.num_heads = num_heads
self.layers = layers
self.attn_dropout = attn_dropout
self.relu_dropout = relu_dropout
self.res_dropout = res_dropout
self.attn_mask = attn_mask
self.embed_dim = embed_dim
# Transformer networks
self.trans_encoder = self.get_encoder_network()
self.trans_decoder = self.get_decoder_network()
print("Encoder Model size: {0}".format(count_parameters(self.trans_encoder)))
print("Decoder Model size: {0}".format(count_parameters(self.trans_decoder)))
# Projection layers
self.proj_enc = nn.Linear(self.d_x, self.embed_dim)
self.proj_dec = nn.Linear(self.orig_d_x, self.embed_dim)
self.out_fc1 = nn.Linear(final_out, h_out)
self.out_fc2 = nn.Linear(h_out, output_dim)
self.out_dropout = nn.Dropout(out_dropout)
def get_encoder_network(self):
return TransformerConcatEncoder(embed_dim=self.embed_dim, num_heads=self.num_heads, layers=self.layers, attn_dropout=self.attn_dropout,
relu_dropout=self.relu_dropout, res_dropout=self.res_dropout, attn_mask=self.attn_mask)
def get_decoder_network(self):
return TransformerConcatDecoder(embed_dim=self.embed_dim, num_heads=self.num_heads, layers=self.layers, src_attn_dropout=self.attn_dropout,
relu_dropout=self.relu_dropout, res_dropout=self.res_dropout, tgt_attn_dropout=self.attn_dropout)
def forward(self, x, y=None, max_len=None):
"""
x should have dimension [seq_len, batch_size, n_features] (i.e., L, N, C).
"""
time_step, batch_size, n_features = x.shape
# encoder
x = x.view(-1, 1, n_features)
x = self.conv(x)
x = x.reshape(time_step, batch_size, self.d_x)
x = self.proj_enc(x)
h_x = self.trans_encoder(x)
# decoder
if y is not None:
seq_len, batch_size, n_features2 = y.shape
y = y[:-1, :, :]
sos = torch.zeros(1, batch_size, n_features2).cuda()
y = torch.cat([sos, y], dim=0) # add <sos> to front
y = self.proj_dec(y)
out = self.trans_decoder(input=y, enc=h_x)
out_concat = torch.cat([out], dim=-1)
output = self.out_fc2(self.out_dropout(F.relu(self.out_fc1(out_concat))))
elif max_len is not None:
dec_x = torch.zeros(1, batch_size, n_features).cuda()
dec_x = self.proj_dec(dec_x)
dec_x = self.trans_decoder(input=dec_x, enc=h_x)
y = dec_x
for i in range(max_len - 1):
dec_x = self.trans_decoder(input=y, enc=h_x)
y = torch.cat([y, dec_x[-1].unsqueeze(0)], dim=0)
out_concat = torch.cat([y], dim=-1)
output = self.out_fc2(self.out_dropout(F.relu(self.out_fc1(out_concat))))
else:
print("Only one of y and max_len should be input.")
assert False
return output
| 41.808219 | 193 | 0.614788 |
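A minimal sketch (not from the repo) instantiating the TransformerModel above with dummy tensors, to make the expected [seq_len, batch, features] layout concrete; all hyperparameter values are illustrative:

import torch

model = TransformerModel(
    time_step=16, input_dims=[128, 128], hidden_size=256, embed_dim=64,
    output_dim=128, num_heads=4, attn_dropout=0.1, relu_dropout=0.1,
    res_dropout=0.1, out_dropout=0.1, layers=2,
)
x = torch.randn(16, 8, 256)  # seq_len=16, batch=8, n_features = orig_d_a + orig_d_b
logits = model(x)            # -> [16, 8, 128]; raw logits for BCEWithLogitsLoss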
52a5b34cd25e768bd8552fdcff40d6f2726eaebd | 910 | py | Python | run.py | RafaelCenzano/backer | 695607462e1362307986c2d3c34dfbb2b1a816cd | [
"Apache-2.0"
] | null | null | null | run.py | RafaelCenzano/backer | 695607462e1362307986c2d3c34dfbb2b1a816cd | [
"Apache-2.0"
] | null | null | null | run.py | RafaelCenzano/backer | 695607462e1362307986c2d3c34dfbb2b1a816cd | [
"Apache-2.0"
] | null | null | null | import os
import backer
import config
import logging
import datetime
from time import sleep as delay
'''
Run file
'''
# Create config object
configurations = config.Config()
# create logger
logger = logging.getLogger(__name__)
# create console and file handler
term = logging.StreamHandler()
logfile = logging.FileHandler('/' + os.path.join(configurations.BACKUP_FOLDER, 'data', 'backer.log'))
# create formatter and add to term and logfile
formatter = logging.Formatter('%(asctime)s %(name)s - %(levelname)s : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
term.setFormatter(formatter)
logfile.setFormatter(formatter)
# add term and logfile to logger
logger.addHandler(term)
logger.addHandler(logfile)
logger.setLevel(logging.INFO)
# Create the run object
runObject = backer.Core(configurations)
# Create archive
logger.info('Begin zip archive')
runObject.zipArchive()
logger.info('Complete program') | 22.75 | 115 | 0.758242 |
c260940d982de86c357fc3989b6bb79cf3e90664 | 1,145 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/currency_code_error.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/currency_code_error.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/currency_code_error.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.errors',
marshal='google.ads.googleads.v8',
manifest={
'CurrencyCodeErrorEnum',
},
)
class CurrencyCodeErrorEnum(proto.Message):
r"""Container for enum describing possible currency code errors.
"""
class CurrencyCodeError(proto.Enum):
r"""Enum describing possible currency code errors."""
UNSPECIFIED = 0
UNKNOWN = 1
UNSUPPORTED = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.358974 | 74 | 0.710917 |
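A short sketch of how the generated enum is read; the values mirror the manifest above (proto-plus enums behave like IntEnum):

err = CurrencyCodeErrorEnum.CurrencyCodeError.UNSUPPORTED
assert err == 2
print(err.name)  # "UNSUPPORTED"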
140047202fea270ea19be6ab36957ba3d908ad7b | 2,866 | py | Python | BaseGame.py | DariHernandez/sports-scraper | 15a535d7f42e514551122edd46c5fd045804f0ef | [
"MIT"
] | null | null | null | BaseGame.py | DariHernandez/sports-scraper | 15a535d7f42e514551122edd46c5fd045804f0ef | [
"MIT"
] | null | null | null | BaseGame.py | DariHernandez/sports-scraper | 15a535d7f42e514551122edd46c5fd045804f0ef | [
"MIT"
] | null | null | null | import datetime
class BaseGame(object):
def __init__(
self, home: str, away: str, home_odds: int, away_odds: int
):
self.home_team = home
self.away_team = away
self.key = f"{away}@{home}"
self.home_odds = home_odds
self.away_odds = away_odds
self.game_date = None
self.home_spread = None
self.home_spread_odds = None
self.away_spread = None
self.away_spread_odds = None
        # to_dict() reads over_total/under_total/site, so initialise them here;
        # otherwise calling to_dict() before add_over_under()/set_site() (as the
        # __main__ demo below does) raises AttributeError.
        self.over_total = None
        self.over_odds = None
        self.under_total = None
        self.under_odds = None
        self.date = None
        self.sport = None
        self.site = None
        self.day: str = ''
self._set_date_to_now()
def _set_date_to_now(self):
date = datetime.datetime.now()
date_ymd = date.strftime("%Y-%m-%d",)
self.date = date
self.day = date_ymd
def set_game_date(self, date:datetime.datetime):
self.game_date = date
        # split date into a month-day string
date_m_d = date.strftime("%m-%d",)
        # extend the key to away@home-month-day so repeat matchups get distinct keys
self.key += f"-{date_m_d}"
def add_spread(self, home_spread:float, home_s_odds:int, away_spread:float, away_s_odds:int):
self.home_spread = home_spread
self.home_spread_odds = home_s_odds
self.away_spread = away_spread
self.away_spread_odds = away_s_odds
def add_over_under(self, over_total:float, over_odds:int, under_total:float, under_odds:int):
self.over_total = over_total
self.over_odds = over_odds
self.under_total = under_total
self.under_odds = under_odds
def set_sport(self, sport):
self.sport = sport
def set_site(self, site):
self.site = site
def __repr__(self) -> str:
return f"<{self.key}, {self.home_team}:{self.home_odds}, {self.away_team}:{self.away_odds}>"
def to_dict(self) -> dict:
return {
"home_team": self.home_team,
"home_odds": self.home_odds,
"home_spread": self.home_spread,
"home_spread_odds": self.home_spread_odds,
"away_team": self.away_team,
"away_odds": self.away_odds,
"away_spread": self.away_spread,
"away_spread_odds": self.away_spread_odds,
"over_total": self.over_total,
"over_odds": self.over_odds,
"under_total": self.under_total,
"under_odds": self.under_odds,
"key": self.key,
"date": self.date,
"day": self.day,
"game_date": self.game_date,
"sport": self.sport,
"site": self.site
}
# comment to change file
if __name__ == "__main__":
print(BaseGame("the BARES", "fuootball team ", 0, -143).to_dict())
print(BaseGame("the BARES", "fuootball team ", 270, -330).to_dict())
| 29.244898 | 100 | 0.591068 |
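A sketch of the intended population flow for a game record, using only the methods defined above (team names, odds and lines are illustrative):

import datetime

game = BaseGame("Home FC", "Away FC", -110, 105)
game.set_game_date(datetime.datetime(2021, 10, 3))
game.add_spread(-3.5, -110, 3.5, -110)
game.add_over_under(45.5, -110, 45.5, -110)
game.set_sport("nfl")
game.set_site("example-book")
print(game.to_dict()["key"])  # "Away FC@Home FC-10-03"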
14223e4ffcfce0d251f9f3d6a1c567f48de98f33 | 3,920 | py | Python | selfdrive/car/car_helpers.py | johnwaynerobot/openpilot | b63de593824562b35e08f15b31a762e76b062640 | [
"MIT"
] | null | null | null | selfdrive/car/car_helpers.py | johnwaynerobot/openpilot | b63de593824562b35e08f15b31a762e76b062640 | [
"MIT"
] | null | null | null | selfdrive/car/car_helpers.py | johnwaynerobot/openpilot | b63de593824562b35e08f15b31a762e76b062640 | [
"MIT"
] | 1 | 2018-09-05T16:29:40.000Z | 2018-09-05T16:29:40.000Z | import os
import time
from common.basedir import BASEDIR
from common.realtime import sec_since_boot
from common.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
def load_interfaces(x):
ret = {}
for interface in x:
try:
imp = __import__('selfdrive.car.%s.interface' % interface, fromlist=['CarInterface']).CarInterface
except ImportError:
imp = None
for car in x[interface]:
ret[car] = imp
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
  # - keys are all the car names for which we have an interface
  # - values are lists of specific car models for a given car
interface_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
car_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % car_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
interface_names[car_name] = model_names
except (ImportError, IOError):
pass
return interface_names
# imports from directory selfdrive/car/<name>/
interfaces = load_interfaces(_get_interface_names())
# **** for use live only ****
def fingerprint(logcan, timeout):
if os.getenv("SIMULATOR2") is not None:
return ("simulator2", None)
elif os.getenv("SIMULATOR") is not None:
return ("simulator", None)
cloudlog.warning("waiting for fingerprint...")
candidate_cars = all_known_cars()
finger = {}
st = None
st_passive = sec_since_boot() # only relevant when passive
can_seen = False
while 1:
for a in messaging.drain_sock(logcan):
for can in a.can:
can_seen = True
        # ignore everything not on bus 0 and addresses wider than 11 bits
        # (extended IDs), which are usually sporadic and hard to include in fingerprints
if can.src == 0 and can.address < 0x800:
finger[can.address] = len(can.dat)
candidate_cars = eliminate_incompatible_cars(can, candidate_cars)
if st is None and can_seen:
st = sec_since_boot() # start time
ts = sec_since_boot()
# if we only have one car choice and the time_fingerprint since we got our first
# message has elapsed, exit. Toyota needs higher time_fingerprint, since DSU does not
# broadcast immediately
if len(candidate_cars) == 1 and st is not None:
# TODO: better way to decide to wait more if Toyota
time_fingerprint = 1.0 if ("TOYOTA" in candidate_cars[0] or "LEXUS" in candidate_cars[0]) else 0.1
if (ts-st) > time_fingerprint:
break
# bail if no cars left or we've been waiting too long
elif len(candidate_cars) == 0 or (timeout and (ts - st_passive) > timeout):
return None, finger
time.sleep(0.01)
cloudlog.warning("fingerprinted %s", candidate_cars[0])
return (candidate_cars[0], finger)
def get_car(logcan, sendcan=None, passive=True):
# TODO: timeout only useful for replays so controlsd can start before unlogger
timeout = 2. if passive else None
candidate, fingerprints = fingerprint(logcan, timeout)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
if passive:
candidate = "mock"
else:
return None, None
interface_cls = interfaces[candidate]
if interface_cls is None:
cloudlog.warning("car matched %s, but interface wasn't available or failed to import" % candidate)
return None, None
  # 2018-09-07: debug output for fingerprint matching; use cloudlog so it
  # lands in the same log stream as the rest of this module
  cloudlog.debug("car_helpers.py: candidate=%s fingerprints=%r", candidate, fingerprints)
  params = interface_cls.get_params(candidate, fingerprints)
  cloudlog.debug("car_helpers.py: params=%r", params)
return interface_cls(params, sendcan), params
| 34.385965 | 108 | 0.703316 |
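A sketch of the shape of data the helpers above exchange (the model names are illustrative; outside the openpilot tree every interface import fails and maps to None):

names = {"honda": ["HONDA CIVIC 2016", "HONDA CR-V 2017"]}
ifaces = load_interfaces(names)
# -> {"HONDA CIVIC 2016": CarInterface-or-None, "HONDA CR-V 2017": CarInterface-or-None}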
185c5c72c2f8c35a766736de4b61be79b81b7d01 | 135 | py | Python | GUI.py | Mostafa-Shalaby/PyATM | f0e7cac15df03c8cd4492ed87e8dfe34a82337ff | [
"MIT"
] | null | null | null | GUI.py | Mostafa-Shalaby/PyATM | f0e7cac15df03c8cd4492ed87e8dfe34a82337ff | [
"MIT"
] | null | null | null | GUI.py | Mostafa-Shalaby/PyATM | f0e7cac15df03c8cd4492ed87e8dfe34a82337ff | [
"MIT"
] | null | null | null | # Imports Interface and tkinter modules
from Interface.AppWindow import Window
# Run this code to see magic happen
Window().mainloop() | 27 | 39 | 0.8 |
0808f6a03b7ca683638e419173ce6d0f18ae8669 | 3,123 | py | Python | Project/serve/predict.py | kawano8811/sagemaker-deployment | d9647cf4cf3015ee337b4d9275cb2de2bf3e9cd6 | [
"MIT"
] | null | null | null | Project/serve/predict.py | kawano8811/sagemaker-deployment | d9647cf4cf3015ee337b4d9275cb2de2bf3e9cd6 | [
"MIT"
] | null | null | null | Project/serve/predict.py | kawano8811/sagemaker-deployment | d9647cf4cf3015ee337b4d9275cb2de2bf3e9cd6 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
# Load the store model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == 'text/plain':
data = serialized_input_data.decode('utf-8')
return data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
return str(prediction_output)
def predict_fn(input_data, model):
print('Inferring sentiment of input data.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.word_dict is None:
raise Exception('Model has not been loaded properly, no word_dict.')
    # Process input_data so that it is ready to be sent to our model:
    #   data_X   - a sequence of length 500 representing the converted review
    #   data_len - the length of the review
data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))
# Using data_X and data_len we construct an appropriate input tensor. Remember
# that our model expects input data of the form 'len, review[500]'.
data_pack = np.hstack((data_len, data_X))
data_pack = data_pack.reshape(1, -1)
data = torch.from_numpy(data_pack)
data = data.to(device)
# Make sure to put the model into evaluation mode
model.eval()
    # Apply the model to the input data; `result` is a numpy array containing
    # a single integer which is either 1 or 0
with torch.no_grad():
prediction = model(data).detach().cpu().numpy()
result = np.round(prediction).astype(int)
return result | 33.945652 | 107 | 0.701889 |
d7541a3d3a27732cba56d41f43273251c89fc3f0 | 1,728 | py | Python | app/middleware/correlation.py | uptimedog/Roadmap | e6cebe70c53041799343505aa296af939371a3da | [
"Apache-2.0"
] | 1 | 2022-01-26T10:04:00.000Z | 2022-01-26T10:04:00.000Z | app/middleware/correlation.py | uptimedog/Roadmap | e6cebe70c53041799343505aa296af939371a3da | [
"Apache-2.0"
] | 57 | 2021-06-12T23:28:50.000Z | 2022-03-25T09:27:43.000Z | app/middleware/correlation.py | uptimedog/roadmap | 57b68eaace463a99cfac151bff75e4baee2edaa0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Uptimedog
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from threading import local
import uuid
_locals = local()
class Correlation():
"""
Correlation Middleware
Add a unique correlation ID to all incoming requests
Attributes:
get_response: a callable function
"""
def __init__(self, get_response):
"""Inits Correlation"""
self.get_response = get_response
def __call__(self, request):
"""Execute Middleware
Args:
request: request instance
"""
request.META["X-Correlation-ID"] = str(uuid.uuid4())
_locals.correlation_id = request.META["X-Correlation-ID"]
response = self.get_response(request)
response['X-Correlation-ID'] = request.META["X-Correlation-ID"]
return response
class CorrelationFilter(logging.Filter):
"""
Correlation Filter
Append the correlation ID to all log records
"""
def filter(self, record):
if not hasattr(record, 'correlation_id'):
record.correlation_id = ""
if hasattr(_locals, 'correlation_id'):
record.correlation_id = _locals.correlation_id
return True
| 24.685714 | 74 | 0.677083 |
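A sketch (assumed module path) of the Django settings that wire the middleware and filter together so every log record carries the correlation ID:

MIDDLEWARE = [
    # ... Django's default middleware ...
    "app.middleware.correlation.Correlation",
]

LOGGING = {
    "version": 1,
    "filters": {
        "correlation": {"()": "app.middleware.correlation.CorrelationFilter"},
    },
    "formatters": {
        "default": {"format": "%(asctime)s [%(correlation_id)s] %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "filters": ["correlation"],
            "formatter": "default",
        },
    },
    "root": {"handlers": ["console"], "level": "INFO"},
}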
e114f2177c44058cb78c0b7696986470533d86ed | 459 | py | Python | src/rapidpro_community_portal/apps/portal_pages/migrations/0017_auto_20150504_1517.py | rapidpro/rapidpro-community-portal | db86e757a24888bebc4d30f451189a2b743396da | [
"Apache-2.0"
] | 19 | 2015-09-15T09:17:54.000Z | 2021-07-13T06:09:49.000Z | src/rapidpro_community_portal/apps/portal_pages/migrations/0017_auto_20150504_1517.py | rapidpro/rapidpro-community-portal | db86e757a24888bebc4d30f451189a2b743396da | [
"Apache-2.0"
] | 222 | 2015-03-13T15:52:20.000Z | 2021-04-08T19:18:41.000Z | src/rapidpro_community_portal/apps/portal_pages/migrations/0017_auto_20150504_1517.py | rapidpro/rapidpro-community-portal | db86e757a24888bebc4d30f451189a2b743396da | [
"Apache-2.0"
] | 11 | 2016-03-01T19:56:52.000Z | 2021-07-04T22:42:14.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal_pages', '0016_marketplaceindexpage'),
]
operations = [
migrations.AlterField(
model_name='focusarea',
name='name',
field=models.CharField(max_length=255, unique=True),
preserve_default=True,
),
]
| 21.857143 | 64 | 0.614379 |
b3366cca376171285d92d6772ba364f49db1f348 | 41,244 | py | Python | pandas/core/base.py | lrusnac/pandas | a170e977dc8cc270bdcdee904658f9b6e20c8e86 | [
"BSD-3-Clause"
] | 1 | 2020-09-01T12:13:29.000Z | 2020-09-01T12:13:29.000Z | pandas/core/base.py | lrusnac/pandas | a170e977dc8cc270bdcdee904658f9b6e20c8e86 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/base.py | lrusnac/pandas | a170e977dc8cc270bdcdee904658f9b6e20c8e86 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T15:07:11.000Z | 2022-03-08T15:07:11.000Z | """
Base and utility classes for pandas objects.
"""
import builtins
import textwrap
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Optional,
TypeVar,
Union,
cast,
)
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import DtypeObj, IndexLabel
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, remove_na_arraylike
from pandas.core import algorithms
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import duplicated, unique1d, value_counts
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import create_series_with_explicit_dtype
import pandas.core.nanops as nanops
if TYPE_CHECKING:
from pandas import Categorical
_shared_docs: Dict[str, str] = dict()
_indexops_doc_kwargs = dict(
klass="IndexOpsMixin",
inplace="",
unique="IndexOpsMixin",
duplicated="IndexOpsMixin",
)
_T = TypeVar("_T", bound="IndexOpsMixin")
class PandasObject(DirNamesMixin):
"""
Baseclass for various pandas objects.
"""
_cache: Dict[str, Any]
@property
def _constructor(self):
"""
        Class constructor (for this class it's just `__class__`).
"""
return type(self)
def __repr__(self) -> str:
"""
Return a string representation for a particular object.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key: Optional[str] = None) -> None:
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, "_cache", None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, "memory_usage"):
# pandas\core\base.py:84: error: "PandasObject" has no attribute
# "memory_usage" [attr-defined]
mem = self.memory_usage(deep=True) # type: ignore[attr-defined]
return int(mem if is_scalar(mem) else mem.sum())
# no memory_usage attribute, so fall back to object's 'sizeof'
return super().__sizeof__()
class NoNewAttributesMixin:
"""
Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""
Prevents setting additional attributes.
"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key: str, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(f"You cannot add any new attribute '{key}'")
object.__setattr__(self, key, value)
class DataError(Exception):
pass
class SpecificationError(Exception):
pass
class SelectionMixin:
"""
    Mixin implementing the selection & aggregation interface on a group-like
    object; sub-classes need to define: obj, exclusions
"""
_selection: Optional[IndexLabel] = None
_internal_names = ["_cache", "__setstate__"]
_internal_names_set = set(_internal_names)
_builtin_table = {builtins.sum: np.sum, builtins.max: np.max, builtins.min: np.min}
_cython_table = {
builtins.sum: "sum",
builtins.max: "max",
builtins.min: "min",
np.all: "all",
np.any: "any",
np.sum: "sum",
np.nansum: "sum",
np.mean: "mean",
np.nanmean: "mean",
np.prod: "prod",
np.nanprod: "prod",
np.std: "std",
np.nanstd: "std",
np.var: "var",
np.nanvar: "var",
np.median: "median",
np.nanmedian: "median",
np.max: "max",
np.nanmax: "max",
np.min: "min",
np.nanmin: "min",
np.cumprod: "cumprod",
np.nancumprod: "cumprod",
np.cumsum: "cumsum",
np.nancumsum: "cumsum",
}
@property
def _selection_name(self):
"""
Return a name for myself;
This would ideally be called the 'name' property,
but we cannot conflict with the Series.name property which can be set.
"""
return self._selection
@property
def _selection_list(self):
if not isinstance(
self._selection, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)
):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
# pandas\core\base.py:195: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
if self._selection is None or isinstance(
self.obj, ABCSeries # type: ignore[attr-defined]
):
# pandas\core\base.py:194: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
return self.obj # type: ignore[attr-defined]
else:
# pandas\core\base.py:204: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
return self.obj[self._selection] # type: ignore[attr-defined]
@cache_readonly
def ndim(self) -> int:
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
# pandas\core\base.py:209: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
if self._selection is not None and isinstance(
self.obj, ABCDataFrame # type: ignore[attr-defined]
):
# pandas\core\base.py:217: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
return self.obj.reindex( # type: ignore[attr-defined]
columns=self._selection_list
)
# pandas\core\base.py:207: error: "SelectionMixin" has no attribute
# "exclusions" [attr-defined]
if len(self.exclusions) > 0: # type: ignore[attr-defined]
# pandas\core\base.py:208: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
# pandas\core\base.py:208: error: "SelectionMixin" has no attribute
# "exclusions" [attr-defined]
return self.obj.drop(self.exclusions, axis=1) # type: ignore[attr-defined]
else:
# pandas\core\base.py:210: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
return self.obj # type: ignore[attr-defined]
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(f"Column(s) {self._selection} already selected")
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
# pandas\core\base.py:217: error: "SelectionMixin" has no attribute
# "obj" [attr-defined]
if len(
self.obj.columns.intersection(key) # type: ignore[attr-defined]
) != len(key):
# pandas\core\base.py:218: error: "SelectionMixin" has no
# attribute "obj" [attr-defined]
bad_keys = list(
set(key).difference(self.obj.columns) # type: ignore[attr-defined]
)
raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : str / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, str)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
if hasattr(self, "__array__"):
# in particular exclude Window
return f(self, *args, **kwargs)
raise AttributeError(
f"'{arg}' is not a valid function for '{type(self).__name__}' object"
)
def _get_cython_func(self, arg: Callable) -> Optional[str]:
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class IndexOpsMixin(OpsMixin):
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_hidden_attrs: FrozenSet[str] = frozenset(
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
@property
def dtype(self) -> DtypeObj:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
@property
def _values(self) -> Union[ExtensionArray, np.ndarray]:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
def transpose(self: _T, *args, **kwargs) -> _T:
"""
Return the transpose, which is by definition self.
Returns
-------
%(klass)s
"""
nv.validate_transpose(args, kwargs)
return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
def __len__(self) -> int:
# We need this defined here for mypy
raise AbstractMethodError(self)
@property
def ndim(self) -> int:
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of %(klass)s.
Raises
------
ValueError
If the data is not length-1.
"""
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
@property
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def size(self) -> int:
"""
Return the number of elements in the underlying data.
"""
return len(self._values)
@property
def array(self) -> ExtensionArray:
"""
The ExtensionArray of the data backing this Series or Index.
.. versionadded:: 0.24.0
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
``.array`` differs ``.values`` which may require converting the
data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
string StringArray
boolean BooleanArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
"""
raise AbstractMethodError(self)
def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
"""
A NumPy ndarray representing the values in this Series or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
.. versionadded:: 1.0.0
**kwargs
Additional keywords passed through to the ``to_numpy`` method
of the underlying array (for extension arrays).
.. versionadded:: 1.0.0
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if is_extension_array_dtype(self.dtype):
# pandas\core\base.py:837: error: Too many arguments for "to_numpy"
# of "ExtensionArray" [call-arg]
return self.array.to_numpy( # type: ignore[call-arg]
dtype, copy=copy, na_value=na_value, **kwargs
)
elif kwargs:
bad_keys = list(kwargs.keys())[0]
raise TypeError(
f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
)
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
@property
def empty(self) -> bool:
return not self.size
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
@doc(op="max", oppose="min", value="largest")
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""
Return int position of the {value} value in the Series.
If the {op}imum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {{None}}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the {op}imum value.
See Also
--------
Series.arg{op} : Return position of the {op}imum value.
Series.arg{oppose} : Return position of the {oppose}imum value.
numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
"""
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmax(self._values, skipna=skipna)
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmin(self._values, skipna=skipna)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
"""
if not isinstance(self._values, np.ndarray):
# check for ndarray instead of dtype to catch DTA/TDA
return list(self._values)
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if not isinstance(self._values, np.ndarray):
# Check type instead of dtype to catch DTA/TDA
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
def isna(self):
return isna(self._values)
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
"""
Perform the reduction type operation if we can.
"""
func = getattr(self, name, None)
if func is None:
raise TypeError(
f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
def _map_values(self, mapper, na_action=None):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if is_dict_like(mapper):
if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
# The return value of mapping with an empty mapper is
# expected to be pd.Series(np.nan, ...). As np.nan is
# of dtype float64 the return value of this method should
# be float64 as well
mapper = create_series_with_explicit_dtype(
mapper, dtype_if_empty=np.float64
)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_categorical_dtype(self.dtype):
# use the built in categorical series mapper which saves
# time by mapping the categories instead of all values
# pandas\core\base.py:893: error: Incompatible types in
# assignment (expression has type "Categorical", variable has
# type "IndexOpsMixin") [assignment]
self = cast("Categorical", self) # type: ignore[assignment]
# pandas\core\base.py:894: error: Item "ExtensionArray" of
# "Union[ExtensionArray, Any]" has no attribute "map"
# [union-attr]
return self._values.map(mapper) # type: ignore[union-attr]
values = self._values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
# GH#23179 some EAs do not have `map`
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
# pandas\core\base.py:1142: error: "IndexOpsMixin" has no attribute
# "astype" [attr-defined]
values = self.astype(object)._values # type: ignore[attr-defined]
if na_action == "ignore":
def map_f(values, f):
return lib.map_infer_mask(values, f, isna(values).view(np.uint8))
elif na_action is None:
map_f = lib.map_infer
else:
msg = (
"na_action must either be 'ignore' or None, "
f"{na_action} was passed"
)
raise ValueError(msg)
# mapper is a function
new_values = map_f(values, mapper)
return new_values
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
DataFrame.value_counts: Equivalent method on DataFrames.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
2.0 1
4.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
2.0 0.2
4.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(0.996, 2.0] 2
(2.0, 3.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
2.0 1
NaN 1
4.0 1
1.0 1
dtype: int64
"""
result = value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
return result
def unique(self):
values = self._values
if not isinstance(values, np.ndarray):
result = values.unique()
if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
# GH#31182 Series._values returns EA, unpack for backward-compat
if getattr(self.dtype, "tz", None) is None:
result = np.asarray(result)
else:
result = unique1d(values)
return result
def nunique(self, dropna: bool = True) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
obj = remove_na_arraylike(self) if dropna else self
return len(obj.unique())
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
@property
def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are
monotonic_increasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic
@property
def is_monotonic_increasing(self) -> bool:
"""
Alias for is_monotonic.
"""
# mypy complains if we alias directly
return self.is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return boolean if values in the object are
monotonic_decreasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values.
Parameters
----------
deep : bool, default False
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
"""
if hasattr(self.array, "memory_usage"):
# pandas\core\base.py:1379: error: "ExtensionArray" has no
# attribute "memory_usage" [attr-defined]
return self.array.memory_usage(deep=deep) # type: ignore[attr-defined]
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self._values)
return v
@doc(
algorithms.factorize,
values="",
order="",
size_hint="",
sort=textwrap.dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
)
def factorize(self, sort: bool = False, na_sentinel: Optional[int] = -1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted {klass} `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The {klass} *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {{'left', 'right'}}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
.. versionchanged:: 0.24.0
If `value` is a scalar, an int is now always returned.
            Previously, scalar inputs returned a 1-item array for
:class:`Series` and :class:`Categorical`.
See Also
--------
sort_values : Sort by the values along either axis.
numpy.searchsorted : Similar method from NumPy.
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
>>> ser.searchsorted(4)
3
>>> ser.searchsorted([0, 4])
array([0, 3])
>>> ser.searchsorted([1, 3], side='left')
array([0, 2])
>>> ser.searchsorted([1, 3], side='right')
array([1, 3])
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
>>> ser
0 2000-03-11
1 2000-03-12
2 2000-03-13
dtype: datetime64[ns]
>>> ser.searchsorted('3/14/2000')
3
>>> ser = pd.Categorical(
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
... )
>>> ser
['apple', 'bread', 'bread', 'cheese', 'milk']
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
>>> ser.searchsorted('bread')
1
>>> ser.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> ser = pd.Series([2, 1, 3])
>>> ser
0 2
1 1
2 3
dtype: int64
>>> ser.searchsorted(1) # doctest: +SKIP
0 # wrong result, correct would be 1
"""
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
duplicated = self.duplicated(keep=keep)
# pandas\core\base.py:1507: error: Value of type "IndexOpsMixin" is not
# indexable [index]
result = self[np.logical_not(duplicated)] # type: ignore[index]
return result
def duplicated(self, keep="first"):
return duplicated(self._values, keep=keep)
| 31.677419 | 87 | 0.559984 |
12c3cc5f8f023c8c99fc1f82d99e16cb3a10ae6c | 505 | py | Python | VENV/lib/python3.6/site-packages/PyInstaller/loader/rthooks/pyi_rth_gi.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 3 | 2018-11-27T06:30:23.000Z | 2021-05-30T15:56:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/loader/rthooks/pyi_rth_gi.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2018-11-15T02:00:31.000Z | 2021-12-06T02:20:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/loader/rthooks/pyi_rth_gi.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2020-11-06T18:46:35.000Z | 2020-11-06T18:46:35.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2015-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import sys
os.environ['GI_TYPELIB_PATH'] = os.path.join(sys._MEIPASS, 'gi_typelibs')
| 36.071429 | 78 | 0.532673 |
06a9ce927bbf34482346da6559a32a390d9f9078 | 24,137 | py | Python | smt/veriT/proof.py | crisperdue/holpy | fe88eb91a8db8386184329e3f51a80d11ecdb316 | [
"BSD-3-Clause"
] | 22 | 2021-06-15T00:01:27.000Z | 2022-03-15T11:22:25.000Z | smt/veriT/proof.py | crisperdue/holpy | fe88eb91a8db8386184329e3f51a80d11ecdb316 | [
"BSD-3-Clause"
] | null | null | null | smt/veriT/proof.py | crisperdue/holpy | fe88eb91a8db8386184329e3f51a80d11ecdb316 | [
"BSD-3-Clause"
] | 2 | 2021-11-30T08:56:03.000Z | 2022-01-24T10:46:39.000Z | """
Basic proof rules in veriT solver.
"""
from smt.veriT.verit_macro import *
from kernel.proofterm import ProofTerm, Thm
from logic import basic, matcher, logic, conv
from kernel import term
import functools
from prover.proofrec import int_th_lemma_1_omega
basic.load_theory("verit")
class Concl(object):
def __init__(self, *tms):
self.tms = tms
def __len__(self):
return len(self.tms)
class Input(object):
def __init__(self, seq_num, concl):
self.seq_num = seq_num
self.concl = concl
class Rule(object):
def __init__(self, seq_num, proof_name, concl, assms=[], args=None):
self.seq_num = seq_num
self.proof_name = str(proof_name)
arity = len(concl)
if arity > 1:
self.concl = Or(*concl.tms)
elif arity == 1:
self.concl = concl.tms[0]
else:
self.concl = term.false
self.arity = arity
self.assms = assms
self.args = args
def __str__(self):
return "%s: %s: %s: %s: %s" % (
self.seq_num,
self.proof_name,
self.concl,
self.assms,
self.args
)
class ProofReconstruction(object):
def __init__(self, steps):
# A list of proof steps.
self.steps = steps
# map seq number to proof number
self.proof = dict()
def main(self):
step_num = len(self.steps)
for step in self.steps:
try:
self.reconstruct(step)
            except Exception:
self.not_imp(step)
print("%s/%s" % (step.seq_num, step_num))
print("finished")
return self.proof[len(self.steps)]
def reconstruct(self, step):
name = step.proof_name
if name == "input":
self.input_rule(step)
elif name == "true":
self.true_rule(step)
elif name == "false":
self.false_rule(step)
elif name == "tmp_betared":
self.tmp_betared(step)
elif name == "tmp_qnt_tidy":
self.tmp_qnt_tidy(step)
elif name == "or_pos":
self.or_pos(step)
elif name == "or":
self.or_rule(step)
elif name == "resolution" or name == "th_resolution":
self.resolution(step)
elif name == "forall_inst":
self.forall_inst(step)
elif name == "eq_reflexive":
self.eq_reflexive(step)
elif name == "eq_transitive":
self.eq_transitive(step)
elif name == "and":
self.and_rule(step)
elif name == "and_pos":
self.and_pos(step)
elif name == "eq_congruent":
self.eq_congruent(step)
elif name == "equiv1":
self.equiv1(step)
elif name == "equiv2":
self.equiv2(step)
elif name == "not_equiv1":
self.not_equiv1(step)
elif name == "not_equiv2":
self.not_equiv2(step)
elif name == "equiv_pos1":
self.equiv_pos1(step)
elif name == "equiv_pos2":
self.equiv_pos2(step)
elif name == "equiv_neg1":
self.equiv_neg1(step)
elif name == "equiv_neg2":
self.equiv_neg2(step)
elif name == "tmp_distinct_elim":
self.tmp_distinct_elim(step)
elif name == "and_neg":
self.and_neg(step)
elif name == "tmp_LA_pre":
self.tmp_LA_pre(step)
elif name == "not_or":
self.not_or_rule(step)
elif name == "or_neg":
self.or_neg(step)
elif name == "not_and":
self.not_and(step)
elif name == "implies":
self.implies_rule(step)
elif name == "not_implies1":
self.not_implies1(step)
elif name == "not_implies2":
self.not_implies2(step)
elif name == "ite1":
self.ite1(step)
elif name == "ite2":
self.ite2(step)
elif name == "not_ite1":
self.not_ite1(step)
elif name == "not_ite2":
self.not_ite2(step)
elif name == "ite_pos1":
self.ite_pos1(step)
elif name == "ite_pos2":
self.ite_pos2(step)
elif name == "ite_neg1":
self.ite_neg1(step)
elif name == "ite_neg2":
self.ite_neg2(step)
elif name == "la_generic":
self.la_generic(step)
elif name == "la_disequality":
self.la_disequality(step)
elif name == "eq_congruent_pred":
self.eq_congruent_pred(step)
# elif name == "tmp_ite_elim":
# self.tmp_ite_elim(step)
else:
self.not_imp(step)
def not_imp(self, step):
print(step.seq_num, step.proof_name)
if step.proof_name != "tmp_ite_elim":
print(step)
self.proof[step.seq_num] = ProofTerm.sorry(Thm([hyp for i in step.assms for hyp in self.proof[i].hyps], step.concl))
def schematic_rule1(self, th_name, pt):
"""
        The theorem th_name has the form ⊢ A --> B; pt proves ⊢ A; return ⊢ B.
"""
pt_th = ProofTerm.theorem(th_name)
inst = matcher.first_order_match(pt_th.prop.arg1, pt.prop)
pt_th_inst = pt_th.substitution(inst=inst)
return pt_th_inst.implies_elim(pt).on_prop(conv.top_conv(conv.rewr_conv("double_neg")))
def schematic_rule2(self, th_name, tm):
"""
        The theorem th_name states ⊢ A, where A is a tautology; instantiate it to match tm.
"""
pt_th = ProofTerm.theorem(th_name)
inst = matcher.first_order_match(pt_th.prop.arg1, tm.arg1)
return pt_th.substitution(inst=inst).on_prop(conv.top_conv(conv.rewr_conv("double_neg")))
def input_rule(self, step):
"""
        veriT gives each (sub-)expression a name in the input rule,
        so this step is recorded as an assumption rather than a proof.
"""
self.proof[step.seq_num] = ProofTerm.assume(step.concl)
def true_rule(self, step):
"""|- true"""
self.proof[step.seq_num] = ProofTerm.theorem("trueI")
def false_rule(self, step):
"""|- ~false"""
self.proof[step.seq_num] = ProofTerm.theorem("not_false_res")
def tmp_betared(self, step):
"""
        Assume that tmp_betared rules only follow input rules.
        For now, we simply return the conclusion as an assumption.
"""
self.proof[step.seq_num] = ProofTerm.assume(step.concl)
def tmp_qnt_tidy(self, step):
"""
        Normalize quantifiers. The exact mechanism is not known yet,
        so simply return the formula awaiting normalization.
"""
self.proof[step.seq_num] = self.proof[step.assms[0]]
def forall_inst(self, step):
"""⊢ (¬∀x. P (x)) ∨ P(x0)
Bugs: ⊢ ~(!x. A --> B) | ~A | B
"""
T = step.concl.arg1.arg.arg.var_T
if step.args.type == "NAME":
inst_var = term.Var(step.args.value, T)
elif step.args.type == "NUMBER":
if T == term.IntType:
inst_var = term.Int(int(step.args))
elif T == term.RealType:
inst_var = term.Real(float(step.args))
elif T == term.BoolType:
if step.args.value == "true":
inst_var = term.true
elif step.args.value == "false":
inst_var = term.false
else:
raise ValueError(str(step.args))
else:
raise ValueError(str(step.args))
else:
raise NotImplementedError(step.args)
forall_tm = step.concl.arg1.arg
pt_assume = ProofTerm.assume(forall_tm)
pt_inst = pt_assume.forall_elim(inst_var)
pt_implies_hyp = pt_inst.implies_intr(pt_inst.hyps[0]).on_prop(conv.rewr_conv("imp_disj_eq"))
if pt_implies_hyp.prop == step.concl:
self.proof[step.seq_num] = pt_implies_hyp
else: # ⊢ ¬(∀x. A --> B) ∨ ¬A ∨ B
is_implicit_conv = step.concl.arg1.arg.arg.body.is_implies()\
and step.concl.arg.is_disj()
if not is_implicit_conv:
return self.not_imp(step)
pt_final = pt_implies_hyp.on_prop(conv.arg_conv(conv.rewr_conv("imp_disj_eq")))
assert pt_final.prop == step.concl, "%s != %s" % (str(pt_final.prop), str(step.concl))
self.proof[step.seq_num] = pt_final
def or_pos(self, step):
"""⊢ ¬(a_1 ∨ ... ∨ a_n) ∨ a_1 ∨ ... ∨ a_n"""
# self.proof[step.seq_num] = self.schematic_rule2("or_pos", step.concl)
self.proof[step.seq_num] = ProofTerm("or_pos", step.concl)
def or_rule(self, step):
"""
{(or a_1 ... a_n)} --> {a_1 ... a_n}
this rule doesn't have effect in HOL, so return the assms[0]
"""
self.proof[step.seq_num] = self.proof[step.assms[0]]
def resolution(self, step):
"""Given a sequence of proof terms, take resolution on them one by one."""
res_pts = [self.proof[num] for num in step.assms]
pt_0 = self.proof[step.assms[0]]
arity1 = self.steps[step.assms[0]-1].arity
for i in step.assms[1:]:
arity2 = self.steps[i-1].arity
assert self.proof[i].prop == self.steps[i-1].concl, i
            pt_0, arity1 = verit_resolution(pt_0, self.proof[i], arity1, arity2)
if pt_0.prop == step.concl:
self.proof[step.seq_num] = pt_0
else:
concl_disjs = strip_num(step.concl, step.arity)
pt_disjs = strip_num(pt_0.prop, step.arity)
assert set(concl_disjs) == set(pt_disjs)
implies_pt_norm = ProofTerm("imp_disj", term.Implies(pt_0.prop, Or(*concl_disjs)))
self.proof[step.seq_num] = implies_pt_norm.implies_elim(pt_0)
def eq_reflexive(self, step):
"""{(= x x)}"""
self.proof[step.seq_num] = ProofTerm.reflexive(step.concl.lhs)
def eq_transitive(self, step):
"""{(not (= x_1 x_2)) ... (not (= x_{n-1} x_n)) (= x_1 x_n)}"""
self.proof[step.seq_num] = ProofTerm("verit_and_rule", step.concl)
def and_rule(self, step):
"""{(and a_1 ... a_n)} --> {a_i}
a_1 ∧ ... ∧ a_n --> a_i
bug:
¬(m1_2 - m2_2 ≥ 0 ∧ m1_2 - m2_2 ≤ 0) ∨ s1_2 - s2_2 ≥ 4 ∨ s2_2 - s1_2 ≥ 4
¬(0 ≤ m1_2 - m2_2 ∧ m1_2 - m2_2 ≤ 0) ∨ s1_2 - s2_2 ≥ 4 ∨ s2_2 - s1_2 ≥ 4
"""
try:
pt = ProofTerm("imp_conj", term.Implies(self.proof[step.assms[0]].prop, step.concl))
self.proof[step.seq_num] = pt.implies_elim(self.proof[step.assms[0]])
        except Exception:
self.not_imp(step)
def and_pos(self, step):
"""
⊢ ¬(a_1 ∧ ... ∧ a_n) ∨ a_i
"""
pt = ProofTerm("imp_conj", term.Implies(step.concl.arg1.arg, step.concl.arg))
self.proof[step.seq_num] = pt.on_prop(conv.rewr_conv("imp_disj_eq"))
def eq_congruent(self, step):
self.proof[step.seq_num] = ProofTerm("verit_eq_congurent", step.concl)
def equiv1(self, step):
"""a ⟷ b --> ¬a ∨ b """
self.proof[step.seq_num] = self.schematic_rule1("equiv1", self.proof[step.assms[0]])
def equiv2(self, step):
"""a ⟷ b --> a ∨ ¬b """
self.proof[step.seq_num] = self.schematic_rule1("equiv2", self.proof[step.assms[0]])
def not_equiv1(self, step):
"""¬(P ⟷ Q) ⟶ P ∨ Q"""
self.proof[step.seq_num] = self.schematic_rule1("not_equiv1", self.proof[step.assms[0]])
def not_equiv2(self, step):
"""¬(P ⟷ Q) ⟶ ¬P ∨ ¬Q"""
self.proof[step.seq_num] = self.schematic_rule1("not_equiv2", self.proof[step.assms[0]])
def equiv_pos1(self, step):
"""¬(a ⟷ b) ∨ a ∨ ¬b"""
self.proof[step.seq_num] = self.schematic_rule2("equiv_pos1", step.concl)
def equiv_pos2(self, step):
"""¬(a ⟷ b) ∨ a ∨ ¬b"""
self.proof[step.seq_num] = self.schematic_rule2("equiv_pos2", step.concl)
def equiv_neg1(self, step):
"""(a ⟷ b) ∨ ¬a ∨ ¬b"""
self.proof[step.seq_num] = self.schematic_rule2("equiv_neg1", step.concl)
def equiv_neg2(self, step):
"""(a ⟷ b) ∨ a ∨ b"""
self.proof[step.seq_num] = self.schematic_rule2("equiv_neg2", step.concl)
def tmp_distinct_elim(self, step):
"""formula where distinct have been eliminated, which have done in the parsing process."""
self.proof[step.seq_num] = self.proof[step.assms[0]]
def and_neg(self, step):
"""⊢ (a_1 ∧ ... ∧ a_n) ∨ ¬a_1 ∨ ... ∨ ¬a_n"""
self.proof[step.seq_num] = ProofTerm("and_neg", [step.arity, step.concl])
def tmp_LA_pre(self, step):
"""formula with = replaced by conjunction of two inequalities"""
self.proof[step.seq_num] = self.proof[step.assms[0]].on_prop(
conv.top_conv(conv.rewr_conv("tmp_LA_pre_int")),
conv.top_conv(conv.rewr_conv("tmp_LA_pre_real")),
)
def not_or_rule(self, step):
"""¬(a_1 ∨ ... ∨ a_n) --> ¬a_i"""
self.proof[step.seq_num] = ProofTerm("not_or_rule", [step.concl], [self.proof[i] for i in step.assms])
def or_neg(self, step):
"""⊢ (a_1 ∨ ... ∨ a_n) ∨ ¬a_i"""
concl = step.concl
disj, atom = concl.arg1, concl.arg
if atom.is_not():
pt0 = ProofTerm("imp_disj", term.Implies(atom.arg, disj))
else:
pt0 = ProofTerm("imp_disj", term.Implies(term.Not(atom), disj))
pt1 = pt0.on_prop(conv.rewr_conv("imp_disj_eq"), conv.rewr_conv("disj_comm"))
self.proof[step.seq_num] = pt1
def not_and(self, step):
"""⊢ ¬(a_1 ∧ ... ∧ a_n) --> ¬a_1 ∨ ... ∨ ¬a_n"""
pt = ProofTerm("not_and", [step.concl], [self.proof[step.assms[0]]])
self.proof[step.seq_num] = pt
def implies_rule(self, step):
"""{(implies a b)} --> {(not a) b}"""
self.proof[step.seq_num] = self.proof[step.assms[0]].on_prop(conv.rewr_conv("imp_disj_eq"))
def not_implies1(self, step):
"""¬(a --> b) --> a"""
self.proof[step.seq_num] = self.schematic_rule1("not_implies1", self.proof[step.assms[0]])
def not_implies2(self, step):
"""¬(a --> b) --> ¬b"""
self.proof[step.seq_num] = self.schematic_rule1("not_implies2", self.proof[step.assms[0]])
def ite1(self, step):
"""ite a b c --> a ∨ c"""
self.proof[step.seq_num] = self.schematic_rule1("verit_ite1", self.proof[step.assms[0]])
def ite2(self, step):
"""ite a b c --> ¬a ∨ b"""
self.proof[step.seq_num] = self.schematic_rule1("verit_ite2", self.proof[step.assms[0]])
def not_ite1(self, step):
"""¬(ite a b c) --> a ∨ ¬c"""
self.proof[step.seq_num] = self.schematic_rule1("verit_not_ite1", self.proof[step.assms[0]])
def not_ite2(self, step):
"""¬(ite a b c) --> ¬a ∨ ¬b"""
self.proof[step.seq_num] = self.schematic_rule1("verit_not_ite2", self.proof[step.assms[0]])
def la_generic(self, step):
self.proof[step.seq_num] = ProofTerm("la_generic", step.concl)
def eq_congruent_pred(self, step):
"""{(not (= x_1 y_1)) ... (not (= x_n y_n)) (not (p x_1 ... x_n)) (p y_1 ... y_n)}"""
try:
self.proof[step.seq_num] = ProofTerm("verit_eq_congurent_pred", step.concl)
        except Exception:
self.not_imp(step)
def la_disequality(self, step):
"""
        Two situations:
the first is ?a = ?b ∨ ¬(?a ≤ ?b) ∨ ¬(?b ≤ ?a)
the second is ?a = ?b ∨ ¬(?b ≤ ?a) ∨ ¬(?a ≤ ?b)
"""
if step.concl.arg1.lhs.get_type() == term.IntType:
self.proof[step.seq_num] = self.schematic_rule2("la_disequality_int", step.concl)
elif step.concl.arg1.lhs.get_type() == term.RealType:
self.proof[step.seq_num] = self.schematic_rule2("la_disequality_real", step.concl)
else:
raise NotImplementedError
assert self.proof[step.seq_num].prop == step.concl
# class InputRule(Rule):
# """Assertion."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class TrueRule(Rule):
# """⊢ true"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class FalseRule(Rule):
# """⊢ ¬false"""
# def __init__(self, seq_num):
# super.__init__(self, seq_num, params, concl)
# class AndPos(Rule):
# """⊢ ¬(a_1 ∧ ... ∧ a_n) ∨ a_i"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class AndNeg(Rule):
# """⊢ (a_1 ∧ ... ∧ a_n) ∨ ¬a_1 ∨ ... ∨ ¬a_n"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class OrPos(Rule):
# """⊢ ¬(a_1 ∨ ... ∨ a_n) ∨ a_1 ∨ ... ∨ a_n"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class OrNeg(Rule):
# """⊢ (a_1 ∨ ... ∨ a_n) ∨ ¬a_i"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ImpliesPos(Rule):
# """⊢ ¬(a ⟶ b) ∨ ¬a ∨ b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ImpliesNeg1(Rule):
# """⊢ (a ⟶ b) ∨ a"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ImpliesNeg2(Rule):
# """⊢ (a ⟶ b) ∨ ¬b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EquivPos1(Rule):
# """¬(a ⟷ b) ∨ a ∨ ¬b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EquivPos2(Rule):
# """¬(a ⟷ b) ∨ ¬a ∨ b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EquivNeg1(Rule):
# """(a ⟷ b) ∨ ¬a ∨ ¬b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EquivNeg2(Rule):
# """(a ⟷ b) ∨ a ∨ b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ITEPos1(Rule):
# """¬(ite a b c) ∨ a ∨ c"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ITEPos2(Rule):
# """¬(ite a b c) ∨ ¬a ∨ b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ITENeg1(Rule):
# """(ite a b c) ∨ a ∨ ¬c"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ITEPos2(Rule):
# """(ite a b c) ∨ ¬a ∨ ¬b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EqReflexive(Rule):
# """x = x"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EqTransitive(Rule):
# """¬(x_1 = x_2 ∨ x_2 = x_3 ∨ ... ∨ x_{n-1} = x_n) ∨ x_1 = x_n"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EqCongurent(Rule):
# """¬(x_1 = y_1 ∨ ... ∨ x_n = y_n) ∨ f x_1 ... x_n = f y_1 ... y_n"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EqCongurentPred(Rule):
# """¬(x_1 = y_1) ∨ ... ∨ ¬(x_n = y_n) ∨ ¬(p x_1 ... x_n) ∨ (p y_1 ... y_n)"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class EqCongurentGeneral(Rule):
# """¬(x_1 = y_1) ∨ ... ∨ ¬(x_n = y_n) ∨
# ¬(p t_1 ... x_1 ... t_m ... x_n) ∨ (p t_1 ... y_1 ... t_m ... y_n)"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class DistinctElim(Rule):
# """DIST x y ⟷ x ≠ y"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class LaRwEq(Rule):
# """x = y ⟷ x ≤ y ∧ x ≥ y"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class LaGeneric(Rule):
# """Not defined."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class LiaGeneric(Rule):
# """Not defined."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NlaGeneric(Rule):
# """Not defined."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class LaDisequality(Rule):
# """Not defined."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class LaTotality(Rule):
# """t_1 ≤ t_2 ∨ t_2 ≤ t_1"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class LaTautology(Rule):
# """Linear arithmetic tautology without variable."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ForAllInst(Rule):
# """∀x. A x --> A t"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ExistsInst(Rule):
# """A t --> ∃x. A x"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class Resolution(Rule):
# """Resolution."""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class AndRule(Rule):
# """a_1 ∧ ... ∧ a_n --> a_i"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotOrRule(Rule):
# """¬(a_1 ∨ ... ∨ a_n) --> ¬a_i"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class OrRule(Rule):
# """{(or a_1 ... a_n)} --> {a_1 ... a_n}"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotAndRule(Rule):
# """¬(a_1 ∧ ... ∧ a_n) --> ¬a_1 ∨ ... ∨ ¬a_n"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotImplies1(Rule):
# """¬(a --> b) ∨ a"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotImplies2(Rule):
# """¬(a --> b) ∨ ¬b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class Equiv1(Rule):
# """a ⟷ b --> ¬a ∨ b """
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class Equiv2(Rule):
# """a ⟷ b --> a ∨ ¬b """
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotEquiv1(Rule):
# """¬(a ⟷ b) --> a ∨ b """
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotEquiv2(Rule):
# """¬(a ⟷ b) --> ¬a ∨ ¬b """
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ITE1(Rule):
# """ite a b c --> a ∨ c"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class ITE2(Rule):
# """ite a b c --> ¬a ∨ b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotITE1(Rule):
# """¬(ite a b c) --> a ∨ ¬c"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
# class NotITE2(Rule):
# """¬(ite a b c) --> ¬a ∨ ¬b"""
# def __init__(self, seq_num, params, concl):
# super.__init__(self, seq_num, params, concl)
| 35.547865 | 124 | 0.559432 |
b69ab462ddb1421425c67bc79fe24b91964ffe2c | 1,813 | py | Python | tests/components/cloud/test_system_health.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 2 | 2021-05-19T19:05:08.000Z | 2021-06-06T06:51:05.000Z | tests/components/cloud/test_system_health.py | jrhubott/core | 89fe232643134f283c041537e9f6841f47dc1c5e | [
"Apache-2.0"
] | 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | tests/components/cloud/test_system_health.py | jrhubott/core | 89fe232643134f283c041537e9f6841f47dc1c5e | [
"Apache-2.0"
] | 2 | 2020-12-25T16:31:22.000Z | 2020-12-30T20:53:56.000Z | """Test cloud system health."""
import asyncio
from aiohttp import ClientError
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import Mock
from tests.common import get_system_health_info
async def test_cloud_system_health(hass, aioclient_mock):
"""Test cloud system health."""
aioclient_mock.get("https://cloud.bla.com/status", text="")
aioclient_mock.get("https://cert-server", text="")
aioclient_mock.get(
"https://cognito-idp.us-east-1.amazonaws.com/AAAA/.well-known/jwks.json",
exc=ClientError,
)
hass.config.components.add("cloud")
assert await async_setup_component(hass, "system_health", {})
now = utcnow()
hass.data["cloud"] = Mock(
region="us-east-1",
user_pool_id="AAAA",
relayer="wss://cloud.bla.com/websocket_api",
acme_directory_server="https://cert-server",
is_logged_in=True,
remote=Mock(is_connected=False),
expiration_date=now,
is_connected=True,
client=Mock(
prefs=Mock(
remote_enabled=True,
alexa_enabled=True,
google_enabled=False,
)
),
)
info = await get_system_health_info(hass, "cloud")
for key, val in info.items():
if asyncio.iscoroutine(val):
info[key] = await val
assert info == {
"logged_in": True,
"subscription_expiration": now,
"relayer_connected": True,
"remote_enabled": True,
"remote_connected": False,
"alexa_enabled": True,
"google_enabled": False,
"can_reach_cert_server": "ok",
"can_reach_cloud_auth": {"type": "failed", "error": "unreachable"},
"can_reach_cloud": "ok",
}
| 29.721311 | 81 | 0.626034 |
25bc5b43a85ec91bc24bd46aed0fe7e306647ba3 | 155,866 | py | Python | salt/modules/virt.py | joechainz/salt | ae6ca7e4de5d8b214f95e2df98a8346c55186333 | [
"Apache-2.0"
] | 5 | 2017-02-07T05:39:29.000Z | 2020-06-13T02:07:33.000Z | salt/modules/virt.py | joechainz/salt | ae6ca7e4de5d8b214f95e2df98a8346c55186333 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/modules/virt.py | joechainz/salt | ae6ca7e4de5d8b214f95e2df98a8346c55186333 | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # -*- coding: utf-8 -*-
'''
Work with virtual machines managed by libvirt
:depends: libvirt Python module
Connection
==========
The connection to the virtualization host can be either setup in the minion configuration,
pillar data or overridden for each individual call.
By default, the libvirt connection URI will be guessed: the first available libvirt
hypervisor driver will be used. This can be overridden like this:
.. code-block:: yaml
virt:
connection:
uri: lxc:///
If the connection requires an authentication like for ESXi, this can be defined in the
minion pillar data like this:
.. code-block:: yaml
virt:
connection:
uri: esx://10.1.1.101/?no_verify=1&auto_answer=1
auth:
username: user
password: secret
Connecting with SSH protocol
----------------------------
Libvirt can connect to remote hosts using SSH using one of the ``ssh``, ``libssh`` and
``libssh2`` transports. Note that ``libssh2`` is likely to fail as it doesn't read the
``known_hosts`` file. Libvirt may also have been built without ``libssh`` or ``libssh2``
support.
To use the SSH transport, set up an SSH agent on the minion with a key authorized on
the remote libvirt machine.
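
For instance, with the agent running and the key authorized for ``remoteuser`` on
``remotehost`` (both placeholder names), the domains on the remote host can be
listed like this:

.. code-block:: bash

    salt 'hypervisor' virt.list_domains connection=qemu+ssh://remoteuser@remotehost/system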
Per call connection setup
-------------------------
.. versionadded:: 2019.2.0
All the calls requiring the libvirt connection configuration as mentioned above can
override this configuration using ``connection``, ``username`` and ``password`` parameters.
This means that the following will list the domains on the local LXC libvirt driver,
whatever the ``virt:connection`` is.
.. code-block:: bash
salt 'hypervisor' virt.list_domains connection=lxc:///
The calls not using the libvirt connection setup are:
- ``seed_non_shared_migrate``
- ``virt_type``
- ``is_*hyper``
- all migration functions
- `libvirt ESX URI format <http://libvirt.org/drvesx.html#uriformat>`_
- `libvirt URI format <http://libvirt.org/uri.html#URI_config>`_
- `libvirt authentication configuration <http://libvirt.org/auth.html#Auth_client_config>`_
'''
# Special Thanks to Michael Dehann, many of the concepts, and a few structures
# of his in the virt func module have been used
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import os
import re
import sys
import shutil
import subprocess
import string # pylint: disable=deprecated-module
import logging
import time
import datetime
from xml.etree import ElementTree
# Import third party libs
import jinja2
import jinja2.exceptions
try:
import libvirt # pylint: disable=import-error
from libvirt import libvirtError
HAS_LIBVIRT = True
except ImportError:
HAS_LIBVIRT = False
# Import salt libs
import salt.utils.files
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.validate.net
import salt.utils.versions
import salt.utils.yaml
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# Set up template environment
JINJA = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'virt')
)
)
VIRT_STATE_NAME_MAP = {0: 'running',
1: 'running',
2: 'running',
3: 'paused',
4: 'shutdown',
5: 'shutdown',
6: 'crashed'}
def __virtual__():
if not HAS_LIBVIRT:
return (False, 'Unable to locate or import python libvirt library.')
return 'virt'
def __get_request_auth(username, password):
'''
Get libvirt.openAuth callback with username, password values overriding
the configuration ones.
'''
# pylint: disable=unused-argument
def __request_auth(credentials, user_data):
'''Callback method passed to libvirt.openAuth().
The credentials argument is a list of credentials that libvirt
would like to request. An element of this list is a list containing
5 items (4 inputs, 1 output):
- the credential type, e.g. libvirt.VIR_CRED_AUTHNAME
- a prompt to be displayed to the user
- a challenge
- a default result for the request
- a place to store the actual result for the request
The user_data argument is currently not set in the openAuth call.
'''
for credential in credentials:
if credential[0] == libvirt.VIR_CRED_AUTHNAME:
credential[4] = username if username else \
__salt__['config.get']('virt:connection:auth:username', credential[3])
elif credential[0] == libvirt.VIR_CRED_NOECHOPROMPT:
credential[4] = password if password else \
__salt__['config.get']('virt:connection:auth:password', credential[3])
else:
log.info('Unhandled credential type: %s', credential[0])
        return 0
    return __request_auth
def __get_conn(**kwargs):
'''
Detects what type of dom this node is and attempts to connect to the
correct hypervisor via libvirt.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
'''
# This has only been tested on kvm and xen, it needs to be expanded to
# support all vm layers supported by libvirt
username = kwargs.get('username', None)
password = kwargs.get('password', None)
conn_str = kwargs.get('connection', None)
if not conn_str:
conn_str = __salt__['config.get']('virt.connect', None)
if conn_str is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'virt.connect\' configuration property has been deprecated in favor '
'of \'virt:connection:uri\'. \'virt.connect\' will stop being used in '
'{version}.'
)
else:
conn_str = __salt__['config.get']('libvirt:connection', None)
if conn_str is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'libvirt.connection\' configuration property has been deprecated in favor '
'of \'virt:connection:uri\'. \'libvirt.connection\' will stop being used in '
'{version}.'
)
conn_str = __salt__['config.get']('virt:connection:uri', conn_str)
hypervisor = __salt__['config.get']('libvirt:hypervisor', None)
if hypervisor is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'libvirt.hypervisor\' configuration property has been deprecated. '
'Rather use the \'virt:connection:uri\' to properly define the libvirt '
'URI or alias of the host to connect to. \'libvirt:hypervisor\' will '
'stop being used in {version}.'
)
if hypervisor == 'esxi' and conn_str is None:
salt.utils.versions.warn_until(
'Sodium',
'esxi hypervisor default with no default connection URI detected, '
'please set \'virt:connection:uri\' to \'esx\' for keep the legacy '
'behavior. Will default to libvirt guess once \'libvirt:hypervisor\' '
'configuration is removed in {version}.'
)
conn_str = 'esx'
try:
auth_types = [libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_EXTERNAL]
conn = libvirt.openAuth(conn_str, [auth_types, __get_request_auth(username, password), None], 0)
except Exception:
raise CommandExecutionError(
'Sorry, {0} failed to open a connection to the hypervisor '
'software at {1}'.format(
__grains__['fqdn'],
conn_str
)
)
return conn
def _get_domain(conn, *vms, **kwargs):
'''
Return a domain object for the named VM or return domain object for all VMs.
    :param conn: libvirt connection object
:param vms: list of domain names to look for
:param iterable: True to return an array in all cases
'''
ret = list()
lookup_vms = list()
all_vms = []
if kwargs.get('active', True):
for id_ in conn.listDomainsID():
all_vms.append(conn.lookupByID(id_).name())
if kwargs.get('inactive', True):
for id_ in conn.listDefinedDomains():
all_vms.append(id_)
if not all_vms:
raise CommandExecutionError('No virtual machines found.')
if vms:
for name in vms:
if name not in all_vms:
raise CommandExecutionError('The VM "{name}" is not present'.format(name=name))
else:
lookup_vms.append(name)
else:
lookup_vms = list(all_vms)
for name in lookup_vms:
ret.append(conn.lookupByName(name))
return len(ret) == 1 and not kwargs.get('iterable') and ret[0] or ret
def _parse_qemu_img_info(info):
'''
Parse qemu-img info JSON output into disk infos dictionary
'''
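    # The input is expected to be the JSON printed by
    # ``qemu-img info --output json --backing-chain``, roughly of the shape
    # (abridged, field values illustrative):
    #   [{"filename": "disk.qcow2", "format": "qcow2",
    #     "actual-size": 217088, "virtual-size": 8589934592,
    #     "cluster-size": 65536, "snapshots": [...]}, ...]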
raw_infos = salt.utils.json.loads(info)
disks = []
for disk_infos in raw_infos:
disk = {
'file': disk_infos['filename'],
'file format': disk_infos['format'],
'disk size': disk_infos['actual-size'],
'virtual size': disk_infos['virtual-size'],
'cluster size': disk_infos['cluster-size'] if 'cluster-size' in disk_infos else None,
}
if 'full-backing-filename' in disk_infos.keys():
disk['backing file'] = format(disk_infos['full-backing-filename'])
if 'snapshots' in disk_infos.keys():
disk['snapshots'] = [
{
'id': snapshot['id'],
'tag': snapshot['name'],
'vmsize': snapshot['vm-state-size'],
'date': datetime.datetime.fromtimestamp(
float('{}.{}'.format(snapshot['date-sec'], snapshot['date-nsec']))).isoformat(),
'vmclock': datetime.datetime.utcfromtimestamp(
float('{}.{}'.format(snapshot['vm-clock-sec'],
snapshot['vm-clock-nsec']))).time().isoformat()
} for snapshot in disk_infos['snapshots']]
disks.append(disk)
for disk in disks:
if 'backing file' in disk.keys():
candidates = [info for info in disks if 'file' in info.keys() and info['file'] == disk['backing file']]
if candidates:
disk['backing file'] = candidates[0]
return disks[0]
def _get_uuid(dom):
'''
Return a uuid from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_uuid <domain>
'''
return ElementTree.fromstring(get_xml(dom)).find('uuid').text
def _get_on_poweroff(dom):
'''
Return `on_poweroff` setting from the named vm
CLI Example:
.. code-block:: bash
        salt '*' virt.get_on_poweroff <domain>
'''
node = ElementTree.fromstring(get_xml(dom)).find('on_poweroff')
return node.text if node is not None else ''
def _get_on_reboot(dom):
'''
Return `on_reboot` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_reboot <domain>
'''
node = ElementTree.fromstring(get_xml(dom)).find('on_reboot')
return node.text if node is not None else ''
def _get_on_crash(dom):
'''
Return `on_crash` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_crash <domain>
'''
node = ElementTree.fromstring(get_xml(dom)).find('on_crash')
return node.text if node is not None else ''
def _get_nics(dom):
'''
Get domain network interfaces from a libvirt domain object.
'''
nics = {}
doc = ElementTree.fromstring(dom.XMLDesc(0))
for iface_node in doc.findall('devices/interface'):
nic = {}
nic['type'] = iface_node.get('type')
for v_node in iface_node:
if v_node.tag == 'mac':
nic['mac'] = v_node.get('address')
if v_node.tag == 'model':
nic['model'] = v_node.get('type')
if v_node.tag == 'target':
nic['target'] = v_node.get('dev')
# driver, source, and match can all have optional attributes
if re.match('(driver|source|address)', v_node.tag):
temp = {}
for key, value in six.iteritems(v_node.attrib):
temp[key] = value
nic[v_node.tag] = temp
# virtualport needs to be handled separately, to pick up the
# type attribute of the virtualport itself
if v_node.tag == 'virtualport':
temp = {}
temp['type'] = v_node.get('type')
for key, value in six.iteritems(v_node.attrib):
temp[key] = value
nic['virtualport'] = temp
if 'mac' not in nic:
continue
nics[nic['mac']] = nic
return nics
def _get_graphics(dom):
'''
Get domain graphics from a libvirt domain object.
'''
out = {'autoport': 'None',
'keymap': 'None',
'listen': 'None',
'port': 'None',
'type': 'None'}
doc = ElementTree.fromstring(dom.XMLDesc(0))
for g_node in doc.findall('devices/graphics'):
for key, value in six.iteritems(g_node.attrib):
out[key] = value
return out
def _get_disks(dom):
'''
Get domain disks from a libvirt domain object.
'''
disks = {}
doc = ElementTree.fromstring(dom.XMLDesc(0))
for elem in doc.findall('devices/disk'):
source = elem.find('source')
if source is None:
continue
target = elem.find('target')
if target is None:
continue
if 'dev' in target.attrib:
qemu_target = source.get('file', '')
if not qemu_target:
qemu_target = source.get('dev', '')
if not qemu_target and 'protocol' in source.attrib and 'name' in source.attrib: # for rbd network
qemu_target = '{0}:{1}'.format(
source.get('protocol'),
source.get('name'))
if not qemu_target:
continue
disk = {'file': qemu_target, 'type': elem.get('device')}
driver = elem.find('driver')
if driver is not None and driver.get('type') == 'qcow2':
try:
stdout = subprocess.Popen(
['qemu-img', 'info', '--output', 'json', '--backing-chain', disk['file']],
shell=False,
stdout=subprocess.PIPE).communicate()[0]
qemu_output = salt.utils.stringutils.to_str(stdout)
output = _parse_qemu_img_info(qemu_output)
disk.update(output)
except TypeError:
disk.update({'file': 'Does not exist'})
disks[target.get('dev')] = disk
return disks
def _libvirt_creds():
'''
Returns the user and group that the disk images should be owned by
'''
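    # The matched qemu.conf lines look like ``user = "qemu"`` and
    # ``group = "kvm"``; the value between the double quotes is extracted.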
g_cmd = 'grep ^\\s*group /etc/libvirt/qemu.conf'
u_cmd = 'grep ^\\s*user /etc/libvirt/qemu.conf'
try:
stdout = subprocess.Popen(g_cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
group = salt.utils.stringutils.to_str(stdout).split('"')[1]
except IndexError:
group = 'root'
try:
stdout = subprocess.Popen(u_cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
user = salt.utils.stringutils.to_str(stdout).split('"')[1]
except IndexError:
user = 'root'
return {'user': user, 'group': group}
def _get_migrate_command():
'''
Returns the command shared by the different migration types
'''
tunnel = __salt__['config.option']('virt.tunnel')
if tunnel:
salt.utils.versions.warn_until(
'Sodium',
'\'virt.tunnel\' has been deprecated in favor of '
'\'virt:tunnel\'. \'virt.tunnel\' will stop '
'being used in {version}.')
else:
tunnel = __salt__['config.get']('virt:tunnel')
if tunnel:
return ('virsh migrate --p2p --tunnelled --live --persistent '
'--undefinesource ')
return 'virsh migrate --live --persistent --undefinesource '
def _get_target(target, ssh):
'''
    Compute the libvirt URI for the target migration host.
'''
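    # For example, _get_target('host2', True) returns ' qemu+ssh://host2/system'
    # (with a leading space, since it is appended to the migrate command).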
proto = 'qemu'
if ssh:
proto += '+ssh'
return ' {0}://{1}/{2}'.format(proto, target, 'system')
def _gen_xml(name,
cpu,
mem,
diskp,
nicp,
hypervisor,
os_type,
arch,
graphics=None,
**kwargs):
'''
Generate the XML string to define a libvirt VM
'''
    mem = int(mem) * 1024  # mem is given in MiB; libvirt expects KiB
context = {
'hypervisor': hypervisor,
'name': name,
'cpu': six.text_type(cpu),
'mem': six.text_type(mem),
}
if hypervisor in ['qemu', 'kvm']:
context['controller_model'] = False
elif hypervisor == 'vmware':
# TODO: make bus and model parameterized, this works for 64-bit Linux
context['controller_model'] = 'lsilogic'
# By default, set the graphics to listen to all addresses
if graphics:
if 'listen' not in graphics:
graphics['listen'] = {'type': 'address', 'address': '0.0.0.0'}
elif 'address' not in graphics['listen'] and graphics['listen']['type'] == 'address':
graphics['listen']['address'] = '0.0.0.0'
# Graphics of type 'none' means no graphics device at all
if graphics.get('type', 'none') == 'none':
graphics = None
context['graphics'] = graphics
if 'boot_dev' in kwargs:
context['boot_dev'] = []
for dev in kwargs['boot_dev'].split():
context['boot_dev'].append(dev)
else:
context['boot_dev'] = ['hd']
if os_type == 'xen':
# Compute the Xen PV boot method
if __grains__['os_family'] == 'Suse':
context['kernel'] = '/usr/lib/grub2/x86_64-xen/grub.xen'
context['boot_dev'] = []
if 'serial_type' in kwargs:
context['serial_type'] = kwargs['serial_type']
if 'serial_type' in context and context['serial_type'] == 'tcp':
if 'telnet_port' in kwargs:
context['telnet_port'] = kwargs['telnet_port']
else:
context['telnet_port'] = 23023 # FIXME: use random unused port
if 'serial_type' in context:
if 'console' in kwargs:
context['console'] = kwargs['console']
else:
context['console'] = True
context['disks'] = []
disk_bus_map = {'virtio': 'vd', 'xen': 'xvd', 'fdc': 'fd', 'ide': 'hd'}
for i, disk in enumerate(diskp):
prefix = disk_bus_map.get(disk['model'], 'sd')
disk_context = {
'device': disk.get('device', 'disk'),
'target_dev': '{0}{1}'.format(prefix, string.ascii_lowercase[i]),
'disk_bus': disk['model'],
'type': disk['format'],
'index': six.text_type(i),
}
        if 'source_file' in disk and disk['source_file']:
disk_context['source_file'] = disk['source_file']
if hypervisor in ['qemu', 'kvm', 'bhyve', 'xen']:
disk_context['address'] = False
disk_context['driver'] = True
elif hypervisor in ['esxi', 'vmware']:
disk_context['address'] = True
disk_context['driver'] = False
context['disks'].append(disk_context)
context['nics'] = nicp
context['os_type'] = os_type
context['arch'] = arch
fn_ = 'libvirt_domain.jinja'
try:
template = JINJA.get_template(fn_)
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template %s', fn_)
return ''
return template.render(**context)
def _gen_vol_xml(vmname,
diskname,
disktype,
size,
pool):
'''
Generate the XML string to define a libvirt storage volume
'''
size = int(size) * 1024 # MB
context = {
'name': vmname,
'filename': '{0}.{1}'.format(diskname, disktype),
'volname': diskname,
'disktype': disktype,
'size': six.text_type(size),
'pool': pool,
}
fn_ = 'libvirt_volume.jinja'
try:
template = JINJA.get_template(fn_)
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template %s', fn_)
return ''
return template.render(**context)
def _gen_net_xml(name,
bridge,
forward,
vport,
tag=None):
'''
Generate the XML string to define a libvirt network
'''
context = {
'name': name,
'bridge': bridge,
'forward': forward,
'vport': vport,
'tag': tag,
}
fn_ = 'libvirt_network.jinja'
try:
template = JINJA.get_template(fn_)
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template %s', fn_)
return ''
return template.render(**context)
def _gen_pool_xml(name,
ptype,
target=None,
permissions=None,
source_devices=None,
source_dir=None,
source_adapter=None,
source_hosts=None,
source_auth=None,
source_name=None,
source_format=None):
'''
Generate the XML string to define a libvirt storage pool
'''
hosts = [host.split(':') for host in source_hosts or []]
context = {
'name': name,
'ptype': ptype,
'target': {'path': target, 'permissions': permissions},
'source': {
'devices': source_devices or [],
'dir': source_dir,
'adapter': source_adapter,
'hosts': [{'name': host[0], 'port': host[1] if len(host) > 1 else None} for host in hosts],
'auth': source_auth,
'name': source_name,
'format': source_format
}
}
fn_ = 'libvirt_pool.jinja'
try:
template = JINJA.get_template(fn_)
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template %s', fn_)
return ''
return template.render(**context)
def _get_images_dir():
'''
Extract the images dir from the configuration. First attempts to
find legacy virt.images, then tries virt:images.
'''
img_dir = __salt__['config.option']('virt.images')
if img_dir:
salt.utils.versions.warn_until(
'Sodium',
'\'virt.images\' has been deprecated in favor of '
'\'virt:images\'. \'virt.images\' will stop '
'being used in {version}.')
else:
img_dir = __salt__['config.get']('virt:images')
log.debug('Image directory from config option `virt:images`'
' is %s', img_dir)
return img_dir
def _qemu_image_create(disk, create_overlay=False, saltenv='base'):
'''
Create the image file using specified disk_size or/and disk_image
Return path to the created image file
'''
disk_size = disk.get('size', None)
disk_image = disk.get('image', None)
if not disk_size and not disk_image:
raise CommandExecutionError(
'Unable to create new disk {0}, please specify'
' disk size and/or disk image argument'
.format(disk['filename'])
)
img_dest = disk['source_file']
log.debug('Image destination will be %s', img_dest)
img_dir = os.path.dirname(img_dest)
log.debug('Image destination directory is %s', img_dir)
if not os.path.exists(img_dir):
os.makedirs(img_dir)
if disk_image:
log.debug('Create disk from specified image %s', disk_image)
sfn = __salt__['cp.cache_file'](disk_image, saltenv)
qcow2 = False
if salt.utils.path.which('qemu-img'):
res = __salt__['cmd.run']('qemu-img info "{}"'.format(sfn))
imageinfo = salt.utils.yaml.safe_load(res)
qcow2 = imageinfo['file format'] == 'qcow2'
try:
if create_overlay and qcow2:
log.info('Cloning qcow2 image %s using copy on write', sfn)
__salt__['cmd.run'](
'qemu-img create -f qcow2 -o backing_file="{0}" "{1}"'
.format(sfn, img_dest).split())
else:
log.debug('Copying %s to %s', sfn, img_dest)
salt.utils.files.copyfile(sfn, img_dest)
mask = salt.utils.files.get_umask()
if disk_size and qcow2:
log.debug('Resize qcow2 image to %sM', disk_size)
__salt__['cmd.run'](
'qemu-img resize "{0}" {1}M'
.format(img_dest, disk_size)
)
log.debug('Apply umask and remove exec bit')
mode = (0o0777 ^ mask) & 0o0666
os.chmod(img_dest, mode)
except (IOError, OSError) as err:
raise CommandExecutionError(
'Problem while copying image. {0} - {1}'
.format(disk_image, err)
)
else:
# Create empty disk
try:
mask = salt.utils.files.get_umask()
if disk_size:
log.debug('Create empty image with size %sM', disk_size)
__salt__['cmd.run'](
'qemu-img create -f {0} "{1}" {2}M'
.format(disk.get('format', 'qcow2'), img_dest, disk_size)
)
else:
raise CommandExecutionError(
'Unable to create new disk {0},'
' please specify <size> argument'
.format(img_dest)
)
log.debug('Apply umask and remove exec bit')
mode = (0o0777 ^ mask) & 0o0666
os.chmod(img_dest, mode)
except (IOError, OSError) as err:
raise CommandExecutionError(
'Problem while creating volume {0} - {1}'
.format(img_dest, err)
)
return img_dest
def _disk_profile(profile, hypervisor, disks=None, vm_name=None, image=None, pool=None, **kwargs):
'''
Gather the disk profile from the config or apply the default based
on the active hypervisor
This is the ``default`` profile for KVM/QEMU, which can be
overridden in the configuration:
.. code-block:: yaml
virt:
disk:
default:
- system:
size: 8192
format: qcow2
model: virtio
Example profile for KVM/QEMU with two disks, first is created
from specified image, the second is empty:
.. code-block:: yaml
virt:
disk:
two_disks:
- system:
size: 8192
format: qcow2
model: virtio
image: http://path/to/image.qcow2
- lvm:
size: 32768
format: qcow2
model: virtio
The ``format`` and ``model`` parameters are optional, and will
default to whatever is best suitable for the active hypervisor.
'''
default = [{'system':
{'size': 8192}}]
if hypervisor == 'vmware':
overlay = {'format': 'vmdk',
'model': 'scsi',
'device': 'disk',
'pool': '[{0}] '.format(pool if pool else '0')}
elif hypervisor in ['qemu', 'kvm']:
overlay = {'format': 'qcow2',
'device': 'disk',
'model': 'virtio'}
elif hypervisor == 'xen':
overlay = {'format': 'qcow2',
'device': 'disk',
'model': 'xen'}
else:
overlay = {}
# Get the disks from the profile
disklist = []
if profile:
disklist = copy.deepcopy(
__salt__['config.get']('virt:disk', {}).get(profile, default))
    # Transform the list to remove one level of dictionary and add the name as a property
disklist = [dict(d, name=name) for disk in disklist for name, d in disk.items()]
# Add the image to the first disk if there is one
if image:
# If image is specified in module arguments, then it will be used
# for the first disk instead of the image from the disk profile
log.debug('%s image from module arguments will be used for disk "%s"'
' instead of %s', image, disklist[0]['name'], disklist[0].get('image', ""))
disklist[0]['image'] = image
# Merge with the user-provided disks definitions
if disks:
for udisk in disks:
if 'name' in udisk:
found = [disk for disk in disklist if udisk['name'] == disk['name']]
if found:
found[0].update(udisk)
else:
disklist.append(udisk)
for disk in disklist:
# Add the missing properties that have defaults
for key, val in six.iteritems(overlay):
if key not in disk:
disk[key] = val
# We may have an already computed source_file (i.e. image not created by our module)
if 'source_file' in disk and disk['source_file']:
disk['filename'] = os.path.basename(disk['source_file'])
elif 'source_file' not in disk:
_fill_disk_filename(vm_name, disk, hypervisor, **kwargs)
return disklist
def _fill_disk_filename(vm_name, disk, hypervisor, **kwargs):
'''
Compute the disk file name and update it in the disk value.
'''
base_dir = disk.get('pool', None)
if hypervisor in ['qemu', 'kvm', 'xen']:
# Compute the base directory from the pool property. We may have either a path
# or a libvirt pool name there.
# If the pool is a known libvirt one with a target path, use it as target path
if not base_dir:
base_dir = _get_images_dir()
else:
if not base_dir.startswith('/'):
# The pool seems not to be a path, lookup for pool infos
infos = pool_info(base_dir, **kwargs)
pool = infos[base_dir] if base_dir in infos else None
if not pool or not pool['target_path'] or pool['target_path'].startswith('/dev'):
raise CommandExecutionError(
'Unable to create new disk {0}, specified pool {1} does not exist '
'or is unsupported'.format(disk['name'], base_dir))
base_dir = pool['target_path']
# Compute the filename and source file properties if possible
if vm_name:
disk['filename'] = '{0}_{1}.{2}'.format(vm_name, disk['name'], disk['format'])
disk['source_file'] = os.path.join(base_dir, disk['filename'])
def _complete_nics(interfaces, hypervisor, dmac=None):
'''
Complete missing data for network interfaces.
'''
vmware_overlay = {'type': 'bridge', 'source': 'DEFAULT', 'model': 'e1000'}
kvm_overlay = {'type': 'bridge', 'source': 'br0', 'model': 'virtio'}
xen_overlay = {'type': 'bridge', 'source': 'br0', 'model': None}
overlays = {
'xen': xen_overlay,
'kvm': kvm_overlay,
'qemu': kvm_overlay,
'vmware': vmware_overlay,
}
def _normalize_net_types(attributes):
'''
Guess which style of definition:
bridge: br0
or
network: net0
or
type: network
source: net0
'''
for type_ in ['bridge', 'network']:
if type_ in attributes:
attributes['type'] = type_
# we want to discard the original key
attributes['source'] = attributes.pop(type_)
attributes['type'] = attributes.get('type', None)
attributes['source'] = attributes.get('source', None)
def _apply_default_overlay(attributes):
'''
Apply the default overlay to attributes
'''
for key, value in six.iteritems(overlays[hypervisor]):
if key not in attributes or not attributes[key]:
attributes[key] = value
def _assign_mac(attributes, hypervisor):
'''
Compute mac address for NIC depending on hypervisor
'''
if dmac is not None:
log.debug('Default MAC address is %s', dmac)
if salt.utils.validate.net.mac(dmac):
attributes['mac'] = dmac
else:
msg = 'Malformed MAC address: {0}'.format(dmac)
raise CommandExecutionError(msg)
else:
if hypervisor in ['qemu', 'kvm']:
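                # 52:54:00 is the locally administered MAC prefix
                # conventionally used for QEMU/KVM guest NICs.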
attributes['mac'] = salt.utils.network.gen_mac(
prefix='52:54:00')
else:
attributes['mac'] = salt.utils.network.gen_mac()
for interface in interfaces:
_normalize_net_types(interface)
if interface.get('mac', None) is None:
_assign_mac(interface, hypervisor)
if hypervisor in overlays:
_apply_default_overlay(interface)
return interfaces
def _nic_profile(profile_name, hypervisor, dmac=None):
'''
Compute NIC data based on profile
'''
default = [{'eth0': {}}]
# support old location
config_data = __salt__['config.option']('virt.nic', {}).get(
profile_name, None
)
if config_data is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'virt.nic\' has been deprecated in favor of \'virt:nic\'. '
'\'virt.nic\' will stop being used in {version}.'
)
else:
config_data = __salt__['config.get']('virt:nic', {}).get(
profile_name, default
)
interfaces = []
# pylint: disable=invalid-name
def append_dict_profile_to_interface_list(profile_dict):
'''
Append dictionary profile data to interfaces list
'''
for interface_name, attributes in six.iteritems(profile_dict):
attributes['name'] = interface_name
interfaces.append(attributes)
# old style dicts (top-level dicts)
#
# virt:
# nic:
# eth0:
# bridge: br0
# eth1:
# network: test_net
if isinstance(config_data, dict):
append_dict_profile_to_interface_list(config_data)
# new style lists (may contain dicts)
#
# virt:
# nic:
# - eth0:
# bridge: br0
# - eth1:
# network: test_net
#
# virt:
# nic:
# - name: eth0
# bridge: br0
# - name: eth1
# network: test_net
elif isinstance(config_data, list):
for interface in config_data:
if isinstance(interface, dict):
if len(interface) == 1:
append_dict_profile_to_interface_list(interface)
else:
interfaces.append(interface)
# dmac can only be used from init()
return _complete_nics(interfaces, hypervisor, dmac=dmac)
def _get_merged_nics(hypervisor, profile, interfaces=None, dmac=None):
'''
    Get network devices from the profile and merge user-defined ones with them.
'''
nicp = _nic_profile(profile, hypervisor, dmac=dmac) if profile else []
log.debug('NIC profile is %s', nicp)
if interfaces:
users_nics = _complete_nics(interfaces, hypervisor)
for unic in users_nics:
found = [nic for nic in nicp if nic['name'] == unic['name']]
if found:
found[0].update(unic)
else:
nicp.append(unic)
log.debug('Merged NICs: %s', nicp)
return nicp
def init(name,
cpu,
mem,
image=None,
nic='default',
interfaces=None,
hypervisor=None,
start=True, # pylint: disable=redefined-outer-name
disk='default',
disks=None,
saltenv='base',
seed=True,
install=True,
pub_key=None,
priv_key=None,
seed_cmd='seed.apply',
enable_vnc=False,
enable_qcow=False,
graphics=None,
os_type=None,
arch=None,
**kwargs):
'''
Initialize a new vm
:param name: name of the virtual machine to create
:param cpu: Number of virtual CPUs to assign to the virtual machine
:param mem: Amount of memory to allocate to the virtual machine in MiB.
:param image: Path to a disk image to use as the first disk (Default: ``None``).
Deprecated in favor of the ``disks`` parameter. To set (or change) the image of a
disk, add the following to the disks definitions:
.. code-block:: python
{
'name': 'name_of_disk_to_change',
'image': '/path/to/the/image'
}
:param nic: NIC profile to use (Default: ``'default'``).
The profile interfaces can be customized / extended with the interfaces parameter.
If set to ``None``, no profile will be used.
:param interfaces:
List of dictionaries providing details on the network interfaces to create.
These data are merged with the ones from the nic profile. The structure of
each dictionary is documented in :ref:`init-nic-def`.
.. versionadded:: 2019.2.0
:param hypervisor: the virtual machine type. By default the value will be computed according
to the virtual host capabilities.
:param start: ``True`` to start the virtual machine after having defined it (Default: ``True``)
:param disk: Disk profile to use (Default: ``'default'``). If set to ``None``, no profile will be used.
:param disks: List of dictionaries providing details on the disk devices to create.
These data are merged with the ones from the disk profile. The structure of
each dictionary is documented in :ref:`init-disk-def`.
.. versionadded:: 2019.2.0
:param saltenv: Fileserver environment (Default: ``'base'``).
See :mod:`cp module for more details <salt.modules.cp>`
:param seed: ``True`` to seed the disk image. Only used when the ``image`` parameter is provided.
(Default: ``True``)
:param install: install salt minion if absent (Default: ``True``)
:param pub_key: public key to seed with (Default: ``None``)
    :param priv_key: private key to seed with (Default: ``None``)
:param seed_cmd: Salt command to execute to seed the image. (Default: ``'seed.apply'``)
:param enable_vnc:
``True`` to setup a vnc display for the VM (Default: ``False``)
Deprecated in favor of the ``graphics`` parameter. Could be replaced with
the following:
.. code-block:: python
graphics={'type': 'vnc'}
.. deprecated:: 2019.2.0
:param graphics:
Dictionary providing details on the graphics device to create. (Default: ``None``)
See :ref:`init-graphics-def` for more details on the possible values.
.. versionadded:: 2019.2.0
:param os_type:
type of virtualization as found in the ``//os/type`` element of the libvirt definition.
The default value is taken from the host capabilities, with a preference for ``hvm``.
.. versionadded:: 2019.2.0
:param arch:
architecture of the virtual machine. The default value is taken from the host capabilities,
        but ``x86_64`` is preferred over ``i686``.
.. versionadded:: 2019.2.0
:param enable_qcow:
``True`` to create a QCOW2 overlay image, rather than copying the image
(Default: ``False``).
Deprecated in favor of ``disks`` parameter. Add the following to the disks
definitions to create an overlay image of a template disk image with an
image set:
.. code-block:: python
{
'name': 'name_of_disk_to_change',
'overlay_image': True
}
.. deprecated:: 2019.2.0
:param pool:
Path of the folder where the image files are located for vmware/esx hypervisors.
Deprecated in favor of ``disks`` parameter. Add the following to the disks
definitions to set the vmware datastore of a disk image:
.. code-block:: python
{
'name': 'name_of_disk_to_change',
'pool': 'mydatastore'
}
        .. deprecated:: Fluorine
:param dmac:
Default MAC address to use for the network interfaces. By default MAC addresses are
automatically generated.
Deprecated in favor of ``interfaces`` parameter. Add the following to the interfaces
definitions to force the mac address of a NIC:
.. code-block:: python
{
'name': 'name_of_nic_to_change',
'mac': 'MY:MA:CC:ADD:RE:SS'
}
.. deprecated:: 2019.2.0
:param config: minion configuration to use when seeding.
See :mod:`seed module for more details <salt.modules.seed>`
:param boot_dev: String of space-separated devices to boot from (Default: ``'hd'``)
:param serial_type: Serial device type. One of ``'pty'``, ``'tcp'`` (Default: ``None``)
:param telnet_port: Telnet port to use for serial device of type ``tcp``.
:param console: ``True`` to add a console device along with serial one (Default: ``True``)
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. _init-nic-def:
.. rubric:: Network Interfaces Definitions
Network interfaces dictionaries can contain the following properties:
name
Name of the network interface. This is only used as a key to merge with the profile data
type
Network type. One of ``'bridge'``, ``'network'``
source
The network source, typically the bridge or network name
mac
The desired mac address, computed if ``None`` (Default: ``None``).
model
The network card model (Default: depends on the hypervisor)
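
    For instance (illustrative values), a bridged interface with a fixed
    MAC address could be described as:

    .. code-block:: python

        [{'name': 'eth0', 'type': 'bridge', 'source': 'br0', 'mac': '52:54:00:11:22:33'}]
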
.. _init-disk-def:
.. rubric:: Disks Definitions
Disk dictionaries can contain the following properties:
name
Name of the disk. This is mostly used in the name of the disk image and as a key to merge
with the profile data.
format
Format of the disk image, like ``'qcow2'``, ``'raw'``, ``'vmdk'``.
(Default: depends on the hypervisor)
size
Disk size in MiB
pool
Path to the folder or name of the pool where disks should be created.
(Default: depends on hypervisor)
model
One of the disk busses allowed by libvirt (Default: depends on hypervisor)
See the libvirt `disk element`_ documentation for the allowed bus types.
image
Path to the image to use for the disk. If no image is provided, an empty disk will be created
(Default: ``None``)
overlay_image
``True`` to create a QCOW2 disk image with ``image`` as backing file. If ``False``
the file pointed to by the ``image`` property will simply be copied. (Default: ``False``)
source_file
Absolute path to the disk image to use. Not to be confused with ``image`` parameter. This
parameter is useful to use disk images that are created outside of this module. Can also
be ``None`` for devices that have no associated image like cdroms.
device
Type of device of the disk. Can be one of 'disk', 'cdrom', 'floppy' or 'lun'.
(Default: ``'disk'``)
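
    For instance (illustrative values), a single 8192 MiB QCOW2 system disk
    could be described as:

    .. code-block:: python

        [{'name': 'system', 'format': 'qcow2', 'size': 8192}]
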
.. _init-graphics-def:
.. rubric:: Graphics Definition
    The graphics dictionary can have the following properties:
type
Graphics type. The possible values are ``none``, ``'spice'``, ``'vnc'`` and other values
allowed as a libvirt graphics type (Default: ``None``)
See the libvirt `graphics element`_ documentation for more details on the possible types.
port
Port to export the graphics on for ``vnc``, ``spice`` and ``rdp`` types.
tls_port
Port to export the graphics over a secured connection for ``spice`` type.
listen
Dictionary defining on what address to listen on for ``vnc``, ``spice`` and ``rdp``.
It has a ``type`` property with ``address`` and ``None`` as possible values, and an
``address`` property holding the IP or hostname to listen on.
By default, not setting the ``listen`` part of the dictionary will default to
listen on all addresses.
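
    For instance (illustrative values), a VNC display listening on all
    addresses could be defined as:

    .. code-block:: python

        graphics={'type': 'vnc', 'listen': {'type': 'address', 'address': '0.0.0.0'}}
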
.. rubric:: CLI Example
.. code-block:: bash
salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
salt 'hypervisor' virt.init vm_name 4 512 /var/lib/libvirt/images/img.raw
salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
The disk images will be created in an image folder within the directory
defined by the ``virt:images`` option. Its default value is
    ``/srv/salt-images/`` but this can be changed with a configuration such as:
.. code-block:: yaml
virt:
images: /data/my/vm/images/
.. _disk element: https://libvirt.org/formatdomain.html#elementsDisks
.. _graphics element: https://libvirt.org/formatdomain.html#elementsGraphics
'''
caps = capabilities(**kwargs)
os_types = sorted({guest['os_type'] for guest in caps['guests']})
arches = sorted({guest['arch']['name'] for guest in caps['guests']})
if not hypervisor:
hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)
if hypervisor is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'libvirt:hypervisor\' configuration property has been deprecated. '
'Rather use the \'virt:connection:uri\' to properly define the libvirt '
'URI or alias of the host to connect to. \'libvirt:hypervisor\' will '
'stop being used in {version}.'
)
else:
# Use the machine types as possible values
# Prefer 'kvm' over the others if available
hypervisors = sorted({x for y in [guest['arch']['domains'].keys() for guest in caps['guests']] for x in y})
hypervisor = 'kvm' if 'kvm' in hypervisors else hypervisors[0]
# esxi used to be a possible value for the hypervisor: map it to vmware since it's the same
hypervisor = 'vmware' if hypervisor == 'esxi' else hypervisor
log.debug('Using hypervisor %s', hypervisor)
# the NICs are computed as follows:
# 1 - get the default NICs from the profile
# 2 - Complete the users NICS
# 3 - Update the default NICS list to the users one, matching key is the name
dmac = kwargs.get('dmac', None)
if dmac:
salt.utils.versions.warn_until(
'Sodium',
'\'dmac\' parameter has been deprecated. Rather use the \'interfaces\' parameter '
'to properly define the desired MAC address. \'dmac\' will be removed in {version}.'
)
nicp = _get_merged_nics(hypervisor, nic, interfaces, dmac=dmac)
# the disks are computed as follows:
# 1 - get the disks defined in the profile
# 2 - set the image on the first disk (will be removed later)
# 3 - update the disks from the profile with the ones from the user. The matching key is the name.
pool = kwargs.get('pool', None)
if pool:
salt.utils.versions.warn_until(
'Sodium',
'\'pool\' parameter has been deprecated. Rather use the \'disks\' parameter '
'to properly define the vmware datastore of disks. \'pool\' will be removed in {version}.'
)
if image:
salt.utils.versions.warn_until(
'Sodium',
'\'image\' parameter has been deprecated. Rather use the \'disks\' parameter '
'to override or define the image. \'image\' will be removed in {version}.'
)
diskp = _disk_profile(disk, hypervisor, disks, name, image=image, pool=pool, **kwargs)
# Create multiple disks, empty or from specified images.
for _disk in diskp:
log.debug("Creating disk for VM [ %s ]: %s", name, _disk)
if hypervisor == 'vmware':
if 'image' in _disk:
# TODO: we should be copying the image file onto the ESX host
raise SaltInvocationError(
'virt.init does not support image '
'template in conjunction with esxi hypervisor'
)
else:
# assume libvirt manages disks for us
log.debug('Generating libvirt XML for %s', _disk)
vol_xml = _gen_vol_xml(
name,
_disk['name'],
_disk['format'],
_disk['size'],
_disk['pool']
)
define_vol_xml_str(vol_xml)
elif hypervisor in ['qemu', 'kvm', 'xen']:
create_overlay = enable_qcow
if create_overlay:
salt.utils.versions.warn_until(
'Sodium',
'\'enable_qcow\' parameter has been deprecated. Rather use the \'disks\' '
'parameter to override or define the image. \'enable_qcow\' will be removed '
'in {version}.'
)
else:
create_overlay = _disk.get('overlay_image', False)
if _disk['source_file']:
if os.path.exists(_disk['source_file']):
img_dest = _disk['source_file']
else:
img_dest = _qemu_image_create(_disk, create_overlay, saltenv)
else:
img_dest = None
# Seed only if there is an image specified
if seed and img_dest and _disk.get('image', None):
log.debug('Seed command is %s', seed_cmd)
__salt__[seed_cmd](
img_dest,
id_=name,
config=kwargs.get('config'),
install=install,
pub_key=pub_key,
priv_key=priv_key,
)
else:
# Unknown hypervisor
raise SaltInvocationError(
'Unsupported hypervisor when handling disk image: {0}'
.format(hypervisor)
)
log.debug('Generating VM XML')
if enable_vnc:
salt.utils.versions.warn_until(
'Sodium',
'\'enable_vnc\' parameter has been deprecated in favor of '
'\'graphics\'. Use graphics={\'type\': \'vnc\'} for the same behavior. '
'\'enable_vnc\' will be removed in {version}. ')
graphics = {'type': 'vnc'}
if os_type is None:
os_type = 'hvm' if 'hvm' in os_types else os_types[0]
if arch is None:
arch = 'x86_64' if 'x86_64' in arches else arches[0]
vm_xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, os_type, arch, graphics, **kwargs)
conn = __get_conn(**kwargs)
try:
conn.defineXML(vm_xml)
except libvirtError as err:
# check if failure is due to this domain already existing
if "domain '{}' already exists".format(name) in six.text_type(err):
# continue on to seeding
log.warning(err)
else:
conn.close()
raise err # a real error we should report upwards
if start:
log.debug('Starting VM %s', name)
_get_domain(conn, name).create()
conn.close()
return True
def _disks_equal(disk1, disk2):
'''
Test if two disk elements should be considered like the same device
'''
target1 = disk1.find('target')
target2 = disk2.find('target')
source1 = ElementTree.tostring(disk1.find('source')) if disk1.find('source') is not None else None
source2 = ElementTree.tostring(disk2.find('source')) if disk2.find('source') is not None else None
return source1 == source2 and \
target1 is not None and target2 is not None and \
target1.get('bus') == target2.get('bus') and \
disk1.get('device', 'disk') == disk2.get('device', 'disk') and \
target1.get('dev') == target2.get('dev')
def _nics_equal(nic1, nic2):
'''
Test if two interface elements should be considered like the same device
'''
def _filter_nic(nic):
'''
Filter out elements to ignore when comparing nics
'''
return {
'type': nic.attrib['type'],
'source': nic.find('source').attrib[nic.attrib['type']] if nic.find('source') is not None else None,
'mac': nic.find('mac').attrib['address'].lower() if nic.find('mac') is not None else None,
'model': nic.find('model').attrib['type'] if nic.find('model') is not None else None,
}
return _filter_nic(nic1) == _filter_nic(nic2)
def _graphics_equal(gfx1, gfx2):
'''
Test if two graphics devices should be considered the same device
'''
def _filter_graphics(gfx):
'''
When the domain is running, the graphics element may contain additional properties
with the default values. This function will strip down the default values.
'''
gfx_copy = copy.deepcopy(gfx)
defaults = [{'node': '.', 'attrib': 'port', 'values': ['5900', '-1']},
{'node': '.', 'attrib': 'address', 'values': ['127.0.0.1']},
{'node': 'listen', 'attrib': 'address', 'values': ['127.0.0.1']}]
for default in defaults:
node = gfx_copy.find(default['node'])
attrib = default['attrib']
if node is not None and (attrib not in node.attrib or node.attrib[attrib] in default['values']):
node.set(attrib, default['values'][0])
return gfx_copy
return ElementTree.tostring(_filter_graphics(gfx1)) == ElementTree.tostring(_filter_graphics(gfx2))
def _diff_lists(old, new, comparator):
'''
Compare lists to extract the changes
:param old: old list
:param new: new list
:return: a dictionary with ``unchanged``, ``new``, ``deleted`` and ``sorted`` keys
The sorted list is the union of unchanged and new lists, but keeping the original
order from the new list.
'''
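    # Illustrative example: with old = [A, B] and new = [B', C], where
    # comparator(B, B') is True, the result is
    # {'unchanged': [B], 'new': [C], 'deleted': [A], 'sorted': [B, C]}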
def _remove_indent(node):
'''
Remove the XML indentation to compare XML trees more easily
'''
node_copy = copy.deepcopy(node)
node_copy.text = None
for item in node_copy.iter():
item.tail = None
return node_copy
diff = {'unchanged': [], 'new': [], 'deleted': [], 'sorted': []}
# We don't want to alter old since it may be used later by caller
old_devices = copy.deepcopy(old)
for new_item in new:
found = [item for item in old_devices if comparator(_remove_indent(item), _remove_indent(new_item))]
if found:
old_devices.remove(found[0])
diff['unchanged'].append(found[0])
diff['sorted'].append(found[0])
else:
diff['new'].append(new_item)
diff['sorted'].append(new_item)
diff['deleted'] = old_devices
return diff
def _diff_disk_lists(old, new):
'''
Compare disk definitions to extract the changes and fix target devices
:param old: list of ElementTree nodes representing the old disks
:param new: list of ElementTree nodes representing the new disks
'''
# Change the target device to avoid duplicates before diffing: this may lead
# to additional changes. Think of unchanged disk 'hda' and another disk listed
    # before it becoming 'hda' too... the unchanged one needs to turn into 'hdb'.
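    # Illustrative example: if the new disks are listed with targets
    # ['hdb', 'hda'], they are renamed in list order to ['hda', 'hdb']
    # before the diff is computed.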
targets = []
prefixes = ['fd', 'hd', 'vd', 'sd', 'xvd', 'ubd']
for disk in new:
target_node = disk.find('target')
target = target_node.get('dev')
prefix = [item for item in prefixes if target.startswith(item)][0]
new_target = ['{0}{1}'.format(prefix, string.ascii_lowercase[i]) for i in range(len(new))
if '{0}{1}'.format(prefix, string.ascii_lowercase[i]) not in targets][0]
target_node.set('dev', new_target)
targets.append(new_target)
return _diff_lists(old, new, _disks_equal)
def _diff_interface_lists(old, new):
'''
Compare network interface definitions to extract the changes
:param old: list of ElementTree nodes representing the old interfaces
:param new: list of ElementTree nodes representing the new interfaces
'''
diff = _diff_lists(old, new, _nics_equal)
    # Remove duplicated MAC addresses and let libvirt generate new ones for us
macs = [nic.find('mac').get('address') for nic in diff['unchanged']]
for nic in diff['new']:
mac = nic.find('mac')
if mac.get('address') in macs:
nic.remove(mac)
return diff
def _diff_graphics_lists(old, new):
'''
Compare graphic devices definitions to extract the changes
:param old: list of ElementTree nodes representing the old graphic devices
:param new: list of ElementTree nodes representing the new graphic devices
'''
return _diff_lists(old, new, _graphics_equal)
def update(name,
cpu=0,
mem=0,
disk_profile=None,
disks=None,
nic_profile=None,
interfaces=None,
graphics=None,
live=True,
**kwargs):
'''
Update the definition of an existing domain.
:param name: Name of the domain to update
:param cpu: Number of virtual CPUs to assign to the virtual machine
:param mem: Amount of memory to allocate to the virtual machine in MiB.
:param disk_profile: disk profile to use
:param disks:
Disk definitions as documented in the :func:`init` function.
        If neither the profile nor this parameter are defined, the disk devices
        will not be changed. However, to clear all disks, set this parameter to an empty list.
:param nic_profile: network interfaces profile to use
:param interfaces:
Network interface definitions as documented in the :func:`init` function.
        If neither the profile nor this parameter are defined, the interface devices
        will not be changed. However, to clear all network interfaces, set this
        parameter to an empty list.
:param graphics:
The new graphics definition as defined in :ref:`init-graphics-def`. If not set,
the graphics will not be changed. To remove a graphics device, set this parameter
to ``{'type': 'none'}``.
:param live:
``False`` to avoid trying to live update the definition. In such a case, the
new definition is applied at the next start of the virtual machine. If ``True``,
not all aspects of the definition can be live updated, but as much as possible
will be attempted. (Default: ``True``)
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
:return:
Returns a dictionary indicating the status of what has been done. It is structured in
the following way:
.. code-block:: python
{
'definition': True,
'cpu': True,
'mem': True,
            'disk': {'attached': [list of actually attached disks],
                     'detached': [list of actually detached disks]},
            'interface': {'attached': [list of actually attached nics],
                          'detached': [list of actually detached nics]},
'errors': ['error messages for failures']
}
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.update domain cpu=2 mem=1024
'''
status = {
'definition': False,
'disk': {'attached': [], 'detached': []},
'interface': {'attached': [], 'detached': []}
}
conn = __get_conn(**kwargs)
domain = _get_domain(conn, name)
desc = ElementTree.fromstring(domain.XMLDesc(0))
need_update = False
# Compute the XML to get the disks, interfaces and graphics
hypervisor = desc.get('type')
all_disks = _disk_profile(disk_profile, hypervisor, disks, name, **kwargs)
new_desc = ElementTree.fromstring(_gen_xml(name,
cpu,
mem,
all_disks,
_get_merged_nics(hypervisor, nic_profile, interfaces),
hypervisor,
domain.OSType(),
desc.find('.//os/type').get('arch'),
graphics,
**kwargs))
# Update the cpu
cpu_node = desc.find('vcpu')
if cpu and int(cpu_node.text) != cpu:
cpu_node.text = six.text_type(cpu)
cpu_node.set('current', six.text_type(cpu))
need_update = True
# Update the memory, note that libvirt outputs all memory sizes in KiB
for mem_node_name in ['memory', 'currentMemory']:
mem_node = desc.find(mem_node_name)
if mem and int(mem_node.text) != mem * 1024:
mem_node.text = six.text_type(mem)
mem_node.set('unit', 'MiB')
need_update = True
# Update the XML definition with the new disks and diff changes
devices_node = desc.find('devices')
parameters = {'disk': ['disks', 'disk_profile'],
'interface': ['interfaces', 'nic_profile'],
'graphics': ['graphics']}
changes = {}
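    # For each device type (disk, interface, graphics): if the user passed a
    # matching parameter, diff the old and new device lists and, when something
    # changed, replace the devices in the domain XML with the sorted result.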
for dev_type in parameters:
changes[dev_type] = {}
func_locals = locals()
if [param for param in parameters[dev_type] if func_locals.get(param, None) is not None]:
old = devices_node.findall(dev_type)
new = new_desc.findall('devices/{0}'.format(dev_type))
changes[dev_type] = globals()['_diff_{0}_lists'.format(dev_type)](old, new)
if changes[dev_type]['deleted'] or changes[dev_type]['new']:
for item in old:
devices_node.remove(item)
devices_node.extend(changes[dev_type]['sorted'])
need_update = True
# Set the new definition
if need_update:
# Create missing disks if needed
if changes['disk']:
for idx, item in enumerate(changes['disk']['sorted']):
source_file = all_disks[idx]['source_file']
if item in changes['disk']['new'] and source_file and not os.path.isfile(source_file):
_qemu_image_create(all_disks[idx])
try:
conn.defineXML(salt.utils.stringutils.to_str(ElementTree.tostring(desc)))
status['definition'] = True
except libvirt.libvirtError as err:
conn.close()
raise err
# Do the live changes now that we know the definition has been properly set
# From that point on, failures are not blocking to try to live update as much
# as possible.
commands = []
if domain.isActive() and live:
if cpu:
commands.append({'device': 'cpu',
'cmd': 'setVcpusFlags',
'args': [cpu, libvirt.VIR_DOMAIN_AFFECT_LIVE]})
if mem:
commands.append({'device': 'mem',
'cmd': 'setMemoryFlags',
'args': [mem * 1024, libvirt.VIR_DOMAIN_AFFECT_LIVE]})
for dev_type in ['disk', 'interface']:
for added in changes[dev_type].get('new', []):
commands.append({'device': dev_type,
'cmd': 'attachDevice',
'args': [salt.utils.stringutils.to_str(ElementTree.tostring(added))]})
for removed in changes[dev_type].get('deleted', []):
commands.append({'device': dev_type,
'cmd': 'detachDevice',
'args': [salt.utils.stringutils.to_str(ElementTree.tostring(removed))]})
for cmd in commands:
try:
ret = getattr(domain, cmd['cmd'])(*cmd['args'])
device_type = cmd['device']
if device_type in ['cpu', 'mem']:
status[device_type] = not bool(ret)
else:
actions = {'attachDevice': 'attached', 'detachDevice': 'detached'}
status[device_type][actions[cmd['cmd']]].append(cmd['args'][0])
except libvirt.libvirtError as err:
if 'errors' not in status:
status['errors'] = []
status['errors'].append(six.text_type(err))
conn.close()
return status
def list_domains(**kwargs):
'''
Return a list of available domains.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_domains
'''
vms = []
conn = __get_conn(**kwargs)
for dom in _get_domain(conn, iterable=True):
vms.append(dom.name())
conn.close()
return vms
def list_active_vms(**kwargs):
'''
    Return a list of names for active virtual machines on the minion
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_active_vms
'''
vms = []
conn = __get_conn(**kwargs)
for dom in _get_domain(conn, iterable=True, inactive=False):
vms.append(dom.name())
conn.close()
return vms
def list_inactive_vms(**kwargs):
'''
    Return a list of names for inactive virtual machines on the minion
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_inactive_vms
'''
vms = []
conn = __get_conn(**kwargs)
for dom in _get_domain(conn, iterable=True, active=False):
vms.append(dom.name())
conn.close()
return vms
def vm_info(vm_=None, **kwargs):
'''
    Return detailed information about the vms on this hyper in a
    dict of dicts:
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: python
        {
            'your-vm': {
                'cpu': <int>,
                'maxMem': <int>,
                'mem': <int>,
                'state': '<state>',
                'cputime': <int>
            },
            ...
        }
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_info
'''
def _info(dom):
'''
        Compute the info of a domain
'''
raw = dom.info()
return {'cpu': raw[3],
'cputime': int(raw[4]),
'disks': _get_disks(dom),
'graphics': _get_graphics(dom),
'nics': _get_nics(dom),
'uuid': _get_uuid(dom),
'on_crash': _get_on_crash(dom),
'on_reboot': _get_on_reboot(dom),
'on_poweroff': _get_on_poweroff(dom),
'maxMem': int(raw[1]),
'mem': int(raw[2]),
'state': VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')}
info = {}
conn = __get_conn(**kwargs)
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
for domain in _get_domain(conn, iterable=True):
info[domain.name()] = _info(domain)
conn.close()
return info
def vm_state(vm_=None, **kwargs):
'''
    Return a dict of all the vms and their state.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.vm_state <domain>
'''
def _info(dom):
'''
Compute domain state
'''
state = ''
raw = dom.info()
state = VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')
return state
info = {}
conn = __get_conn(**kwargs)
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
for domain in _get_domain(conn, iterable=True):
info[domain.name()] = _info(domain)
conn.close()
return info
def _node_info(conn):
'''
Internal variant of node_info taking a libvirt connection as parameter
'''
raw = conn.getInfo()
info = {'cpucores': raw[6],
'cpumhz': raw[3],
'cpumodel': six.text_type(raw[0]),
'cpus': raw[2],
'cputhreads': raw[7],
'numanodes': raw[4],
'phymemory': raw[1],
'sockets': raw[5]}
return info
def node_info(**kwargs):
'''
Return a dict with information about this node
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.node_info
'''
conn = __get_conn(**kwargs)
info = _node_info(conn)
conn.close()
return info
def get_nics(vm_, **kwargs):
'''
Return info about the network interfaces of a named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_nics <domain>
'''
conn = __get_conn(**kwargs)
nics = _get_nics(_get_domain(conn, vm_))
conn.close()
return nics
def get_macs(vm_, **kwargs):
'''
    Return a list of MAC addresses from the named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <domain>
'''
doc = ElementTree.fromstring(get_xml(vm_, **kwargs))
return [node.get('address') for node in doc.findall('devices/interface/mac')]
def get_graphics(vm_, **kwargs):
'''
    Returns the graphics device information (e.g. vnc) for a given vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_graphics <domain>
'''
conn = __get_conn(**kwargs)
graphics = _get_graphics(_get_domain(conn, vm_))
conn.close()
return graphics
def get_disks(vm_, **kwargs):
'''
Return the disks of a named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_disks <domain>
'''
conn = __get_conn(**kwargs)
disks = _get_disks(_get_domain(conn, vm_))
conn.close()
return disks
def setmem(vm_, memory, config=False, **kwargs):
'''
    Changes the amount of memory allocated to a VM. The VM must be shut down
    for this to work.
:param vm_: name of the domain
    :param memory: memory amount to set in MiB
:param config: if True then libvirt will be asked to modify the config as well
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.setmem <domain> <size>
salt '*' virt.setmem my_domain 768
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown':
return False
# libvirt has a funny bitwise system for the flags in that the flag
# to affect the "current" setting is 0, which means that to set the
# current setting we have to call it a second time with just 0 set
flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM
if config:
flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG
ret1 = dom.setMemoryFlags(memory * 1024, flags)
ret2 = dom.setMemoryFlags(memory * 1024, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
conn.close()
# return True if both calls succeeded
return ret1 == ret2 == 0
def setvcpus(vm_, vcpus, config=False, **kwargs):
'''
    Changes the number of vcpus allocated to a VM. The VM must be shut down
    for this to work.
If config is True then we ask libvirt to modify the config as well
:param vm_: name of the domain
:param vcpus: integer representing the number of CPUs to be assigned
:param config: if True then libvirt will be asked to modify the config as well
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.setvcpus <domain> <amount>
salt '*' virt.setvcpus my_domain 4
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown':
return False
# see notes in setmem
flags = libvirt.VIR_DOMAIN_VCPU_MAXIMUM
if config:
flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG
ret1 = dom.setVcpusFlags(vcpus, flags)
ret2 = dom.setVcpusFlags(vcpus, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
conn.close()
return ret1 == ret2 == 0
def _freemem(conn):
'''
Internal variant of freemem taking a libvirt connection as parameter
'''
mem = conn.getInfo()[1]
# Take off just enough to sustain the hypervisor
mem -= 256
for dom in _get_domain(conn, iterable=True):
if dom.ID() > 0:
mem -= dom.info()[2] / 1024
return mem
def freemem(**kwargs):
'''
    Return an int representing the amount of memory (in MiB) that has not
been given to virtual machines on this node
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.freemem
'''
conn = __get_conn(**kwargs)
mem = _freemem(conn)
conn.close()
return mem
def _freecpu(conn):
'''
Internal variant of freecpu taking a libvirt connection as parameter
'''
cpus = conn.getInfo()[2]
for dom in _get_domain(conn, iterable=True):
if dom.ID() > 0:
cpus -= dom.info()[3]
return cpus
def freecpu(**kwargs):
'''
Return an int representing the number of unallocated cpus on this
hypervisor
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.freecpu
'''
conn = __get_conn(**kwargs)
cpus = _freecpu(conn)
conn.close()
return cpus
def full_info(**kwargs):
'''
Return the node_info, vm_info and freemem
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.full_info
'''
conn = __get_conn(**kwargs)
info = {'freecpu': _freecpu(conn),
'freemem': _freemem(conn),
'node_info': _node_info(conn),
            'vm_info': vm_info(**kwargs)}
conn.close()
return info
def get_xml(vm_, **kwargs):
'''
Returns the XML for a given vm
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_xml <domain>
'''
conn = __get_conn(**kwargs)
xml_desc = vm_.XMLDesc(0) if isinstance(
vm_, libvirt.virDomain
) else _get_domain(conn, vm_).XMLDesc(0)
conn.close()
return xml_desc
def get_profiles(hypervisor=None, **kwargs):
'''
Return the virt profiles for hypervisor.
Currently there are profiles for:
- nic
- disk
:param hypervisor: override the default machine type.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_profiles
salt '*' virt.get_profiles hypervisor=esxi
'''
ret = {}
caps = capabilities(**kwargs)
hypervisors = sorted({x for y in [guest['arch']['domains'].keys() for guest in caps['guests']] for x in y})
default_hypervisor = 'kvm' if 'kvm' in hypervisors else hypervisors[0]
if not hypervisor:
hypervisor = __salt__['config.get']('libvirt:hypervisor')
if hypervisor is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'libvirt:hypervisor\' configuration property has been deprecated. '
'Rather use the \'virt:connection:uri\' to properly define the libvirt '
'URI or alias of the host to connect to. \'libvirt:hypervisor\' will '
'stop being used in {version}.'
)
else:
# Use the machine types as possible values
# Prefer 'kvm' over the others if available
hypervisor = default_hypervisor
virtconf = __salt__['config.get']('virt', {})
for typ in ['disk', 'nic']:
_func = getattr(sys.modules[__name__], '_{0}_profile'.format(typ))
ret[typ] = {'default': _func('default', hypervisor)}
if typ in virtconf:
ret.setdefault(typ, {})
for prf in virtconf[typ]:
ret[typ][prf] = _func(prf, hypervisor)
return ret
def shutdown(vm_, **kwargs):
'''
Send a soft shutdown signal to the named vm
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.shutdown <domain>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
ret = dom.shutdown() == 0
conn.close()
return ret
def pause(vm_, **kwargs):
'''
Pause the named vm
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pause <domain>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
ret = dom.suspend() == 0
conn.close()
return ret
def resume(vm_, **kwargs):
'''
Resume the named vm
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.resume <domain>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
ret = dom.resume() == 0
conn.close()
return ret
def start(name, **kwargs):
'''
Start a defined domain
    :param name: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.start <domain>
'''
conn = __get_conn(**kwargs)
ret = _get_domain(conn, name).create() == 0
conn.close()
return ret
def stop(name, **kwargs):
'''
Hard power down the virtual machine, this is equivalent to pulling the power.
    :param name: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.stop <domain>
'''
conn = __get_conn(**kwargs)
ret = _get_domain(conn, name).destroy() == 0
conn.close()
return ret
def reboot(name, **kwargs):
'''
Reboot a domain via ACPI request
    :param name: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.reboot <domain>
'''
conn = __get_conn(**kwargs)
ret = _get_domain(conn, name).reboot(libvirt.VIR_DOMAIN_REBOOT_DEFAULT) == 0
conn.close()
return ret
def reset(vm_, **kwargs):
'''
Reset a VM by emulating the reset button on a physical machine
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.reset <domain>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
# reset takes a flag, like reboot, but it is not yet used
# so we just pass in 0
# see: http://libvirt.org/html/libvirt-libvirt.html#virDomainReset
ret = dom.reset(0) == 0
conn.close()
return ret
def ctrl_alt_del(vm_, **kwargs):
'''
Sends CTRL+ALT+DEL to a VM
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.ctrl_alt_del <domain>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
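    # sendKey(codeset, holdtime, keycodes, nkeycodes, flags): codeset 0 is the
    # Linux keycode set; 29, 56 and 111 are KEY_LEFTCTRL, KEY_LEFTALT and
    # KEY_DELETE, sent together as a single 3-key press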
ret = dom.sendKey(0, 0, [29, 56, 111], 3, 0) == 0
conn.close()
return ret
def create_xml_str(xml, **kwargs): # pylint: disable=redefined-outer-name
'''
Start a domain based on the XML passed to the function
:param xml: libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.create_xml_str <XML in string format>
'''
conn = __get_conn(**kwargs)
ret = conn.createXML(xml, 0) is not None
conn.close()
return ret
def create_xml_path(path, **kwargs):
'''
Start a domain based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.create_xml_path <path to XML file on the node>
'''
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return create_xml_str(
salt.utils.stringutils.to_unicode(fp_.read()),
**kwargs
)
except (OSError, IOError):
return False
def define_xml_str(xml, **kwargs): # pylint: disable=redefined-outer-name
'''
Define a domain based on the XML passed to the function
:param xml: libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.define_xml_str <XML in string format>
'''
conn = __get_conn(**kwargs)
ret = conn.defineXML(xml) is not None
conn.close()
return ret
def define_xml_path(path, **kwargs):
'''
Define a domain based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.define_xml_path <path to XML file on the node>
'''
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return define_xml_str(
salt.utils.stringutils.to_unicode(fp_.read()),
**kwargs
)
except (OSError, IOError):
return False
def define_vol_xml_str(xml, **kwargs): # pylint: disable=redefined-outer-name
'''
Define a volume based on the XML passed to the function
:param xml: libvirt XML definition of the storage volume
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.define_vol_xml_str <XML in string format>
The storage pool where the disk image will be defined is ``default``
unless changed with a configuration like this:
.. code-block:: yaml
virt:
storagepool: mine
'''
poolname = __salt__['config.get']('libvirt:storagepool', None)
if poolname is not None:
salt.utils.versions.warn_until(
'Sodium',
'\'libvirt:storagepool\' has been deprecated in favor of '
'\'virt:storagepool\'. \'libvirt:storagepool\' will stop '
'being used in {version}.'
)
else:
poolname = __salt__['config.get']('virt:storagepool', 'default')
conn = __get_conn(**kwargs)
pool = conn.storagePoolLookupByName(six.text_type(poolname))
ret = pool.createXML(xml, 0) is not None
conn.close()
return ret
def define_vol_xml_path(path, **kwargs):
'''
Define a volume based on the XML-file path passed to the function
:param path: path to a file containing the libvirt XML definition of the volume
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.define_vol_xml_path <path to XML file on the node>
'''
try:
with salt.utils.files.fopen(path, 'r') as fp_:
return define_vol_xml_str(
salt.utils.stringutils.to_unicode(fp_.read()),
**kwargs
)
except (OSError, IOError):
return False
def migrate_non_shared(vm_, target, ssh=False):
'''
Attempt to execute non-shared storage "all" migration
:param vm_: domain name
:param target: target libvirt host name
:param ssh: True to connect over ssh
CLI Example:
.. code-block:: bash
salt '*' virt.migrate_non_shared <vm name> <target hypervisor>
A tunnel data migration can be performed by setting this in the
configuration:
.. code-block:: yaml
virt:
tunnel: True
    For more details on tunnelled data migrations, refer to
https://libvirt.org/migration.html#transporttunnel
'''
cmd = _get_migrate_command() + ' --copy-storage-all ' + vm_\
+ _get_target(target, ssh)
stdout = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
return salt.utils.stringutils.to_str(stdout)
def migrate_non_shared_inc(vm_, target, ssh=False):
'''
    Attempt to execute non-shared storage "inc" (incremental) migration
:param vm_: domain name
:param target: target libvirt host name
:param ssh: True to connect over ssh
CLI Example:
.. code-block:: bash
salt '*' virt.migrate_non_shared_inc <vm name> <target hypervisor>
A tunnel data migration can be performed by setting this in the
configuration:
.. code-block:: yaml
virt:
tunnel: True
    For more details on tunnelled data migrations, refer to
https://libvirt.org/migration.html#transporttunnel
'''
cmd = _get_migrate_command() + ' --copy-storage-inc ' + vm_\
+ _get_target(target, ssh)
stdout = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
return salt.utils.stringutils.to_str(stdout)
def migrate(vm_, target, ssh=False):
'''
Shared storage migration
:param vm_: domain name
:param target: target libvirt host name
:param ssh: True to connect over ssh
CLI Example:
.. code-block:: bash
salt '*' virt.migrate <domain> <target hypervisor>
A tunnel data migration can be performed by setting this in the
configuration:
.. code-block:: yaml
virt:
tunnel: True
    For more details on tunnelled data migrations, refer to
https://libvirt.org/migration.html#transporttunnel
'''
cmd = _get_migrate_command() + ' ' + vm_\
+ _get_target(target, ssh)
stdout = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
return salt.utils.stringutils.to_str(stdout)
def seed_non_shared_migrate(disks, force=False):
'''
    Non-shared migration requires that the disks be present on the migration
    destination. Pass the disk information via this function to the migration
    destination before executing the migration.
:param disks: the list of disk data as provided by virt.get_disks
:param force: skip checking the compatibility of source and target disk
images if True. (default: False)
CLI Example:
.. code-block:: bash
salt '*' virt.seed_non_shared_migrate <disks>
'''
for _, data in six.iteritems(disks):
fn_ = data['file']
form = data['file format']
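        # 'virtual size' is reported like '10G (10737418240 bytes)': take the
        # second token and strip the leading parenthesis to keep the byte count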
size = data['virtual size'].split()[1][1:]
        if os.path.isfile(fn_) and not force:
            # the target exists, check to see if it is compatible
            pre = salt.utils.yaml.safe_load(subprocess.Popen('qemu-img info ' + fn_,
                                                             shell=True,
                                                             stdout=subprocess.PIPE).communicate()[0])
            # incompatible if either the image format or the virtual size differs
            if pre['file format'] != data['file format']\
                    or pre['virtual size'] != data['virtual size']:
                return False
if not os.path.isdir(os.path.dirname(fn_)):
os.makedirs(os.path.dirname(fn_))
if os.path.isfile(fn_):
os.remove(fn_)
cmd = 'qemu-img create -f ' + form + ' ' + fn_ + ' ' + size
subprocess.call(cmd, shell=True)
creds = _libvirt_creds()
cmd = 'chown ' + creds['user'] + ':' + creds['group'] + ' ' + fn_
subprocess.call(cmd, shell=True)
return True
def set_autostart(vm_, state='on', **kwargs):
'''
Set the autostart flag on a VM so that the VM will start with the host
system on reboot.
:param vm_: domain name
    :param state: 'on' to auto start the VM, anything else to mark the
                  VM not to be started when the host boots
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt "*" virt.set_autostart <domain> <on | off>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
    # return False if state is set to something other than 'on' or 'off'
ret = False
if state == 'on':
ret = dom.setAutostart(1) == 0
elif state == 'off':
ret = dom.setAutostart(0) == 0
conn.close()
return ret
def undefine(vm_, **kwargs):
'''
    Remove a defined vm. This does not purge the virtual machine image, and
    only works if the vm is powered down.
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.undefine <domain>
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
# This one is only in 1.2.8+
ret = dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) == 0
else:
ret = dom.undefine() == 0
conn.close()
return ret
def purge(vm_, dirs=False, removables=None, **kwargs):
'''
    Recursively destroy and delete a virtual machine. Pass True for dirs to
    also delete the directories containing the virtual machine disk images -
    USE WITH EXTREME CAUTION!
Pass removables=False to avoid deleting cdrom and floppy images. To avoid
disruption, the default but dangerous value is True. This will be changed
to the safer False default value in Sodium.
:param vm_: domain name
:param dirs: pass True to remove containing directories
:param removables: pass True to remove removable devices
.. versionadded:: 2019.2.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.purge <domain> removables=False
'''
conn = __get_conn(**kwargs)
dom = _get_domain(conn, vm_)
disks = _get_disks(dom)
if removables is None:
salt.utils.versions.warn_until(
'Sodium',
'removables argument default value is True, but will be changed '
'to False by default in {version}. Please set to True to maintain '
'the current behavior in the future.'
)
removables = True
if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown' and dom.destroy() != 0:
return False
directories = set()
for disk in disks:
if not removables and disks[disk]['type'] in ['cdrom', 'floppy']:
continue
os.remove(disks[disk]['file'])
directories.add(os.path.dirname(disks[disk]['file']))
if dirs:
for dir_ in directories:
shutil.rmtree(dir_)
if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
# This one is only in 1.2.8+
try:
dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
except Exception:
dom.undefine()
else:
dom.undefine()
conn.close()
return True
def virt_type():
'''
Returns the virtual machine type as a string
CLI Example:
.. code-block:: bash
salt '*' virt.virt_type
'''
return __grains__['virtual']
def _is_kvm_hyper():
'''
Returns a bool whether or not this node is a KVM hypervisor
'''
try:
with salt.utils.files.fopen('/proc/modules') as fp_:
if 'kvm_' not in salt.utils.stringutils.to_unicode(fp_.read()):
return False
except IOError:
# No /proc/modules? Are we on Windows? Or Solaris?
return False
return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])
def is_kvm_hyper():
'''
Returns a bool whether or not this node is a KVM hypervisor
CLI Example:
.. code-block:: bash
salt '*' virt.is_kvm_hyper
.. deprecated:: 2019.2.0
'''
salt.utils.versions.warn_until(
'Sodium',
'\'is_kvm_hyper\' function has been deprecated. Use the \'get_hypervisor\' == "kvm" instead. '
'\'is_kvm_hyper\' will be removed in {version}.'
)
return _is_kvm_hyper()
def _is_xen_hyper():
'''
Returns a bool whether or not this node is a XEN hypervisor
'''
try:
if __grains__['virtual_subtype'] != 'Xen Dom0':
return False
except KeyError:
# virtual_subtype isn't set everywhere.
return False
try:
with salt.utils.files.fopen('/proc/modules') as fp_:
if 'xen_' not in salt.utils.stringutils.to_unicode(fp_.read()):
return False
except (OSError, IOError):
# No /proc/modules? Are we on Windows? Or Solaris?
return False
return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])
def is_xen_hyper():
'''
Returns a bool whether or not this node is a XEN hypervisor
CLI Example:
.. code-block:: bash
salt '*' virt.is_xen_hyper
.. deprecated:: 2019.2.0
'''
salt.utils.versions.warn_until(
'Sodium',
'\'is_xen_hyper\' function has been deprecated. Use the \'get_hypervisor\' == "xen" instead. '
'\'is_xen_hyper\' will be removed in {version}.'
)
return _is_xen_hyper()
def get_hypervisor():
'''
Returns the name of the hypervisor running on this node or ``None``.
Detected hypervisors:
- kvm
- xen
CLI Example:
.. code-block:: bash
salt '*' virt.get_hypervisor
.. versionadded:: 2019.2.0
        the function and support for the ``kvm`` and ``xen`` hypervisors
'''
# To add a new 'foo' hypervisor, add the _is_foo_hyper function,
# add 'foo' to the list below and add it to the docstring with a .. versionadded::
hypervisors = ['kvm', 'xen']
    result = [hyper for hyper in hypervisors if getattr(sys.modules[__name__], '_is_{}_hyper'.format(hyper))()]
return result[0] if result else None
def is_hyper():
'''
Returns a bool whether or not this node is a hypervisor of any kind
CLI Example:
.. code-block:: bash
salt '*' virt.is_hyper
'''
if HAS_LIBVIRT:
return is_xen_hyper() or is_kvm_hyper()
return False
def vm_cputime(vm_=None, **kwargs):
'''
    Return cputime used by the vms on this hyper in a
    dict of dicts:
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: python
        {
            'your-vm': {
                'cputime': <int>,
                'cputime_percent': <int>
            },
            ...
        }
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_cputime
'''
conn = __get_conn(**kwargs)
host_cpus = conn.getInfo()[2]
def _info(dom):
'''
Compute cputime info of a domain
'''
raw = dom.info()
vcpus = int(raw[3])
cputime = int(raw[4])
cputime_percent = 0
if cputime:
# Divide by vcpus to always return a number between 0 and 100
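            # dom.info()[4] reports cumulative CPU time in nanoseconds, so
            # 1.0e-7 converts it to a percentage of one second
            # (1e-9 s/ns * 100); host_cpus and vcpus normalize the result.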
cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
return {
'cputime': int(raw[4]),
'cputime_percent': int('{0:.0f}'.format(cputime_percent))
}
info = {}
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
for domain in _get_domain(conn, iterable=True):
info[domain.name()] = _info(domain)
conn.close()
return info
def vm_netstats(vm_=None, **kwargs):
'''
    Return combined network counters used by the vms on this hyper in a
    dict of dicts:
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: python
        {
            'your-vm': {
                'rx_bytes' : 0,
                'rx_packets' : 0,
                'rx_errs' : 0,
                'rx_drop' : 0,
                'tx_bytes' : 0,
                'tx_packets' : 0,
                'tx_errs' : 0,
                'tx_drop' : 0
                },
            ...
            }
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_netstats
'''
def _info(dom):
'''
Compute network stats of a domain
'''
nics = _get_nics(dom)
ret = {
'rx_bytes': 0,
'rx_packets': 0,
'rx_errs': 0,
'rx_drop': 0,
'tx_bytes': 0,
'tx_packets': 0,
'tx_errs': 0,
'tx_drop': 0
}
for attrs in six.itervalues(nics):
if 'target' in attrs:
dev = attrs['target']
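                # libvirt's interfaceStats() returns an 8-tuple in the order:
                # (rx_bytes, rx_packets, rx_errs, rx_drop,
                #  tx_bytes, tx_packets, tx_errs, tx_drop)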
stats = dom.interfaceStats(dev)
ret['rx_bytes'] += stats[0]
ret['rx_packets'] += stats[1]
ret['rx_errs'] += stats[2]
ret['rx_drop'] += stats[3]
ret['tx_bytes'] += stats[4]
ret['tx_packets'] += stats[5]
ret['tx_errs'] += stats[6]
ret['tx_drop'] += stats[7]
return ret
info = {}
conn = __get_conn(**kwargs)
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
for domain in _get_domain(conn, iterable=True):
info[domain.name()] = _info(domain)
conn.close()
return info
def vm_diskstats(vm_=None, **kwargs):
'''
    Return disk usage counters used by the vms on this hyper in a
    dict of dicts:
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: python
        {
            'your-vm': {
                'rd_req' : 0,
                'rd_bytes' : 0,
                'wr_req' : 0,
                'wr_bytes' : 0,
                'errs' : 0
                },
            ...
            }
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
        salt '*' virt.vm_diskstats
'''
def get_disk_devs(dom):
'''
Extract the disk devices names from the domain XML definition
'''
doc = ElementTree.fromstring(get_xml(dom, **kwargs))
return [target.get('dev') for target in doc.findall('devices/disk/target')]
def _info(dom):
'''
Compute the disk stats of a domain
'''
# Do not use get_disks, since it uses qemu-img and is very slow
# and unsuitable for any sort of real time statistics
disks = get_disk_devs(dom)
ret = {'rd_req': 0,
'rd_bytes': 0,
'wr_req': 0,
'wr_bytes': 0,
'errs': 0
}
for disk in disks:
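            # libvirt's blockStats() returns a 5-tuple:
            # (rd_req, rd_bytes, wr_req, wr_bytes, errs)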
stats = dom.blockStats(disk)
ret['rd_req'] += stats[0]
ret['rd_bytes'] += stats[1]
ret['wr_req'] += stats[2]
ret['wr_bytes'] += stats[3]
ret['errs'] += stats[4]
return ret
info = {}
conn = __get_conn(**kwargs)
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
# Can not run function blockStats on inactive VMs
for domain in _get_domain(conn, iterable=True, inactive=False):
info[domain.name()] = _info(domain)
conn.close()
return info
def _parse_snapshot_description(vm_snapshot, unix_time=False):
'''
    Parse the snapshot XML document and return a dict with its status values.
    :param vm_snapshot: the libvirt snapshot object to describe
    :param unix_time: when True, return the creation time as a Unix timestamp
        instead of an ISO formatted string
    :return: dict with the snapshot name, creation time, running state and
        whether it is the current snapshot
'''
ret = dict()
tree = ElementTree.fromstring(vm_snapshot.getXMLDesc())
for node in tree:
if node.tag == 'name':
ret['name'] = node.text
elif node.tag == 'creationTime':
ret['created'] = datetime.datetime.fromtimestamp(float(node.text)).isoformat(' ') \
if not unix_time else float(node.text)
elif node.tag == 'state':
ret['running'] = node.text == 'running'
ret['current'] = vm_snapshot.isCurrent() == 1
return ret
def list_snapshots(domain=None, **kwargs):
'''
    List available snapshots for a certain VM or for all of them.
:param domain: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_snapshots
salt '*' virt.list_snapshots <domain>
'''
ret = dict()
conn = __get_conn(**kwargs)
for vm_domain in _get_domain(conn, *(domain and [domain] or list()), iterable=True):
ret[vm_domain.name()] = [_parse_snapshot_description(snap) for snap in vm_domain.listAllSnapshots()] or 'N/A'
conn.close()
return ret
def snapshot(domain, name=None, suffix=None, **kwargs):
'''
Create a snapshot of a VM.
:param domain: domain name
    :param name: Name of the snapshot. If omitted, the original domain name with
        an ISO 8601 timestamp as a suffix will be used.
    :param suffix: Add a suffix to the new name. Useful in states, where such snapshots
        can be distinguished from manually created ones.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' virt.snapshot <domain>
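    Passing a suffix makes automated snapshots easy to tell apart, for example:
    .. code-block:: bash
        salt '*' virt.snapshot <domain> suffix=pre-update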
'''
if name and name.lower() == domain.lower():
raise CommandExecutionError('Virtual Machine {name} is already defined. '
'Please choose another name for the snapshot'.format(name=name))
if not name:
name = "{domain}-{tsnap}".format(domain=domain, tsnap=time.strftime('%Y%m%d-%H%M%S', time.localtime()))
if suffix:
name = "{name}-{suffix}".format(name=name, suffix=suffix)
doc = ElementTree.Element('domainsnapshot')
n_name = ElementTree.SubElement(doc, 'name')
n_name.text = name
conn = __get_conn(**kwargs)
_get_domain(conn, domain).snapshotCreateXML(
salt.utils.stringutils.to_str(ElementTree.tostring(doc))
)
conn.close()
return {'name': name}
def delete_snapshots(name, *names, **kwargs):
'''
Delete one or more snapshots of the given VM.
:param name: domain name
    :param names: names of the snapshots to remove. All snapshots are removed when no name is passed.
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
        salt '*' virt.delete_snapshots <domain>
salt '*' virt.delete_snapshots <domain> <snapshot>
salt '*' virt.delete_snapshots <domain> <snapshot1> <snapshot2> ...
'''
deleted = dict()
conn = __get_conn(**kwargs)
domain = _get_domain(conn, name)
for snap in domain.listAllSnapshots():
if snap.getName() in names or not names:
deleted[snap.getName()] = _parse_snapshot_description(snap)
snap.delete()
conn.close()
available = {name: [_parse_snapshot_description(snap) for snap in domain.listAllSnapshots()] or 'N/A'}
return {'available': available, 'deleted': deleted}
def revert_snapshot(name, vm_snapshot=None, cleanup=False, **kwargs):
'''
    Revert to the snapshot preceding the current one (if available) or to a specific snapshot.
:param name: domain name
:param vm_snapshot: name of the snapshot to revert
    :param cleanup: Remove all snapshots newer than the reverted one. Values: True or False (default False).
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
        salt '*' virt.revert_snapshot <domain>
        salt '*' virt.revert_snapshot <domain> <snapshot>
'''
ret = dict()
conn = __get_conn(**kwargs)
domain = _get_domain(conn, name)
snapshots = domain.listAllSnapshots()
_snapshots = list()
for snap_obj in snapshots:
_snapshots.append({'idx': _parse_snapshot_description(snap_obj, unix_time=True)['created'], 'ptr': snap_obj})
snapshots = [w_ptr['ptr'] for w_ptr in sorted(_snapshots, key=lambda item: item['idx'], reverse=True)]
del _snapshots
if not snapshots:
conn.close()
raise CommandExecutionError('No snapshots found')
elif len(snapshots) == 1:
conn.close()
raise CommandExecutionError('Cannot revert to itself: only one snapshot is available.')
snap = None
for p_snap in snapshots:
if not vm_snapshot:
if p_snap.isCurrent() and snapshots[snapshots.index(p_snap) + 1:]:
snap = snapshots[snapshots.index(p_snap) + 1:][0]
break
elif p_snap.getName() == vm_snapshot:
snap = p_snap
break
if not snap:
conn.close()
raise CommandExecutionError(
            vm_snapshot and 'Snapshot "{0}" not found'.format(vm_snapshot) or 'No more previous snapshots available')
elif snap.isCurrent():
conn.close()
raise CommandExecutionError('Cannot revert to the currently running snapshot.')
domain.revertToSnapshot(snap)
ret['reverted'] = snap.getName()
if cleanup:
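        # 'snapshots' is ordered newest first, so every snapshot that comes
        # before the reverted one in the list is newer than it and is removed.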
delete = list()
for p_snap in snapshots:
if p_snap.getName() != snap.getName():
delete.append(p_snap.getName())
p_snap.delete()
else:
break
ret['deleted'] = delete
else:
ret['deleted'] = 'N/A'
conn.close()
return ret
def _caps_add_machine(machines, node):
'''
Parse the <machine> element of the host capabilities and add it
to the machines list.
'''
maxcpus = node.get('maxCpus')
canonical = node.get('canonical')
name = node.text
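    # A 'canonical' attribute means the element text is only an alias: index
    # the machine under its canonical name and record the alias in
    # alternate_names below.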
alternate_name = ""
if canonical:
alternate_name = name
name = canonical
machine = machines.get(name)
if not machine:
machine = {'alternate_names': []}
if maxcpus:
machine['maxcpus'] = int(maxcpus)
machines[name] = machine
if alternate_name:
machine['alternate_names'].append(alternate_name)
def _parse_caps_guest(guest):
'''
Parse the <guest> element of the connection capabilities XML
'''
arch_node = guest.find('arch')
result = {
'os_type': guest.find('os_type').text,
'arch': {
'name': arch_node.get('name'),
'machines': {},
'domains': {}
},
}
for child in arch_node:
if child.tag == 'wordsize':
result['arch']['wordsize'] = int(child.text)
elif child.tag == 'emulator':
result['arch']['emulator'] = child.text
elif child.tag == 'machine':
_caps_add_machine(result['arch']['machines'], child)
elif child.tag == 'domain':
domain_type = child.get('type')
domain = {
'emulator': None,
'machines': {}
}
emulator_node = child.find('emulator')
if emulator_node is not None:
domain['emulator'] = emulator_node.text
for machine in child.findall('machine'):
_caps_add_machine(domain['machines'], machine)
result['arch']['domains'][domain_type] = domain
# Note that some features have no default and toggle attributes.
# This may not be a perfect match, but represent them as enabled by default
# without possibility to toggle them.
# Some guests may also have no feature at all (xen pv for instance)
features_nodes = guest.find('features')
if features_nodes is not None:
        result['features'] = {child.tag: {'toggle': True if child.get('toggle') == 'yes' else False,
                                          'default': False if child.get('default') == 'no' else True}
                              for child in features_nodes}
return result
def _parse_caps_cell(cell):
'''
Parse the <cell> nodes of the connection capabilities XML output.
'''
result = {
'id': int(cell.get('id'))
}
mem_node = cell.find('memory')
if mem_node is not None:
unit = mem_node.get('unit', 'KiB')
memory = mem_node.text
result['memory'] = "{} {}".format(memory, unit)
pages = [{'size': "{} {}".format(page.get('size'), page.get('unit', 'KiB')),
'available': int(page.text)}
for page in cell.findall('pages')]
if pages:
result['pages'] = pages
distances = {int(distance.get('id')): int(distance.get('value'))
for distance in cell.findall('distances/sibling')}
if distances:
result['distances'] = distances
cpus = []
for cpu_node in cell.findall('cpus/cpu'):
cpu = {
'id': int(cpu_node.get('id'))
}
socket_id = cpu_node.get('socket_id')
if socket_id:
cpu['socket_id'] = int(socket_id)
core_id = cpu_node.get('core_id')
if core_id:
cpu['core_id'] = int(core_id)
siblings = cpu_node.get('siblings')
if siblings:
cpu['siblings'] = siblings
cpus.append(cpu)
if cpus:
result['cpus'] = cpus
return result
def _parse_caps_bank(bank):
'''
Parse the <bank> element of the connection capabilities XML.
'''
result = {
'id': int(bank.get('id')),
'level': int(bank.get('level')),
'type': bank.get('type'),
'size': "{} {}".format(bank.get('size'), bank.get('unit')),
'cpus': bank.get('cpus')
}
controls = []
for control in bank.findall('control'):
unit = control.get('unit')
result_control = {
'granularity': "{} {}".format(control.get('granularity'), unit),
'type': control.get('type'),
'maxAllocs': int(control.get('maxAllocs'))
}
minimum = control.get('min')
if minimum:
result_control['min'] = "{} {}".format(minimum, unit)
controls.append(result_control)
if controls:
result['controls'] = controls
return result
def _parse_caps_host(host):
'''
Parse the <host> element of the connection capabilities XML.
'''
result = {}
for child in host:
if child.tag == 'uuid':
result['uuid'] = child.text
elif child.tag == 'cpu':
cpu = {
'arch': child.find('arch').text if child.find('arch') is not None else None,
'model': child.find('model').text if child.find('model') is not None else None,
'vendor': child.find('vendor').text if child.find('vendor') is not None else None,
'features': [feature.get('name') for feature in child.findall('feature')],
'pages': [{'size': '{} {}'.format(page.get('size'), page.get('unit', 'KiB'))}
for page in child.findall('pages')]
}
# Parse the cpu tag
microcode = child.find('microcode')
if microcode is not None:
cpu['microcode'] = microcode.get('version')
topology = child.find('topology')
if topology is not None:
cpu['sockets'] = int(topology.get('sockets'))
cpu['cores'] = int(topology.get('cores'))
cpu['threads'] = int(topology.get('threads'))
result['cpu'] = cpu
elif child.tag == "power_management":
result['power_management'] = [node.tag for node in child]
elif child.tag == "migration_features":
result['migration'] = {
'live': child.find('live') is not None,
'transports': [node.text for node in child.findall('uri_transports/uri_transport')]
}
elif child.tag == "topology":
result['topology'] = {
'cells': [_parse_caps_cell(cell) for cell in child.findall('cells/cell')]
}
elif child.tag == 'cache':
result['cache'] = {
'banks': [_parse_caps_bank(bank) for bank in child.findall('bank')]
}
result['security'] = [{
'model': secmodel.find('model').text if secmodel.find('model') is not None else None,
'doi': secmodel.find('doi').text if secmodel.find('doi') is not None else None,
'baselabels': [{'type': label.get('type'), 'label': label.text}
for label in secmodel.findall('baselabel')]
}
for secmodel in host.findall('secmodel')]
return result
def capabilities(**kwargs):
'''
Return the hypervisor connection capabilities.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.capabilities
'''
conn = __get_conn(**kwargs)
caps = ElementTree.fromstring(conn.getCapabilities())
conn.close()
return {
'host': _parse_caps_host(caps.find('host')),
'guests': [_parse_caps_guest(guest) for guest in caps.findall('guest')]
}
def _parse_caps_enum(node):
'''
Return a tuple containing the name of the enum and the possible values
'''
return (node.get('name'), [value.text for value in node.findall('value')])
def _parse_caps_cpu(node):
'''
Parse the <cpu> element of the domain capabilities
'''
result = {}
for mode in node.findall('mode'):
if not mode.get('supported') == 'yes':
continue
name = mode.get('name')
if name == 'host-passthrough':
result[name] = True
elif name == 'host-model':
host_model = {}
model_node = mode.find('model')
if model_node is not None:
model = {
'name': model_node.text
}
vendor_id = model_node.get('vendor_id')
if vendor_id:
model['vendor_id'] = vendor_id
fallback = model_node.get('fallback')
if fallback:
model['fallback'] = fallback
host_model['model'] = model
vendor = mode.find('vendor').text if mode.find('vendor') is not None else None
if vendor:
host_model['vendor'] = vendor
features = {feature.get('name'): feature.get('policy') for feature in mode.findall('feature')}
if features:
host_model['features'] = features
result[name] = host_model
elif name == 'custom':
custom_model = {}
models = {model.text: model.get('usable') for model in mode.findall('model')}
if models:
custom_model['models'] = models
result[name] = custom_model
return result
def _parse_caps_devices_features(node):
'''
    Parse the devices or features list of the domain capabilities
'''
result = {}
for child in node:
if child.get('supported') == 'yes':
enums = [_parse_caps_enum(node) for node in child.findall('enum')]
result[child.tag] = {item[0]: item[1] for item in enums if item[0]}
return result
def _parse_caps_loader(node):
'''
Parse the <loader> element of the domain capabilities.
'''
enums = [_parse_caps_enum(enum) for enum in node.findall('enum')]
result = {item[0]: item[1] for item in enums if item[0]}
values = [child.text for child in node.findall('value')]
if values:
result['values'] = values
return result
def domain_capabilities(emulator=None, arch=None, machine=None, domain=None, **kwargs):
'''
Return the domain capabilities given an emulator, architecture, machine or virtualization type.
.. versionadded:: 2019.2.0
:param emulator: return the capabilities for the given emulator binary
:param arch: return the capabilities for the given CPU architecture
:param machine: return the capabilities for the given emulated machine type
:param domain: return the capabilities for the given virtualization type.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
The list of the possible emulator, arch, machine and domain can be found in
the host capabilities output.
If none of the parameters is provided the libvirt default domain capabilities
will be returned.
CLI Example:
.. code-block:: bash
salt '*' virt.domain_capabilities arch='x86_64' domain='kvm'
'''
conn = __get_conn(**kwargs)
caps = ElementTree.fromstring(conn.getDomainCapabilities(emulator, arch, machine, domain, 0))
conn.close()
result = {
'emulator': caps.find('path').text if caps.find('path') is not None else None,
'domain': caps.find('domain').text if caps.find('domain') is not None else None,
'machine': caps.find('machine').text if caps.find('machine') is not None else None,
'arch': caps.find('arch').text if caps.find('arch') is not None else None
}
for child in caps:
if child.tag == 'vcpu' and child.get('max'):
result['max_vcpus'] = int(child.get('max'))
elif child.tag == 'iothreads':
result['iothreads'] = child.get('supported') == 'yes'
elif child.tag == 'os':
result['os'] = {}
loader_node = child.find('loader')
if loader_node is not None and loader_node.get('supported') == 'yes':
loader = _parse_caps_loader(loader_node)
result['os']['loader'] = loader
elif child.tag == 'cpu':
cpu = _parse_caps_cpu(child)
if cpu:
result['cpu'] = cpu
elif child.tag == 'devices':
devices = _parse_caps_devices_features(child)
if devices:
result['devices'] = devices
elif child.tag == 'features':
features = _parse_caps_devices_features(child)
if features:
result['features'] = features
return result
def cpu_baseline(full=False, migratable=False, out='libvirt', **kwargs):
'''
    Return the optimal 'custom' CPU baseline config for VMs on this minion
.. versionadded:: 2016.3.0
:param full: Return all CPU features rather than the ones on top of the closest CPU model
    :param migratable: Exclude CPU features that are unmigratable (libvirt 1.2.14+)
:param out: 'libvirt' (default) for usable libvirt XML definition, 'salt' for nice dict
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.cpu_baseline
'''
conn = __get_conn(**kwargs)
caps = ElementTree.fromstring(conn.getCapabilities())
cpu = caps.find('host/cpu')
log.debug('Host CPU model definition: %s', salt.utils.stringutils.to_str(ElementTree.tostring(cpu)))
flags = 0
if migratable:
# This one is only in 1.2.14+
if getattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_MIGRATABLE', False):
flags += libvirt.VIR_CONNECT_BASELINE_CPU_MIGRATABLE
        else:
            conn.close()
            raise ValueError('Migratable CPU baseline requires libvirt 1.2.14 or newer')
if full and getattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', False):
# This one is only in 1.1.3+
flags += libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
cpu = ElementTree.fromstring(conn.baselineCPU([salt.utils.stringutils.to_str(ElementTree.tostring(cpu))], flags))
conn.close()
if full and not getattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', False):
        # Try to do it ourselves:
        # find the model in cpu_map.xml and walk up the parent models for as
        # long as entries have submodels, merging their features
with salt.utils.files.fopen('/usr/share/libvirt/cpu_map.xml', 'r') as cpu_map:
cpu_map = ElementTree.parse(cpu_map)
cpu_model = cpu.find('model').text
while cpu_model:
cpu_map_models = cpu_map.findall('arch/model')
cpu_specs = [el for el in cpu_map_models if el.get('name') == cpu_model and bool(len(el))]
if not cpu_specs:
raise ValueError('Model {0} not found in CPU map'.format(cpu_model))
elif len(cpu_specs) > 1:
raise ValueError('Multiple models {0} found in CPU map'.format(cpu_model))
cpu_specs = cpu_specs[0]
            # libvirt's cpu map used to nest model elements to point to the
            # parent model. Keep this code for compatibility with old libvirt
            # versions.
model_node = cpu_specs.find('model')
if model_node is None:
cpu_model = None
else:
cpu_model = model_node.get('name')
cpu.extend([feature for feature in cpu_specs.findall('feature')])
if out == 'salt':
return {
'model': cpu.find('model').text,
'vendor': cpu.find('vendor').text,
'features': [feature.get('name') for feature in cpu.findall('feature')]
}
    return salt.utils.stringutils.to_str(ElementTree.tostring(cpu))
def network_define(name, bridge, forward, **kwargs):
'''
Create libvirt network.
:param name: Network name
:param bridge: Bridge name
    :param forward: Forward mode (bridge, router, nat)
    :param vport: Virtualport type
    :param tag: VLAN tag
:param autostart: Network autostart (default True)
:param start: Network start (default True)
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
CLI Example:
.. code-block:: bash
        salt '*' virt.network_define network main bridge vport=openvswitch
.. versionadded:: 2019.2.0
'''
conn = __get_conn(**kwargs)
vport = kwargs.get('vport', None)
tag = kwargs.get('tag', None)
autostart = kwargs.get('autostart', True)
starting = kwargs.get('start', True)
net_xml = _gen_net_xml(
name,
bridge,
forward,
vport,
tag,
)
try:
conn.networkDefineXML(net_xml)
except libvirtError as err:
log.warning(err)
conn.close()
raise err # a real error we should report upwards
try:
network = conn.networkLookupByName(name)
except libvirtError as err:
log.warning(err)
conn.close()
raise err # a real error we should report upwards
if network is None:
conn.close()
return False
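    # Start the network if requested and align the libvirt autostart flag
    # (stored as 0/1) with the requested boolean.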
if (starting is True or autostart is True) and network.isActive() != 1:
network.create()
if autostart is True and network.autostart() != 1:
network.setAutostart(int(autostart))
elif autostart is False and network.autostart() == 1:
network.setAutostart(int(autostart))
conn.close()
return True
def list_networks(**kwargs):
'''
List all virtual networks.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_networks
'''
conn = __get_conn(**kwargs)
try:
return [net.name() for net in conn.listAllNetworks()]
finally:
conn.close()
def network_info(name=None, **kwargs):
'''
    Return information about a virtual network, given its name.
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
    If no name is provided, return the information for all defined virtual networks.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.network_info default
'''
result = {}
conn = __get_conn(**kwargs)
def _net_get_leases(net):
'''
Get all DHCP leases for a network
'''
leases = net.DHCPLeases()
for lease in leases:
if lease['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
lease['type'] = 'ipv4'
elif lease['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV6:
lease['type'] = 'ipv6'
else:
lease['type'] = 'unknown'
return leases
try:
nets = [net for net in conn.listAllNetworks() if name is None or net.name() == name]
result = {net.name(): {
'uuid': net.UUIDString(),
'bridge': net.bridgeName(),
'autostart': net.autostart(),
'active': net.isActive(),
'persistent': net.isPersistent(),
'leases': _net_get_leases(net)} for net in nets}
except libvirt.libvirtError as err:
log.debug('Silenced libvirt error: %s', str(err))
finally:
conn.close()
return result
def network_start(name, **kwargs):
'''
Start a defined virtual network.
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.network_start default
'''
conn = __get_conn(**kwargs)
try:
net = conn.networkLookupByName(name)
return not bool(net.create())
finally:
conn.close()
def network_stop(name, **kwargs):
'''
Stop a defined virtual network.
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.network_stop default
'''
conn = __get_conn(**kwargs)
try:
net = conn.networkLookupByName(name)
return not bool(net.destroy())
finally:
conn.close()
def network_undefine(name, **kwargs):
'''
Remove a defined virtual network. This does not stop the virtual network.
:param name: virtual network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.network_undefine default
'''
conn = __get_conn(**kwargs)
try:
net = conn.networkLookupByName(name)
return not bool(net.undefine())
finally:
conn.close()
def network_set_autostart(name, state='on', **kwargs):
'''
Set the autostart flag on a virtual network so that the network
will start with the host system on reboot.
:param name: virtual network name
:param state: 'on' to auto start the network, anything else to mark the
virtual network not to be started when the host boots
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt "*" virt.network_set_autostart <pool> <on | off>
'''
conn = __get_conn(**kwargs)
try:
net = conn.networkLookupByName(name)
return not bool(net.setAutostart(1 if state == 'on' else 0))
finally:
conn.close()
def pool_define(name,
ptype,
target=None,
permissions=None,
source_devices=None,
source_dir=None,
source_adapter=None,
source_hosts=None,
source_auth=None,
source_name=None,
source_format=None,
transient=False,
start=True, # pylint: disable=redefined-outer-name
**kwargs):
'''
Create libvirt pool.
:param name: Pool name
:param ptype:
Pool type. See `libvirt documentation <https://libvirt.org/storage.html>`_ for the
possible values.
:param target: Pool full path target
:param permissions:
Permissions to set on the target folder. This is mostly used for filesystem-based
pool types. See :ref:`pool-define-permissions` for more details on this structure.
:param source_devices:
List of source devices for pools backed by physical devices. (Default: ``None``)
Each item in the list is a dictionary with ``path`` and optionally ``part_separator``
keys. The path is the qualified name for iSCSI devices.
        Refer to `this libvirt page <https://libvirt.org/formatstorage.html#StoragePool>`_
        for more information on the use of ``part_separator``
:param source_dir:
Path to the source directory for pools of type ``dir``, ``netfs`` or ``gluster``.
(Default: ``None``)
:param source_adapter:
SCSI source definition. The value is a dictionary with ``type``, ``name``, ``parent``,
``managed``, ``parent_wwnn``, ``parent_wwpn``, ``parent_fabric_wwn``, ``wwnn``, ``wwpn``
and ``parent_address`` keys.
The ``parent_address`` value is a dictionary with ``unique_id`` and ``address`` keys.
The address represents a PCI address and is itself a dictionary with ``domain``, ``bus``,
``slot`` and ``function`` properties.
        Refer to `this libvirt page <https://libvirt.org/formatstorage.html#StoragePool>`_
        for the meaning and possible values of these properties.
:param source_hosts:
        List of sources for pools backed by storage from remote servers. Each item is the
        hostname, optionally followed by the port separated by a colon. (Default: ``None``)
:param source_auth:
Source authentication details. (Default: ``None``)
The value is a dictionary with ``type``, ``username`` and ``secret`` keys. The type
can be one of ``ceph`` for Ceph RBD or ``chap`` for iSCSI sources.
The ``secret`` value links to a libvirt secret object. It is a dictionary with
``type`` and ``value`` keys. The type value can be either ``uuid`` or ``usage``.
Examples:
.. code-block:: python
source_auth={
'type': 'ceph',
'username': 'admin',
'secret': {
'type': 'uuid',
                    'value': '2ec115d7-3a88-3ceb-bc12-0ac909a6fd87'
}
}
.. code-block:: python
source_auth={
'type': 'chap',
'username': 'myname',
'secret': {
'type': 'usage',
                    'value': 'mycluster_myname'
}
}
:param source_name:
Identifier of name-based sources.
:param source_format:
        String representing the source format. The possible values depend on the
        source type. See `libvirt documentation <https://libvirt.org/storage.html>`_ for
the possible values.
:param start: Pool start (default True)
:param transient:
When ``True``, the pool will be automatically undefined after being stopped.
Note that a transient pool will force ``start`` to ``True``. (Default: ``False``)
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. _pool-define-permissions:
.. rubric:: Permissions definition
The permissions are described by a dictionary containing the following keys:
mode
The octal representation of the permissions. (Default: `0711`)
owner
the numeric user ID of the owner. (Default: from the parent folder)
group
the numeric ID of the group. (Default: from the parent folder)
label
the SELinux label. (Default: `None`)
.. rubric:: CLI Example:
Local folder pool:
.. code-block:: bash
salt '*' virt.pool_define somepool dir target=/srv/mypool \
permissions="{'mode': '0744' 'ower': 107, 'group': 107 }"
CIFS backed pool:
.. code-block:: bash
salt '*' virt.pool_define myshare netfs source_format=cifs \
source_dir=samba_share source_hosts="['example.com']" target=/mnt/cifs
.. versionadded:: 2019.2.0
'''
conn = __get_conn(**kwargs)
pool_xml = _gen_pool_xml(
name,
ptype,
target,
permissions=permissions,
source_devices=source_devices,
source_dir=source_dir,
source_adapter=source_adapter,
source_hosts=source_hosts,
source_auth=source_auth,
source_name=source_name,
source_format=source_format
)
try:
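        # A transient pool is created (and started) in a single call and
        # vanishes once stopped; a persistent pool is defined first and only
        # started when requested.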
if transient:
pool = conn.storagePoolCreateXML(pool_xml)
else:
pool = conn.storagePoolDefineXML(pool_xml)
if start:
pool.create()
except libvirtError as err:
raise err # a real error we should report upwards
finally:
conn.close()
# libvirt function will raise a libvirtError in case of failure
return True
def list_pools(**kwargs):
'''
List all storage pools.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_pools
'''
conn = __get_conn(**kwargs)
try:
return [pool.name() for pool in conn.listAllStoragePools()]
finally:
conn.close()
def pool_info(name=None, **kwargs):
'''
    Return information about a storage pool, given its name.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
    If no name is provided, return the information for all defined storage pools.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_info default
'''
result = {}
conn = __get_conn(**kwargs)
def _pool_extract_infos(pool):
'''
Format the pool info dictionary
:param pool: the libvirt pool object
'''
states = ['inactive', 'building', 'running', 'degraded', 'inaccessible']
infos = pool.info()
state = states[infos[0]] if infos[0] < len(states) else 'unknown'
desc = ElementTree.fromstring(pool.XMLDesc())
path_node = desc.find('target/path')
return {
'uuid': pool.UUIDString(),
'state': state,
'capacity': infos[1],
'allocation': infos[2],
'free': infos[3],
'autostart': pool.autostart(),
'persistent': pool.isPersistent(),
'target_path': path_node.text if path_node is not None else None,
'type': desc.get('type')
}
try:
pools = [pool for pool in conn.listAllStoragePools() if name is None or pool.name() == name]
result = {pool.name(): _pool_extract_infos(pool) for pool in pools}
except libvirt.libvirtError as err:
log.debug('Silenced libvirt error: %s', str(err))
finally:
conn.close()
return result
def pool_start(name, **kwargs):
'''
Start a defined libvirt storage pool.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_start default
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return not bool(pool.create())
finally:
conn.close()
def pool_build(name, **kwargs):
'''
Build a defined libvirt storage pool.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_build default
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return not bool(pool.build())
finally:
conn.close()
def pool_stop(name, **kwargs):
'''
Stop a defined libvirt storage pool.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_stop default
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return not bool(pool.destroy())
finally:
conn.close()
def pool_undefine(name, **kwargs):
'''
Remove a defined libvirt storage pool. The pool needs to be stopped before calling.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_undefine default
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return not bool(pool.undefine())
finally:
conn.close()
def pool_delete(name, fast=True, **kwargs):
'''
Delete the resources of a defined libvirt storage pool.
:param name: libvirt storage pool name
:param fast: if set to False, zeroes out all the data.
Default value is True.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_delete default
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
flags = libvirt.VIR_STORAGE_POOL_DELETE_NORMAL
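        # ZEROED overwrites the pool's data with zeroes before deleting it
        # (slow); NORMAL just deletes without wiping.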
        if not fast:
            flags = libvirt.VIR_STORAGE_POOL_DELETE_ZEROED
return not bool(pool.delete(flags))
finally:
conn.close()
def pool_refresh(name, **kwargs):
'''
Refresh a defined libvirt storage pool.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.pool_refresh default
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return not bool(pool.refresh())
finally:
conn.close()
def pool_set_autostart(name, state='on', **kwargs):
'''
Set the autostart flag on a libvirt storage pool so that the storage pool
will start with the host system on reboot.
:param name: libvirt storage pool name
:param state: 'on' to auto start the pool, anything else to mark the
pool not to be started when the host boots
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt "*" virt.pool_set_autostart <pool> <on | off>
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return not bool(pool.setAutostart(1 if state == 'on' else 0))
finally:
conn.close()
def pool_list_volumes(name, **kwargs):
'''
List the volumes contained in a defined libvirt storage pool.
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt "*" virt.pool_list_volumes <pool>
'''
conn = __get_conn(**kwargs)
try:
pool = conn.storagePoolLookupByName(name)
return pool.listVolumes()
finally:
conn.close()
| 31.507176 | 119 | 0.594228 |
804803d1e8782fe18761a6e9fdb13d6c11ded556 | 45,151 | py | Python | sympy/physics/quantum/spin.py | Narsil/sympy | 4d837e074b871af351b42591697fe126411a910f | [
"BSD-3-Clause"
] | 1 | 2020-12-27T18:43:22.000Z | 2020-12-27T18:43:22.000Z | sympy_old/physics/quantum/spin.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | null | null | null | sympy_old/physics/quantum/spin.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | 1 | 2022-03-21T09:07:27.000Z | 2022-03-21T09:07:27.000Z | """Quantum mechanical angular momentum."""
from sympy import (
Add, binomial, cos, diff, exp, Expr, factorial, I, Integer, Matrix, Mul, N, pi,
Rational, S, sin, simplify, sqrt, Sum, Symbol, symbols, sympify
)
from sympy.matrices.matrices import zeros
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.operator import (
HermitianOperator, Operator, UnitaryOperator
)
from sympy.physics.quantum.state import Bra, Ket, State
from sympy.physics.quantum.kronecker import KroneckerDelta
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.cg import CG
from sympy.physics.quantum.qapply import qapply
__all__ = [
'm_values',
'Jplus',
'Jminus',
'Jx',
'Jy',
'Jz',
'J2',
'JxKet',
'JxBra',
'JyKet',
'JyBra',
'JzKet',
'JzBra',
'JxKetCoupled',
'JxBraCoupled',
'JyKetCoupled',
'JyBraCoupled',
'JzKetCoupled',
'JzBraCoupled',
'Rotation',
'WignerD',
'couple',
'uncouple'
]
def m_values(j):
j = sympify(j)
size = 2*j + 1
if not size.is_Integer or not size > 0:
raise ValueError(
            'Only integer or half-integer values allowed for j, got: %r' % j
)
return size, [j-i for i in range(int(2*j+1))]
def couple(tp):
""" Couple an uncoupled spin states
This function can be used to couple an uncoupled tensor product of spin
states. All of the eigenstates to be coupled must be of the same class. It
will return a linear combination of eigenstates that are subclasses of
CoupledSpinState.
Parameters
==========
tp: TensorProduct
TensorProduct of spin states to be coupled
Examples
========
Couple a tensor product of numerical states:
>>> from sympy.physics.quantum.spin import JzKet, couple
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> couple(TensorProduct(JzKet(1,0), JzKet(1,1)))
-sqrt(2)*|1,1,1,1>/2 + sqrt(2)*|2,1,1,1>/2
Couple a tensor product of symbolic states:
>>> from sympy import symbols
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> couple(TensorProduct(JzKet(j1,m1), JzKet(j2,m2)))
Sum(CG(j1, m1, j2, m2, j, m1 + m2)*|j,m1 + m2>, (j, 0, j1 + j2))
"""
states = tp.args
evect = states[0].__class__
if not all([arg.__class__ is evect for arg in states]):
raise TypeError('All operands must be of the same class')
evect = evect.coupled_class()
if all(state.j.is_number for state in states):
# Numerical coupling
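        # Project the product state onto the coupled basis using
        # Clebsch-Gordan coefficients:
        #   |j1,m1>x|j2,m2> = Sum_j CG(j1,m1,j2,m2,j,m1+m2)*|j,m1+m2>
        # j runs from j1+j2 down to 0 (or 1/2); the CG coefficients vanish
        # for disallowed combinations.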
vect = TensorProduct(*[state._represent() for state in states])
maxj = states[0].j + states[1].j
j1, j2 = states[0].j, states[1].j
if maxj == int(maxj):
minj = 0
else:
minj = S(1)/2
result = []
for i in range(maxj-minj+1):
j = maxj-i
for k in range(2*j+1):
m = j-k
max_m1 = min(j1, m+j2)
min_m1 = max(-j1, m-j2)
min_m2 = m-max_m1
result.append(Add(*[vect[(j1-(max_m1-l))*(2*j2+1)+(j2-(min_m2+l)),0] * CG(j1,max_m1-l,j2,min_m2+l,j,m) * evect(j,m,j1,j2) for l in range(max_m1-min_m1+1)]))
if all(state.m.is_number for state in states):
return Add(*result).doit()
else:
return Add(*result)
else:
# Symbolic coupling
maxj = Add(*[state.j for state in states])
m = Add(*[state.m for state in states])
j = symbols('j')
if not maxj.is_number or maxj == int(maxj):
minj = 0
else:
minj = S(1)/2
j1 = states[0].j
j2 = states[1].j
m1 = states[0].m
m2 = states[1].m
return Sum(CG(j1,m1,j2,m2,j,m) * evect(j,m), (j,minj,maxj))
def uncouple(*args):
""" Uncouple a coupled spin state
Gives the uncoupled representation of a coupled spin state. Arguments must
be either a spin state that is a subclass of CoupledSpinState or a spin
state that is a subclass of SpinState and an array giving the j values
of the spaces that are to be coupled
Parameters
==========
args: CoupledSpinState or SpinState
        The state that is to be uncoupled. If a subclass of SpinState is used,
the state must be followed by the j values of the spaces that are to
be coupled.
Examples
========
Uncouple a numerical state using a CoupledSpinState state:
>>> from sympy.physics.quantum.spin import JzKetCoupled, uncouple
>>> from sympy import S
>>> uncouple(JzKetCoupled(1, 0, S(1)/2, S(1)/2))
sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2
Perform the same calculation using a SpinState state:
>>> from sympy.physics.quantum.spin import JzKet
>>> uncouple(JzKet(1, 0), S(1)/2, S(1)/2)
sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2
Uncouple a symbolic state using a CoupledSpinState state:
>>> from sympy import symbols
>>> j,m,j1,j2 = symbols('j m j1 j2')
>>> uncouple(JzKetCoupled(j, m, j1, j2))
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
Perform the same calculation using a SpinState state
>>> uncouple(JzKet(j, m), j1, j2)
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
"""
if len(args) == 3:
state, j1, j2 = args
evect = state.__class__
elif len(args) == 1:
state = args[0]
evect = state.uncoupled_class()
j1, j2 = state.jvals
state = evect(state.j, state.m)
else:
raise TypeError
j = state.j
m = state.m
if state.j.is_number and state.m.is_number:
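        # Expand the coupled state onto the product basis:
        #   |j,m> = Sum_{m1,m2} CG(j1,m1,j2,m2,j,m)*|j1,m1>x|j2,m2>
        # where the CG coefficient vanishes unless m1 + m2 == m.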
result = []
for i_m1 in range(2*j1+1):
m1 = j1-i_m1
for i_m2 in range(2*j2+1):
m2 = j2-i_m2
result.append(CG(j1,m1,j2,m2,j,m).doit() * TensorProduct(evect(j1,m1), evect(j2,m2)))
return Add(*result)
else:
m1,m2,mi = symbols('m1 m2 mi')
# Hack to get rotation angles
angles = (evect(0,mi)._represent())[0].args[3:6]
out_state = TensorProduct(evect(j1,m1),evect(j2,m2))
if angles == (0,0,0):
lt = CG(j1,m1,j2,m2,state.j,state.m)
return Sum(lt * out_state, (m1,-j1,j1), (m2,-j2,j2))
else:
lt = CG(j1,m1,j2,m2,state.j,mi) * Rotation.D(state.j,mi,state.m,*angles)
return Sum(lt * out_state, (mi,-state.j,state.j), (m1,-j1,j1), (m2,-j2,j2))
#-----------------------------------------------------------------------------
# SpinOperators
#-----------------------------------------------------------------------------
class SpinOpBase(object):
"""Base class for spin operators."""
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def name(self):
return self.args[0]
def _print_contents(self, printer, *args):
return '%s%s' % (unicode(self.name), self._coord)
    # def _sympyrepr(self, printer, *args):
    #     return '%s(%s)' % (
    #         self.__class__.__name__, printer._print(self.label, *args)
    #     )
def _print_contents_pretty(self, printer, *args):
a = stringPict(unicode(self.name))
b = stringPict(self._coord)
return self._print_subscript_pretty(a, b)
def _print_contents_latex(self, printer, *args):
        return r'%s_%s' % (unicode(self.name), self._coord)
def _represent_base(self, basis, **options):
j = options.get('j', Rational(1,2))
size, mvals = m_values(j)
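        # Build the (2j+1)x(2j+1) matrix of <j,m|Op|j,m'> elements, with rows
        # and columns ordered from m = j down to m = -j.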
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
result[p, q] = me
return result
def _apply_op(self, ket, orig_basis, **options):
state = ket.rewrite(self.basis)
# If the state has only one term
if isinstance(state, State):
return self._apply_operator(state, **options)
# state is a linear combination of states
return qapply(self*state).rewrite(orig_basis)
def _apply_operator_JxKet(self, ket, **options):
return self._apply_op(ket, 'Jx', **options)
def _apply_operator_JyKet(self, ket, **options):
return self._apply_op(ket, 'Jy', **options)
def _apply_operator_JzKet(self, ket, **options):
return self._apply_op(ket, 'Jz', **options)
def _apply_operator_TensorProduct(self, tp, **options):
if isinstance(self, J2Op):
raise NotImplementedError
result = []
for n in range(len(tp.args)):
arg = []
arg.extend(tp.args[:n])
arg.append(self._apply_operator(tp.args[n]))
arg.extend(tp.args[n+1:])
result.append(tp.__class__(*arg))
return Add(*result).expand()
class JplusOp(SpinOpBase, Operator):
"""The J+ operator."""
_coord = '+'
basis = 'Jz'
def _eval_commutator_JminusOp(self, other):
return 2*hbar*JzOp(self.name)
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m >= j:
return S.Zero
return hbar*sqrt(j*(j+S.One)-m*(m+S.One))*JzKet(j, m+S.One)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j+S.One)-mp*(mp+S.One))
result *= KroneckerDelta(m, mp+1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args):
return JxOp(args[0]) + I*JyOp(args[0])
class JminusOp(SpinOpBase, Operator):
"""The J- operator."""
_coord = '-'
basis = 'Jz'
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m <= -j:
return S.Zero
return hbar*sqrt(j*(j+S.One)-m*(m-S.One))*JzKet(j, m-S.One)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j+S.One)-mp*(mp-S.One))
result *= KroneckerDelta(m, mp-1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args):
return JxOp(args[0]) - I*JyOp(args[0])
class JxOp(SpinOpBase, HermitianOperator):
"""The Jx operator."""
_coord = 'x'
basis = 'Jx'
def _eval_commutator_JyOp(self, other):
return I*hbar*JzOp(self.name)
def _eval_commutator_JzOp(self, other):
return -I*hbar*JyOp(self.name)
def _apply_operator_JxKet(self, ket, **options):
return (hbar*ket.m)*ket
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp + jm)/Integer(2)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp + jm)/Integer(2)
def _eval_rewrite_as_plusminus(self, *args):
return (JplusOp(args[0]) + JminusOp(args[0]))/2
class JyOp(SpinOpBase, HermitianOperator):
"""The Jy operator."""
_coord = 'y'
basis = 'Jy'
def _eval_commutator_JzOp(self, other):
return I*hbar*JxOp(self.name)
def _eval_commutator_JxOp(self, other):
        # [Jy, Jx] = -i*hbar*Jz
        return -I*hbar*JzOp(self.name)
def _apply_operator_JyKet(self, ket, **options):
return (hbar*ket.m)*ket
def _apply_operator_JzKet(self, ket, **options):
jp = JplusOp(self.name)._apply_operator_JzKet(ket, **options)
jm = JminusOp(self.name)._apply_operator_JzKet(ket, **options)
return (jp - jm)/(Integer(2)*I)
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
jp = JplusOp(self.name)._represent_JzOp(basis, **options)
jm = JminusOp(self.name)._represent_JzOp(basis, **options)
return (jp - jm)/(Integer(2)*I)
def _eval_rewrite_as_plusminus(self, *args):
return (JplusOp(args[0]) - JminusOp(args[0]))/(2*I)
class JzOp(SpinOpBase, HermitianOperator):
"""The Jz operator."""
_coord = 'z'
basis = 'Jz'
def _eval_commutator_JxOp(self, other):
return I*hbar*JyOp(self.name)
def _eval_commutator_JyOp(self, other):
return -I*hbar*JxOp(self.name)
def _eval_commutator_JplusOp(self, other):
return hbar*JplusOp(self.name)
def _eval_commutator_JminusOp(self, other):
return -hbar*JminusOp(self.name)
def _apply_operator_JzKet(self, ket, **options):
return (hbar*ket.m)*ket
def matrix_element(self, j, m, jp, mp):
result = hbar*mp
result *= KroneckerDelta(m, mp)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
class J2Op(SpinOpBase, HermitianOperator):
"""The J^2 operator."""
_coord = '2'
def _eval_commutator_JxOp(self, other):
return S.Zero
def _eval_commutator_JyOp(self, other):
return S.Zero
def _eval_commutator_JzOp(self, other):
return S.Zero
def _eval_commutator_JplusOp(self, other):
return S.Zero
def _eval_commutator_JminusOp(self, other):
return S.Zero
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
return hbar**2*j*(j+1)*ket
def matrix_element(self, j, m, jp, mp):
result = (hbar**2)*j*(j+1)
result *= KroneckerDelta(m, mp)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _pretty(self, printer, *args):
a = stringPict('J')
b = stringPict('2')
top = stringPict(*b.left(' '*a.width()))
bot = stringPict(*a.right(' '*b.width()))
return prettyForm(binding=prettyForm.POW, *bot.above(top))
def _latex(self, printer, *args):
return r'%s^2' % str(self.name)
def _eval_rewrite_as_xyz(self, *args):
return JxOp(args[0])**2 + JyOp(args[0])**2 + JzOp(args[0])**2
def _eval_rewrite_as_plusminus(self, *args):
a = args[0]
return JzOp(a)**2 +\
Rational(1,2)*(JplusOp(a)*JminusOp(a) + JminusOp(a)*JplusOp(a))
class Rotation(UnitaryOperator):
"""Wigner D operator in terms of Euler angles.
Defines the rotation operator in terms of the Euler angles defined by
the z-y-z convention for a passive transformation. That is the coordinate
axes are rotated first about the z-axis, giving the new x'-y'-z' axes. Then
this new coordinate system is rotated about the new y'-axis, giving new
x''-y''-z'' axes. Then this new coordinate system is rotated about the
z''-axis. Conventions follow those laid out in [1].
See the Wigner D-function, Rotation.D, and the Wigner small-d matrix for
the evaluation of the rotation operator on spin states.
Parameters
==========
alpha : Number, Symbol
First Euler Angle
beta : Number, Symbol
Second Euler angle
gamma : Number, Symbol
Third Euler angle
Examples
========
A simple example rotation operator:
>>> from sympy import pi
>>> from sympy.physics.quantum.spin import Rotation
>>> Rotation(pi, 0, pi/2)
'R'(pi,0,pi/2)
With symbolic Euler angles and calculating the inverse rotation operator:
>>> from sympy import symbols
>>> a, b, c = symbols('a b c')
>>> Rotation(a, b, c)
'R'(a,b,c)
>>> Rotation(a, b, c).inverse()
'R'(-c,-b,-a)
References
==========
[1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
"""
@classmethod
def _eval_args(cls, args):
args = QExpr._eval_args(args)
if len(args) != 3:
raise ValueError('3 Euler angles required, got: %r' % args)
return args
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def alpha(self):
return self.label[0]
@property
def beta(self):
return self.label[1]
@property
def gamma(self):
return self.label[2]
def _print_operator_name(self, printer, *args):
return printer._print('R', *args)
def _print_operator_name_pretty(self, printer, *args):
return prettyForm(u"\u211B" + u" ")
def _eval_inverse(self):
return Rotation(-self.gamma, -self.beta, -self.alpha)
@classmethod
def D(cls, j, m, mp, alpha, beta, gamma):
"""Wigner D-function.
Returns an instance of the WignerD class. See the corresponding
docstring for more information on the Wigner-D matrix.
Parameters
===========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
alpha : Number, Symbol
First Euler angle of rotation
beta : Number, Symbol
Second Euler angle of rotation
gamma : Number, Symbol
Third Euler angle of rotation
Examples
========
Return the Wigner-D matrix element for a defined rotation, both
numerical and symbolic:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi, symbols
>>> alpha, beta, gamma = symbols('alpha beta gamma')
>>> Rotation.D(1, 1, 0,pi, pi/2,-pi)
WignerD(1, 1, 0, pi, pi/2, -pi)
"""
return WignerD(j,m,mp,alpha,beta,gamma)
@classmethod
def d(cls, j, m, mp, beta):
"""Wigner small-d function.
Returns an instance of the WignerD class with the alpha and gamma
angles given as 0. See the corresponding docstring for more
information on the Wigner small-d matrix.
Parameters
===========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
beta : Number, Symbol
Second Euler angle of rotation
Examples
========
        Return the Wigner small-d matrix element for a defined rotation, both
        numerical and symbolic:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi, symbols
>>> beta = symbols('beta')
>>> Rotation.d(1, 1, 0, pi/2)
WignerD(1, 1, 0, 0, pi/2, 0)
"""
return WignerD(j,m,mp,0,beta,0)
def matrix_element(self, j, m, jp, mp):
result = self.__class__.D(
jp, m, mp, self.alpha, self.beta, self.gamma
)
result *= KroneckerDelta(j,jp)
return result
def _represent_base(self, basis, **options):
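        # Build the (2j+1) x (2j+1) matrix of D-function matrix elements for
        # the spin-j representation requested via options (default j = 1/2).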
j = sympify(options.get('j', Rational(1,2)))
size, mvals = m_values(j)
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
result[p, q] = me
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
class WignerD(Expr):
"""Wigner-D function
The Wigner D-function gives the matrix elements of the rotation
operator in the jm-representation. For the Euler angles alpha, beta,
gamma, the D-function is defined such that:
<j,m| R(alpha,beta,gamma) |j',m'> = delta_jj' * D(j, m, m', alpha, beta, gamma)
Where the rotation operator is as defined by the Rotation class.
The Wigner D-function defined in this way gives:
D(j, m, m', alpha, beta, gamma) = exp(-i*m*alpha) * d(j, m, m', beta) * exp(-i*m'*gamma)
Where d is the Wigner small-d function, the component of the D-function
determined by the second Euler angle. The small-d function is given by
Rotation.d and the full D-function by Rotation.D.
Note that to evaluate the D-function, the j, m and mp parameters must
be integer or half-integer numbers.
Parameters
==========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
alpha : Number, Symbol
First Euler angle of rotation
beta : Number, Symbol
Second Euler angle of rotation
gamma : Number, Symbol
Third Euler angle of rotation
Examples
========
Evaluate the Wigner-D matrix elements of a simple rotation:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi
>>> rot = Rotation.D(1, 1, 0, pi, pi/2, 0)
>>> rot
WignerD(1, 1, 0, pi, pi/2, 0)
>>> rot.doit()
sqrt(2)/2
Evaluate the Wigner-d matrix elements of a simple rotation
>>> rot = Rotation.d(1, 1, 0, pi/2)
>>> rot
WignerD(1, 1, 0, 0, pi/2, 0)
>>> rot.doit()
-sqrt(2)/2
References
==========
[1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
"""
def __new__(cls, *args, **hints):
        if len(args) != 6:
raise ValueError('6 parameters expected, got %s' % args)
evaluate = hints.get('evaluate', False)
if evaluate:
return Expr.__new__(cls, *args)._eval_wignerd()
return Expr.__new__(cls, *args, **{'evaluate': False})
@property
def j(self):
return self.args[0]
@property
def m(self):
return self.args[1]
@property
def mp(self):
return self.args[2]
@property
def alpha(self):
return self.args[3]
@property
def beta(self):
return self.args[4]
@property
def gamma(self):
return self.args[5]
def _latex(self, printer, *args):
if self.alpha == 0 and self.gamma == 0:
return r'd^{%s}_{%s,%s}\left(%s\right)' % \
( printer._print(self.j), printer._print(self.m), printer._print(self.mp),
printer._print(self.beta) )
return r'D^{%s}_{%s,%s}\left(%s,%s,%s\right)' % \
( printer._print(self.j), printer._print(self.m), printer._print(self.mp),
printer._print(self.alpha), printer._print(self.beta), printer._print(self.gamma) )
def _pretty(self, printer, *args):
top = printer._print(self.j)
bot = printer._print(self.m)
bot = prettyForm(*bot.right(','))
bot = prettyForm(*bot.right(printer._print(self.mp)))
pad = max(top.width(), bot.width())
top = prettyForm(*top.left(' '))
bot = prettyForm(*bot.left(' '))
if pad > top.width():
top = prettyForm(*top.right(' ' * (pad-top.width())))
if pad > bot.width():
bot = prettyForm(*bot.right(' ' * (pad-bot.width())))
if self.alpha == 0 and self.gamma == 0:
args = printer._print(self.beta)
s = stringPict('d' + ' '*pad)
else:
args = printer._print(self.alpha)
args = prettyForm(*args.right(','))
args = prettyForm(*args.right(printer._print(self.beta)))
args = prettyForm(*args.right(','))
args = prettyForm(*args.right(printer._print(self.gamma)))
s = stringPict('D' + ' '*pad)
args = prettyForm(*args.parens())
s = prettyForm(*s.above(top))
s = prettyForm(*s.below(bot))
s = prettyForm(*s.right(args))
return s
def doit(self, **hints):
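        # Re-instantiate with evaluate=True so that _eval_wignerd runs and
        # the symbolic expression for the matrix element is returned.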
hints['evaluate'] = True
return WignerD(*self.args, **hints)
def _eval_wignerd(self):
j = sympify(self.j)
m = sympify(self.m)
mp = sympify(self.mp)
alpha = sympify(self.alpha)
beta = sympify(self.beta)
gamma = sympify(self.gamma)
if not j.is_number:
raise ValueError("j parameter must be numerical to evaluate, got %s", j)
r = 0
if beta == pi/2:
# Varshalovich Equation (5), Section 4.16, page 113, setting
# alpha=gamma=0.
for k in range(2*j+1):
if k > j+mp or k > j-m or k < mp-m:
continue
r += (-S(1))**k * binomial(j+mp, k) * binomial(j-mp, k+m-mp)
r *= (-S(1))**(m-mp) / 2**j * sqrt(factorial(j+m) * \
factorial(j-m) / (factorial(j+mp) * factorial(j-mp)))
else:
# Varshalovich Equation(5), Section 4.7.2, page 87, where we set
# beta1=beta2=pi/2, and we get alpha=gamma=pi/2 and beta=phi+pi,
# then we use the Eq. (1), Section 4.4. page 79, to simplify:
# d(j, m, mp, beta+pi) = (-1)**(j-mp) * d(j, m, -mp, beta)
# This happens to be almost the same as in Eq.(10), Section 4.16,
# except that we need to substitute -mp for mp.
size, mvals = m_values(j)
for mpp in mvals:
r += Rotation.d(j, m, mpp, pi/2).doit() * (cos(-mpp*beta)+I*sin(-mpp*beta)) * \
Rotation.d(j, mpp, -mp, pi/2).doit()
# Empirical normalization factor so results match Varshalovich
# Tables 4.3-4.12
# Note that this exact normalization does not follow from the
# above equations
r = r * I**(2*j-m-mp) * (-1)**(2*m)
# Finally, simplify the whole expression
r = simplify(r)
r *= exp(-I*m*alpha)*exp(-I*mp*gamma)
return r
Jx = JxOp('J')
Jy = JyOp('J')
Jz = JzOp('J')
J2 = J2Op('J')
Jplus = JplusOp('J')
Jminus = JminusOp('J')
#-----------------------------------------------------------------------------
# Spin States
#-----------------------------------------------------------------------------
class SpinState(State):
"""Base class for angular momentum states."""
_label_separator = ','
def __new__(cls, j, m):
        if sympify(j).is_number and 2*j != int(2*j):
            raise ValueError('j must be integer or half-integer, got %s' % j)
        if sympify(m).is_number and 2*m != int(2*m):
            raise ValueError('m must be integer or half-integer, got %s' % m)
        if sympify(j).is_number and j < 0:
            raise ValueError('j must be >= 0, got %s' % j)
if sympify(j).is_number and sympify(m).is_number and abs(m) > j:
raise ValueError('Allowed values for m are -j <= m <= j')
return State.__new__(cls, j, m)
@property
def j(self):
return self.label[0]
@property
def m(self):
return self.label[1]
@classmethod
def _eval_hilbert_space(cls, label):
return ComplexSpace(2*label[0]+1)
def _represent_base(self, **options):
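        # Column vector of this state in the Jz eigenbasis rotated by the
        # Euler angles passed via options; components are Wigner D-functions.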
j = sympify(self.j)
m = sympify(self.m)
alpha = sympify(options.get('alpha', 0))
beta = sympify(options.get('beta', 0))
gamma = sympify(options.get('gamma', 0))
if self.j.is_number:
size, mvals = m_values(j)
result = zeros(size, 1)
for p in range(size):
if m.is_number and alpha.is_number and beta.is_number and gamma.is_number:
result[p,0] = Rotation.D(self.j, mvals[p], self.m, alpha, beta, gamma).doit()
else:
result[p,0] = Rotation.D(self.j, mvals[p], self.m, alpha, beta, gamma)
return result
else:
mi = symbols("mi")
result = zeros(1, 1)
result[0] = (Rotation.D(self.j, mi, self.m, alpha, beta, gamma), mi)
return result
def _eval_rewrite_as_Jx(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jx, JxBra, **options)
return self._rewrite_basis(Jx, JxKet, **options)
def _eval_rewrite_as_Jy(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jy, JyBra, **options)
return self._rewrite_basis(Jy, JyKet, **options)
def _eval_rewrite_as_Jz(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jz, JzBra, **options)
return self._rewrite_basis(Jz, JzKet, **options)
def _rewrite_basis(self, basis, evect, **options):
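        # Expand this state over the eigenstates of the target basis by
        # representing it there and recombining the components with evect.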
from sympy.physics.quantum.represent import represent
j = sympify(self.j)
if j.is_number:
vect = represent(self, basis=basis, **options)
return Add(*[vect[i] * evect(j,j-i) for i in range(2*j+1)])
else:
# TODO: better way to get angles of rotation
mi = symbols('mi')
angles = represent(self.__class__(0,mi),basis=basis)[0].args[3:6]
if angles == (0,0,0):
return self
else:
                state = evect(j, mi)
                lt = Rotation.D(j, mi, self.m, *angles)
                return Sum(lt * state, (mi, -j, j))
def _eval_innerproduct_JxBra(self, bra, **hints):
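        # A bra from a different basis picks out the matching component of
        # this ket's Jx representation; same-basis states are orthonormal.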
result = KroneckerDelta(self.j, bra.j)
if not bra.dual_class() is self.__class__:
result *= self._represent_JxOp(None)[bra.j-bra.m]
else:
result *= KroneckerDelta(self.j, bra.j) * KroneckerDelta(self.m, bra.m)
return result
def _eval_innerproduct_JyBra(self, bra, **hints):
result = KroneckerDelta(self.j, bra.j)
if not bra.dual_class() is self.__class__:
result *= self._represent_JyOp(None)[bra.j-bra.m]
else:
result *= KroneckerDelta(self.j, bra.j) * KroneckerDelta(self.m, bra.m)
return result
def _eval_innerproduct_JzBra(self, bra, **hints):
result = KroneckerDelta(self.j, bra.j)
if not bra.dual_class() is self.__class__:
result *= self._represent_JzOp(None)[bra.j-bra.m]
else:
result *= KroneckerDelta(self.j, bra.j) * KroneckerDelta(self.m, bra.m)
return result
class JxKet(SpinState, Ket):
"""Eigenket of Jx.
See JzKet for the usage of spin eigenstates.
"""
@classmethod
def dual_class(self):
return JxBra
@classmethod
def coupled_class(self):
return JxKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JxOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(**options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(alpha=3*pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(beta=pi/2, **options)
class JxBra(SpinState, Bra):
"""Eigenbra of Jx.
See JzKet for the usage of spin eigenstates.
"""
@classmethod
def dual_class(self):
return JxKet
@classmethod
def coupled_class(self):
return JxBraCoupled
class JyKet(SpinState, Ket):
"""Eigenket of Jy.
See JzKet for the usage of spin eigenstates.
"""
@classmethod
def dual_class(self):
return JyBra
@classmethod
def coupled_class(self):
return JyKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JyOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(gamma=pi/2, **options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(**options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(alpha=3*pi/2,beta=-pi/2,gamma=pi/2, **options)
class JyBra(SpinState, Bra):
"""Eigenbra of Jy.
See JzKet for the usage of spin eigenstates.
"""
@classmethod
def dual_class(self):
return JyKet
@classmethod
def coupled_class(self):
return JyBraCoupled
class JzKet(SpinState, Ket):
"""Eigenket of Jz.
Spin state which is an eigenstate of the Jz operator. Uncoupled states,
that is states representing the interaction of multiple separate spin
states, are defined as a tensor product of states.
See uncouple and couple for coupling of states and JzKetCoupled for coupled
states.
Parameters
==========
j : Number, Symbol
Total spin angular momentum
m : Number, Symbol
Eigenvalue of the Jz spin operator
Examples
========
Normal States
-------------
Defining simple spin states, both numerical and symbolic:
>>> from sympy.physics.quantum.spin import JzKet, JxKet
>>> from sympy import symbols
>>> JzKet(1, 0)
|1,0>
>>> j, m = symbols('j m')
>>> JzKet(j, m)
|j,m>
Rewriting the JzKet in terms of eigenkets of the Jx operator:
Note that the resulting eigenstates are JxKets
>>> JzKet(1,1).rewrite("Jx")
|1,-1>/2 - sqrt(2)*|1,0>/2 + |1,1>/2
Get the vector representation of a state in terms of the basis elements
of the Jx operator:
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.spin import Jx, Jz
>>> represent(JzKet(1,-1), basis=Jx)
[ 1/2]
[sqrt(2)/2]
[ 1/2]
Apply innerproducts between states:
>>> from sympy.physics.quantum.innerproduct import InnerProduct
>>> from sympy.physics.quantum.spin import JxBra
>>> i = InnerProduct(JxBra(1,1), JzKet(1,1))
>>> i
<1,1|1,1>
>>> i.doit()
1/2
Uncoupled States
----------------
Define an uncoupled state as a TensorProduct between two Jz eigenkets:
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> TensorProduct(JzKet(1,0), JzKet(1,1))
|1,0>x|1,1>
>>> TensorProduct(JzKet(j1,m1), JzKet(j2,m2))
|j1,m1>x|j2,m2>
A TensorProduct can be rewritten, in which case the eigenstates that make
up the tensor product are rewritten to the new basis:
>>> TensorProduct(JzKet(1,1),JxKet(1,1)).rewrite('Jz')
|1,1>x|1,-1>/2 + sqrt(2)*|1,1>x|1,0>/2 + |1,1>x|1,1>/2
The represent method for TensorProducts gives the vector representation of
the state. Note that the state in the product basis is equivalent to the
tensor product of the vector representations of the component eigenstates:
>>> represent(TensorProduct(JzKet(1,0),JzKet(1,1)))
[0]
[0]
[0]
[1]
[0]
[0]
[0]
[0]
[0]
>>> represent(TensorProduct(JzKet(1,1),JxKet(1,1)), basis=Jz)
[ 1/2]
[sqrt(2)/2]
[ 1/2]
[ 0]
[ 0]
[ 0]
[ 0]
[ 0]
[ 0]
"""
@classmethod
def dual_class(self):
return JzBra
@classmethod
def coupled_class(self):
return JzKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(beta=3*pi/2, **options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(alpha=3*pi/2,beta=pi/2,gamma=pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(**options)
class JzBra(SpinState, Bra):
"""Eigenbra of Jz.
See the JzKet for the usage of spin eigenstates.
"""
@classmethod
def dual_class(self):
return JzKet
@classmethod
def coupled_class(self):
return JzBraCoupled
class CoupledSpinState(SpinState):
"""Base class for coupled angular momentum states."""
def __new__(cls, j, m, *jvals):
return State.__new__(cls, j, m, *jvals)
@property
def jvals(self):
return self.label[2:]
@classmethod
def _eval_hilbert_space(cls, label):
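        # The coupled space is a direct sum of ComplexSpace(2j+1) blocks over
        # the allowed total-j values, starting from j = j1 + j2 + ...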
j = Add(*label[2:])
if j.is_number:
ret = ComplexSpace(2*j+1)
while j >= 1:
j -= 1
ret += ComplexSpace(2*j+1)
return ret
else:
# TODO
# Need hilbert space fix
#ji = symbols('ji')
#ret = Sum(ComplexSpace(2*ji + 1), (ji, j, 0))
return ComplexSpace(2*j+1)
def _represent_coupled_base(self, **options):
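        # The coupled basis stacks one block per total-j value.  The offset of
        # this state's block is sum(2k+1) over the smaller j values, which is
        # j**2 for integer j and (2j-1)(2j+1)/4 for half-integer j.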
evect = self.uncoupled_class()
result = zeros(self.hilbert_space.dimension, 1)
if self.j == int(self.j):
start = self.j**2
else:
start = (2*self.j-1)*(1+2*self.j)/4
result[start:start+2*self.j+1,0] = evect(self.j, self.m)._represent_base(**options)
return result
def _eval_rewrite_as_Jx(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jx, JxBraCoupled, **options)
return self._rewrite_basis(Jx, JxKetCoupled, **options)
def _eval_rewrite_as_Jy(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jy, JyBraCoupled, **options)
return self._rewrite_basis(Jy, JyKetCoupled, **options)
def _eval_rewrite_as_Jz(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jz, JzBraCoupled, **options)
return self._rewrite_basis(Jz, JzKetCoupled, **options)
def _rewrite_basis(self, basis, evect, **options):
from sympy.physics.quantum.represent import represent
j = sympify(self.j)
jvals = self.jvals
if j.is_number:
if j == int(j):
start = j**2
else:
start = (2*j-1)*(2*j+1)/4
vect = represent(self, basis=basis, **options)
result = Add(*[vect[start+i] * evect(j,j-i,*jvals) for i in range(2*j+1)])
if options.get('coupled') is False:
return uncouple(result)
return result
else:
# TODO: better way to get angles of rotation
mi = symbols('mi')
angles = represent(self.__class__(0,mi),basis=basis)[0].args[3:6]
if angles == (0,0,0):
return self
else:
                state = evect(j, mi, *jvals)
                lt = Rotation.D(j, mi, self.m, *angles)
                return Sum(lt * state, (mi, -j, j))
class JxKetCoupled(CoupledSpinState, Ket):
"""Coupled eigenket of Jx.
See JzKetCoupled for the usage of coupled spin eigenstates.
"""
@classmethod
def dual_class(self):
return JxBraCoupled
@classmethod
def uncoupled_class(self):
return JxKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(**options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(alpha=3*pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(beta=pi/2, **options)
class JxBraCoupled(CoupledSpinState, Bra):
"""Coupled eigenbra of Jx.
See JzKetCoupled for the usage of coupled spin eigenstates.
"""
@classmethod
def dual_class(self):
return JxKetCoupled
@classmethod
def uncoupled_class(self):
return JxBra
class JyKetCoupled(CoupledSpinState, Ket):
"""Coupled eigenket of Jy.
See JzKetCoupled for the usage of coupled spin eigenstates.
"""
@classmethod
def dual_class(self):
return JyBraCoupled
@classmethod
def uncoupled_class(self):
return JyKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(gamma=pi/2, **options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(**options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(alpha=3*pi/2,beta=-pi/2,gamma=pi/2, **options)
class JyBraCoupled(CoupledSpinState, Bra):
"""Coupled eigenbra of Jy.
See JzKetCoupled for the usage of coupled spin eigenstates.
"""
@classmethod
def dual_class(self):
return JyKetCoupled
@classmethod
def uncoupled_class(self):
return JyBra
class JzKetCoupled(CoupledSpinState, Ket):
"""Coupled eigenket of Jz
Spin state that is an eigenket of Jz which represents the coupling of
separate spin spaces.
See uncouple and couple for coupling of states and JzKetCoupled for coupled
states.
Parameters
==========
j : Number, Symbol
Total spin angular momentum
m : Number, Symbol
Eigenvalue of the Jz spin operator
*jvals : tuple
The j values of the spaces that are coupled
Examples
========
Defining simple spin states, both numerical and symbolic:
>>> from sympy.physics.quantum.spin import JzKetCoupled
>>> from sympy import symbols
>>> JzKetCoupled(1, 0, 1, 1)
|1,0,1,1>
>>> j, m, j1, j2 = symbols('j m j1 j2')
>>> JzKetCoupled(j, m, j1, j2)
|j,m,j1,j2>
Rewriting the JzKetCoupled in terms of eigenkets of the Jx operator:
Note that the resulting eigenstates are JxKetCoupled
>>> JzKetCoupled(1,1,1,1).rewrite("Jx")
|1,-1,1,1>/2 - sqrt(2)*|1,0,1,1>/2 + |1,1,1,1>/2
The rewrite method can be used to convert a coupled state to an uncoupled
state. This is done by passing coupled=False to the rewrite function:
>>> JzKetCoupled(1, 0, 1, 1).rewrite('Jz', coupled=False)
-sqrt(2)*|1,-1>x|1,1>/2 + sqrt(2)*|1,1>x|1,-1>/2
Get the vector representation of a state in terms of the basis elements
of the Jx operator:
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.spin import Jx
>>> from sympy import S
>>> represent(JzKetCoupled(1,-1,S(1)/2,S(1)/2), basis=Jx)
[ 0]
[ 1/2]
[sqrt(2)/2]
[ 1/2]
"""
@classmethod
def dual_class(self):
return JzBraCoupled
@classmethod
def uncoupled_class(self):
return JzKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(beta=3*pi/2, **options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(alpha=3*pi/2,beta=pi/2,gamma=pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(**options)
class JzBraCoupled(CoupledSpinState, Bra):
"""Coupled eigenbra of Jz.
See the JzKetCoupled for the usage of coupled spin eigenstates.
"""
@classmethod
def dual_class(self):
return JzKetCoupled
@classmethod
def uncoupled_class(self):
return JzBra
| 30.735875 | 172 | 0.58703 |
5ba18c4a24c5831a19e3787e9c00b287ec86b5f9 | 16,247 | py | Python | venv/Lib/site-packages/pythonwin/pywin/framework/intpyapp.py | Shuvayan007/Internship_project_heroku | 560dcb8f0a4d182c262f72f589d14509d051336c | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pythonwin/pywin/framework/intpyapp.py | Shuvayan007/Internship_project_heroku | 560dcb8f0a4d182c262f72f589d14509d051336c | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pythonwin/pywin/framework/intpyapp.py | Shuvayan007/Internship_project_heroku | 560dcb8f0a4d182c262f72f589d14509d051336c | [
"Apache-2.0"
] | null | null | null | # intpyapp.py - Interactive Python application class
#
import win32con
import win32api
import win32ui
import __main__
import sys
import os
from . import app
import traceback
from pywin.mfc import afxres, dialog
import commctrl
from . import dbgcommands
lastLocateFileName = ".py" # used in the "File/Locate" dialog...
# todo - _SetupSharedMenu should be moved to a framework class.
def _SetupSharedMenu_(self):
sharedMenu = self.GetSharedMenu()
from pywin.framework import toolmenu
toolmenu.SetToolsMenu(sharedMenu)
from pywin.framework import help
help.SetHelpMenuOtherHelp(sharedMenu)
from pywin.mfc import docview
docview.DocTemplate._SetupSharedMenu_=_SetupSharedMenu_
class MainFrame(app.MainFrame):
def OnCreate(self, createStruct):
self.closing = 0
if app.MainFrame.OnCreate(self, createStruct)==-1:
return -1
style = win32con.WS_CHILD | afxres.CBRS_SIZE_DYNAMIC | afxres.CBRS_TOP | afxres.CBRS_TOOLTIPS | afxres.CBRS_FLYBY
self.EnableDocking(afxres.CBRS_ALIGN_ANY)
tb = win32ui.CreateToolBar (self, style | win32con.WS_VISIBLE)
tb.ModifyStyle(0, commctrl.TBSTYLE_FLAT)
tb.LoadToolBar(win32ui.IDR_MAINFRAME)
tb.EnableDocking(afxres.CBRS_ALIGN_ANY)
tb.SetWindowText("Standard")
self.DockControlBar(tb)
# Any other packages which use toolbars
from pywin.debugger.debugger import PrepareControlBars
PrepareControlBars(self)
# Note "interact" also uses dockable windows, but they already happen
# And a "Tools" menu on the main frame.
menu = self.GetMenu()
from . import toolmenu
toolmenu.SetToolsMenu(menu, 2)
# And fix the "Help" menu on the main frame
from pywin.framework import help
help.SetHelpMenuOtherHelp(menu)
def OnClose(self):
try:
import pywin.debugger
if pywin.debugger.currentDebugger is not None and pywin.debugger.currentDebugger.pumping:
try:
pywin.debugger.currentDebugger.close(1)
except:
traceback.print_exc()
return
except win32ui.error:
pass
self.closing = 1
self.SaveBarState("ToolbarDefault")
self.SetActiveView(None) # Otherwise MFC's OnClose may _not_ prompt for save.
from pywin.framework import help
help.FinalizeHelp()
self.DestroyControlBar(afxres.AFX_IDW_TOOLBAR)
self.DestroyControlBar(win32ui.ID_VIEW_TOOLBAR_DBG)
return self._obj_.OnClose()
def DestroyControlBar(self, id):
try:
bar = self.GetControlBar(id)
except win32ui.error:
return
bar.DestroyWindow()
def OnCommand(self, wparam, lparam):
# By default, the current MDI child frame will process WM_COMMAND
# messages before any docked control bars - even if the control bar
# has focus. This is a problem for the interactive window when docked.
# Therefore, we detect the situation of a view having the main frame
# as its parent, and assume it must be a docked view (which it will in an MDI app)
try:
v = self.GetActiveView() # Raise an exception if none - good - then we want default handling
# Main frame _does_ have a current view (ie, a docking view) - see if it wants it.
if v.OnCommand(wparam, lparam):
return 1
except (win32ui.error, AttributeError):
pass
return self._obj_.OnCommand(wparam, lparam)
class InteractivePythonApp(app.CApp):
# This works if necessary - just we dont need to override the Run method.
# def Run(self):
# return self._obj_.Run()
def HookCommands(self):
app.CApp.HookCommands(self)
dbgcommands.DebuggerCommandHandler().HookCommands()
self.HookCommand(self.OnViewBrowse,win32ui.ID_VIEW_BROWSE)
self.HookCommand(self.OnFileImport,win32ui.ID_FILE_IMPORT)
self.HookCommand(self.OnFileCheck,win32ui.ID_FILE_CHECK)
self.HookCommandUpdate(self.OnUpdateFileCheck, win32ui.ID_FILE_CHECK)
self.HookCommand(self.OnFileRun,win32ui.ID_FILE_RUN)
self.HookCommand(self.OnFileLocate,win32ui.ID_FILE_LOCATE)
self.HookCommand(self.OnInteractiveWindow, win32ui.ID_VIEW_INTERACTIVE)
self.HookCommandUpdate(self.OnUpdateInteractiveWindow, win32ui.ID_VIEW_INTERACTIVE)
self.HookCommand(self.OnViewOptions, win32ui.ID_VIEW_OPTIONS)
self.HookCommand(self.OnHelpIndex, afxres.ID_HELP_INDEX)
self.HookCommand(self.OnFileSaveAll, win32ui.ID_FILE_SAVE_ALL)
self.HookCommand(self.OnViewToolbarDbg, win32ui.ID_VIEW_TOOLBAR_DBG)
self.HookCommandUpdate(self.OnUpdateViewToolbarDbg, win32ui.ID_VIEW_TOOLBAR_DBG)
def CreateMainFrame(self):
return MainFrame()
def MakeExistingDDEConnection(self):
# Use DDE to connect to an existing instance
# Return None if no existing instance
try:
from . import intpydde
except ImportError:
# No dde support!
return None
conv = intpydde.CreateConversation(self.ddeServer)
try:
conv.ConnectTo("Pythonwin", "System")
return conv
except intpydde.error:
return None
def InitDDE(self):
# Do all the magic DDE handling.
# Returns TRUE if we have pumped the arguments to our
# remote DDE app, and we should terminate.
try:
from . import intpydde
except ImportError:
self.ddeServer = None
intpydde = None
if intpydde is not None:
self.ddeServer = intpydde.DDEServer(self)
self.ddeServer.Create("Pythonwin", intpydde.CBF_FAIL_SELFCONNECTIONS )
try:
# If there is an existing instance, pump the arguments to it.
connection = self.MakeExistingDDEConnection()
if connection is not None:
connection.Exec("self.Activate()")
if self.ProcessArgs(sys.argv, connection) is None:
return 1
except:
# It is too early to 'print' an exception - we
# don't have stdout setup yet!
win32ui.DisplayTraceback(sys.exc_info(), " - error in DDE conversation with Pythonwin")
return 1
def InitInstance(self):
# Allow "/nodde" and "/new" to optimize this!
if ("/nodde" not in sys.argv and "/new" not in sys.argv
and "-nodde" not in sys.argv and "-new" not in sys.argv):
if self.InitDDE():
return 1 # A remote DDE client is doing it for us!
else:
self.ddeServer = None
win32ui.SetRegistryKey("Python %s" % (sys.winver,)) # MFC automatically puts the main frame caption on!
app.CApp.InitInstance(self)
# Create the taskbar icon
win32ui.CreateDebuggerThread()
# Allow Pythonwin to host OCX controls.
win32ui.EnableControlContainer()
# Display the interactive window if the user wants it.
from . import interact
interact.CreateInteractiveWindowUserPreference()
# Load the modules we use internally.
self.LoadSystemModules()
# Load additional module the user may want.
self.LoadUserModules()
# Load the ToolBar state near the end of the init process, as
# there may be Toolbar IDs created by the user or other modules.
# By now all these modules should be loaded, so all the toolbar IDs loaded.
try:
self.frame.LoadBarState("ToolbarDefault")
except win32ui.error:
# MFC sucks. It does essentially "GetDlgItem(x)->Something", so if the
# toolbar with ID x does not exist, MFC crashes! Pythonwin has a trap for this
# but I need to investigate more how to prevent it (AFAIK, ensuring all the
# toolbars are created by now _should_ stop it!)
pass
# Finally process the command line arguments.
try:
self.ProcessArgs(sys.argv)
except:
# too early for printing anything.
win32ui.DisplayTraceback(sys.exc_info(), " - error processing command line args")
def ExitInstance(self):
win32ui.DestroyDebuggerThread()
try:
from . import interact
interact.DestroyInteractiveWindow()
except:
pass
if self.ddeServer is not None:
self.ddeServer.Shutdown()
self.ddeServer = None
return app.CApp.ExitInstance(self)
def Activate(self):
# Bring to the foreground. Mainly used when another app starts up, it asks
# this one to activate itself, then it terminates.
frame = win32ui.GetMainFrame()
frame.SetForegroundWindow()
if frame.GetWindowPlacement()[1]==win32con.SW_SHOWMINIMIZED:
frame.ShowWindow(win32con.SW_RESTORE)
def ProcessArgs(self, args, dde = None):
# If we are going to talk to a remote app via DDE, then
# activate it!
if len(args)<1 or not args[0]: # argv[0]=='' when started without args, just like Python.exe!
return
i = 0
while i < len(args):
argType = args[i]
i += 1
if argType.startswith('-'):
# Support dash options. Slash options are misinterpreted by python init
# as path and not finding usually 'C:\\' ends up in sys.path[0]
argType = '/' + argType[1:]
if not argType.startswith('/'):
argType = win32ui.GetProfileVal("Python","Default Arg Type","/edit").lower()
i -= 1 # arg is /edit's parameter
par = i < len(args) and args[i] or 'MISSING'
if argType in ['/nodde', '/new', '-nodde', '-new']:
# Already handled
pass
elif argType.startswith('/goto:'):
gotoline = int(argType[len('/goto:'):])
if dde:
dde.Exec("from pywin.framework import scriptutils\n"
"ed = scriptutils.GetActiveEditControl()\n"
"if ed: ed.SetSel(ed.LineIndex(%s - 1))" % gotoline)
else:
from . import scriptutils
ed = scriptutils.GetActiveEditControl()
if ed: ed.SetSel(ed.LineIndex(gotoline - 1))
elif argType == "/edit":
# Load up the default application.
i += 1
fname = win32api.GetFullPathName(par)
if not os.path.isfile(fname):
# if we don't catch this, OpenDocumentFile() (actually
# PyCDocument.SetPathName() in
# pywin.scintilla.document.CScintillaDocument.OnOpenDocument)
# segfaults Pythonwin on recent PY3 builds (b228)
win32ui.MessageBox(
"No such file: %s\n\nCommand Line: %s" % (
fname, win32api.GetCommandLine()),
"Open file for edit", win32con.MB_ICONERROR)
continue
if dde:
dde.Exec("win32ui.GetApp().OpenDocumentFile(%s)" % (repr(fname)))
else:
win32ui.GetApp().OpenDocumentFile(par)
elif argType=="/rundlg":
if dde:
dde.Exec("from pywin.framework import scriptutils;scriptutils.RunScript(%r, %r, 1)" % (par, ' '.join(args[i + 1:])))
else:
from . import scriptutils
scriptutils.RunScript(par, ' '.join(args[i + 1:]))
return
elif argType=="/run":
if dde:
dde.Exec("from pywin.framework import scriptutils;scriptutils.RunScript(%r, %r, 0)" % (par, ' '.join(args[i + 1:])))
else:
from . import scriptutils
scriptutils.RunScript(par, ' '.join(args[i + 1:]), 0)
return
elif argType=="/app":
raise RuntimeError("/app only supported for new instances of Pythonwin.exe")
			elif argType=='/dde': # Send arbitrary command
if dde is not None:
dde.Exec(par)
else:
win32ui.MessageBox("The /dde command can only be used\r\nwhen Pythonwin is already running")
i += 1
else:
raise ValueError("Command line argument not recognised: %s" % argType)
def LoadSystemModules(self):
self.DoLoadModules("pywin.framework.editor,pywin.framework.stdin")
def LoadUserModules(self, moduleNames = None):
# Load the users modules.
if moduleNames is None:
default = "pywin.framework.sgrepmdi,pywin.framework.mdi_pychecker"
moduleNames=win32ui.GetProfileVal('Python','Startup Modules',default)
self.DoLoadModules(moduleNames)
def DoLoadModules(self, moduleNames): # ", sep string of module names.
if not moduleNames: return
modules = moduleNames.split(",")
for module in modules:
try:
__import__(module)
except: # Catch em all, else the app itself dies! 'ImportError:
traceback.print_exc()
msg = 'Startup import of user module "%s" failed' % module
print(msg)
win32ui.MessageBox(msg)
#
# DDE Callback
#
def OnDDECommand(self, command):
try:
exec(command + "\n")
except:
print("ERROR executing DDE command: ", command)
traceback.print_exc()
raise
#
# General handlers
#
def OnViewBrowse( self, id, code ):
" Called when ViewBrowse message is received "
from pywin.tools import browser
obName = dialog.GetSimpleInput('Object', '__builtins__', 'Browse Python Object')
if obName is None:
return
try:
browser.Browse(eval(obName, __main__.__dict__, __main__.__dict__))
except NameError:
			win32ui.MessageBox('There is no object with this name')
except AttributeError:
win32ui.MessageBox('The object has no attribute of that name')
except:
traceback.print_exc()
win32ui.MessageBox('This object can not be browsed')
def OnFileImport( self, id, code ):
" Called when a FileImport message is received. Import the current or specified file"
from . import scriptutils
scriptutils.ImportFile()
def OnFileCheck( self, id, code ):
" Called when a FileCheck message is received. Check the current file."
from . import scriptutils
scriptutils.CheckFile()
def OnUpdateFileCheck(self, cmdui):
from . import scriptutils
cmdui.Enable( scriptutils.GetActiveFileName(0) is not None )
def OnFileRun( self, id, code ):
" Called when a FileRun message is received. "
from . import scriptutils
showDlg = win32api.GetKeyState(win32con.VK_SHIFT) >= 0
scriptutils.RunScript(None, None, showDlg)
def OnFileLocate( self, id, code ):
from . import scriptutils
global lastLocateFileName # save the new version away for next time...
name = dialog.GetSimpleInput('File name', lastLocateFileName, 'Locate Python File')
if name is None: # Cancelled.
return
lastLocateFileName = name
# if ".py" supplied, rip it off!
# should also check for .pys and .pyw
if lastLocateFileName[-3:].lower()=='.py':
lastLocateFileName = lastLocateFileName[:-3]
lastLocateFileName = lastLocateFileName.replace(".","\\")
newName = scriptutils.LocatePythonFile(lastLocateFileName)
if newName is None:
win32ui.MessageBox("The file '%s' can not be located" % lastLocateFileName)
else:
win32ui.GetApp().OpenDocumentFile(newName)
	# Display all the "options" property pages we can find
def OnViewOptions(self, id, code):
win32ui.InitRichEdit()
sheet = dialog.PropertySheet("Pythonwin Options")
# Add property pages we know about that need manual work.
from pywin.dialogs import ideoptions
sheet.AddPage( ideoptions.OptionsPropPage() )
from . import toolmenu
sheet.AddPage( toolmenu.ToolMenuPropPage() )
# Get other dynamic pages from templates.
pages = []
for template in self.GetDocTemplateList():
try:
				# Don't actually call the function with the exception handler.
getter = template.GetPythonPropertyPages
except AttributeError:
# Template does not provide property pages!
continue
pages = pages + getter()
# Debugger template goes at the end
try:
from pywin.debugger import configui
except ImportError:
configui = None
if configui is not None: pages.append(configui.DebuggerOptionsPropPage())
# Now simply add the pages, and display the dialog.
for page in pages:
sheet.AddPage(page)
if sheet.DoModal()==win32con.IDOK:
win32ui.SetStatusText("Applying configuration changes...", 1)
win32ui.DoWaitCursor(1)
# Tell every Window in our app that win.ini has changed!
win32ui.GetMainFrame().SendMessageToDescendants(win32con.WM_WININICHANGE, 0, 0)
win32ui.DoWaitCursor(0)
def OnInteractiveWindow(self, id, code):
# toggle the existing state.
from . import interact
interact.ToggleInteractiveWindow()
def OnUpdateInteractiveWindow(self, cmdui):
try:
interact=sys.modules['pywin.framework.interact']
state = interact.IsInteractiveWindowVisible()
		except KeyError: # Interactive module hasn't ever been imported.
state = 0
cmdui.Enable()
cmdui.SetCheck(state)
def OnFileSaveAll(self, id, code):
# Only attempt to save editor documents.
from pywin.framework.editor import editorTemplate
num = 0
for doc in editorTemplate.GetDocumentList():
if doc.IsModified() and doc.GetPathName():
				num = num + 1
doc.OnSaveDocument(doc.GetPathName())
win32ui.SetStatusText("%d documents saved" % num, 1)
def OnViewToolbarDbg(self, id, code):
if code==0:
return not win32ui.GetMainFrame().OnBarCheck(id)
def OnUpdateViewToolbarDbg(self, cmdui):
win32ui.GetMainFrame().OnUpdateControlBarMenu(cmdui)
cmdui.Enable(1)
def OnHelpIndex( self, id, code ):
from . import help
help.SelectAndRunHelpFile()
# As per the comments in Algorithm Dashboard V-1.0.py, this use is deprecated.
# app.AppBuilder = InteractivePythonApp
# Now all we do is create the application
thisApp = InteractivePythonApp()
| 33.98954 | 121 | 0.726473 |
6ed594b24e803d51e180f05d79f2395343dec30a | 1,816 | py | Python | modules/GeneralOptions.py | MuriloChianfa/SimpleChess | 02619320f03a6d5bd30623f73056d8714276043e | [
"MIT"
] | 2 | 2020-03-15T03:05:21.000Z | 2020-03-18T17:24:56.000Z | modules/GeneralOptions.py | MuriloChianfa/SimpleTrueTable | 02619320f03a6d5bd30623f73056d8714276043e | [
"MIT"
] | null | null | null | modules/GeneralOptions.py | MuriloChianfa/SimpleTrueTable | 02619320f03a6d5bd30623f73056d8714276043e | [
"MIT"
] | null | null | null | from engine.GUI import *
EnabledGeneralOptions = False
class GeneralOptions:
def __init__(self, root):
self.GeneralOptions = GUI('GeneralOptions', 'Module: General Options')
self.GeneralOptions.DefaultWindow('DefaultWindow')
def SetGeneralOptions():
global EnabledGeneralOptions
if not EnabledGeneralOptions:
EnabledGeneralOptions = True
ButtonEnabled.configure(text='GeneralOptions: ON')
ScanGeneralOptions()
else:
EnabledGeneralOptions = False
ButtonEnabled.configure(text='GeneralOptions: OFF')
def ScanGeneralOptions():
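            # Poll while the module is enabled; reschedules itself via
            # root.after every 300 ms so the Tk main loop stays responsive.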
if EnabledGeneralOptions:
print("Try Lock GeneralOptions")
print("Try This")
root.after(300, ScanGeneralOptions)
CheckPrint = tk.BooleanVar()
LowMana = tk.BooleanVar()
self.GeneralOptions.addButton('Ok', self.GeneralOptions.destroyWindow, [84, 29, 130, 504], [127, 17, 8], [123, 13, 5])
global EnabledGeneralOptions
if not EnabledGeneralOptions:
ButtonEnabled = self.GeneralOptions.addButton('GeneralOptions: OFF', SetGeneralOptions, [328, 29, 12, 469],
[127, 17, 8], [123, 13, 5])
else:
ButtonEnabled = self.GeneralOptions.addButton('GeneralOptions: ON', SetGeneralOptions, [328, 29, 12, 469],
[127, 17, 8], [123, 13, 5])
ButtonPrint = self.GeneralOptions.addCheck(CheckPrint, [10, 408], [120, 98, 51], 0, "Print on Tibia's screen")
ButtonLowMana = self.GeneralOptions.addCheck(LowMana, [10, 440], [120, 98, 51], 0, "Low Mana Warnings")
self.GeneralOptions.loop()
| 38.638298 | 126 | 0.589758 |
a9356f5f99dd98251d06ac10b83874aec2020932 | 1,183 | py | Python | RecoJets/JetProducers/python/PFClusterJetParameters_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoJets/JetProducers/python/PFClusterJetParameters_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 8 | 2020-03-20T23:18:36.000Z | 2020-05-27T11:00:06.000Z | RecoJets/JetProducers/python/PFClusterJetParameters_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
PFClusterJetParameters = cms.PSet(
src = cms.InputTag('pfClusterRefsForJets'),
srcPVs = cms.InputTag('offlinePrimaryVertices'),
jetType = cms.string('PFClusterJet'),
# minimum jet pt
jetPtMin = cms.double(3.0),
# minimum calo tower input et
inputEtMin = cms.double(0.3),
# minimum calo tower input energy
inputEMin = cms.double(0.0),
# primary vertex correction
doPVCorrection = cms.bool(True),
# pileup with offset correction
doPUOffsetCorr = cms.bool(False),
# if pileup is false, these are not read:
nSigmaPU = cms.double(1.0),
radiusPU = cms.double(0.5),
# fastjet-style pileup
doAreaFastjet = cms.bool( False),
doRhoFastjet = cms.bool( False),
doAreaDiskApprox = cms.bool( False),
Active_Area_Repeats = cms.int32( 1),
GhostArea = cms.double(0.01),
Ghost_EtaMax = cms.double( 5.0),
Rho_EtaMax = cms.double( 4.4),
voronoiRfact = cms.double(-0.9),
useDeterministicSeed= cms.bool( True ),
minSeed = cms.uint32( 14327 )
)
| 35.848485 | 60 | 0.606932 |
29c6458926f960d14ff3ab02ccd8cdaeab7167b2 | 2,782 | py | Python | control.py | gwisk/Control | beafebcdcb7b5131d1eef2ef022d11196486e26a | [
"MIT"
] | null | null | null | control.py | gwisk/Control | beafebcdcb7b5131d1eef2ef022d11196486e26a | [
"MIT"
] | null | null | null | control.py | gwisk/Control | beafebcdcb7b5131d1eef2ef022d11196486e26a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from time import sleep
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
import sys
import random
import cloud4rpi
import ds18b20
import rpi
i2c = busio.I2C(board.SCL, board.SDA)
ads=ADS.ADS1115(i2c)
efficacy = 21.856
area = 3.2*(10**-6)
chan1=AnalogIn(ads, ADS.P0)
chan2=AnalogIn(ads, ADS.P1)
def sensor1():
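    # Convert the ADC voltage to lux assuming a logarithmic light sensor
    # whose output spans five decades over 3 V (calibration constant taken
    # from the original design; adjust for your hardware).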
voltage = chan1.voltage
logLux = voltage * 5.0/3.0
lux = pow(10, logLux)
return lux
def sensor2():
voltage = chan2.voltage
logLux = voltage* 5.0/3.0
    lux = pow(10, logLux)
return lux
# Put your device token here. To get the token,
# sign up at https://cloud4rpi.io and create a device.
DEVICE_TOKEN = 'AQ9zBj6KjdR2b761douGif4Ns'
# Constants
LED_PIN = 12
DATA_SENDING_INTERVAL = 30 # secs
DIAG_SENDING_INTERVAL = 60 # secs
POLL_INTERVAL = 0.5 # 500 ms
def listen_for_events():
# Write your own logic here
result = random.randint(1, 5)
if result == 1:
return 'RING'
if result == 5:
return 'BOOM!'
return 'IDLE'
def main():
# Put variable declarations here
# Available types: 'bool', 'numeric', 'string'
variables = {
'STATUS': {
'type': 'string',
'bind': listen_for_events
},
'SENSOR1': {
'type': 'numeric',
'bind': sensor1
        }
    }
diagnostics = {
'CPU Temp': rpi.cpu_temp,
'IP Address': rpi.ip_address,
'Host': rpi.host_name,
'Operating System': rpi.os_name
}
device = cloud4rpi.connect(DEVICE_TOKEN)
# Use the following 'device' declaration
# to enable the MQTT traffic encryption (TLS).
#
# tls = {
# 'ca_certs': '/etc/ssl/certs/ca-certificates.crt'
# }
# device = cloud4rpi.connect(DEVICE_TOKEN, tls_config=tls)
try:
device.declare(variables)
device.declare_diag(diagnostics)
device.publish_config()
# Adds a 1 second delay to ensure device variables are created
sleep(1)
data_timer = 0
diag_timer = 0
while True:
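            # Each timer counts down by POLL_INTERVAL per pass and triggers
            # its publish when it reaches zero, so data and diagnostics are
            # sent on independent schedules.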
if data_timer <= 0:
device.publish_data()
data_timer = DATA_SENDING_INTERVAL
if diag_timer <= 0:
device.publish_diag()
diag_timer = DIAG_SENDING_INTERVAL
sleep(POLL_INTERVAL)
diag_timer -= POLL_INTERVAL
data_timer -= POLL_INTERVAL
except KeyboardInterrupt:
cloud4rpi.log.info('Keyboard interrupt received. Stopping...')
except Exception as e:
error = cloud4rpi.get_error_message(e)
cloud4rpi.log.exception("ERROR! %s %s", error, sys.exc_info()[0])
finally:
sys.exit(0)
if __name__ == '__main__':
main()
| 22.079365 | 73 | 0.616822 |
b03ce5fa2f3ce6c2850a099120a1de44e48dc4ae | 18,733 | py | Python | src/test/tinc/tincrepo/mpp/gpdb/tests/utilities/recoverseg/gprecoverseg_tests/fault/fault.py | lintzc/GPDB | b48c8b97da18f495c10065d0853db87960aebae2 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2017-09-15T06:09:56.000Z | 2017-09-15T06:09:56.000Z | src/test/tinc/tincrepo/mpp/gpdb/tests/utilities/recoverseg/gprecoverseg_tests/fault/fault.py | guofengrichard/gpdb | 29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | src/test/tinc/tincrepo/mpp/gpdb/tests/utilities/recoverseg/gprecoverseg_tests/fault/fault.py | guofengrichard/gpdb | 29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2018-12-04T09:13:57.000Z | 2018-12-04T09:13:57.000Z | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import os
from gppylib.commands.base import Command, REMOTE
from mpp.lib.config import GPDBConfig
from mpp.lib.PSQL import PSQL
from time import sleep
from tinctest import TINCTestCase
from mpp.gpdb.tests.utilities.recoverseg.gprecoverseg_tests.fault.genFault import Fault
from mpp.lib.config import GPDBConfig
from mpp.lib.gprecoverseg import GpRecoverseg
from utilities.gppersistentrebuild import PTTestCase
# Environment variable to be set prior to the gprecoverseg run.
ENV_VAR="GP_MPP_12038_INJECT_DELAY"
class FaultInjectorTestCase (TINCTestCase):
"""
@description Injects the specific faults in Primary or Mirror
@created 2009-01-27 14:00:00
@modified 2013-09-12 17:10:15
@tags storage schema_topology
@product_version gpdb:4.2.x,gpdb:main
"""
def test_kill_primary(self):
"""
[feature]: Kills primary gp0 segment
"""
newfault = Fault()
hosts = newfault.get_segment_host(preferred_role='p',content=0)
if not newfault.kill_primary_gp0(hosts):
self.fail("Could not the kill the primary process, cannot proceed further!")
rtrycnt = 0
while( not newfault.is_changetracking()):
tinctest.logger.info("Waiting [%s] for DB to go into CT mode" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_kill_mirror(self):
"""
[feature]: Kills mirror gp0 segment
"""
newfault = Fault()
hosts = newfault.get_segment_host(preferred_role='m',content=0)
if not newfault.kill_mirror_gp0(hosts):
self.fail("Could not the kill the mirror process, cannot proceed further!")
rtrycnt = 0
while( not newfault.is_changetracking()):
tinctest.logger.info("Waiting [%s] for DB to go in CT mode" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_kill_primary_group(self):
"""
[feature]: Kill a group of primary segments
"""
newfault = Fault()
seglist = newfault.get_seginfo_for_primaries()
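        # Kill the first half (rounded up) of the primary segments.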
        seglist = seglist[:(len(seglist) + 1) // 2]
for seg in seglist:
tinctest.logger.info('Killing segment %s' % seg.getSegmentDataDirectory())
newfault.kill_primary(seg.getSegmentHostName(), seg.getSegmentDataDirectory(), seg.getSegmentPort())
rtrycnt = 0
while (not newfault.is_changetracking()):
            tinctest.logger.info('Waiting [%s] for DB to go into CT mode' % rtrycnt)
rtrycnt += 1
def test_drop_pg_dirs_on_primary(self):
"""
[feature]: Drops primary gp0 folder
"""
newfault = Fault()
(host, fileLoc) = newfault.get_segment_host_fileLoc()
newfault.drop_pg_dirs_on_primary(host, fileLoc)
rtrycnt = 0
max_rtrycnt = 300
while( not newfault.is_changetracking()):
tinctest.logger.info("Waiting [%s] for DB to go into CT mode" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_use_gpfaultinjector_to_mark_segment_down(self):
"""
[feature]: Use gpfaultinjector to mark a segment down in the configuration, but the
process is still running on the segment.
"""
newfault = Fault()
seginfo = newfault.get_seginfo(preferred_role='m', content=1)
newfault.inject_using_gpfaultinjector(fault_name='filerep_consumer', fault_mode='async', fault_type='fault', segdbid=seginfo.getSegmentDbId())
rtrycnt = 0
while (not newfault.is_changetracking()):
tinctest.logger.info("Waiting [%s] for DB to go into CT mode" % rtrycnt)
rtrycnt += 1
def test_create_symlink_for_seg(self):
"""
[feature]: Creates a symlink to the data directory for a given segment
"""
newfault = Fault()
seginfo = newfault.get_seginfo(preferred_role='m', content=1)
newfault.create_remote_symlink(seginfo.getSegmentHostName(), seginfo.getSegmentDataDirectory())
tinctest.logger.info('Creating symlink for seg %s on host %s' % (seginfo.getSegmentDataDirectory(), seginfo.getSegmentHostName()))
def test_remove_symlink_for_seg(self):
"""
        [feature]: Remove symlink for the data directory and restore the original directory
for a given segment.
"""
newfault = Fault()
seginfo = newfault.get_seginfo(preferred_role='m', content=1)
newfault.remove_remote_symlink(seginfo.getSegmentHostName(), seginfo.getSegmentDataDirectory())
tinctest.logger.info('Removed symlinks for seg %s on host %s' % (seginfo.getSegmentDataDirectory(), seginfo.getSegmentHostName()))
def test_corrupt_persistent_tables(self):
"""
[feature]: corrupts PT tables for segment that has been marked down
"""
newfault = Fault()
seginfo = newfault.get_seginfo(preferred_role='p', content=1)
pt = PTTestCase('corrupt_persistent_table')
pt.corrupt_persistent_table(seginfo.getSegmentHostName(), seginfo.getSegmentPort())
tinctest.logger.info('Finished corruption of PT tables')
def test_rebuild_persistent_tables(self):
"""
[feature]: rebuilds PT tables for segment that has been marked down
"""
cmd = Command(name='Running gppersistentrebuild tool', cmdStr = 'echo "y\ny\n" | $GPHOME/sbin/gppersistentrebuild -c 1')
cmd.run(validateAfter=True)
tinctest.logger.info('Finished rebuild of PT tables')
def test_shared_mem_is_cleaned(self):
"""
[feature]: Check if the shared memory is cleaned
"""
newfault = Fault()
seginfo = newfault.get_seginfo(preferred_role='p',content=0)
cmd = Command('check for shared memory', cmdStr="ipcs -a", ctxt=REMOTE, remoteHost=seginfo.getSegmentHostName())
cmd.run(validateAfter=True)
result = cmd.get_results().stdout.split('\n')
for r in result:
if r and r.split()[-1] == '0':
raise Exception('Shared memory not cleaned up for %s' % r)
def test_wait_till_segments_in_change_tracking(self):
"""
[feature]: Wait until segments for into change tracking
"""
newfault = Fault()
rtrycnt = 0
while( not newfault.is_changetracking()):
tinctest.logger.info("Waiting [%s] for DB to go in CT mode" %rtrycnt)
rtrycnt = rtrycnt + 1
class GprecoversegClass(TINCTestCase):
"""
@description Performs different types of Recovery process.
@created 2009-01-27 14:00:00
@modified 2013-09-12 17:10:15
@tags storage schema_topology
@product_version gpdb:4.2.x,gpdb:main
"""
def test_recovery_with_new_loc(self):
"""
[feature]: Performs recovery by creating a configuration file with new segment locations
"""
newfault = Fault()
config = GPDBConfig()
hosts = newfault.get_segment_host()
newfault.create_new_loc_config(hosts, orig_filename='recovery.conf', new_filename='recovery_new.conf')
if not newfault.run_recovery_with_config(filename='recovery_new.conf'):
self.fail("*** Incremental recovery with config file recovery_new.conf failed")
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_do_incremental_recovery(self):
"""
[feature]: Performs Incremental Recovery
"""
config = GPDBConfig()
recoverseg = GpRecoverseg()
tinctest.logger.info('Running Incremental gprecoverseg...')
recoverseg.run()
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_do_full_recovery(self):
"""
[feature]: Performs Full Recovery
"""
config = GPDBConfig()
recoverseg = GpRecoverseg()
tinctest.logger.info('Running Full gprecoverseg...')
recoverseg.run(option = '-F')
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_invalid_state_recoverseg(self):
"""
[feature]: Sets the ENV_VAR and runs the incremental recoverseg
"""
# setting the ENV_VAR
os.environ[ENV_VAR] = '1'
recoverseg = GpRecoverseg()
config = GPDBConfig()
tinctest.logger.info('Running Incremental gprecoverseg...')
recoverseg.run()
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_incremental_recovery_skip_persistent_tables_check(self):
"""
[feature]: Run incremental recoverseg with persistent tables check option
"""
config = GPDBConfig()
recoverseg = GpRecoverseg()
tinctest.logger.info('Running gprecoverseg...')
recoverseg.run()
self.assertNotIn('Performing persistent table check', recoverseg.stdout)
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_full_recovery_skip_persistent_tables_check(self):
"""
[feature]: Run recoverseg with persistent tables check option
"""
config = GPDBConfig()
recoverseg = GpRecoverseg()
tinctest.logger.info('Running gprecoverseg...')
recoverseg.run(option='-F')
self.assertNotIn('Performing persistent table check', recoverseg.stdout)
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1
def test_incremental_recovery_with_persistent_tables_corruption(self):
"""
[feature]: Run incremental recoverseg with persistent tables corruption
"""
recoverseg = GpRecoverseg()
tinctest.logger.info('Running gprecoverseg...')
try:
recoverseg.run(option='--persistent-check', validate=False)
except Exception as e:
tinctest.logger.info('Encountered exception while running incremental recovery with corrupt persistent table')
self.assertIn('Performing persistent table check', recoverseg.stdout)
def test_full_recovery_with_persistent_tables_corruption(self):
"""
[feature]: Run recoverseg with persistent tables corruption
"""
recoverseg = GpRecoverseg()
tinctest.logger.info('Running gprecoverseg...')
try:
recoverseg.run(option='-F --persistent-check', validate=False)
except Exception as e:
tinctest.logger.info('Encountered exception while running full recovery with corrupt persistent table')
self.assertIn('Performing persistent table check', recoverseg.stdout)
class GPDBdbOps(TINCTestCase):
"""
@description GPDB admin operations
@created 2009-01-27 14:00:00
@modified 2013-09-12 17:10:15
@tags storage schema_topology
@product_version gpdb:4.2.x,gpdb:main
"""
@classmethod
def setUpClass(cls):
super(GPDBdbOps,cls).setUpClass()
tinctest.logger.info('GPDB Operations')
def gprestartdb(self):
''' Restarts the Database '''
newfault = Fault()
newfault.stop_db()
newfault.start_db()
sleep(30)
def check_if_not_in_preferred_role(self):
''' Checks if the segments are in preferred role or not '''
newfault = Fault()
result = newfault.check_if_not_in_preferred_role()
if result == True:
self.fail("Segments are not in preferred roles!!!")
class SegmentConfigurations(TINCTestCase):
"""
@description Checks the segment's configuration for any invalid states
@created 2009-01-27 14:00:00
@modified 2013-09-12 17:10:15
@tags storage schema_topology
@product_version gpdb:4.2.x,gpdb:main
"""
@classmethod
def setUpClass(cls):
super(SegmentConfigurations,cls).setUpClass()
tinctest.logger.info('Running all the invalid state tests...')
# Sleep introduced so that the gprecoverseg starts before the invalid state tests
tinctest.logger.info('Sleep introduced for 15 secs...')
sleep(15)
def test_if_primary_down(self):
"""
[feature]: Check for invalid state - Primary is marked down
"""
sql_stmt = "SELECT 'down_segment' FROM gp_segment_configuration " \
"WHERE role = 'p' " \
"AND status = 'd'"
out = PSQL.run_sql_command(sql_stmt)
if len(out) == 0:
error_msg = 'Could not connect to the sever!!'
tinctest.logger.error(error_msg)
self.fail(error_msg)
out = out.count('down_segment') - 1
if out == 0:
tinctest.logger.info('Primary is marked down => 0 rows')
else:
error_msg = "%s down segments found" %out
tinctest.logger.info(error_msg)
self.fail(error_msg)
def test_if_mirror_down_and_primary_in_CT(self):
"""
[feature]: Check for invalid state - Mirror is down but primary is not in change tracking
"""
sql_stmt = "SELECT p.content, p.dbid AS p_dbid, m.dbid AS m_dbid, " \
"p.role AS p_role, m.role AS m_role, " \
"p.preferred_role AS p_pref_role, m.preferred_role AS m_pref_role, " \
"p.address AS p_address, m.address AS m_address, " \
"p.status AS p_status, m.status AS m_status, " \
"p.mode AS p_mode, m.mode AS m_mode " \
"FROM gp_segment_configuration p, gp_segment_configuration m " \
"WHERE ( (p.content = m.content) AND (p.dbid <> m.dbid) ) " \
"AND p.status = 'u' and m.status = 'd' " \
"AND p.mode <> 'c'"
out = PSQL.run_sql_command(sql_stmt)
if len(out) == 0:
error_msg = 'Could not connect to the sever!!'
tinctest.logger.error(error_msg)
self.fail(error_msg)
out = out.split('\n')[3].find('0 rows')
if out > 0:
tinctest.logger.info('Mirror is down but primary is not in change tracking => 0 rows')
else:
error_msg = "%s down segments found" %out
tinctest.logger.info(error_msg)
self.fail(error_msg)
def test_if_primary_in_CT_but_mirror_not_down(self):
"""
[feature]: Check for invalid state - Primary is in change tracking but mirror is not down
"""
sql_stmt = "SELECT p.content, p.dbid AS p_dbid, m.dbid AS m_dbid, " \
"p.role AS p_role, m.role AS m_role, " \
"p.preferred_role AS p_pref_role, m.preferred_role AS m_pref_role, " \
"p.address AS p_address, m.address AS m_address, " \
"p.status AS p_status, m.status AS m_status, " \
"p.mode AS p_mode, m.mode AS m_mode " \
"FROM gp_segment_configuration p, gp_segment_configuration m " \
"WHERE ( (p.content = m.content) AND (p.dbid <> m.dbid) ) " \
"AND p.status = 'u' and p.mode = 'c' " \
"AND m.status <> 'd'"
out = PSQL.run_sql_command(sql_stmt)
if len(out) == 0:
error_msg = 'Could not connect to the sever!!'
tinctest.logger.error(error_msg)
self.fail(error_msg)
out = out.split('\n')[3].find('0 rows')
if out > 0:
tinctest.logger.info('Primary is in change tracking but mirror is not down => 0 rows')
else:
error_msg = "%s down segments found" %out
tinctest.logger.info(error_msg)
self.fail(error_msg)
def test_if_primary_up_resync_and_mirror_down_not_in_resync(self):
"""
[feature]: Check for invalid state - Primary is Up/In resync, Mirror is not in resync or is marked down
"""
sql_stmt = "SELECT p.content, p.dbid AS p_dbid, m.dbid AS m_dbid, " \
"p.role AS p_role, m.role AS m_role, " \
"p.preferred_role AS p_pref_role, m.preferred_role AS m_pref_role, " \
"p.address AS p_address, m.address AS m_address, " \
"p.status AS p_status, m.status AS m_status, " \
"p.mode AS p_mode, m.mode AS m_mode " \
"FROM gp_segment_configuration p, gp_segment_configuration m " \
"WHERE ( (p.content = m.content) AND (p.dbid <> m.dbid) ) " \
"AND p.status = 'u' and p.mode = 'r' " \
"AND ( (m.mode <> 'r') OR (m.status = 'd') )"
out = PSQL.run_sql_command(sql_stmt)
if len(out) == 0:
            error_msg = 'Could not connect to the server!!'
tinctest.logger.error(error_msg)
self.fail(error_msg)
out = out.split('\n')[3].find('0 rows')
if out > 0:
tinctest.logger.info('Primary is Up/In resync, Mirror is not in resync or is marked down => 0 rows')
else:
error_msg = "%s down segments found" %out
tinctest.logger.info(error_msg)
self.fail(error_msg)
| 39.190377 | 150 | 0.611541 |
4c76ce72a449c9bad0dd10b23da973452a1c32bf | 7,282 | py | Python | tests/functional/coercers/test_coercer_non_null_string_field.py | matt-koevort/tartiflette | 5777866b133d846ce4f8aa03f735fa81832896cd | [
"MIT"
] | 530 | 2019-06-04T11:45:36.000Z | 2022-03-31T09:29:56.000Z | tests/functional/coercers/test_coercer_non_null_string_field.py | matt-koevort/tartiflette | 5777866b133d846ce4f8aa03f735fa81832896cd | [
"MIT"
] | 242 | 2019-06-04T11:53:08.000Z | 2022-03-28T07:06:27.000Z | tests/functional/coercers/test_coercer_non_null_string_field.py | matt-koevort/tartiflette | 5777866b133d846ce4f8aa03f735fa81832896cd | [
"MIT"
] | 36 | 2019-06-21T06:40:27.000Z | 2021-11-04T13:11:16.000Z | import pytest
from tests.functional.coercers.common import resolve_unwrapped_field
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
name="coercion",
resolvers={"Query.nonNullStringField": resolve_unwrapped_field},
)
@pytest.mark.parametrize(
"query,variables,expected",
[
(
"""query { nonNullStringField }""",
None,
{
"data": None,
"errors": [
{
"message": "Missing mandatory argument < param > in field < Query.nonNullStringField >.",
"path": ["nonNullStringField"],
"locations": [{"line": 1, "column": 9}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
}
],
},
),
(
"""query { nonNullStringField(param: null) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < String! > must not be null.",
"path": ["nonNullStringField"],
"locations": [{"line": 1, "column": 28}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query { nonNullStringField(param: "paramDefaultValue") }""",
None,
{
"data": {
"nonNullStringField": "SUCCESS-paramdefaultvalue-scalar-nonNullStringField"
}
},
),
(
"""query ($param: String!) { nonNullStringField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < String! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: String!) { nonNullStringField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < String! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: String!) { nonNullStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullStringField": "SUCCESS-varvalue-scalar-nonNullStringField"
}
},
),
(
"""query ($param: String! = null) { nonNullStringField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid default value < null >.",
"path": None,
"locations": [{"line": 1, "column": 26}],
}
],
},
),
(
"""query ($param: String! = null) { nonNullStringField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < String! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: String! = null) { nonNullStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullStringField": "SUCCESS-varvalue-scalar-nonNullStringField"
}
},
),
(
"""query ($param: String! = "varDefault") { nonNullStringField(param: $param) }""",
None,
{
"data": {
"nonNullStringField": "SUCCESS-vardefault-scalar-nonNullStringField"
}
},
),
(
"""query ($param: String! = "varDefault") { nonNullStringField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < String! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: String! = "varDefault") { nonNullStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullStringField": "SUCCESS-varvalue-scalar-nonNullStringField"
}
},
),
(
"""query ($param: String!) { nonNullStringField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < String! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: String!) { nonNullStringField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < String! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: String!) { nonNullStringField(param: $param) }""",
{"param": "varValue"},
{
"data": {
"nonNullStringField": "SUCCESS-varvalue-scalar-nonNullStringField"
}
},
),
],
)
async def test_coercion_non_null_string_field(
engine, query, variables, expected
):
assert await engine.execute(query, variables=variables) == expected
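    # Note: the parametrize table above exercises the full String! coercion
    # matrix: a literal argument (missing / null / valid), then a variable
    # argument with no default, a null default and a non-null default.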
| 34.349057 | 117 | 0.363087 |
26a56d6df98b3b8d73b50ffb02cd8ccc6e71fca5 | 2,478 | py | Python | homeassistant/components/insteon/fan.py | VirtualL/home-assistant | 301829d02be8d865ab46c8901ac046d060849320 | [
"Apache-2.0"
] | 2 | 2017-10-26T19:43:55.000Z | 2017-12-30T23:29:00.000Z | homeassistant/components/insteon/fan.py | VirtualL/home-assistant | 301829d02be8d865ab46c8901ac046d060849320 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:34:57.000Z | 2022-03-12T00:59:48.000Z | homeassistant/components/insteon/fan.py | VirtualL/home-assistant | 301829d02be8d865ab46c8901ac046d060849320 | [
"Apache-2.0"
] | 1 | 2022-02-20T07:41:14.000Z | 2022-02-20T07:41:14.000Z | """Support for INSTEON fans via PowerLinc Modem."""
import logging
from homeassistant.components.fan import (
SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF, SUPPORT_SET_SPEED,
FanEntity)
from homeassistant.const import STATE_OFF
from . import InsteonEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['insteon']
SPEED_TO_HEX = {
SPEED_OFF: 0x00,
SPEED_LOW: 0x3f,
SPEED_MEDIUM: 0xbe,
SPEED_HIGH: 0xff,
}
FAN_SPEEDS = [STATE_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the INSTEON device class for the hass platform."""
insteon_modem = hass.data['insteon'].get('modem')
address = discovery_info['address']
device = insteon_modem.devices[address]
state_key = discovery_info['state_key']
_LOGGER.debug('Adding device %s entity %s to Fan platform',
device.address.hex, device.states[state_key].name)
new_entity = InsteonFan(device, state_key)
async_add_entities([new_entity])
class InsteonFan(InsteonEntity, FanEntity):
"""An INSTEON fan component."""
@property
def speed(self) -> str:
"""Return the current speed."""
return self._hex_to_speed(self._insteon_device_state.value)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return FAN_SPEEDS
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity."""
if speed is None:
speed = SPEED_MEDIUM
await self.async_set_speed(speed)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity."""
await self.async_set_speed(SPEED_OFF)
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
fan_speed = SPEED_TO_HEX[speed]
if fan_speed == 0x00:
self._insteon_device_state.off()
else:
self._insteon_device_state.set_level(fan_speed)
@staticmethod
def _hex_to_speed(speed: int):
hex_speed = SPEED_OFF
if speed > 0xfe:
hex_speed = SPEED_HIGH
elif speed > 0x7f:
hex_speed = SPEED_MEDIUM
elif speed > 0:
hex_speed = SPEED_LOW
return hex_speed
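    # Illustration of the bucketing above (values chosen from SPEED_TO_HEX,
    # not from any Insteon spec): _hex_to_speed(0x00) -> SPEED_OFF,
    # _hex_to_speed(0x3f) -> SPEED_LOW, _hex_to_speed(0xbe) -> SPEED_MEDIUM,
    # _hex_to_speed(0xff) -> SPEED_HIGH.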
| 28.159091 | 71 | 0.655771 |
8ba9be9b246133af15fcde1fa3e6ba6ff33f1116 | 14,684 | py | Python | main.py | JMax45/JMax-Encryption-Tools | a0f8b334077fc6d4196a4c1ebb106adf79cc9fa4 | [
"MIT"
] | 1 | 2019-07-22T14:48:43.000Z | 2019-07-22T14:48:43.000Z | main.py | JMax45/JMax-Encryption-Tools | a0f8b334077fc6d4196a4c1ebb106adf79cc9fa4 | [
"MIT"
] | 2 | 2019-07-25T15:42:24.000Z | 2019-07-30T06:45:28.000Z | main.py | JMax45/JMax-Encryption-Tools | a0f8b334077fc6d4196a4c1ebb106adf79cc9fa4 | [
"MIT"
] | null | null | null | import sys
import json
from PyQt5 import QtWidgets, QtTest
from PyQt5.Qt import QApplication, QClipboard, QFileDialog
from design import design
from design.popup.about_jet import Ui_popup_about_jet
from design.popup.encryptTXT import Ui_encryptTXT
from design.popup.pgen import Ui_pgen
from methods.morse import *
from methods.caesar import *
from methods.vigenere import *
from methods.substitution import *
to_encrypt = ("")
to_decrypt = ("")
encryption_key = ("")
save_encryption_method = ("")
class ExampleApp(QtWidgets.QMainWindow, design.Ui_MainWindow):
def crypt(self):
caesar_radio = self.radioButton_2.isChecked()
morse_radio = self.radioButton.isChecked()
vigenere_radio = self.radioButton_3.isChecked()
substitution_radio = self.radioButton_4.isChecked()
if caesar_radio + morse_radio + vigenere_radio + substitution_radio == 0:
self.textEdit_2.setText("Choose an encryption metod")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
empty_check = self.textEdit.toPlainText()
def empty_check_true():
self.textEdit_2.setText("The text field is empty")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
if caesar_radio == True:
if empty_check == "":
empty_check_true()
else:
global to_encrypt
to_encrypt = self.textEdit.toPlainText()
caesar_crypt()
from methods.caesar import encrypted_text
self.textEdit_2.setText(encrypted_text)
if morse_radio == True:
if empty_check == "":
empty_check_true()
else:
to_encrypt = self.textEdit.toPlainText()
morse_crypt()
from methods.morse import encrypted_text
self.textEdit_2.setText(encrypted_text)
if vigenere_radio == True:
if empty_check == "":
empty_check_true()
else:
to_encrypt = self.textEdit.toPlainText()
vigenere_crypt()
from methods.vigenere import encrypted_text,encryption_key
self.textEdit_2.setText(encrypted_text)
self.lineEdit.setText(encryption_key)
if substitution_radio == True:
if empty_check == "":
empty_check_true()
else:
to_encrypt = self.textEdit.toPlainText().upper()
substitution_crypt()
from methods.substitution import encrypted_text
self.textEdit_2.setText(encrypted_text)
self.textEdit.setText("")
def decrypt(self):
caesar_radio = self.radioButton_2.isChecked()
morse_radio = self.radioButton.isChecked()
vigenere_radio = self.radioButton_3.isChecked()
substitution_radio = self.radioButton_4.isChecked()
if caesar_radio + morse_radio + vigenere_radio + substitution_radio == 0:
self.textEdit_2.setText("Choose an encryption metod")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
empty_check = self.textEdit.toPlainText()
def empty_check_true():
self.textEdit_2.setText("The text field is empty")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
if caesar_radio == True:
if empty_check == "":
empty_check_true()
else:
global to_decrypt
to_decrypt = self.textEdit.toPlainText()
caesar_decrypt()
from methods.caesar import decrypted_text
self.textEdit_2.setText(decrypted_text)
if morse_radio == True:
if empty_check == "":
empty_check_true()
else:
to_decrypt = self.textEdit.toPlainText()
morse_decrypt()
from methods.morse import decrypted_text
self.textEdit_2.setText(decrypted_text)
if vigenere_radio == True:
if empty_check == "":
empty_check_true()
else:
to_decrypt = self.textEdit.toPlainText()
global encryption_key
encryption_key = self.lineEdit.text()
vigenere_decrypt()
from methods.vigenere import decrypted_text
self.textEdit_2.setText(str(decrypted_text))
if substitution_radio == True:
if empty_check == "":
empty_check_true()
else:
to_decrypt = self.textEdit.toPlainText().upper()
substitution_decrypt()
from methods.substitution import decrypted_text
self.textEdit_2.setText(decrypted_text)
self.textEdit.setText("")
self.lineEdit.setText("")
def clear_encryption_key(self):
self.lineEdit.setText("")
def copy_encryption_key(self):
copy_key = self.lineEdit.text()
QApplication.clipboard().setText(copy_key)
self.pushButton_3.setStyleSheet("background-color:#E75917;")
self.pushButton_3.setText("COPIED")
QtTest.QTest.qWait(1000)
self.pushButton_3.setStyleSheet("background-color:#5858FA;")
self.pushButton_3.setText("COPY")
def show_vigenere_keys(self):
self.lineEdit.show()
self.pushButton_3.show()
self.label.show()
def hide_vigenere_keys(self):
self.lineEdit.hide()
self.pushButton_3.hide()
self.label.hide()
def on_click_radioButton(self):
caesar_radio = self.radioButton_2.isChecked()
morse_radio = self.radioButton.isChecked()
vigenere_radio = self.radioButton_3.isChecked()
if self.radioButton.isChecked() == True:
self.hide_vigenere_keys()
global save_encryption_method
save_encryption_method = "morse"
if self.radioButton_2.isChecked() == True:
self.hide_vigenere_keys()
save_encryption_method = "caesar"
if self.radioButton_3.isChecked() == True:
save_encryption_method = "vigenere"
self.show_vigenere_keys()
if self.radioButton_4.isChecked() == True:
save_encryption_method = "substitution"
self.hide_vigenere_keys()
def open_about_jet(self):
self.window = QtWidgets.QMainWindow()
self.ui = Ui_popup_about_jet()
self.ui.setupUi(self.window)
self.window.show()
def save_message2(self):
file_name = self.lineEdit_2.text()
file_name2 = ("saves/"+file_name+".txt")
print(file_name2)
with open(file_name2, 'w') as outfile:
to_save = self.textEdit_2.toPlainText()
encryption_key_save = self.lineEdit.text()
data = {}
data['encrypted_message'] = []
if save_encryption_method == 'vigenere':
data['encrypted_message'].append({
'message': to_save,
'encryption_method': save_encryption_method,
'encryption_key': encryption_key_save
})
else:
data['encrypted_message'].append({
'message': to_save,
'encryption_method': save_encryption_method
})
json.dump(data, outfile)
self.check_save_message1 = "False"
self.label_2.hide()
self.lineEdit_2.hide()
self.pushButton_4.hide()
self.pushButton_5.setStyleSheet("")
self.label_4.show()
QtTest.QTest.qWait(3000)
self.label_4.hide()
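    # Sketch of the resulting save-file layout (field names taken from the
    # json.dump call above; the "encryption_key" entry is only written for
    # the vigenere method):
    #   {"encrypted_message": [{"message": "<ciphertext>",
    #                           "encryption_method": "vigenere",
    #                           "encryption_key": "<key>"}]}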
def save_message(self):
check_save_message2 = self.check_save_message1
check_save_message3 = self.check_save_message4
if check_save_message2 == "False":
if check_save_message3 == "True":
self.check_save_message4 = "False"
self.pushButton_6.setStyleSheet("")
self.toolButton.hide()
self.lineEdit_3.hide()
self.pushButton_7.hide()
self.check_save_message1 = "True"
self.label_2.show()
self.lineEdit_2.show()
self.pushButton_4.show()
self.pushButton_5.setStyleSheet("background-color:#38A1CB")
if check_save_message2 == "True":
self.check_save_message1 = "False"
self.label_2.hide()
self.lineEdit_2.hide()
self.pushButton_4.hide()
self.pushButton_5.setStyleSheet("")
def load_message(self):
check_save_message3 = self.check_save_message4
check_save_message2 = self.check_save_message1
if check_save_message3 == "False":
if check_save_message2 == "True":
self.check_save_message1 = "False"
self.label_2.hide()
self.lineEdit_2.hide()
self.pushButton_4.hide()
self.pushButton_5.setStyleSheet("")
self.check_save_message4 = "True"
self.pushButton_6.setStyleSheet("background-color:#38A1CB")
self.toolButton.show()
self.lineEdit_3.show()
self.pushButton_7.show()
if check_save_message3 == "True":
self.check_save_message4 = "False"
self.pushButton_6.setStyleSheet("")
self.toolButton.hide()
self.lineEdit_3.hide()
self.pushButton_7.hide()
def choose_a_file_to_load(self):
file_to_load1 = QFileDialog.getOpenFileName()[0]
file_to_load2 = str(file_to_load1)
self.lineEdit_3.setText(file_to_load2)
def load_the_file(self):
file_to_load = self.lineEdit_3.text()
with open(file_to_load) as json_file:
data = json.load(json_file)
for p in data['encrypted_message']:
print('Message: ' + p['message'])
print('Encryption Method: ' + p['encryption_method'])
print('')
global to_decrypt
to_decrypt = (p['message'])
if p['encryption_method'] == 'caesar':
caesar_decrypt()
from methods.caesar import decrypted_text
if p['encryption_method'] == 'morse':
morse_decrypt()
from methods.morse import decrypted_text
if p['encryption_method'] == 'vigenere':
global encryption_key
encryption_key = p['encryption_key']
vigenere_decrypt()
from methods.vigenere import decrypted_text
if p['encryption_method'] == 'substitution':
substitution_decrypt()
from methods.substitution import decrypted_text
self.textEdit_2.setText(decrypted_text)
self.check_save_message4 = "False"
self.pushButton_6.setStyleSheet("")
self.toolButton.hide()
self.lineEdit_3.hide()
self.pushButton_7.hide()
self.label_3.show()
QtTest.QTest.qWait(3000)
self.label_3.hide()
def open_encrypt_txt(self):
window1.hide()
height = self.geometry().y()
height2 = (height-30)
self.window4 = QtWidgets.QMainWindow()
global window4_global
window4_global = self.window4
self.ui = Ui_encryptTXT()
self.ui.setupUi(self.window4)
self.window4.show()
self.window4.move(self.geometry().x(), self.geometry().y())
def open_pgen(self):
window1.hide()
height = self.geometry().y()
height2 = (height-30)
self.window5 = QtWidgets.QMainWindow()
global window5_global
window5_global = self.window5
self.ui = Ui_pgen()
self.ui.setupUi(self.window5)
self.window5.show()
self.window5.move(self.geometry().x(), self.geometry().y())
def __init__(self):
        # Needed here to access the variables, methods,
        # etc. in the design.py file
        super().__init__()
        self.setupUi(self)  # Required to initialize our UI design
self.pushButton.clicked.connect(self.crypt)
self.pushButton_2.clicked.connect(self.decrypt)
self.pushButton_3.clicked.connect(self.copy_encryption_key)
self.pushButton_4.clicked.connect(self.save_message2)
self.pushButton_5.clicked.connect(self.save_message)
self.pushButton_6.clicked.connect(self.load_message)
self.radioButton.toggled.connect(self.on_click_radioButton)
self.radioButton_2.toggled.connect(self.on_click_radioButton)
self.radioButton_3.toggled.connect(self.on_click_radioButton)
self.radioButton_4.toggled.connect(self.on_click_radioButton)
self.toolButton.clicked.connect(self.choose_a_file_to_load)
self.pushButton_7.clicked.connect(self.load_the_file)
self.actionAbout_JET.triggered.connect(self.open_about_jet)
self.actionEncryptTXT.triggered.connect(self.open_encrypt_txt)
self.actionPGEN.triggered.connect(self.open_pgen)
#hide and show stuff
self.lineEdit.hide()
self.lineEdit_2.hide()
self.pushButton_3.hide()
self.pushButton_7.hide()
self.lineEdit_3.hide()
self.label.hide()
self.label_2.hide()
self.label_3.hide()
self.label_3.setStyleSheet("color:#0B610B;")
self.label_4.hide()
self.label_4.setStyleSheet("color:#0B610B;")
self.toolButton.hide()
self.pushButton_3.setStyleSheet("background-color:#5858FA;")
self.pushButton_4.setStyleSheet("background-color:#5858FA;")
self.pushButton_4.hide()
self.lineEdit.setStyleSheet("background-color:#EFF2FB;")
self.lineEdit_2.setStyleSheet("background-color:#EFF2FB;")
self.textEdit.setStyleSheet("background-color:#EFF2FB;")
self.textEdit_2.setStyleSheet("background-color:#EFF2FB;")
self.check_save_message1 = ("False")
self.check_save_message4 = ("False")
def main():
    app = QtWidgets.QApplication(sys.argv)  # New QApplication instance
    global window1
    global window4
    global window5
    window4 = ("null")
    window1 = ExampleApp()  # Create an ExampleApp object
    window1.setStyleSheet("background-color:#CED8F6;")
    window1.show()  # Show the window
    app.exec_()  # and start the application
if __name__ == '__main__':  # If we run this file directly rather than importing it
    main()  # then call main()
| 42.439306 | 81 | 0.608554 |
2510cc93a4b929d0683508f9f020284608b53eae | 13,713 | py | Python | python/ql/test/experimental/dataflow/coverage/test.py | momyarp/codeql | 50f2557dd21eb6e28f3e8a16b30d03c5aafbe9dc | [
"MIT"
] | null | null | null | python/ql/test/experimental/dataflow/coverage/test.py | momyarp/codeql | 50f2557dd21eb6e28f3e8a16b30d03c5aafbe9dc | [
"MIT"
] | null | null | null | python/ql/test/experimental/dataflow/coverage/test.py | momyarp/codeql | 50f2557dd21eb6e28f3e8a16b30d03c5aafbe9dc | [
"MIT"
] | null | null | null | # This should cover all the syntactical constructs that we hope to support.
# Headings refer to https://docs.python.org/3/reference/expressions.html,
# and are selected whenever they incur dataflow.
# Intended sources should be the variable `SOURCE` and intended sinks should be
# arguments to the function `SINK` (see python/ql/test/experimental/dataflow/testConfig.qll).
#
# Functions whose name ends with "_with_local_flow" will also be tested for local flow.
#
# All functions starting with "test_" should run and execute `print("OK")` exactly once.
# This can be checked by running validTest.py.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from testlib import *
# These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"
def is_source(x):
return x == "source" or x == b"source" or x == 42 or x == 42.0 or x == 42j
def SINK(x):
if is_source(x):
print("OK")
else:
print("Unexpected flow", x)
def SINK_F(x):
if is_source(x):
print("Unexpected flow", x)
else:
print("OK")
def test_tuple_with_local_flow():
x = (NONSOURCE, SOURCE)
y = x[1]
SINK(y) #$ flow="SOURCE, l:-2 -> y"
def test_tuple_negative():
x = (NONSOURCE, SOURCE)
y = x[0]
SINK_F(y)
# 6.2.1. Identifiers (Names)
def test_names():
x = SOURCE
SINK(x) #$ flow="SOURCE, l:-1 -> x"
# 6.2.2. Literals
def test_string_literal():
x = "source"
SINK(x) #$ flow="'source', l:-1 -> x"
def test_bytes_literal():
x = b"source"
SINK(x) #$ flow="b'source', l:-1 -> x"
def test_integer_literal():
x = 42
SINK(x) #$ flow="42, l:-1 -> x"
def test_floatnumber_literal():
x = 42.0
SINK(x) #$ flow="42.0, l:-1 -> x"
def test_imagnumber_literal():
x = 42j
SINK(x) #$ MISSING:flow="42j, l:-1 -> x"
# 6.2.3. Parenthesized forms
def test_parenthesized_form():
x = (SOURCE)
SINK(x) #$ flow="SOURCE, l:-1 -> x"
# 6.2.5. List displays
def test_list_display():
x = [SOURCE]
SINK(x[0]) #$ flow="SOURCE, l:-1 -> x[0]"
def test_list_display_negative():
x = [SOURCE]
SINK_F(x)
def test_list_comprehension():
x = [SOURCE for y in [NONSOURCE]]
SINK(x[0]) #$ flow="SOURCE, l:-1 -> x[0]"
def test_list_comprehension_flow():
x = [y for y in [SOURCE]]
SINK(x[0]) #$ flow="SOURCE, l:-1 -> x[0]"
def test_list_comprehension_inflow():
l = [SOURCE]
x = [y for y in l]
SINK(x[0]) #$ flow="SOURCE, l:-2 -> x[0]"
def test_nested_list_display():
x = [*[SOURCE]]
SINK(x[0]) #$ MISSING:flow="SOURCE, l:-1 -> x[0]"
# 6.2.6. Set displays
def test_set_display():
x = {SOURCE}
SINK(x.pop()) #$ flow="SOURCE, l:-1 -> x.pop()"
def test_set_comprehension():
x = {SOURCE for y in [NONSOURCE]}
SINK(x.pop()) #$ flow="SOURCE, l:-1 -> x.pop()"
def test_set_comprehension_flow():
x = {y for y in [SOURCE]}
SINK(x.pop()) #$ flow="SOURCE, l:-1 -> x.pop()"
def test_set_comprehension_inflow():
l = {SOURCE}
x = {y for y in l}
SINK(x.pop()) #$ flow="SOURCE, l:-2 -> x.pop()"
def test_nested_set_display():
x = {*{SOURCE}}
SINK(x.pop()) #$ MISSING:flow="SOURCE, l:-1 -> x.pop()"
# 6.2.7. Dictionary displays
def test_dict_display():
x = {"s": SOURCE}
SINK(x["s"]) #$ flow="SOURCE, l:-1 -> x['s']"
def test_dict_display_pop():
x = {"s": SOURCE}
SINK(x.pop("s")) #$ flow="SOURCE, l:-1 -> x.pop(..)"
def test_dict_comprehension():
x = {y: SOURCE for y in ["s"]}
SINK(x["s"]) #$ MISSING:flow="SOURCE, l:-1 -> x['s']"
def test_dict_comprehension_pop():
x = {y: SOURCE for y in ["s"]}
SINK(x.pop("s")) #$ MISSING:flow="SOURCE, l:-1 -> x.pop()"
def test_nested_dict_display():
x = {**{"s": SOURCE}}
SINK(x["s"]) #$ MISSING:flow="SOURCE, l:-1 -> x['s']"
def test_nested_dict_display_pop():
x = {**{"s": SOURCE}}
SINK(x.pop("s")) #$ MISSING:flow="SOURCE, l:-1 -> x.pop()"
# Nested comprehensions
def test_nested_comprehension():
x = [y for z in [[SOURCE]] for y in z]
SINK(x[0]) #$ flow="SOURCE, l:-1 -> x[0]"
def test_nested_comprehension_deep_with_local_flow():
x = [y for v in [[[[SOURCE]]]] for u in v for z in u for y in z]
SINK(x[0]) #$ flow="SOURCE, l:-1 -> x[0]"
def test_nested_comprehension_dict():
d = {"s": [SOURCE]}
x = [y for k, v in d.items() for y in v]
SINK(x[0]) #$ MISSING:flow="SOURCE, l:-1 -> x[0]"
def test_nested_comprehension_paren():
x = [y for y in (z for z in [SOURCE])]
SINK(x[0]) #$ flow="SOURCE, l:-1 -> x[0]"
# 6.2.8. Generator expressions
def test_generator():
x = (SOURCE for y in [NONSOURCE])
SINK([*x][0]) #$ MISSING:flow="SOURCE, l:-1 -> List[0]"
# 6.2.9. Yield expressions
def gen(x):
yield x
def test_yield():
g = gen(SOURCE)
SINK(next(g)) #$ MISSING:flow="SOURCE, l:-1 -> next()"
def gen_from(x):
yield from gen(x)
def test_yield_from():
g = gen_from(SOURCE)
SINK(next(g)) #$ MISSING:flow="SOURCE, l:-1 -> next()"
# a statement rather than an expression, but related to generators
def test_for():
for x in gen(SOURCE):
SINK(x) #$ MISSING:flow="SOURCE, l:-1 -> x"
# 6.2.9.1. Generator-iterator methods
def test___next__():
g = gen(SOURCE)
SINK(g.__next__()) #$ MISSING:flow="SOURCE, l:-1 -> g.__next__()"
def gen2(x):
# argument of `send` has to flow to value of `yield x` (and so to `m`)
m = yield x
yield m
def test_send():
g = gen2(NONSOURCE)
n = next(g)
SINK(g.send(SOURCE)) #$ MISSING:flow="SOURCE -> g.send()"
def gen_ex(x):
try:
yield NONSOURCE
except:
yield x # `x` has to flow to call to `throw`
def test_throw():
g = gen_ex(SOURCE)
n = next(g)
SINK(g.throw(TypeError)) #$ MISSING:flow="SOURCE, l:-2 -> g.throw()"
# no `test_close` as `close` involves no data flow
# 6.2.9.3. Asynchronous generator functions
async def agen(x):
yield x
# 6.2.9.4. Asynchronous generator-iterator methods
# helper to run async test functions
def runa(a):
import asyncio
asyncio.run(a)
async def atest___anext__():
g = agen(SOURCE)
SINK(await g.__anext__()) #$ MISSING:flow="SOURCE, l:-1 -> g.__anext__()"
def test___anext__():
runa(atest___anext__())
async def agen2(x):
# argument of `send` has to flow to value of `yield x` (and so to `m`)
m = yield x
yield m
async def atest_asend():
g = agen2(NONSOURCE)
n = await g.__anext__()
SINK(await g.asend(SOURCE)) #$ MISSING:flow="SOURCE -> g.asend()"
def test_asend():
runa(atest_asend())
async def agen_ex(x):
try:
yield NONSOURCE
except:
yield x # `x` has to flow to call to `athrow`
async def atest_athrow():
g = agen_ex(SOURCE)
n = await g.__anext__()
SINK(await g.athrow(TypeError)) #$ MISSING:flow="SOURCE, l:-2 -> g.athrow()"
def test_athrow():
runa(atest_athrow())
# 6.3.1. Attribute references
class C:
a = SOURCE
@expects(2)
def test_attribute_reference():
SINK(C.a) #$ MISSING:flow="SOURCE, l:-4 -> C.a"
c = C()
SINK(c.a) #$ MISSING:flow="SOURCE, l:-6 -> c.a"
# overriding __getattr__ should be tested by the class coverage tests
# 6.3.2. Subscriptions
def test_subscription_tuple():
SINK((SOURCE,)[0]) #$ flow="SOURCE -> Tuple[0]"
def test_subscription_list():
SINK([SOURCE][0]) #$ flow="SOURCE -> List[0]"
def test_subscription_mapping():
SINK({"s": SOURCE}["s"]) #$ flow="SOURCE -> Dict['s']"
# overriding __getitem__ should be tested by the class coverage tests
# 6.3.3. Slicings
l = [SOURCE]
def test_slicing():
s = l[0:1:1]
SINK(s[0]) #$ MISSING:flow="SOURCE -> s[0]"
# The grammar seems to allow `l[0:1:1, 0:1]`, but the interpreter does not like it
# 6.3.4. Calls
def second(a, b):
return b
def test_call_positional():
SINK(second(NONSOURCE, SOURCE)) #$ flow="SOURCE -> second(..)"
def test_call_positional_negative():
SINK_F(second(SOURCE, NONSOURCE))
def test_call_keyword():
SINK(second(NONSOURCE, b=SOURCE)) #$ flow="SOURCE -> second(..)"
def test_call_unpack_iterable():
SINK(second(NONSOURCE, *[SOURCE])) #$ MISSING:flow="SOURCE -> second(..)"
def test_call_unpack_mapping():
SINK(second(NONSOURCE, **{"b": SOURCE})) #$ flow="SOURCE -> second(..)"
def f_extra_pos(a, *b):
return b[0]
def test_call_extra_pos():
SINK(f_extra_pos(NONSOURCE, SOURCE)) #$ flow="SOURCE -> f_extra_pos(..)"
def f_extra_keyword(a, **b):
return b["b"]
def test_call_extra_keyword():
SINK(f_extra_keyword(NONSOURCE, b=SOURCE)) #$ flow="SOURCE -> f_extra_keyword(..)"
# return the name of the first extra keyword argument
def f_extra_keyword_flow(**a):
return [*a][0]
# call the function with our source as the name of the keyword argument
def test_call_extra_keyword_flow():
SINK(f_extra_keyword_flow(**{SOURCE: None})) #$ MISSING:flow="SOURCE -> f_extra_keyword(..)"
# 6.12. Assignment expressions
def test_assignment_expression():
x = NONSOURCE
SINK(x := SOURCE) #$ MISSING:flow="SOURCE -> x"
# 6.13. Conditional expressions
def test_conditional_true():
SINK(SOURCE if True else NONSOURCE) #$ flow="SOURCE -> IfExp"
def test_conditional_true_guards():
SINK_F(NONSOURCE if True else SOURCE)
def test_conditional_false():
SINK(NONSOURCE if False else SOURCE) #$ flow="SOURCE -> IfExp"
def test_conditional_false_guards():
SINK_F(SOURCE if False else NONSOURCE)
# Condition is evaluated first, so x is SOURCE once chosen
def test_conditional_evaluation_true():
x = NONSOURCE
SINK(x if (SOURCE == (x := SOURCE)) else NONSOURCE) #$ MISSING:flow="SOURCE -> IfExp"
# Condition is evaluated first, so x is SOURCE once chosen
def test_conditional_evaluation_false():
x = NONSOURCE
SINK(NONSOURCE if (NONSOURCE == (x := SOURCE)) else x) #$ MISSING:flow="SOURCE -> IfExp"
# 6.14. Lambdas
def test_lambda():
def f(x):
return x
SINK(f(SOURCE)) #$ flow="SOURCE -> f(..)"
def test_lambda_positional():
def second(a, b):
return b
SINK(second(NONSOURCE, SOURCE)) #$ flow="SOURCE -> second(..)"
def test_lambda_positional_negative():
def second(a, b):
return b
SINK_F(second(SOURCE, NONSOURCE))
def test_lambda_keyword():
def second(a, b):
return b
SINK(second(NONSOURCE, b=SOURCE)) #$ flow="SOURCE -> second(..)"
def test_lambda_unpack_iterable():
def second(a, b):
return b
SINK(second(NONSOURCE, *[SOURCE])) #$ MISSING:flow="SOURCE -> second(..)" # Flow missing
def test_lambda_unpack_mapping():
def second(a, b):
return b
SINK(second(NONSOURCE, **{"b": SOURCE})) #$ flow="SOURCE -> second(..)"
def test_lambda_extra_pos():
f_extra_pos = lambda a, *b: b[0]
SINK(f_extra_pos(NONSOURCE, SOURCE)) #$ flow="SOURCE -> f_extra_pos(..)"
def test_lambda_extra_keyword():
f_extra_keyword = lambda a, **b: b["b"]
SINK(f_extra_keyword(NONSOURCE, b=SOURCE)) #$ flow="SOURCE -> f_extra_keyword(..)"
# call the function with our source as the name of the keyword argument
def test_lambda_extra_keyword_flow():
# return the name of the first extra keyword argument
f_extra_keyword_flow = lambda **a: [*a][0]
SINK(f_extra_keyword_flow(**{SOURCE: None})) #$ MISSING:flow="SOURCE -> f_extra_keyword(..)"
@expects(4)
def test_swap():
a = SOURCE
b = NONSOURCE
SINK(a) #$ flow="SOURCE, l:-2 -> a"
SINK_F(b)
a, b = b, a
SINK_F(a)
SINK(b) #$ flow="SOURCE, l:-7 -> b"
def test_deep_callgraph():
# port of python/ql/test/library-tests/taint/general/deep.py
def f1(arg):
return arg
def f2(arg):
return f1(arg)
def f3(arg):
return f2(arg)
def f4(arg):
return f3(arg)
def f5(arg):
return f4(arg)
def f6(arg):
return f5(arg)
x = f6(SOURCE)
SINK(x) #$ MISSING:flow="SOURCE, l:-1 -> x"
@expects(2)
def test_dynamic_tuple_creation_1():
tup = tuple()
tup += (SOURCE,)
tup += (NONSOURCE,)
SINK(tup[0]) #$ MISSING:flow="SOURCE, l:-3 -> tup[0]"
SINK_F(tup[1])
@expects(2)
def test_dynamic_tuple_creation_2():
tup = ()
tup += (SOURCE,)
tup += (NONSOURCE,)
SINK(tup[0]) #$ MISSING:flow="SOURCE, l:-3 -> tup[0]"
SINK_F(tup[1])
@expects(2)
def test_dynamic_tuple_creation_3():
tup1 = (SOURCE,)
tup2 = (NONSOURCE,)
tup = tup1 + tup2
SINK(tup[0]) #$ MISSING:flow="SOURCE, l:-4 -> tup[0]"
SINK_F(tup[1])
# Inspired by FP-report https://github.com/github/codeql/issues/4239
@expects(2)
def test_dynamic_tuple_creation_4():
tup = ()
for item in [SOURCE, NONSOURCE]:
tup += (item,)
SINK(tup[0]) #$ MISSING:flow="SOURCE, l:-3 -> tup[0]"
SINK_F(tup[1])
def return_from_inner_scope(x):
try:
return x[0]
except IndexError:
return SOURCE
def test_return_from_inner_scope():
SINK(return_from_inner_scope([])) #$ flow="SOURCE, l:-3 -> return_from_inner_scope(..)"
# Inspired by reverse read inconsistency check
def insertAtA(d):
d["a"] = SOURCE
def test_reverse_read_subscript():
d = {"a": NONSOURCE}
l = [d]
insertAtA(l[0])
SINK(d["a"]) #$ MISSING:flow="SOURCE, l-6 -> d['a']""
def test_reverse_read_dict_arg():
d = {"a": NONSOURCE}
dd = {"d": d}
insertAtA(**dd)
SINK(d["a"]) #$ MISSING:flow="SOURCE, l-12 -> d['a']""
class WithA:
def setA(self, v):
self.a = v
def __init__(self):
self.a = ""
def test_reverse_read_subscript_cls():
withA = WithA()
l = [withA]
l[0].setA(SOURCE)
SINK(withA.a) #$ MISSING:flow="SOURCE, l:-1 -> self.a"
| 21.905751 | 96 | 0.617297 |
a8f38f552de944bc7af9312bb63037fc862b1330 | 1,726 | py | Python | manimlib/animation/numbers.py | aDotInTheVoid/manim | eb3e5f419cb164f12b253cf885e19c35c62a2f31 | [
"MIT"
] | null | null | null | manimlib/animation/numbers.py | aDotInTheVoid/manim | eb3e5f419cb164f12b253cf885e19c35c62a2f31 | [
"MIT"
] | null | null | null | manimlib/animation/numbers.py | aDotInTheVoid/manim | eb3e5f419cb164f12b253cf885e19c35c62a2f31 | [
"MIT"
] | null | null | null | import warnings
from manimlib.animation.animation import Animation
from manimlib.mobject.numbers import DecimalNumber
from manimlib.utils.bezier import interpolate
class ChangingDecimal(Animation):
CONFIG = {
"suspend_mobject_updating": False,
}
def __init__(self, decimal_mob, number_update_func, **kwargs):
self.check_validity_of_input(decimal_mob)
        self.yell_about_deprecated_configuration(**kwargs)
self.number_update_func = number_update_func
super().__init__(decimal_mob, **kwargs)
def check_validity_of_input(self, decimal_mob):
if not isinstance(decimal_mob, DecimalNumber):
raise Exception(
"ChangingDecimal can only take "
"in a DecimalNumber"
)
    def yell_about_deprecated_configuration(self, **kwargs):
# Obviously this would optimally be removed at
# some point.
for attr in ["tracked_mobject", "position_update_func"]:
if attr in kwargs:
warnings.warn("""
Don't use {} for ChangingDecimal,
                    that functionality has been deprecated
and you should use a mobject updater
instead
""".format(attr)
)
def interpolate_mobject(self, alpha):
self.mobject.set_value(
self.number_update_func(alpha)
)
class ChangeDecimalToValue(ChangingDecimal):
def __init__(self, decimal_mob, target_number, **kwargs):
start_number = decimal_mob.number
super().__init__(
decimal_mob,
lambda a: interpolate(start_number, target_number, a),
**kwargs
)
| 32.566038 | 66 | 0.628042 |
a1ce4a5af50ec2a02700a3588b13f0aba54325e1 | 397 | py | Python | example/src/dsodemo/cli.py | navytux/setuptools_dso | 6b1b72f012bdba1c891ce7719dbd230345924209 | [
"BSD-3-Clause"
] | null | null | null | example/src/dsodemo/cli.py | navytux/setuptools_dso | 6b1b72f012bdba1c891ce7719dbd230345924209 | [
"BSD-3-Clause"
] | null | null | null | example/src/dsodemo/cli.py | navytux/setuptools_dso | 6b1b72f012bdba1c891ce7719dbd230345924209 | [
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import sys, os
def fixpath():
path = os.environ.get('PATH', '').split(os.pathsep)
moddir = os.path.dirname(__file__)
path.append(os.path.join(moddir, 'lib'))
os.environ['PATH'] = os.pathsep.join(path)
if sys.platform == "win32":
fixpath()
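# On Windows the bundled DSOs sit next to the module in lib/, so PATH must be
# extended before the extension import below; on Python >= 3.8,
# os.add_dll_directory would be the modern alternative (noted as an aside, not
# used here).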
from .ext import dtest
if __name__=='__main__':
print(dtest.foo())
print(dtest.bar())
| 19.85 | 55 | 0.65995 |
ba849a08141dd9933f34bd4549e20a1324e86e6c | 4,420 | py | Python | native/jni/external/selinux/sandbox/test_sandbox.py | Joyoe/Magisk-nosbin_magisk-nohide | 449441921740bf85926c14f41b3532822ca0eb65 | [
"MIT"
] | 2 | 2022-01-16T00:59:54.000Z | 2022-02-09T12:00:48.000Z | native/jni/external/selinux/sandbox/test_sandbox.py | Joyoe/Magisk-nosbin_magisk-nohide | 449441921740bf85926c14f41b3532822ca0eb65 | [
"MIT"
] | null | null | null | native/jni/external/selinux/sandbox/test_sandbox.py | Joyoe/Magisk-nosbin_magisk-nohide | 449441921740bf85926c14f41b3532822ca0eb65 | [
"MIT"
] | 2 | 2022-02-09T12:00:39.000Z | 2022-02-21T18:34:46.000Z | import unittest
import os
import shutil
import sys
from tempfile import mkdtemp
from subprocess import Popen, PIPE
class SandboxTests(unittest.TestCase):
def assertDenied(self, err):
self.assertTrue(b'Permission denied' in err,
'"Permission denied" not found in %r' % err)
def assertNotFound(self, err):
self.assertTrue(b'not found' in err,
'"not found" not found in %r' % err)
def assertFailure(self, status):
self.assertTrue(status != 0,
'"Succeeded when it should have failed')
def assertSuccess(self, status, err):
self.assertTrue(status == 0,
'"Sandbox should have succeeded for this test %r' % err)
def test_simple_success(self):
"Verify that we can read file descriptors handed to sandbox"
p1 = Popen(['cat', '/etc/passwd'], stdout=PIPE)
p2 = Popen([sys.executable, 'sandbox', 'grep', 'root'], stdin=p1.stdout, stdout=PIPE)
p1.stdout.close()
out, err = p2.communicate()
self.assertTrue(b'root' in out)
def test_cant_kill(self):
"Verify that we cannot send kill signal in the sandbox"
pid = os.getpid()
p = Popen([sys.executable, 'sandbox', 'kill', '-HUP', str(pid)], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertDenied(err)
def test_cant_ping(self):
"Verify that we can't ping within the sandbox"
        p = Popen([sys.executable, 'sandbox', 'ping', '-c', '1', '127.0.0.1'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertDenied(err)
def test_cant_mkdir(self):
"Verify that we can't mkdir within the sandbox"
p = Popen([sys.executable, 'sandbox', 'mkdir', '~/test'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertFailure(p.returncode)
def test_cant_list_homedir(self):
"Verify that we can't list homedir within the sandbox"
p = Popen([sys.executable, 'sandbox', 'ls', '~'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertFailure(p.returncode)
def test_cant_send_mail(self):
"Verify that we can't send mail within the sandbox"
p = Popen([sys.executable, 'sandbox', 'mail'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertDenied(err)
def test_cant_sudo(self):
"Verify that we can't run sudo within the sandbox"
p = Popen([sys.executable, 'sandbox', 'sudo'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertFailure(p.returncode)
def test_mount(self):
"Verify that we mount a file system"
p = Popen([sys.executable, 'sandbox', '-M', 'id'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_set_level(self):
"Verify that we set level a file system"
p = Popen([sys.executable, 'sandbox', '-l', 's0', 'id'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
def test_homedir(self):
"Verify that we set homedir a file system"
homedir = mkdtemp(dir=".", prefix=".sandbox_test")
p = Popen([sys.executable, 'sandbox', '-H', homedir, '-M', 'id'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
shutil.rmtree(homedir)
self.assertSuccess(p.returncode, err)
def test_tmpdir(self):
"Verify that we set tmpdir a file system"
tmpdir = mkdtemp(dir="/tmp", prefix=".sandbox_test")
p = Popen([sys.executable, 'sandbox', '-T', tmpdir, '-M', 'id'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
shutil.rmtree(tmpdir)
self.assertSuccess(p.returncode, err)
def test_include_file(self):
"Verify that sandbox can copy a file in the sandbox home and use it"
p = Popen([sys.executable, 'sandbox', '-i' ,'test_sandbox.py' , '-M', '/bin/cat', 'test_sandbox.py'],
stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertSuccess(p.returncode, err)
if __name__ == "__main__":
import selinux
if selinux.is_selinux_enabled() and selinux.security_getenforce() == 1:
unittest.main()
else:
print("SELinux must be in enforcing mode for this test")
| 38.77193 | 109 | 0.613348 |
2c525fbbf89eeeabf9512c4ed25ece3f60390e60 | 35,899 | py | Python | hmAP.py | zhujiaxiaowang/Safety-helmet-detection-based-on-Yolov3 | 5f68bd391fc1f5ca796b955c28ea8f4074215b09 | [
"MIT"
] | null | null | null | hmAP.py | zhujiaxiaowang/Safety-helmet-detection-based-on-Yolov3 | 5f68bd391fc1f5ca796b955c28ea8f4074215b09 | [
"MIT"
] | null | null | null | hmAP.py | zhujiaxiaowang/Safety-helmet-detection-based-on-Yolov3 | 5f68bd391fc1f5ca796b955c28ea8f4074215b09 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 10:33:15 2020
@author: CQU2616
"""
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import numpy as np
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored (e.g., python main.py --ignore person book)
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU (e.g., python main.py --set-class-iou person 0.7)
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()
'''
0,0 ------> x (width)
|
| (Left,Top)
| *_________
| | |
| |
y |_________|
(height) *
(Right,Bottom)
'''
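# Example with the convention above, using the inclusive pixel coordinates that
# the IoU computation later in this script relies on: a box given as
# "left top right bottom" = "10 20 50 80" covers (50-10+1) * (80-20+1)
# = 41 * 61 pixels.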
# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
specific_iou_flagged = True
# make sure that the cwd() is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GT_PATH = os.path.join(os.getcwd(), 'input', 'ground-truth')
DR_PATH = os.path.join(os.getcwd(), 'input', 'detection-results')
# if there are no images then no animation can be shown
IMG_PATH = os.path.join(os.getcwd(), 'input', 'images-optional')
if os.path.exists(IMG_PATH):
for dirpath, dirnames, files in os.walk(IMG_PATH):
if not files:
# no image files found
args.no_animation = True
else:
args.no_animation = True
# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
try:
import cv2
show_animation = True
except ImportError:
print("\"opencv-python\" not found, please install to visualize the results.")
args.no_animation = True
# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
try:
import matplotlib.pyplot as plt
draw_plot = True
except ImportError:
print("\"matplotlib\" not found, please install it to get the resulting plots.")
args.no_plot = True
def log_average_miss_rate(prec, rec, num_images):
"""
log-average miss rate:
Calculated by averaging miss rates at 9 evenly spaced FPPI points
between 10e-2 and 10e0, in log-space.
output:
lamr | log-average miss rate
mr | miss rate
fppi | false positives per image
references:
[1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
State of the Art." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 34.4 (2012): 743 - 761.
"""
# if there were no detections of that class
if prec.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = (1 - prec)
mr = (1 - rec)
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
# Use 9 evenly spaced reference points in log-space
ref = np.logspace(-2.0, 0.0, num = 9)
for i, ref_i in enumerate(ref):
# np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
# log(0) is undefined, so we use the np.maximum(1e-10, ref)
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
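# Minimal sketch of calling the helper above (values are illustrative only):
#   >>> prec = np.array([1.0, 0.5, 0.67])
#   >>> rec = np.array([0.33, 0.33, 0.67])
#   >>> lamr, mr, fppi = log_average_miss_rate(prec, rec, num_images=3)
# fppi is 1 - prec and mr is 1 - rec; lamr averages the miss rate at 9
# log-spaced FPPI reference points between 1e-2 and 1e0.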
"""
throw error and exit
"""
def error(msg):
print(msg)
sys.exit(0)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
    rec.insert(0, 0.0) # insert 0.0 at beginning of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
    prec.insert(0, 0.0) # insert 0.0 at beginning of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
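# Worked example for voc_ap (illustrative values; note the function mutates the
# lists it is given):
#   >>> ap, mrec, mpre = voc_ap(rec=[0.5, 1.0], prec=[1.0, 0.5])
# After padding, mpre is made monotonically non-increasing ([1, 1, 0.5, 0]) and
# the area under the stepped curve is (0.5-0)*1 + (1.0-0.5)*0.5 = 0.75.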
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
# open txt file lines to a list
with open(path) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
return content
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
# get text width for re-scaling
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
# get axis width in inches
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # get axis limit
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1]*proportion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
# sort the dictionary by decreasing value, into a list of tuples
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
# unpacking the list of tuples into two lists
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
#
if true_p_bar != "":
"""
Special case to draw in:
- green -> TP: True Positives (object detected and matches ground-truth)
- red -> FP: False Positives (object detected but does not match ground-truth)
- pink -> FN: False Negatives (object not detected but present in the ground-truth)
"""
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
# add legend
plt.legend(loc='lower right')
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
# trick to paint multicolor with offset:
# first paint everything and then repaint the first number
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val) # add a space before
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
# re-set axes to show number inside the figure
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
# set window title
fig.canvas.set_window_title(window_title)
# write classes in y axis
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
"""
Re-scale height accordingly
"""
init_height = fig.get_figheight()
    # compute the matrix height in points and inches
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
height_in = height_pt / dpi
# compute the required figure height
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
figure_height = height_in / (1 - top_margin - bottom_margin)
# set new height
if figure_height > init_height:
fig.set_figheight(figure_height)
# set plot title
plt.title(plot_title, fontsize=14)
# set axis titles
# plt.xlabel('classes')
plt.xlabel(x_label, fontsize='large')
# adjust size of window
fig.tight_layout()
# save the plot
fig.savefig(output_path)
# show image
if to_show:
plt.show()
# close the plot
plt.close()
"""
Create a ".temp_files/" and "output/" directory
"""
TEMP_FILES_PATH = ".temp_files"
if not os.path.exists(TEMP_FILES_PATH): # if it doesn't exist already
os.makedirs(TEMP_FILES_PATH)
output_files_path = "output"
if os.path.exists(output_files_path): # if it exist already
# reset the output directory
shutil.rmtree(output_files_path)
os.makedirs(output_files_path)
if draw_plot:
os.makedirs(os.path.join(output_files_path, "classes"))
if show_animation:
os.makedirs(os.path.join(output_files_path, "images", "detections_one_by_one"))
"""
ground-truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob(GT_PATH + '/*.txt')
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
counter_images_per_class = {}
gt_files = []
for txt_file in ground_truth_files_list:
#print(txt_file)
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
# check if there is a correspondent detection-results file
temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
is_difficult = False
already_seen_classes = []
for line in lines_list:
try:
if "difficult" in line:
class_name, left, top, right, bottom, _difficult = line.split()
is_difficult = True
else:
class_name, left, top, right, bottom = line.split()
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
error_msg += " Received: " + line
error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
error(error_msg)
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
bbox = left + " " + top + " " + right + " " +bottom
if is_difficult:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
is_difficult = False
else:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
# count that object
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
gt_counter_per_class[class_name] = 1
if class_name not in already_seen_classes:
if class_name in counter_images_per_class:
counter_images_per_class[class_name] += 1
else:
# if class didn't exist yet
counter_images_per_class[class_name] = 1
already_seen_classes.append(class_name)
# dump bounding_boxes into a ".json" file
new_temp_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
gt_files.append(new_temp_file)
with open(new_temp_file, 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
#print(gt_classes)
#print(gt_counter_per_class)
"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
if specific_iou_flagged:
n_args = len(args.set_class_iou)
error_msg = \
'\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
if n_args % 2 != 0:
error('Error, missing arguments. Flag usage:' + error_msg)
# [class_1] [IoU_1] [class_2] [IoU_2]
# specific_iou_classes = ['class_1', 'class_2']
specific_iou_classes = args.set_class_iou[::2] # even
# iou_list = ['IoU_1', 'IoU_2']
iou_list = args.set_class_iou[1::2] # odd
if len(specific_iou_classes) != len(iou_list):
error('Error, missing arguments. Flag usage:' + error_msg)
for tmp_class in specific_iou_classes:
if tmp_class not in gt_classes:
error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
for num in iou_list:
if not is_float_between_0_and_1(num):
error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
"""
detection-results
Load each of the detection-results files into a temporary ".json" file.
"""
# get a list with the detection-results files
dr_files_list = glob.glob(DR_PATH + '/*.txt')
dr_files_list.sort()
for class_index, class_name in enumerate(gt_classes):
bounding_boxes = []
for txt_file in dr_files_list:
#print(txt_file)
# the first time it checks if all the corresponding ground-truth files exist
file_id = txt_file.split(".txt",1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
if class_index == 0:
if not os.path.exists(temp_path):
error_msg = "Error. File not found: {}\n".format(temp_path)
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
error(error_msg)
lines = file_lines_to_list(txt_file)
for line in lines:
try:
tmp_class_name, confidence, left, top, right, bottom = line.split()
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
error_msg += " Received: " + line
error(error_msg)
if tmp_class_name == class_name:
#print("match")
bbox = left + " " + top + " " + right + " " +bottom
bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
#print(bounding_boxes)
# sort detection-results by decreasing confidence
bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
"""
Calculate the AP for each class
"""
sum_AP = 0.0
ap_dictionary = {}
lamr_dictionary = {}
# open file to store the output
with open(output_files_path + "/output.txt", 'w') as output_file:
output_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
"""
Load detection-results of that class
"""
dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
dr_data = json.load(open(dr_file))
"""
Assign detection-results to ground-truth objects
"""
nd = len(dr_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, detection in enumerate(dr_data):
file_id = detection["file_id"]
if show_animation:
# find ground truth image
ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*")
#tifCounter = len(glob.glob1(myPath,"*.tif"))
if len(ground_truth_img) == 0:
error("Error. Image not found with id: " + file_id)
elif len(ground_truth_img) > 1:
error("Error. Multiple image with id: " + file_id)
else: # found image
#print(IMG_PATH + "/" + ground_truth_img[0])
# Load image
img = cv2.imread(IMG_PATH + "/" + ground_truth_img[0])
# load image with draws of multiple detections
img_cumulative_path = output_files_path + "/images/" + ground_truth_img[0]
if os.path.isfile(img_cumulative_path):
img_cumulative = cv2.imread(img_cumulative_path)
else:
img_cumulative = img.copy()
# Add bottom border to image
bottom_border = 60
BLACK = [0, 0, 0]
img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
# assign detection-results to ground truth object if any
# open ground-truth with that file_id
gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load detected object bounding-box
bb = [ float(x) for x in detection["bbox"].split() ]
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ]
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
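                        # the +1 below follows the PASCAL VOC convention of
                        # inclusive pixel coordinates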
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# assign detection as true positive/don't care/false positive
if show_animation:
status = "NO MATCH FOUND!" # status is only used in the animation
# set minimum overlap
min_overlap = MINOVERLAP
if specific_iou_flagged:
if class_name in specific_iou_classes:
index = specific_iou_classes.index(class_name)
min_overlap = float(iou_list[index])
if ovmax >= min_overlap:
if "difficult" not in gt_match:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
if show_animation:
status = "MATCH!"
else:
# false positive (multiple detection)
fp[idx] = 1
if show_animation:
status = "REPEATED MATCH!"
else:
# false positive
fp[idx] = 1
if ovmax > 0:
status = "INSUFFICIENT OVERLAP"
"""
Draw image to show animation
"""
if show_animation:
                    height, width = img.shape[:2]
# colors (OpenCV works with BGR)
white = (255,255,255)
light_blue = (255,200,100)
green = (0,255,0)
light_red = (30,30,255)
# 1st line
margin = 10
v_pos = int(height - margin - (bottom_border / 2.0))
text = "Image: " + ground_truth_img[0] + " "
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
if ovmax != -1:
color = light_red
if status == "INSUFFICIENT OVERLAP":
text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
else:
text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
color = green
img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
# 2nd line
v_pos += int(bottom_border / 2.0)
rank_pos = str(idx+1) # rank position (idx starts at 0)
text = "Detection #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(detection["confidence"])*100)
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
color = light_red
if status == "MATCH!":
color = green
text = "Result: " + status + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
font = cv2.FONT_HERSHEY_SIMPLEX
                if ovmax > 0: # if the detection intersects any ground-truth box
bbgt = [ int(round(float(x))) for x in gt_match["bbox"].split() ]
cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
bb = [int(i) for i in bb]
cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
# show image
cv2.imshow("Animation", img)
cv2.waitKey(20) # show for 20 ms
# save image to output
output_img_path = output_files_path + "/images/detections_one_by_one/" + class_name + "_detection" + str(idx) + ".jpg"
cv2.imwrite(output_img_path, img)
# save the image with all the objects drawn to it
cv2.imwrite(img_cumulative_path, img_cumulative)
#print(tp)
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
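            # tp and fp are now cumulative counts per confidence rank, so each
            # index gives precision = TP/(TP+FP) and
            # recall = TP / (number of ground-truth objects of this class)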
#print(tp)
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
#print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
#print(prec)
ap, mrec, mprec = voc_ap(rec[:], prec[:])
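            # voc_ap computes AP as the area under the precision-recall curve
            # after enforcing a monotonically decreasing precision envelope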
sum_AP += ap
text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
"""
Write to output.txt
"""
rounded_prec = [ '%.2f' % elem for elem in prec ]
rounded_rec = [ '%.2f' % elem for elem in rec ]
output_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
if not args.quiet:
print(text)
ap_dictionary[class_name] = ap
n_images = counter_images_per_class[class_name]
lamr, mr, fppi = log_average_miss_rate(np.array(prec), np.array(rec), n_images)
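            # log-average miss rate: miss rate averaged at false-positives-
            # per-image points sampled on a log scale, as used in pedestrian
            # detection benchmarks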
lamr_dictionary[class_name] = lamr
"""
Draw plot
"""
if draw_plot:
plt.plot(rec, prec, '-o')
# add a new penultimate point to the list (mrec[-2], 0.0)
# since the last line segment (and respective area) do not affect the AP value
area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
# set window title
fig = plt.gcf() # gcf - get current figure
fig.canvas.set_window_title('AP ' + class_name)
# set plot title
plt.title('class: ' + text)
#plt.suptitle('This is a somewhat long figure title', fontsize=16)
# set axis titles
plt.xlabel('Recall')
plt.ylabel('Precision')
# optional - set axes
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
# Alternative option -> wait for button to be pressed
#while not plt.waitforbuttonpress(): pass # wait for key display
# Alternative option -> normal display
#plt.show()
# save the plot
fig.savefig(output_files_path + "/classes/" + class_name + ".png")
plt.cla() # clear axes for next plot
if show_animation:
cv2.destroyAllWindows()
output_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP*100)
output_file.write(text + "\n")
print(text)
"""
Draw false negatives
"""
if show_animation:
pink = (203,192,255)
for tmp_file in gt_files:
ground_truth_data = json.load(open(tmp_file))
#print(ground_truth_data)
# get name of corresponding image
start = TEMP_FILES_PATH + '/'
img_id = tmp_file[tmp_file.find(start)+len(start):tmp_file.rfind('_ground_truth.json')]
img_cumulative_path = output_files_path + "/images/" + img_id + ".jpg"
img = cv2.imread(img_cumulative_path)
if img is None:
img_path = IMG_PATH + '/' + img_id + ".jpg"
img = cv2.imread(img_path)
# draw false negatives
for obj in ground_truth_data:
if not obj['used']:
bbgt = [ int(round(float(x))) for x in obj["bbox"].split() ]
cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),pink,2)
cv2.imwrite(img_cumulative_path, img)
# remove the temp_files directory
shutil.rmtree(TEMP_FILES_PATH)
"""
Count total of detection-results
"""
# iterate through all the files
det_counter_per_class = {}
for txt_file in dr_files_list:
# get lines to list
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
# count that object
if class_name in det_counter_per_class:
det_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
det_counter_per_class[class_name] = 1
#print(det_counter_per_class)
dr_classes = list(det_counter_per_class.keys())
"""
    Plot the total number of occurrences of each class in the ground-truth
"""
if draw_plot:
window_title = "ground-truth-info"
plot_title = "ground-truth\n"
plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
x_label = "Number of objects per class"
output_path = output_files_path + "/ground-truth-info.png"
to_show = False
plot_color = 'forestgreen'
draw_plot_func(
gt_counter_per_class,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
'',
)
"""
Write number of ground-truth objects per class to results.txt
"""
with open(output_files_path + "/output.txt", 'a') as output_file:
output_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
output_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
"""
Finish counting true positives
"""
for class_name in dr_classes:
# if class exists in detection-result but not in ground-truth then there are no true positives in that class
if class_name not in gt_classes:
count_true_positives[class_name] = 0
#print(count_true_positives)
"""
    Plot the total number of occurrences of each class in the "detection-results" folder
"""
if draw_plot:
window_title = "detection-results-info"
# Plot title
plot_title = "detection-results\n"
plot_title += "(" + str(len(dr_files_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(det_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
# end Plot title
x_label = "Number of objects per class"
output_path = output_files_path + "/detection-results-info.png"
to_show = False
plot_color = 'forestgreen'
true_p_bar = count_true_positives
draw_plot_func(
det_counter_per_class,
len(det_counter_per_class),
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
true_p_bar
)
"""
Write number of detected objects per class to output.txt
"""
with open(output_files_path + "/output.txt", 'a') as output_file:
output_file.write("\n# Number of detected objects per class\n")
for class_name in sorted(dr_classes):
n_det = det_counter_per_class[class_name]
text = class_name + ": " + str(n_det)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_det - count_true_positives[class_name]) + ")\n"
output_file.write(text)
"""
Draw log-average miss rate plot (Show lamr of all classes in decreasing order)
"""
if draw_plot:
window_title = "lamr"
plot_title = "log-average miss rate"
x_label = "log-average miss rate"
output_path = output_files_path + "/lamr.png"
to_show = False
plot_color = 'royalblue'
draw_plot_func(
lamr_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
"""
Draw mAP plot (Show AP's of all classes in decreasing order)
"""
if draw_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(mAP*100)
x_label = "Average Precision"
output_path = output_files_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
) | 39.536344 | 135 | 0.569542 |
b5b6cb27bf3119558e8ff5802b29b39c2b5f72aa | 32,345 | py | Python | mobmonitor/checkfile/manager_unittest.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | mobmonitor/checkfile/manager_unittest.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | mobmonitor/checkfile/manager_unittest.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for Mob* Monitor checkfile manager."""
from __future__ import print_function
import imp
import mock
import os
import subprocess
import time
import threading
from cherrypy.process import plugins
from chromite.lib import cros_test_lib
from chromite.mobmonitor.checkfile import manager
# Test health check and related attributes
class TestHealthCheck(object):
"""Test health check."""
def Check(self):
"""Stub Check."""
return 0
def Diagnose(self, _errcode):
"""Stub Diagnose."""
return ('Unknown Error.', [])
class TestHealthCheckHasAttributes(object):
"""Test health check with attributes."""
CHECK_INTERVAL_SEC = 10
def Check(self):
"""Stub Check."""
return 0
def Diagnose(self, _errcode):
"""Stub Diagnose."""
return ('Unknown Error.', [])
class TestHealthCheckUnhealthy(object):
"""Unhealthy test health check."""
def __init__(self):
self.x = -1
def Check(self):
"""Stub Check."""
return self.x
def Diagnose(self, errcode):
"""Stub Diagnose."""
if errcode == -1:
return ('Stub Error.', [self.Repair])
return ('Unknown Error.', [])
def Repair(self):
self.x = 0
class TestHealthCheckMultipleActions(object):
"""Unhealthy check with many actions that have different parameters."""
def __init__(self):
self.x = -1
def Check(self):
"""Stub Check."""
return self.x
def Diagnose(self, errcode):
"""Stub Diagnose."""
if errcode == -1:
return ('Stub Error.', [self.NoParams, self.PositionalParams,
self.DefaultParams, self.MixedParams])
return ('Unknown Error.', [])
def NoParams(self):
"""NoParams Action."""
self.x = 0
# pylint: disable=unused-argument
def PositionalParams(self, x, y, z):
"""PositionalParams Action."""
self.x = 0
def DefaultParams(self, x=1, y=2, z=3):
"""DefaultParams Action."""
self.x = 0
def MixedParams(self, x, y, z=1):
"""MixedParams Action."""
self.x = 0
# pylint: enable=unused-argument
class TestHealthCheckQuasihealthy(object):
"""Quasi-healthy test health check."""
def Check(self):
"""Stub Check."""
return 1
def Diagnose(self, errcode):
"""Stub Diagnose."""
if errcode == 1:
return ('Stub Error.', [self.RepairStub])
return ('Unknown Error.', [])
def RepairStub(self):
"""Stub repair action."""
class TestHealthCheckBroken(object):
"""Broken test health check."""
def Check(self):
"""Stub Check."""
raise ValueError()
def Diagnose(self, _errcode):
"""A broken Diagnose function. A proper return should be a pair."""
raise ValueError()
def TestAction():
return True
TEST_SERVICE_NAME = 'test-service'
TEST_MTIME = 100
TEST_EXEC_TIME = 400
CHECKDIR = '.'
# Strings that are used to mock actual check modules.
CHECKFILE_MANY_SIMPLE = '''
SERVICE = 'test-service'
class MyHealthCheck2(object):
def Check(self):
return 0
def Diagnose(self, errcode):
return ('Unknown error.', [])
class MyHealthCheck3(object):
def Check(self):
return 0
def Diagnose(self, errcode):
return ('Unknown error.', [])
class MyHealthCheck4(object):
def Check(self):
return 0
def Diagnose(self, errcode):
return ('Unknown error.', [])
'''
CHECKFILE_MANY_SIMPLE_ONE_BAD = '''
SERVICE = 'test-service'
class MyHealthCheck(object):
def Check(self):
return 0
def Diagnose(self, errcode):
return ('Unknown error.', [])
class NotAHealthCheck(object):
def Diagnose(self, errcode):
return ('Unknown error.', [])
class MyHealthCheck2(object):
def Check(self):
return 0
def Diagnose(self, errcode):
return ('Unknown error.', [])
'''
NOT_A_CHECKFILE = '''
class NotAHealthCheck(object):
def NotCheckNorDiagnose(self):
return -1
'''
ANOTHER_NOT_A_CHECKFILE = '''
class AnotherNotAHealthCheck(object):
def AnotherNotCheckNorDiagnose(self):
return -2
'''
ACTION_FILE = '''
def TestAction():
return True
def AnotherAction():
return False
'''
class RunCommand(threading.Thread):
"""Helper class for executing the Mob* Monitor with a timeout."""
def __init__(self, cmd, timeout):
threading.Thread.__init__(self)
self.cmd = cmd
self.timeout = timeout
self.p = None
self.proc_stdout = None
self.proc_stderr = None
def run(self):
self.p = subprocess.Popen(self.cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.proc_stdout, self.proc_stderr = self.p.communicate()
def Stop(self):
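    # Escalate: join with timeout, terminate() (SIGTERM), re-join, then
    # kill() (SIGKILL) if the child still refuses to exit.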
self.join(self.timeout)
if self.is_alive():
self.p.terminate()
self.join(self.timeout)
if self.is_alive():
self.p.kill()
self.join(self.timeout)
return self.proc_stdout
class CheckFileManagerHelperTest(cros_test_lib.MockTestCase):
"""Unittests for CheckFileManager helper functions."""
def testMapHealthcheckStatusToDict(self):
"""Test mapping a manager.HEALTHCHECK_STATUS to a dict."""
def _func():
pass
status = manager.HEALTHCHECK_STATUS('test', False, 'desc', [_func])
expect = {'name': 'test', 'health': False, 'description': 'desc',
'actions': ['_func']}
self.assertEquals(expect, manager.MapHealthcheckStatusToDict(status))
def testMapServiceStatusToDict(self):
"""Test mapping a manager.SERVICE_STATUS to a dict."""
def _func():
pass
hcstatus = manager.HEALTHCHECK_STATUS('test', False, 'desc', [_func])
hcexpect = {'name': 'test', 'health': False, 'description': 'desc',
'actions': ['_func']}
status = manager.SERVICE_STATUS('test-service', False, [hcstatus])
expect = {'service': 'test-service', 'health': False,
'healthchecks': [hcexpect]}
self.assertEquals(expect, manager.MapServiceStatusToDict(status))
def testMapActionInfoToDict(self):
"""Test mapping a manager.ACTION_INFO to a dict."""
actioninfo = manager.ACTION_INFO('test', 'test', [1], {'a': 1})
expect = {'action': 'test', 'info': 'test', 'args': [1],
'kwargs': {'a': 1}}
self.assertEquals(expect, manager.MapActionInfoToDict(actioninfo))
def testIsHealthcheckHealthy(self):
"""Test checking whether health check statuses are healthy."""
# Test a healthy health check.
hch = manager.HEALTHCHECK_STATUS('healthy', True, manager.NULL_DESCRIPTION,
manager.EMPTY_ACTIONS)
self.assertTrue(manager.isHealthcheckHealthy(hch))
# Test a quasi-healthy health check.
hcq = manager.HEALTHCHECK_STATUS('quasi-healthy', True, 'Quasi-Healthy',
['QuasiAction'])
self.assertFalse(manager.isHealthcheckHealthy(hcq))
# Test an unhealthy health check.
hcu = manager.HEALTHCHECK_STATUS('unhealthy', False, 'Unhealthy',
['UnhealthyAction'])
self.assertFalse(manager.isHealthcheckHealthy(hcu))
# Test an object that is not a health check status.
s = manager.SERVICE_STATUS('service_status', True, [])
self.assertFalse(manager.isHealthcheckHealthy(s))
def testIsServiceHealthy(self):
"""Test checking whether service statuses are healthy."""
# Define some health check statuses.
hch = manager.HEALTHCHECK_STATUS('healthy', True, manager.NULL_DESCRIPTION,
manager.EMPTY_ACTIONS)
hcq = manager.HEALTHCHECK_STATUS('quasi-healthy', True, 'Quasi-Healthy',
['QuasiAction'])
hcu = manager.HEALTHCHECK_STATUS('unhealthy', False, 'Unhealthy',
['UnhealthyAction'])
# Test a healthy service.
s = manager.SERVICE_STATUS('healthy', True, [])
self.assertTrue(manager.isServiceHealthy(s))
# Test a quasi-healthy service.
s = manager.SERVICE_STATUS('quasi-healthy', True, [hch, hcq])
self.assertFalse(manager.isServiceHealthy(s))
# Test an unhealthy service.
s = manager.SERVICE_STATUS('unhealthy', False, [hcu])
self.assertFalse(manager.isServiceHealthy(s))
# Test an object that is not a service status.
self.assertFalse(manager.isServiceHealthy(hch))
def testDetermineHealthcheckStatusHealthy(self):
"""Test DetermineHealthCheckStatus on a healthy check."""
hcname = TestHealthCheck.__name__
testhc = TestHealthCheck()
expected = manager.HEALTHCHECK_STATUS(hcname, True,
manager.NULL_DESCRIPTION,
manager.EMPTY_ACTIONS)
self.assertEquals(expected,
manager.DetermineHealthcheckStatus(hcname, testhc))
  def testDetermineHealthcheckStatusUnhealthy(self):
"""Test DetermineHealthcheckStatus on an unhealthy check."""
hcname = TestHealthCheckUnhealthy.__name__
testhc = TestHealthCheckUnhealthy()
desc, actions = testhc.Diagnose(testhc.Check())
expected = manager.HEALTHCHECK_STATUS(hcname, False, desc, actions)
self.assertEquals(expected,
manager.DetermineHealthcheckStatus(hcname, testhc))
def testDetermineHealthcheckStatusQuasihealth(self):
"""Test DetermineHealthcheckStatus on a quasi-healthy check."""
hcname = TestHealthCheckQuasihealthy.__name__
testhc = TestHealthCheckQuasihealthy()
desc, actions = testhc.Diagnose(testhc.Check())
expected = manager.HEALTHCHECK_STATUS(hcname, True, desc, actions)
self.assertEquals(expected,
manager.DetermineHealthcheckStatus(hcname, testhc))
def testDetermineHealthcheckStatusBrokenCheck(self):
"""Test DetermineHealthcheckStatus raises on a broken health check."""
hcname = TestHealthCheckBroken.__name__
testhc = TestHealthCheckBroken()
result = manager.DetermineHealthcheckStatus(hcname, testhc)
self.assertEquals(hcname, result.name)
self.assertFalse(result.health)
self.assertFalse(result.actions)
def testIsHealthCheck(self):
"""Test that IsHealthCheck properly asserts the health check interface."""
class NoAttrs(object):
"""Test health check missing 'check' and 'diagnose' methods."""
class NoCheckAttr(object):
"""Test health check missing 'check' method."""
def Diagnose(self, errcode):
pass
class NoDiagnoseAttr(object):
"""Test health check missing 'diagnose' method."""
def Check(self):
pass
class GoodHealthCheck(object):
"""Test health check that implements 'check' and 'diagnose' methods."""
def Check(self):
pass
def Diagnose(self, errcode):
pass
self.assertFalse(manager.IsHealthCheck(NoAttrs()))
self.assertFalse(manager.IsHealthCheck(NoCheckAttr()))
self.assertFalse(manager.IsHealthCheck(NoDiagnoseAttr()))
self.assertTrue(manager.IsHealthCheck(GoodHealthCheck()))
def testApplyHealthCheckAttributesNoAttrs(self):
"""Test that we can apply attributes to a health check."""
testhc = TestHealthCheck()
result = manager.ApplyHealthCheckAttributes(testhc)
self.assertEquals(result.CHECK_INTERVAL_SEC,
manager.CHECK_INTERVAL_DEFAULT_SEC)
def testApplyHealthCheckAttributesHasAttrs(self):
"""Test that we do not override an acceptable attribute."""
testhc = TestHealthCheckHasAttributes()
check_interval = testhc.CHECK_INTERVAL_SEC
result = manager.ApplyHealthCheckAttributes(testhc)
self.assertEquals(result.CHECK_INTERVAL_SEC, check_interval)
def testImportFileAllHealthChecks(self):
"""Test that health checks and service name are collected."""
self.StartPatcher(mock.patch('os.path.splitext'))
os.path.splitext.return_value = '/path/to/test_check.py'
self.StartPatcher(mock.patch('os.path.getmtime'))
os.path.getmtime.return_value = TEST_MTIME
checkmodule = imp.new_module('test_check')
    exec(CHECKFILE_MANY_SIMPLE, checkmodule.__dict__)
self.StartPatcher(mock.patch('imp.load_source'))
imp.load_source.return_value = checkmodule
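    # imp.load_source is mocked to return the module built in memory above,
    # so ImportFile is exercised without touching the filesystem.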
healthchecks, mtime = manager.ImportFile(TEST_SERVICE_NAME, '/')
self.assertEquals(len(healthchecks), 3)
self.assertEquals(mtime, TEST_MTIME)
def testImportFileSomeHealthChecks(self):
"""Test importing when not all classes are actually health checks."""
self.StartPatcher(mock.patch('os.path.splitext'))
os.path.splitext.return_value = '/path/to/test_check.py'
self.StartPatcher(mock.patch('os.path.getmtime'))
os.path.getmtime.return_value = TEST_MTIME
checkmodule = imp.new_module('test_check')
    exec(CHECKFILE_MANY_SIMPLE_ONE_BAD, checkmodule.__dict__)
self.StartPatcher(mock.patch('imp.load_source'))
imp.load_source.return_value = checkmodule
healthchecks, mtime = manager.ImportFile(TEST_SERVICE_NAME, '/')
self.assertEquals(len(healthchecks), 2)
self.assertEquals(mtime, TEST_MTIME)
class CheckFileManagerTest(cros_test_lib.MockTestCase):
"""Unittests for CheckFileManager."""
def testCollectionExecutionCallbackCheckfiles(self):
"""Test the CollectionExecutionCallback on collecting checkfiles."""
self.StartPatcher(mock.patch('os.walk'))
os.walk.return_value = iter([[CHECKDIR, [TEST_SERVICE_NAME], []]])
self.StartPatcher(mock.patch('os.listdir'))
os.listdir.return_value = ['test_check.py']
self.StartPatcher(mock.patch('os.path.isfile'))
os.path.isfile.return_value = True
self.StartPatcher(mock.patch('imp.find_module'))
imp.find_module.return_value = (None, None, None)
self.StartPatcher(mock.patch('imp.load_module'))
myobj = TestHealthCheck()
manager.ImportFile = mock.Mock(return_value=[[myobj], TEST_MTIME])
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.CollectionExecutionCallback()
manager.ImportFile.assert_called_once_with(
TEST_SERVICE_NAME, './%s/test_check.py' % TEST_SERVICE_NAME)
self.assertTrue(TEST_SERVICE_NAME in cfm.service_checks)
self.assertEquals(cfm.service_checks[TEST_SERVICE_NAME],
{myobj.__class__.__name__: (TEST_MTIME, myobj)})
def testCollectionExecutionCallbackNoChecks(self):
"""Test the CollectionExecutionCallback with no valid check files."""
self.StartPatcher(mock.patch('os.walk'))
os.walk.return_value = iter([['/checkdir/', [], ['test.py']]])
manager.ImportFile = mock.Mock(return_value=None)
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.CollectionExecutionCallback()
self.assertFalse(manager.ImportFile.called)
self.assertFalse(TEST_SERVICE_NAME in cfm.service_checks)
def testStartCollectionExecution(self):
"""Test the StartCollectionExecution method."""
plugins.Monitor = mock.Mock()
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.StartCollectionExecution()
self.assertTrue(plugins.Monitor.called)
def testUpdateExistingHealthCheck(self):
"""Test update when a health check exists and is not stale."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
myobj = TestHealthCheck()
cfm.service_checks[TEST_SERVICE_NAME] = {myobj.__class__.__name__:
(TEST_MTIME, myobj)}
myobj2 = TestHealthCheck()
cfm.Update(TEST_SERVICE_NAME, [myobj2], TEST_MTIME)
self.assertTrue(TEST_SERVICE_NAME in cfm.service_checks)
self.assertEquals(cfm.service_checks[TEST_SERVICE_NAME],
{myobj.__class__.__name__: (TEST_MTIME, myobj)})
def testUpdateNonExistingHealthCheck(self):
"""Test adding a new health check to the manager."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.service_checks = {}
myobj = TestHealthCheck()
cfm.Update(TEST_SERVICE_NAME, [myobj], TEST_MTIME)
self.assertTrue(TEST_SERVICE_NAME in cfm.service_checks)
self.assertEquals(cfm.service_checks[TEST_SERVICE_NAME],
{myobj.__class__.__name__: (TEST_MTIME, myobj)})
def testExecuteFresh(self):
"""Test executing a health check when the result is still fresh."""
self.StartPatcher(mock.patch('time.time'))
exec_time_offset = TestHealthCheckHasAttributes.CHECK_INTERVAL_SEC / 2
time.time.return_value = TEST_EXEC_TIME + exec_time_offset
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.service_checks = {TEST_SERVICE_NAME:
{TestHealthCheckHasAttributes.__name__:
(TEST_MTIME, TestHealthCheckHasAttributes())}}
cfm.service_check_results = {
TEST_SERVICE_NAME: {TestHealthCheckHasAttributes.__name__:
(manager.HCEXECUTION_COMPLETED, TEST_EXEC_TIME,
None)}}
cfm.Execute()
_, exec_time, _ = cfm.service_check_results[TEST_SERVICE_NAME][
TestHealthCheckHasAttributes.__name__]
self.assertEquals(exec_time, TEST_EXEC_TIME)
def testExecuteStale(self):
"""Test executing a health check when the result is stale."""
self.StartPatcher(mock.patch('time.time'))
exec_time_offset = TestHealthCheckHasAttributes.CHECK_INTERVAL_SEC * 2
time.time.return_value = TEST_EXEC_TIME + exec_time_offset
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.service_checks = {TEST_SERVICE_NAME:
{TestHealthCheckHasAttributes.__name__:
(TEST_MTIME, TestHealthCheckHasAttributes())}}
cfm.service_check_results = {
TEST_SERVICE_NAME: {TestHealthCheckHasAttributes.__name__:
(manager.HCEXECUTION_COMPLETED, TEST_EXEC_TIME,
None)}}
cfm.Execute()
_, exec_time, _ = cfm.service_check_results[TEST_SERVICE_NAME][
TestHealthCheckHasAttributes.__name__]
self.assertEquals(exec_time, TEST_EXEC_TIME + exec_time_offset)
def testExecuteNonExistent(self):
"""Test executing a health check when the result is nonexistent."""
self.StartPatcher(mock.patch('time.time'))
time.time.return_value = TEST_EXEC_TIME
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.service_checks = {TEST_SERVICE_NAME:
{TestHealthCheck.__name__:
(TEST_MTIME, TestHealthCheck())}}
cfm.Execute()
resultsdict = cfm.service_check_results.get(TEST_SERVICE_NAME)
self.assertTrue(resultsdict is not None)
exec_status, exec_time, _ = resultsdict.get(TestHealthCheck.__name__,
(None, None, None))
self.assertTrue(exec_status is not None)
self.assertTrue(exec_time is not None)
self.assertEquals(exec_status, manager.HCEXECUTION_COMPLETED)
self.assertEquals(exec_time, TEST_EXEC_TIME)
def testExecuteForce(self):
"""Test executing a health check by ignoring the check interval."""
self.StartPatcher(mock.patch('time.time'))
exec_time_offset = TestHealthCheckHasAttributes.CHECK_INTERVAL_SEC / 2
time.time.return_value = TEST_EXEC_TIME + exec_time_offset
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
cfm.service_checks = {TEST_SERVICE_NAME:
{TestHealthCheckHasAttributes.__name__:
(TEST_MTIME, TestHealthCheckHasAttributes())}}
cfm.service_check_results = {
TEST_SERVICE_NAME: {TestHealthCheckHasAttributes.__name__:
(manager.HCEXECUTION_COMPLETED, TEST_EXEC_TIME,
None)}}
cfm.Execute(force=True)
_, exec_time, _ = cfm.service_check_results[TEST_SERVICE_NAME][
TestHealthCheckHasAttributes.__name__]
self.assertEquals(exec_time, TEST_EXEC_TIME + exec_time_offset)
def testConsolidateServiceStatesUnhealthy(self):
"""Test consolidating state for a service with unhealthy checks."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
# Setup some test check results
hcname = TestHealthCheck.__name__
statuses = [
manager.HEALTHCHECK_STATUS(hcname, False, 'Failed', ['Repair']),
manager.HEALTHCHECK_STATUS(hcname, True, 'Quasi', ['RepairQuasi']),
manager.HEALTHCHECK_STATUS(hcname, True, '', [])]
cfm.service_check_results.setdefault(TEST_SERVICE_NAME, {})
for i, status in enumerate(statuses):
name = '%s_%s' % (hcname, i)
cfm.service_check_results[TEST_SERVICE_NAME][name] = (
manager.HCEXECUTION_COMPLETED, TEST_EXEC_TIME,
status)
# Run and check the results.
cfm.ConsolidateServiceStates()
self.assertTrue(TEST_SERVICE_NAME in cfm.service_states)
_, health, healthchecks = cfm.service_states[TEST_SERVICE_NAME]
self.assertFalse(health)
self.assertEquals(2, len(healthchecks))
self.assertTrue(all([x in healthchecks for x in statuses[:2]]))
def testConsolidateServiceStatesQuasiHealthy(self):
"""Test consolidating state for a service with quasi-healthy checks."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
# Setup some test check results
hcname = TestHealthCheck.__name__
statuses = [
manager.HEALTHCHECK_STATUS(hcname, True, 'Quasi', ['RepairQuasi']),
manager.HEALTHCHECK_STATUS(hcname, True, '', [])]
cfm.service_check_results.setdefault(TEST_SERVICE_NAME, {})
for i, status in enumerate(statuses):
name = '%s_%s' % (hcname, i)
cfm.service_check_results[TEST_SERVICE_NAME][name] = (
manager.HCEXECUTION_COMPLETED, TEST_EXEC_TIME,
status)
# Run and check the results.
cfm.ConsolidateServiceStates()
self.assertTrue(TEST_SERVICE_NAME in cfm.service_states)
_, health, healthchecks = cfm.service_states[TEST_SERVICE_NAME]
self.assertTrue(health)
self.assertEquals(1, len(healthchecks))
self.assertTrue(statuses[0] in healthchecks)
def testConsolidateServiceStatesHealthy(self):
"""Test consolidating state for a healthy service."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
# Setup some test check results
hcname = TestHealthCheck.__name__
hcname2 = '%s_2' % hcname
statuses = [
manager.HEALTHCHECK_STATUS(hcname, True, '', []),
manager.HEALTHCHECK_STATUS(hcname2, True, '', [])]
cfm.service_check_results.setdefault(TEST_SERVICE_NAME, {})
cfm.service_check_results[TEST_SERVICE_NAME][hcname] = (
manager.HCEXECUTION_COMPLETED, TEST_EXEC_TIME, statuses[0])
cfm.service_check_results[TEST_SERVICE_NAME][hcname2] = (
manager.HCEXECUTION_IN_PROGRESS, TEST_EXEC_TIME, statuses[1])
# Run and check.
cfm.ConsolidateServiceStates()
self.assertTrue(TEST_SERVICE_NAME in cfm.service_states)
_, health, healthchecks = cfm.service_states.get(TEST_SERVICE_NAME)
self.assertTrue(health)
self.assertEquals(0, len(healthchecks))
def testGetServiceList(self):
"""Test the GetServiceList RPC response."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
self.assertEquals([], cfm.GetServiceList())
status = manager.SERVICE_STATUS(TEST_SERVICE_NAME, True, [])
cfm.service_states[TEST_SERVICE_NAME] = status
self.assertEquals([TEST_SERVICE_NAME], cfm.GetServiceList())
def testGetStatusNonExistent(self):
"""Test the GetStatus RPC response when the service does not exist."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
self.assertFalse(TEST_SERVICE_NAME in cfm.service_states)
status = manager.SERVICE_STATUS(TEST_SERVICE_NAME, False, [])
self.assertEquals(status, cfm.GetStatus(TEST_SERVICE_NAME))
def testGetStatusSingleService(self):
"""Test the GetStatus RPC response for a single service."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
s1name = TEST_SERVICE_NAME
s2name = '%s_2' % s1name
status1 = manager.SERVICE_STATUS(s1name, True, [])
status2 = manager.SERVICE_STATUS(s2name, True, [])
cfm.service_states[s1name] = status1
cfm.service_states[s2name] = status2
self.assertEquals(status1, cfm.GetStatus(s1name))
self.assertEquals(status2, cfm.GetStatus(s2name))
def testGetStatusAllServices(self):
"""Test the GetStatus RPC response when no service is specified."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
s1name = TEST_SERVICE_NAME
s2name = '%s_2' % s1name
status1 = manager.SERVICE_STATUS(s1name, True, [])
status2 = manager.SERVICE_STATUS(s2name, True, [])
cfm.service_states[s1name] = status1
cfm.service_states[s2name] = status2
result = cfm.GetStatus('')
self.assertEquals(2, len(result))
self.assertTrue(all([x in result for x in [status1, status2]]))
def testRepairServiceHealthy(self):
"""Test the RepairService RPC when the service is healthy."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
healthy_status = manager.SERVICE_STATUS(TEST_SERVICE_NAME, True, [])
cfm.service_states[TEST_SERVICE_NAME] = healthy_status
self.assertEquals(healthy_status, cfm.RepairService(TEST_SERVICE_NAME,
'HealthcheckName',
'RepairFuncName',
[], {}))
def testRepairServiceNonExistent(self):
"""Test the RepairService RPC when the service does not exist."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
self.assertFalse(TEST_SERVICE_NAME in cfm.service_states)
expected = manager.SERVICE_STATUS(TEST_SERVICE_NAME, False, [])
result = cfm.RepairService(TEST_SERVICE_NAME, 'DummyHealthcheck',
'DummyAction', [], {})
self.assertEquals(expected, result)
def testRepairServiceInvalidAction(self):
"""Test the RepairService RPC when the action is not recognized."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
hcobj = TestHealthCheckUnhealthy()
cfm.service_checks[TEST_SERVICE_NAME] = {
hcobj.__class__.__name__: (TEST_MTIME, hcobj)}
unhealthy_status = manager.SERVICE_STATUS(
TEST_SERVICE_NAME, False,
[manager.HEALTHCHECK_STATUS(hcobj.__class__.__name__,
False, 'Always fails', [hcobj.Repair])])
cfm.service_states[TEST_SERVICE_NAME] = unhealthy_status
status = cfm.GetStatus(TEST_SERVICE_NAME)
self.assertFalse(status.health)
self.assertEquals(1, len(status.healthchecks))
status = cfm.RepairService(TEST_SERVICE_NAME, hcobj.__class__.__name__,
'Blah', [], {})
self.assertFalse(status.health)
self.assertEquals(1, len(status.healthchecks))
def testRepairServiceInvalidActionArguments(self):
"""Test the RepairService RPC when the action arguments are invalid."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
hcobj = TestHealthCheckUnhealthy()
cfm.service_checks[TEST_SERVICE_NAME] = {
hcobj.__class__.__name__: (TEST_MTIME, hcobj)}
unhealthy_status = manager.SERVICE_STATUS(
TEST_SERVICE_NAME, False,
[manager.HEALTHCHECK_STATUS(hcobj.__class__.__name__,
False, 'Always fails', [hcobj.Repair])])
cfm.service_states[TEST_SERVICE_NAME] = unhealthy_status
status = cfm.GetStatus(TEST_SERVICE_NAME)
self.assertFalse(status.health)
self.assertEquals(1, len(status.healthchecks))
status = cfm.RepairService(TEST_SERVICE_NAME, hcobj.__class__.__name__,
'Repair', [1, 2, 3], {})
self.assertFalse(status.health)
self.assertEquals(1, len(status.healthchecks))
def testRepairService(self):
"""Test the RepairService RPC to repair an unhealthy service."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
hcobj = TestHealthCheckUnhealthy()
cfm.service_checks[TEST_SERVICE_NAME] = {
hcobj.__class__.__name__: (TEST_MTIME, hcobj)}
unhealthy_status = manager.SERVICE_STATUS(
TEST_SERVICE_NAME, False,
[manager.HEALTHCHECK_STATUS(hcobj.__class__.__name__,
False, 'Always fails', [hcobj.Repair])])
cfm.service_states[TEST_SERVICE_NAME] = unhealthy_status
status = cfm.GetStatus(TEST_SERVICE_NAME)
self.assertFalse(status.health)
self.assertEquals(1, len(status.healthchecks))
status = cfm.RepairService(TEST_SERVICE_NAME,
hcobj.__class__.__name__,
hcobj.Repair.__name__,
[], {})
self.assertTrue(status.health)
self.assertEquals(0, len(status.healthchecks))
def testActionInfoServiceNonExistent(self):
"""Test the ActionInfo RPC when the service does not exist."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
self.assertFalse(TEST_SERVICE_NAME in cfm.service_states)
expect = manager.ACTION_INFO('test', 'Service not recognized.',
[], {})
result = cfm.ActionInfo(TEST_SERVICE_NAME, 'test', 'test')
self.assertEquals(expect, result)
def testActionInfoServiceHealthy(self):
"""Test the ActionInfo RPC when the service is healthy."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
healthy_status = manager.SERVICE_STATUS(TEST_SERVICE_NAME, True, [])
cfm.service_states[TEST_SERVICE_NAME] = healthy_status
expect = manager.ACTION_INFO('test', 'Service is healthy.',
[], {})
result = cfm.ActionInfo(TEST_SERVICE_NAME, 'test', 'test')
self.assertEquals(expect, result)
def testActionInfoActionNonExistent(self):
"""Test the ActionInfo RPC when the action does not exist."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
hcobj = TestHealthCheckUnhealthy()
cfm.service_checks[TEST_SERVICE_NAME] = {
hcobj.__class__.__name__: (TEST_MTIME, hcobj)}
unhealthy_status = manager.SERVICE_STATUS(
TEST_SERVICE_NAME, False,
[manager.HEALTHCHECK_STATUS(hcobj.__class__.__name__,
False, 'Always fails', [hcobj.Repair])])
cfm.service_states[TEST_SERVICE_NAME] = unhealthy_status
expect = manager.ACTION_INFO('test', 'Action not recognized.', [], {})
result = cfm.ActionInfo(TEST_SERVICE_NAME, hcobj.__class__.__name__,
'test')
self.assertEquals(expect, result)
def testActionInfo(self):
"""Test the ActionInfo RPC to collect information on a repair action."""
cfm = manager.CheckFileManager(checkdir=CHECKDIR)
hcobj = TestHealthCheckMultipleActions()
hcname = hcobj.__class__.__name__
actions = [hcobj.NoParams, hcobj.PositionalParams, hcobj.DefaultParams,
hcobj.MixedParams]
cfm.service_checks[TEST_SERVICE_NAME] = {hcname: (TEST_MTIME, hcobj)}
unhealthy_status = manager.SERVICE_STATUS(
TEST_SERVICE_NAME, False,
[manager.HEALTHCHECK_STATUS(hcname, False, 'Always fails', actions)])
cfm.service_states[TEST_SERVICE_NAME] = unhealthy_status
# Test ActionInfo when the action has no parameters.
expect = manager.ACTION_INFO('NoParams', 'NoParams Action.', [], {})
self.assertEquals(expect,
cfm.ActionInfo(TEST_SERVICE_NAME, hcname, 'NoParams'))
# Test ActionInfo when the action has only positional parameters.
expect = manager.ACTION_INFO('PositionalParams', 'PositionalParams Action.',
['x', 'y', 'z'], {})
self.assertEquals(expect,
cfm.ActionInfo(TEST_SERVICE_NAME,
hcname, 'PositionalParams'))
# Test ActionInfo when the action has only default parameters.
expect = manager.ACTION_INFO('DefaultParams', 'DefaultParams Action.',
[], {'x': 1, 'y': 2, 'z': 3})
self.assertEquals(expect,
cfm.ActionInfo(TEST_SERVICE_NAME,
hcname, 'DefaultParams'))
# Test ActionInfo when the action has positional and default parameters.
expect = manager.ACTION_INFO('MixedParams', 'MixedParams Action.',
['x', 'y'], {'z': 1})
self.assertEquals(expect, cfm.ActionInfo(TEST_SERVICE_NAME,
hcname, 'MixedParams'))
| 35.543956 | 80 | 0.681465 |
5f7f8f0c7f8a4120d0671975ae42bd9ad01bdb75 | 439 | py | Python | djangocms_baseplugins/text/translation.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 2 | 2019-04-14T01:31:22.000Z | 2020-03-05T13:06:57.000Z | djangocms_baseplugins/text/translation.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 32 | 2017-04-04T09:28:06.000Z | 2021-08-18T16:23:02.000Z | djangocms_baseplugins/text/translation.py | bnzk/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | null | null | null | from django.conf import settings
from modeltranslation.translator import TranslationOptions, translator
from djangocms_baseplugins.baseplugin import defaults
from . import conf
from .models import Text
class TextTranslationOptions(TranslationOptions):
fields = defaults.TRANSLATED_FIELDS + conf.TRANSLATED_FIELDS
if getattr(settings, 'DJANGOCMS_BASEPLUGINS_TRANSLATE', None):
translator.register(Text, TextTranslationOptions)
| 29.266667 | 70 | 0.838269 |
6951603c37853d581ab4afb52d1a4d3d95ad2e48 | 12,462 | py | Python | deep3dmap/datasets/scannet.py | achao2013/DeepRecon | 1c9b0480710212e1fe86ab75dcf0b30bd9f654e7 | [
"Apache-2.0"
] | 30 | 2022-02-05T18:35:27.000Z | 2022-02-09T09:14:41.000Z | deep3dmap/datasets/scannet.py | achao2013/DeepRecon | 1c9b0480710212e1fe86ab75dcf0b30bd9f654e7 | [
"Apache-2.0"
] | null | null | null | deep3dmap/datasets/scannet.py | achao2013/DeepRecon | 1c9b0480710212e1fe86ab75dcf0b30bd9f654e7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) achao2013. All rights reserved.
import torch
import numpy as np
import os
import cv2
import pickle
from PIL import Image
from torch.utils.data import Dataset
from .builder import DATASETS
from .pipelines import Compose
import sys
sys.path.append('.')
import argparse
import json
import os
import numpy as np
from deep3dmap.core.renderer.rerender_pr import PyRenderer
import trimesh
from deep3dmap.core.evaluation.depth_eval import eval_depth
from deep3dmap.core.evaluation.mesh_eval import eval_fscore
from deep3dmap.core.evaluation.metrics_utils import parse_metrics_neucon
import open3d as o3d
import ray
from ray.exceptions import GetTimeoutError
torch.multiprocessing.set_sharing_strategy('file_system')
@DATASETS.register_module()
class ScanNetDataset(Dataset):
CLASSES=None
def __init__(self, datapath, mode, pipeline, nviews, n_scales, epoch=0, test_mode=False):
super(ScanNetDataset, self).__init__()
self.datapath = datapath
self.mode = mode
self.test_mode = test_mode
self.n_views = nviews
self.pipeline = pipeline
self.tsdf_file = 'all_tsdf_{}'.format(self.n_views)
assert self.mode in ["train", "val", "test", "train_debug", "val_debug"]
self.metas = self.build_list()
if mode == 'test':
self.source_path = 'scans_test'
else:
self.source_path = 'scans'
self.n_scales = n_scales
self.epoch = epoch
self.tsdf_cashe = {}
self.max_cashe = 100
# processing pipeline
self.pipeline = Compose(pipeline)
self.flag = np.zeros(len(self), dtype=np.uint8)
def set_epoch(self, epoch):
self.epoch=epoch
self.tsdf_cashe = {}
def build_list(self):
with open(os.path.join(self.datapath, self.tsdf_file, 'fragments_{}.pkl'.format(self.mode)), 'rb') as f:
metas = pickle.load(f)
return metas
def __len__(self):
return len(self.metas)
def read_cam_file(self, filepath, vid):
intrinsics = np.loadtxt(os.path.join(filepath, 'intrinsic', 'intrinsic_color.txt'), delimiter=' ')[:3, :3]
intrinsics = intrinsics.astype(np.float32)
extrinsics = np.loadtxt(os.path.join(filepath, 'pose', '{}.txt'.format(str(vid))))
return intrinsics, extrinsics
def read_img(self, filepath):
img = Image.open(filepath)
return img
def read_depth(self, filepath):
# Read depth image and camera pose
depth_im = cv2.imread(filepath, -1).astype(
np.float32)
depth_im /= 1000. # depth is saved in 16-bit PNG in millimeters
depth_im[depth_im > 3.0] = 0
return depth_im
def read_scene_volumes(self, data_path, scene):
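        # Cache full multi-scale TSDF volumes per scene; the cache is simply
        # reset once it grows beyond max_cashe entries.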
if scene not in self.tsdf_cashe.keys():
if len(self.tsdf_cashe) > self.max_cashe:
self.tsdf_cashe = {}
full_tsdf_list = []
for l in range(self.n_scales + 1):
# load full tsdf volume
full_tsdf = np.load(os.path.join(data_path, scene, 'full_tsdf_layer{}.npz'.format(l)),
allow_pickle=True)
full_tsdf_list.append(full_tsdf.f.arr_0)
self.tsdf_cashe[scene] = full_tsdf_list
return self.tsdf_cashe[scene]
def __getitem__(self, idx):
meta = self.metas[idx]
imgs = []
depth = []
extrinsics_list = []
intrinsics_list = []
tsdf_list = self.read_scene_volumes(os.path.join(self.datapath, self.tsdf_file), meta['scene'])
for i, vid in enumerate(meta['image_ids']):
# load images
imgs.append(
self.read_img(
os.path.join(self.datapath, self.source_path, meta['scene'], 'color', '{}.jpg'.format(vid))))
depth.append(
self.read_depth(
os.path.join(self.datapath, self.source_path, meta['scene'], 'depth', '{}.png'.format(vid)))
)
# load intrinsics and extrinsics
intrinsics, extrinsics = self.read_cam_file(os.path.join(self.datapath, self.source_path, meta['scene']),
vid)
intrinsics_list.append(intrinsics)
extrinsics_list.append(extrinsics)
intrinsics = np.stack(intrinsics_list)
extrinsics = np.stack(extrinsics_list)
items = {
'imgs': imgs,
'depth': depth,
'intrinsics': intrinsics,
'extrinsics': extrinsics,
'tsdf_list_full': tsdf_list,
'vol_origin': meta['vol_origin'],
'scene': meta['scene'],
'fragment': meta['scene'] + '_' + str(meta['fragment_id']),
'epoch': [self.epoch],
}
if self.pipeline is not None:
items = self.pipeline(items)
return items
def evaluate(self, outputs, metric, data_path, save_path, gt_path, max_depth,
num_workers, loader_num_workers, n_proc, n_gpu, **kwargs):
def process(scene, total_scenes_index, total_scenes_count,
data_path, save_path, gt_path, max_depth, loader_num_workers):
width, height = 640, 480
test_framid = os.listdir(os.path.join(data_path, scene, 'color'))
n_imgs = len(test_framid)
intrinsic_dir = os.path.join(data_path, scene, 'intrinsic', 'intrinsic_depth.txt')
cam_intr = np.loadtxt(intrinsic_dir, delimiter=' ')[:3, :3]
dataset = ScanNetSceneDataset(n_imgs, scene, data_path, max_depth)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=None, collate_fn=collate_fn,
batch_sampler=None, num_workers=loader_num_workers)
voxel_size = 4
# re-fuse to remove hole filling since filled holes are penalized in
# mesh metrics
# tsdf_fusion = TSDFFusion(vol_dim, float(voxel_size)/100, origin, color=False)
#volume = o3d.pipelines.integration.ScalableTSDFVolume(
volume = o3d.integration.ScalableTSDFVolume(
voxel_length=float(voxel_size) / 100,
sdf_trunc=3 * float(voxel_size) / 100,
color_type=o3d.integration.TSDFVolumeColorType.RGB8)
# color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
mesh_file = os.path.join(save_path, '%s.ply' % scene.replace('/', '-'))
try:
mesh = trimesh.load(mesh_file, process=False)
            except Exception:  # mesh missing or unreadable; skip this scene
return scene, None
# mesh renderer
renderer = PyRenderer()
mesh_opengl = renderer.mesh_opengl(mesh)
for i, (cam_pose, depth_trgt, _) in enumerate(dataloader):
print(total_scenes_index, total_scenes_count, scene, i, len(dataloader))
                if not np.isfinite(cam_pose[0][0]):  # '== np.nan' is always False; isfinite catches nan and +/-inf
continue
_, depth_pred = renderer(height, width, cam_intr, cam_pose, mesh_opengl)
temp = eval_depth(depth_pred, depth_trgt)
if i == 0:
metrics_depth = temp
else:
metrics_depth = {key: value + temp[key]
for key, value in metrics_depth.items()}
# placeholder
color_im = np.repeat(depth_pred[:, :, np.newaxis] * 255, 3, axis=2).astype(np.uint8)
depth_pred = o3d.geometry.Image(depth_pred)
color_im = o3d.geometry.Image(color_im)
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(color_im, depth_pred, depth_scale=1.0,
depth_trunc=5.0,
convert_rgb_to_intensity=False)
volume.integrate(
rgbd,
o3d.camera.PinholeCameraIntrinsic(width=width, height=height, fx=cam_intr[0, 0], fy=cam_intr[1, 1],
cx=cam_intr[0, 2],
cy=cam_intr[1, 2]), np.linalg.inv(cam_pose))
metrics_depth = {key: value / len(dataloader)
for key, value in metrics_depth.items()}
# save trimed mesh
file_mesh_trim = os.path.join(save_path, '%s_trim_single.ply' % scene.replace('/', '-'))
o3d.io.write_triangle_mesh(file_mesh_trim, volume.extract_triangle_mesh())
# eval trimed mesh
file_mesh_trgt = os.path.join(gt_path, scene, scene + '_vh_clean_2.ply')
metrics_mesh = eval_fscore(file_mesh_trim, file_mesh_trgt)
metrics = {**metrics_depth, **metrics_mesh}
rslt_file = os.path.join(save_path, '%s_metrics.json' % scene.replace('/', '-'))
json.dump(metrics, open(rslt_file, 'w'))
return scene, metrics
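        # Reserve num_workers+1 CPUs per ray task and a 1/n_proc GPU slice,
        # letting n_proc scene workers share each physical GPU.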
@ray.remote(num_cpus=num_workers + 1, num_gpus=(1 / n_proc))
def process_with_single_worker(info_files, data_path, save_path, gt_path, max_depth, loader_num_workers):
metrics = {}
for i, info_file in enumerate(info_files):
scene, temp = process(info_file, i, len(info_files),
data_path, save_path, gt_path, max_depth, loader_num_workers)
if temp is not None:
metrics[scene] = temp
return metrics
def split_list(_list, n):
assert len(_list) >= n
ret = [[] for _ in range(n)]
for idx, item in enumerate(_list):
ret[idx % n].append(item)
return ret
all_proc = n_proc * n_gpu
ray.init(num_cpus=all_proc * (num_workers + 1), num_gpus=n_gpu)
info_files = sorted(os.listdir(data_path))
info_files = split_list(info_files, all_proc)
ray_worker_ids = []
for w_idx in range(all_proc):
ray_worker_ids.append(process_with_single_worker.remote(info_files[w_idx],
data_path, save_path, gt_path, max_depth, loader_num_workers))
        try:
            results = ray.get(ray_worker_ids, timeout=14400)
        except GetTimeoutError:
            print("`get` timed out.")
            results = []  # avoid NameError in the aggregation loop below
metrics = {}
for r in results:
metrics.update(r)
rslt_file = os.path.join(save_path, 'metrics.json')
json.dump(metrics, open(rslt_file, 'w'))
# display results
parse_metrics_neucon(rslt_file)
print('parse_metrics_neucon end')
return metrics
def collate_fn(list_data):
cam_pose, depth_im, _ = list_data
    # batch_size=None, so each sample passes through unchanged (no collation needed)
return cam_pose, depth_im, _
class ScanNetSceneDataset(Dataset):
"""Pytorch Dataset for a single scene. getitem loads individual frames"""
def __init__(self, n_imgs, scene, data_path, max_depth, id_list=None):
"""
Args:
"""
self.n_imgs = n_imgs
self.scene = scene
self.data_path = data_path
self.max_depth = max_depth
if id_list is None:
self.id_list = [i for i in range(n_imgs)]
else:
self.id_list = id_list
def __len__(self):
return self.n_imgs
def __getitem__(self, id):
"""
Returns:
dict of meta data and images for a single frame
"""
id = self.id_list[id]
cam_pose = np.loadtxt(os.path.join(self.data_path, self.scene, "pose", str(id) + ".txt"), delimiter=' ')
# Read depth image and camera pose
depth_im = cv2.imread(os.path.join(self.data_path, self.scene, "depth", str(id) + ".png"), -1).astype(
np.float32)
depth_im /= 1000. # depth is saved in 16-bit PNG in millimeters
depth_im[depth_im > self.max_depth] = 0
# Read RGB image
color_image = cv2.cvtColor(cv2.imread(os.path.join(self.data_path, self.scene, "color", str(id) + ".jpg")),
cv2.COLOR_BGR2RGB)
color_image = cv2.resize(color_image, (depth_im.shape[1], depth_im.shape[0]), interpolation=cv2.INTER_AREA)
return cam_pose, depth_im, color_image
| 36.979228 | 119 | 0.582491 |
f5a7fd627f706da58a67bd89caf9990b80a840f3 | 4,805 | py | Python | src/print-alt-fastas.py | cory-weller/pseudodiploidy | 33b22e906ccae424ecf065e98032c2843290b57c | [
"MIT"
] | null | null | null | src/print-alt-fastas.py | cory-weller/pseudodiploidy | 33b22e906ccae424ecf065e98032c2843290b57c | [
"MIT"
] | null | null | null | src/print-alt-fastas.py | cory-weller/pseudodiploidy | 33b22e906ccae424ecf065e98032c2843290b57c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import subprocess
import gzip
chromosome = sys.argv[1]
start = sys.argv[2]
end = sys.argv[3]
# sys.argv[4] previously re-assigned `chromosome`, clobbering sys.argv[1];
# the duplicate, unused assignment has been removed.
vcfFileName = 'data/external/chromosome12.vcf.gz'
chromosomeName = 'chromosome12'
def buildGenotypeDictionary(vcfFileName):
# Create dictionary for this chromosome
genotypeDict = {}
with gzip.open(vcfFileName, 'r') as infile:
for line in infile:
try: line = line.decode()
except(UnicodeDecodeError, AttributeError): pass
            if line.startswith('#'):
                # skip VCF header/meta lines
                continue
            chromosome, pos, _, ref, alt = line.strip().split()[0:5]
            # NOTE: the per-strain genotype columns (fields 10+) are not needed
            # to map positions to ref/alt alleles; the unfinished per-strain
            # loop that stood here (undefined `strains`, placeholder body) is
            # removed.
if chromosome not in genotypeDict:
genotypeDict[chromosome] = {}
genotypes = [ref] + alt.split(',') # index0 = ref, index1 = alt1, index2 = alt2, etc
genotypeDict[chromosome][int(pos)] = genotypes
return(genotypeDict)
genotypeDict = buildGenotypeDictionary(vcfFileName)
positions = list(genotypeDict[chromosomeName].keys())
positions.sort(reverse = True)
# fasta = ''
# import VCF as list
# sort inverse by position (descending, high to low)
# for item in list,
# read in whole thing
# reverse order
#
# vcf[position][individual]  (stray pseudocode from the notes above, commented out)
def import_fasta(filename):
    with gzip.open(filename, 'rt') as infile:  # text mode so ''.join below gets str, not bytes
text = infile.read().splitlines()
text = list(''.join(text[1:]))
return(text)
def personalize_fasta(site_genotypes, personal_genotypes_file, chromosome, ref_sequence):
haplotype1 = ref_sequence[:]
haplotype2 = ref_sequence[:]
with open(personal_genotypes_file, 'r') as genotype_file:
for line in genotype_file:
chrom, pos, hap1, hap2 = line.split()
if chrom == chromosome:
pos = int(pos)
if hap1 == "1/1":
haplotype1[pos-1] = site_genotypes[pos][1]
elif hap1 == "./.":
haplotype1[pos-1] = "N"
if hap2 == "1/1":
haplotype2[pos-1] = site_genotypes[pos][1]
elif hap2 == "./.":
haplotype2[pos-1] = "N"
return(haplotype1, haplotype2)
def write_fasta(ind_n, chromosome, haplotype1, haplotype2, wrap_length):
with open(str(ind_n) + "." + chromosome + ".fasta", 'w') as outfile:
outfile.write(">" + str(ind_n) + "_" + chromosome + "_haplotype1" "\n")
outfile.write('\n'.join([''.join(haplotype1[i:i+wrap_length]) for i in range(0,len(haplotype1),wrap_length)]))
outfile.write('\n')
outfile.write(">" + str(ind_n) + "_" + chromosome + "_haplotype2" "\n")
outfile.write('\n'.join([''.join(haplotype2[i:i+wrap_length]) for i in range(0,len(haplotype2),wrap_length)]))
def get_diploidGenomeSize(fastaFilename):
cmd = """grep "^[^>]" %s | wc -c""" % (fastaFilename)
output = subprocess.check_output(cmd, shell=True).rstrip()
return(int(output))
def run_wgsim(fastaFilename, readLength, coverage):
diploidGenomeSize = get_diploidGenomeSize(fastaFilename)
nReads = round( (float(diploidGenomeSize) * coverage) / (4*readLength) )
ind_n, chromosome = fastaFilename.split(".")[0:2]
cmd = """../../bin/wgsim-master/wgsim \
-1 %s \
-2 %s \
-N %s \
-e 0.001 \
-r 0 \
-R 0 \
%s.%s.fasta \
%s.%s.F.fq \
%s.%s.R.fq && \
rm %s.%s.fasta && \
bzip2 %s.%s.F.fq && \
bzip2 %s.%s.R.fq""" % (readLength, readLength, nReads, ind_n, chromosome, ind_n, chromosome, ind_n, chromosome, ind_n, chromosome, ind_n, chromosome, ind_n, chromosome)
subprocess.call(cmd, shell=True)
args = sys.argv[1:]
project_directory = os.path.dirname(os.getcwd())
population = args[0]
chromosome = args[1]
first_ind = int(args[2])
last_ind = int(args[3])
site_genotypes = build_ref_alt_dict(directory = project_directory + "/input_data/", chromosome=chromosome)
ref_sequence = import_fasta(filename = project_directory + "/input_data/" + chromosome + ".fa")
os.chdir(population)
# Iterate through individuals
for ind_n in range(first_ind, last_ind+1, 1):
fastaFilename = str(ind_n) + "." + chromosome + ".fasta"
if os.path.exists(str(ind_n) + "." + chromosome + ".R.fq.bz2") == False:
haplotype1, haplotype2 = personalize_fasta( site_genotypes=site_genotypes,
personal_genotypes_file=str(ind_n) + ".geno",
chromosome=chromosome,
ref_sequence=ref_sequence)
write_fasta(ind_n, chromosome, haplotype1, haplotype2, wrap_length=80)
run_wgsim(fastaFilename, readLength=100.0, coverage=0.05) | 38.44 | 172 | 0.612903 |
9fd4e408ed45ebc5efce1e70ad9ba75b4d1acdeb | 7,887 | py | Python | backend/test2_33914/settings.py | crowdbotics-apps/test2-33914 | 57ae765648af915113375de9cdde558d6499a9ad | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/test2_33914/settings.py | crowdbotics-apps/test2-33914 | 57ae765648af915113375de9cdde558d6499a9ad | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/test2_33914/settings.py | crowdbotics-apps/test2-33914 | 57ae765648af915113375de9cdde558d6499a9ad | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for test2_33914 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test2_33914.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test2_33914.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
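# USE_S3 is truthy only when all four AWS settings above are non-empty;
# otherwise the GCS or local-media fallbacks further below apply.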
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
| 30.334615 | 112 | 0.736402 |
e4edd42130ed00c59ecaf05e3cef605b4d6ee123 | 613 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/base-7.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/base-7.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/base-7.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(1)
# Space: O(1)
class Solution(object):
def convertToBase7(self, num):
if num < 0:
return '-' + self.convertToBase7(-num)
result = ''
while num:
result = str(num % 7) + result
num //= 7
return result if result else '0'
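# Worked example for the iterative version above: convertToBase7(100) builds
# '2' (100 % 7), then '0' (14 % 7), then '2' (2 % 7), yielding "202" since
# 100 = 2*49 + 0*7 + 2.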
class Solution2(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num < 0:
return '-' + self.convertToBase7(-num)
if num < 7:
return str(num)
return self.convertToBase7(num // 7) + str(num % 7)
| 22.703704 | 59 | 0.487765 |
1764586f4e6a7163a54b216a8e80be81ab6ec855 | 12,368 | py | Python | grr/client/client_actions/osx/osx.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/osx/osx.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/osx/osx.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""OSX specific actions.
Most of these actions share an interface (in/out rdfvalues) with linux actions
of the same name. OSX-only actions are registered with the server via
libs/server_stubs.py
"""
import ctypes
import ctypes.util  # needed for ctypes.util.find_library below
import logging
import os
import re
import shutil
import sys
import pytsk3
from grr import config
from grr.client import actions
from grr.client import client_utils_common
from grr.client import client_utils_osx
from grr.client.client_actions import standard
from grr.client.osx.objc import ServiceManagement
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import osx_launchd
class Error(Exception):
"""Base error class."""
class UnsupportedOSVersionError(Error):
"""This action not supported on this os version."""
# struct sockaddr_dl {
# u_char sdl_len; /* Total length of sockaddr */
# u_char sdl_family; /* AF_LINK */
# u_short sdl_index; /* if != 0, system given index for interface */
# u_char sdl_type; /* interface type */
# u_char sdl_nlen; /* interface name length, no trailing 0 reqd. */
# u_char sdl_alen; /* link level address length */
# u_char sdl_slen; /* link layer selector length */
# char sdl_data[12]; /* minimum work area, can be larger;
# contains both if name and ll address */
# };
# Interfaces can have names up to 15 chars long and sdl_data contains name + mac
# but no separators - we need to make sdl_data at least 15+6 bytes.
class Sockaddrdl(ctypes.Structure):
"""The sockaddr_dl struct."""
_fields_ = [
("sdl_len", ctypes.c_ubyte),
("sdl_family", ctypes.c_ubyte),
("sdl_index", ctypes.c_ushort),
("sdl_type", ctypes.c_ubyte),
("sdl_nlen", ctypes.c_ubyte),
("sdl_alen", ctypes.c_ubyte),
("sdl_slen", ctypes.c_ubyte),
("sdl_data", ctypes.c_ubyte * 24),
]
# struct sockaddr_in {
# __uint8_t sin_len;
# sa_family_t sin_family;
# in_port_t sin_port;
# struct in_addr sin_addr;
# char sin_zero[8];
# };
class Sockaddrin(ctypes.Structure):
"""The sockaddr_in struct."""
_fields_ = [
("sin_len", ctypes.c_ubyte),
("sin_family", ctypes.c_ubyte),
("sin_port", ctypes.c_ushort),
("sin_addr", ctypes.c_ubyte * 4),
("sin_zero", ctypes.c_char * 8)
] # pyformat: disable
# struct sockaddr_in6 {
# __uint8_t sin6_len; /* length of this struct */
# sa_family_t sin6_family; /* AF_INET6 (sa_family_t) */
# in_port_t sin6_port; /* Transport layer port */
# __uint32_t sin6_flowinfo; /* IP6 flow information */
# struct in6_addr sin6_addr; /* IP6 address */
# __uint32_t sin6_scope_id; /* scope zone index */
# };
class Sockaddrin6(ctypes.Structure):
"""The sockaddr_in6 struct."""
_fields_ = [
("sin6_len", ctypes.c_ubyte),
("sin6_family", ctypes.c_ubyte),
("sin6_port", ctypes.c_ushort),
("sin6_flowinfo", ctypes.c_ubyte * 4),
("sin6_addr", ctypes.c_ubyte * 16),
("sin6_scope_id", ctypes.c_ubyte * 4)
] # pyformat: disable
# struct ifaddrs *ifa_next; /* Pointer to next struct */
# char *ifa_name; /* Interface name */
# u_int ifa_flags; /* Interface flags */
# struct sockaddr *ifa_addr; /* Interface address */
# struct sockaddr *ifa_netmask; /* Interface netmask */
# struct sockaddr *ifa_broadaddr; /* Interface broadcast address */
# struct sockaddr *ifa_dstaddr; /* P2P interface destination */
# void *ifa_data; /* Address specific data */
class Ifaddrs(ctypes.Structure):
pass
setattr(Ifaddrs, "_fields_", [
("ifa_next", ctypes.POINTER(Ifaddrs)),
("ifa_name", ctypes.POINTER(ctypes.c_char)),
("ifa_flags", ctypes.c_uint),
("ifa_addr", ctypes.POINTER(ctypes.c_char)),
("ifa_netmask", ctypes.POINTER(ctypes.c_char)),
("ifa_broadaddr", ctypes.POINTER(ctypes.c_char)),
("ifa_destaddr", ctypes.POINTER(ctypes.c_char)),
("ifa_data", ctypes.POINTER(ctypes.c_char))
]) # pyformat: disable
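# Traversal sketch (matches EnumerateInterfaces.Run below): libc.getifaddrs()
# fills a singly-linked list of Ifaddrs records that is walked via
# .contents.ifa_next until a NULL pointer, then released with freeifaddrs().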
class EnumerateInterfaces(actions.ActionPlugin):
"""Enumerate all MAC addresses of all NICs."""
out_rdfvalues = [rdf_client.Interface]
def Run(self, unused_args):
"""Enumerate all MAC addresses."""
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
ifa = Ifaddrs()
p_ifa = ctypes.pointer(ifa)
libc.getifaddrs(ctypes.pointer(p_ifa))
addresses = {}
macs = {}
ifs = set()
m = p_ifa
while m:
ifname = ctypes.string_at(m.contents.ifa_name)
ifs.add(ifname)
try:
iffamily = ord(m.contents.ifa_addr[1])
if iffamily == 0x2: # AF_INET
data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrin))
ip4 = "".join(map(chr, data.contents.sin_addr))
address_type = rdf_client.NetworkAddress.Family.INET
address = rdf_client.NetworkAddress(
address_type=address_type, packed_bytes=ip4)
addresses.setdefault(ifname, []).append(address)
if iffamily == 0x12: # AF_LINK
data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrdl))
iflen = data.contents.sdl_nlen
addlen = data.contents.sdl_alen
macs[ifname] = "".join(
map(chr, data.contents.sdl_data[iflen:iflen + addlen]))
if iffamily == 0x1E: # AF_INET6
data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrin6))
ip6 = "".join(map(chr, data.contents.sin6_addr))
address_type = rdf_client.NetworkAddress.Family.INET6
address = rdf_client.NetworkAddress(
address_type=address_type, packed_bytes=ip6)
addresses.setdefault(ifname, []).append(address)
except ValueError:
# Some interfaces don't have a iffamily and will raise a null pointer
# exception. We still want to send back the name.
pass
m = m.contents.ifa_next
libc.freeifaddrs(p_ifa)
for interface in ifs:
mac = macs.setdefault(interface, "")
address_list = addresses.setdefault(interface, "")
args = {"ifname": interface}
if mac:
args["mac_address"] = mac
if address_list:
args["addresses"] = address_list
self.SendReply(rdf_client.Interface(**args))
class GetInstallDate(actions.ActionPlugin):
"""Estimate the install date of this system."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, unused_args):
for f in ["/var/log/CDIS.custom", "/var", "/private"]:
try:
stat = os.stat(f)
self.SendReply(rdf_protodict.DataBlob(integer=int(stat.st_ctime)))
return
except OSError:
pass
self.SendReply(rdf_protodict.DataBlob(integer=0))
class EnumerateFilesystems(actions.ActionPlugin):
"""Enumerate all unique filesystems local to the system."""
out_rdfvalues = [rdf_client.Filesystem]
def Run(self, unused_args):
"""List all local filesystems mounted on this system."""
for fs_struct in client_utils_osx.GetFileSystems():
self.SendReply(
rdf_client.Filesystem(
device=fs_struct.f_mntfromname,
mount_point=fs_struct.f_mntonname,
type=fs_struct.f_fstypename))
drive_re = re.compile("r?disk[0-9].*")
for drive in os.listdir("/dev"):
if not drive_re.match(drive):
continue
path = os.path.join("/dev", drive)
try:
img_inf = pytsk3.Img_Info(path)
# This is a volume or a partition - we send back a TSK device.
self.SendReply(rdf_client.Filesystem(device=path))
vol_inf = pytsk3.Volume_Info(img_inf)
for volume in vol_inf:
if volume.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
offset = volume.start * vol_inf.info.block_size
self.SendReply(
rdf_client.Filesystem(
device=path + ":" + str(offset), type="partition"))
except (IOError, RuntimeError):
continue
class OSXEnumerateRunningServices(actions.ActionPlugin):
"""Enumerate all running launchd jobs."""
in_rdfvalue = None
out_rdfvalues = [rdf_client.OSXServiceInformation]
def GetRunningLaunchDaemons(self):
"""Get running launchd jobs from objc ServiceManagement framework."""
sm = ServiceManagement()
return sm.SMGetJobDictionaries("kSMDomainSystemLaunchd")
def Run(self, unused_arg):
"""Get running launchd jobs.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6
"""
osxversion = client_utils_osx.OSXVersion()
version_array = osxversion.VersionAsMajorMinor()
if version_array[:2] < [10, 6]:
raise UnsupportedOSVersionError(
"ServiceManagment API unsupported on < 10.6. This client is %s" %
osxversion.VersionString())
launchd_list = self.GetRunningLaunchDaemons()
self.parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
for job in self.parser.Parse():
response = self.CreateServiceProto(job)
self.SendReply(response)
def CreateServiceProto(self, job):
"""Create the Service protobuf.
Args:
job: Launchdjobdict from servicemanagement framework.
Returns:
sysinfo_pb2.OSXServiceInformation proto
"""
service = rdf_client.OSXServiceInformation(
label=job.get("Label"),
program=job.get("Program"),
sessiontype=job.get("LimitLoadToSessionType"),
lastexitstatus=int(job["LastExitStatus"]),
timeout=int(job["TimeOut"]),
ondemand=bool(job["OnDemand"]))
for arg in job.get("ProgramArguments", "", stringify=False):
# Returns CFArray of CFStrings
service.args.Append(unicode(arg))
mach_dict = job.get("MachServices", {}, stringify=False)
for key, value in mach_dict.iteritems():
service.machservice.Append("%s:%s" % (key, value))
job_mach_dict = job.get("PerJobMachServices", {}, stringify=False)
for key, value in job_mach_dict.iteritems():
service.perjobmachservice.Append("%s:%s" % (key, value))
if "PID" in job:
service.pid = job["PID"].value
return service
class Uninstall(actions.ActionPlugin):
"""Remove the service that starts us at startup."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, unused_arg):
"""This kills us with no cleanups."""
logging.debug("Disabling service")
msg = "Service disabled."
if hasattr(sys, "frozen"):
grr_binary = os.path.abspath(sys.executable)
elif __file__:
grr_binary = os.path.abspath(__file__)
try:
os.remove(grr_binary)
except OSError:
msg = "Could not remove binary."
try:
os.remove(config.CONFIG["Client.plist_path"])
except OSError:
if "Could not" in msg:
msg += " Could not remove plist file."
else:
msg = "Could not remove plist file."
# Get the directory we are running in from pyinstaller. This is either the
# GRR directory which we should delete (onedir mode) or a generated temp
# directory which we can delete without problems in onefile mode.
directory = getattr(sys, "_MEIPASS", None)
if directory:
shutil.rmtree(directory, ignore_errors=True)
self.SendReply(rdf_protodict.DataBlob(string=msg))
class UpdateAgent(standard.ExecuteBinaryCommand):
"""Updates the GRR agent to a new version."""
def ProcessFile(self, path, args):
cmd = "/usr/sbin/installer"
cmd_args = ["-pkg", path, "-target", "/"]
time_limit = args.time_limit
res = client_utils_common.Execute(
cmd, cmd_args, time_limit=time_limit, bypass_whitelist=True)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
self.SendReply(
rdf_client.ExecuteBinaryResponse(
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used)))
| 32.806366 | 80 | 0.644809 |
33e6ffa679702762014a0851b37eeabfbc5fdfc5 | 6,736 | py | Python | instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py | ericmustin/opentelemetry-python-contrib | 308369004c71a8d07c18560ac1fb46049a0a8105 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py | ericmustin/opentelemetry-python-contrib | 308369004c71a8d07c18560ac1fb46049a0a8105 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py | ericmustin/opentelemetry-python-contrib | 308369004c71a8d07c18560ac1fb46049a0a8105 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This library allows tracing HTTP requests made by the
`urllib3 <https://urllib3.readthedocs.io/>`_ library.
Usage
-----
.. code-block:: python
import urllib3
import urllib3.util
from opentelemetry.instrumentation.urllib3 import URLLib3Instrumentor
def strip_query_params(url: str) -> str:
return url.split("?")[0]
def span_name_callback(method: str, url: str, headers):
return urllib3.util.Url(url).path
URLLib3Instrumentor().instrument(
# Remove all query params from the URL attribute on the span.
url_filter=strip_query_params,
# Use the URL's path as the span name.
span_name_or_callback=span_name_callback
)
http = urllib3.PoolManager()
response = http.request("GET", "https://www.example.org/")
API
---
"""
import contextlib
import typing
import urllib3.connectionpool
import wrapt
from opentelemetry import context
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.urllib3.version import __version__
from opentelemetry.instrumentation.utils import (
http_status_to_status_code,
unwrap,
)
from opentelemetry.propagate import inject
from opentelemetry.trace import Span, SpanKind, TracerProvider, get_tracer
from opentelemetry.trace.status import Status
_SUPPRESS_HTTP_INSTRUMENTATION_KEY = "suppress_http_instrumentation"
_UrlFilterT = typing.Optional[typing.Callable[[str], str]]
_SpanNameT = typing.Optional[
typing.Union[typing.Callable[[str, str, typing.Mapping], str], str]
]
_URL_OPEN_ARG_TO_INDEX_MAPPING = {
"method": 0,
"url": 1,
}
class URLLib3Instrumentor(BaseInstrumentor):
def _instrument(self, **kwargs):
"""Instruments the urllib3 module
Args:
**kwargs: Optional arguments
``tracer_provider``: a TracerProvider, defaults to global.
``span_name_or_callback``: Override the default span name.
``url_filter``: A callback to process the requested URL prior
to adding it as a span attribute.
"""
_instrument(
tracer_provider=kwargs.get("tracer_provider"),
span_name_or_callback=kwargs.get("span_name"),
url_filter=kwargs.get("url_filter"),
)
def _uninstrument(self, **kwargs):
_uninstrument()
def _instrument(
tracer_provider: TracerProvider = None,
span_name_or_callback: _SpanNameT = None,
url_filter: _UrlFilterT = None,
):
def instrumented_urlopen(wrapped, instance, args, kwargs):
if _is_instrumentation_suppressed():
return wrapped(*args, **kwargs)
method = _get_url_open_arg("method", args, kwargs).upper()
url = _get_url(instance, args, kwargs, url_filter)
headers = _prepare_headers(kwargs)
span_name = _get_span_name(span_name_or_callback, method, url, headers)
span_attributes = {
"http.method": method,
"http.url": url,
}
with get_tracer(
__name__, __version__, tracer_provider
).start_as_current_span(
span_name, kind=SpanKind.CLIENT, attributes=span_attributes
) as span:
inject(headers)
with _suppress_further_instrumentation():
response = wrapped(*args, **kwargs)
_apply_response(span, response)
return response
wrapt.wrap_function_wrapper(
urllib3.connectionpool.HTTPConnectionPool,
"urlopen",
instrumented_urlopen,
)
def _get_url_open_arg(name: str, args: typing.List, kwargs: typing.Mapping):
arg_idx = _URL_OPEN_ARG_TO_INDEX_MAPPING.get(name)
if arg_idx is not None:
try:
return args[arg_idx]
except IndexError:
pass
return kwargs.get(name)
def _get_url(
instance: urllib3.connectionpool.HTTPConnectionPool,
args: typing.List,
kwargs: typing.Mapping,
url_filter: _UrlFilterT,
) -> str:
url_or_path = _get_url_open_arg("url", args, kwargs)
if not url_or_path.startswith("/"):
url = url_or_path
else:
url = instance.scheme + "://" + instance.host
if _should_append_port(instance.scheme, instance.port):
url += ":" + str(instance.port)
url += url_or_path
if url_filter:
return url_filter(url)
return url
def _should_append_port(scheme: str, port: typing.Optional[int]) -> bool:
if not port:
return False
if scheme == "http" and port == 80:
return False
if scheme == "https" and port == 443:
return False
return True
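# e.g. ("http", 8080) -> True and ("https", 443) -> False, so default ports
# are dropped from the URL recorded on the span.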
def _prepare_headers(urlopen_kwargs: typing.Dict) -> typing.Dict:
headers = urlopen_kwargs.get("headers")
# avoid modifying original headers on inject
headers = headers.copy() if headers is not None else {}
urlopen_kwargs["headers"] = headers
return headers
def _get_span_name(
span_name_or_callback, method: str, url: str, headers: typing.Mapping
):
span_name = None
if callable(span_name_or_callback):
span_name = span_name_or_callback(method, url, headers)
elif isinstance(span_name_or_callback, str):
span_name = span_name_or_callback
if not span_name or not isinstance(span_name, str):
span_name = "HTTP {}".format(method.strip())
return span_name
def _apply_response(span: Span, response: urllib3.response.HTTPResponse):
if not span.is_recording():
return
span.set_attribute("http.status_code", response.status)
span.set_status(Status(http_status_to_status_code(response.status)))
def _is_instrumentation_suppressed() -> bool:
return bool(
context.get_value("suppress_instrumentation")
or context.get_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY)
)
@contextlib.contextmanager
def _suppress_further_instrumentation():
token = context.attach(
context.set_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY, True)
)
try:
yield
finally:
context.detach(token)
def _uninstrument():
unwrap(urllib3.connectionpool.HTTPConnectionPool, "urlopen")
| 29.414847 | 79 | 0.687203 |
51249d7ab57083da814e587a381ab4f3be9c31bf | 408 | py | Python | Level1/Lessons12982/minari - 12982.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons12982/minari - 12982.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons12982/minari - 12982.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | 1 | 2021-04-05T07:35:59.000Z | 2021-04-05T07:35:59.000Z | ```python
def solution(d, budget):
answer = 0
d.sort() #1.
#2.
for i in range(len(d)):
if budget - d[i] >= 0:
answer += 1
budget = budget - d[i]
else:
break
return answer
#1. To support as many departments as possible -> fill requests starting from
#   the department asking for the least.
#2. Each department must get exactly the amount it requested (no partial
#   grants) -> loop and subtract d[i] from budget while it still fits.
```
| 18.545455 | 73 | 0.460784 |
1e841763a8d173a24b58fd7370c2443a336e938b | 612 | py | Python | var/spack/repos/builtin/packages/py-doxypypy/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-doxypypy/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-doxypypy/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDoxypypy(PythonPackage):
"""A Doxygen filter for Python.
A more Pythonic version of doxypy, a Doxygen filter for Python.
"""
homepage = "https://github.com/Feneric/doxypypy"
pypi = "doxypypy/doxypypy-0.8.8.6.tar.gz"
version('0.8.8.6', sha256='627571455c537eb91d6998d95b32efc3c53562b2dbadafcb17e49593e0dae01b')
depends_on('py-setuptools', type='build')
| 29.142857 | 97 | 0.73366 |
5d8b2c967e5f5da6f815636b1ab90fcb8cb91b92 | 3,547 | py | Python | src/copuled_hopf_network.py | koriavinash1/Network-Games-for-Coupled-Neuron-Model | 9766eee0002b2e0eba22c871349bc9c98aaa575a | [
"MIT"
] | 1 | 2020-04-30T08:00:40.000Z | 2020-04-30T08:00:40.000Z | src/copuled_hopf_network.py | koriavinash1/Network-Games-for-Coupled-Neuron-Model | 9766eee0002b2e0eba22c871349bc9c98aaa575a | [
"MIT"
] | null | null | null | src/copuled_hopf_network.py | koriavinash1/Network-Games-for-Coupled-Neuron-Model | 9766eee0002b2e0eba22c871349bc9c98aaa575a | [
"MIT"
] | null | null | null | import os, sys
import numpy as np
sys.path.append('../../../Projects/nfm/')
from nfm.helper.GaussianStatistics import *
from nfm.helper.configure import Config
from nfm.SOM import SOM
from keras.datasets import mnist
import matplotlib.pyplot as plt
(x_train, y_train), (x_test, y_test) = mnist.load_data()
index = np.arange(len(x_test))
np.random.shuffle(index)
x_test = x_test[index]
y_test = y_test[index]
_som_ = SOM((10, 10),(28, 28), 25, learning_rate=1e-2, rad = 5, sig = 3)
_som_.load_weights('../../../Projects/nfm/logs/SOM_weights_MNIST_noise_0.0.npy')
rand_complex = lambda x: np.random.uniform(0, 1, size=x) + np.random.uniform(0, 1, size=x)*1j  # renamed to avoid shadowing the builtin
star = lambda x: np.conjugate(x)
norm = lambda x: x/(np.abs(x) + 1e-3)
minmax = lambda x: (x - np.min(x))/(np.max(x) - np.min(x))
def som(digit):
response = _som_.response(digit, _som_.weights)
# response = minmax(response)
real = response
imag = 1. - response
return real + imag*1j
# DP codes
def get_nbrs(i, j, Z):
nbrs = []
idx = [(1, 0), (0, 1), (-1, 0), (0, -1)]
for ix, iy in idx:
try:
nbrs.append(Z[i+ix, j+iy])
except:
pass
return nbrs
def couple(W, Z):
coupling = np.zeros_like(Z)
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
nbrs = get_nbrs(i,j,Z)
coupling[i, j] = W[i, j]*np.prod(nbrs)
return coupling
def nbr(Z):
nbr_matrix = np.zeros_like(Z)
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
nbrs = get_nbrs(i,j,Z)
nbr_matrix[i, j] = np.prod(nbrs)
return nbr_matrix
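# Euler-stepped coupled Hopf oscillator used below:
#   Zdot = (mu - |Z|^2) * Z + i*omega*Z + W * prod(neighbour Z's)
# On the uncoupled limit cycle |Z| settles near sqrt(mu) (~2.24 for mu = 5)
# while the phase advances by omega per unit time.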
omega = np.pi/20.
mu = 5.0
T = 50
deltaT = 0.1
ita = 1e-2
digits = np.arange(10)
phase_information = []
magnitude_information = []
for digit in digits:
Zs = []; Ws = []
    # initializations
    Z = som(x_test[y_test == digit][10])
    W = rand_complex(Z.shape)
# plt.clf()
# plt.ion()
for _ in range(int(T/deltaT)):
Zdot = (mu - np.abs(Z)**2)*Z + Z*omega*1j + couple(W, Z)
Z = Z + deltaT*Zdot
W = norm(W); Z = norm(Z)
utility = np.zeros(Z.shape)
utility[np.abs(Z) > 0.5] = 1.0
W = W + ita*((utility + 1)*Z*star(nbr(Z)) - W)
Zs.append(Z); Ws.append(W)
# plt.imshow(np.abs(Z))
# plt.pause(0.01)
Zs = np.array(Zs)
mZs = np.mean(Zs, axis=0)
idx = np.where(mZs == np.max(mZs))
phase_information.append(np.unwrap(np.angle((Zs[:, 2, 2]))))
magnitude_information.append(np.mean(Zs, axis=0))
for i in digits:
plt.plot(phase_information[i])
plt.ylabel("Unwrapped phase for different digits")
plt.xlabel('t')
plt.grid()
plt.show()
"""
plt.subplot(1, 2, 1)
plt.plot(np.real(Z1s), np.imag(Z1s), 'b')
plt.plot(np.real(Z1s[0]), np.imag(Z1s[0]), '*g')
#plt.xlim(-10, 10)
#plt.ylim(-10, 10.0)
plt.grid()
plt.xlabel("(1) Neuron $Z_1$ stable state oscillator")
plt.subplot(1, 2, 2)
plt.plot(np.real(Z2s), np.imag(Z2s), 'g')
plt.plot(np.real(Z2s[0]), np.imag(Z2s[0]), '*r')
#plt.xlim(-10, 10)
#plt.ylim(-10, 10.0)
plt.grid()
plt.xlabel("(2) Neuron $Z_2$ stable state oscillator")
plt.show()
#####
plt.subplot(3, 1, 1)
plt.plot(np.real(Z1s))
plt.plot(np.real(Z2s))
plt.xlabel("(1) Real part of $Z_1$ and $Z_2$")
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(np.imag(Z1s))
plt.plot(np.imag(Z2s))
plt.xlabel("(2) Imaginary part of $Z1$ and $Z2$")
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(np.angle(Z1s))
plt.plot(np.angle(Z2s))
plt.xlabel("(3) Phase plot for both $Z_1$ and $Z2$")
plt.grid()
plt.show()
"""
| 23.032468 | 90 | 0.594869 |
440668745ce65f0a6a3c621f9d345207a9fb3927 | 1,722 | py | Python | magellan_models/initializers/initialize_with_yaml.py | 3mcloud/magellan-models | aae47496f240a5211e650a5c0efcbc95a15f7bb0 | [
"BSD-3-Clause"
] | 2 | 2021-08-11T18:15:28.000Z | 2021-08-11T18:33:38.000Z | magellan_models/initializers/initialize_with_yaml.py | 3mcloud/magellan-models | aae47496f240a5211e650a5c0efcbc95a15f7bb0 | [
"BSD-3-Clause"
] | null | null | null | magellan_models/initializers/initialize_with_yaml.py | 3mcloud/magellan-models | aae47496f240a5211e650a5c0efcbc95a15f7bb0 | [
"BSD-3-Clause"
] | null | null | null | """
Yaml based initialization module
"""
import yaml
import requests
from magellan_models.config import MagellanConfig
from magellan_models.model_generator.generate_from_spec import generate_from_spec
from magellan_models.exceptions import MagellanParserException
def initialize_with_yaml_file(path: str, model_config: MagellanConfig = None):
"""Initializes Magellan with a yaml file path
Args:
path (str): The path to a yaml file
model_config (MagellanConfig, optional): A Magellan Config instance. Defaults to None.
    Returns:
        tuple(dict, dict): The Magellan objects and functions generated
"""
if not model_config:
model_config = MagellanConfig()
with open(path) as yaml_file:
specification = yaml.load(yaml_file, Loader=yaml.FullLoader)
return generate_from_spec(specification, configuration=model_config)
def initialize_with_yaml_url(api_spec_url: str, model_config: MagellanConfig = None):
"""Initializes Magellan with a Yaml URL
Args:
api_spec_url (str): the URL path to a YAML file
model_config (MagellanConfig, optional): The Magellan Config object. Defaults to None.
Raises:
MagellanParserException: Raises if the request fails
Returns:
tuple(dict, dict): The Magellan objects and functions generated
"""
if not model_config:
model_config = MagellanConfig()
spec_resp = requests.get(api_spec_url)
if spec_resp.status_code != 200:
raise MagellanParserException(
f"Error retrieving the json schema .yaml. Error code: {spec_resp.status_code}"
)
return generate_from_spec(
yaml.load(spec_resp.content, Loader=yaml.FullLoader), configuration=model_config
)
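# Minimal usage sketch (hypothetical spec locations; default MagellanConfig
# assumed):
#
#   models, funcs = initialize_with_yaml_file("./openapi_spec.yaml")
#   models, funcs = initialize_with_yaml_url("https://api.example.com/spec.yaml")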
| 31.888889 | 94 | 0.720674 |
a62de8fcbe7f418cbb7dc8be73fac50a7b7dae27 | 1,315 | py | Python | data-mining/votes_dataset.py | paulopieczarka/cuddly-umbrella | 29e1c101db0bd733b92eeb36d0ec334a5a87b9da | [
"MIT"
] | null | null | null | data-mining/votes_dataset.py | paulopieczarka/cuddly-umbrella | 29e1c101db0bd733b92eeb36d0ec334a5a87b9da | [
"MIT"
] | null | null | null | data-mining/votes_dataset.py | paulopieczarka/cuddly-umbrella | 29e1c101db0bd733b92eeb36d0ec334a5a87b9da | [
"MIT"
] | null | null | null | import pandas as pd
import statistics as stcs
def HouseVotes_DataSet(filename='house-votes-84.csv'):
print("-> Loading %s dataset file." % filename)
# read database
dataset = pd.read_csv(filename)
x = dataset.iloc[:, 1:].values
y = dataset.iloc[:, 0].values
n_samples, n_instances = x.shape
print("-> Done. Shape =", n_samples, "x", n_instances)
print("-> Preprocessing dataset...")
# replace class name with ints
# democrat = 0, republican = 1
for i in range(n_samples):
if y[i] == 'democrat':
y[i] = 0
elif y[i] == 'republican':
y[i] = 1
y = y.astype(int)
# replace value with ints
# y = 1, n = 0
for i in range(n_samples):
for j in range(n_instances):
if ('y' in x[i][j]):
x[i][j] = 1
elif ('n' in x[i][j]):
x[i][j] = 0
# find each class median
medians = []
for j in range(n_instances):
acceptable = []
for i in range(n_samples):
if (x[i][j] == 1 or x[i][j] == 0):
acceptable.append(x[i][j])
med = stcs.median(acceptable)
medians.append(int(med))
# replace missing values with median
for i in range(n_samples):
for j in range(n_instances):
if (x[i][j] != 1 and x[i][j] != 0):
x[i][j] = medians[j]
x = x.astype(int)
print("-> Done.")
return (x, y, dataset)
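# Example usage (assumes house-votes-84.csv sits next to this script):
#   x, y, dataset = HouseVotes_DataSet()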
| 23.909091 | 56 | 0.576426 |
94eb4ee6e0cac458bf46f3c31de203e4c2a21c09 | 5,720 | py | Python | tensorflow_recommenders/experimental/optimizers/composite_optimizer_test.py | D34D10CK/recommenders | e6e491b4e6bb991b3ad7ce8843c5ed04dfd60996 | [
"Apache-2.0"
] | null | null | null | tensorflow_recommenders/experimental/optimizers/composite_optimizer_test.py | D34D10CK/recommenders | e6e491b4e6bb991b3ad7ce8843c5ed04dfd60996 | [
"Apache-2.0"
] | null | null | null | tensorflow_recommenders/experimental/optimizers/composite_optimizer_test.py | D34D10CK/recommenders | e6e491b4e6bb991b3ad7ce8843c5ed04dfd60996 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Recommenders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for CompositeOptimizer."""
import unittest
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_recommenders.experimental.optimizers.composite_optimizer import CompositeOptimizer
class CompositOptimizerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
("sgd", "adam"),
("rmsprop", "sgd"),
("adam", "adagrad"),
("adagrad", "rmsprop"))
def test_composite_optimizer(self, optimizer1_type, optimizer2_type):
values1 = [1.0, 2.0, 3.0]
values2 = [0.5, 0.0, -2.0]
values3 = [0.1, 0.0, -1.0]
grad1_values = [0.1, 0.2, 1.0]
grad2_values = [-0.1, 0.05, 2.0]
grad3_values = [2.1, 0.0, 0.3]
var1 = tf.Variable(values1)
var2 = tf.Variable(values2)
var3 = tf.Variable(values3)
grads1 = tf.constant(grad1_values)
grads2 = tf.constant(grad2_values)
grads3 = tf.constant(grad3_values)
composite_optimizer = CompositeOptimizer([
(tf.keras.optimizers.get(optimizer1_type), lambda: [var1]),
(tf.keras.optimizers.get(optimizer2_type), lambda: [var2, var3]),
])
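    # Each (optimizer, variables-callable) pair routes updates: the callable
    # returns the variables that optimizer owns, so var1 goes to the first
    # optimizer and var2/var3 to the second.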
optimizer1 = tf.keras.optimizers.get(optimizer1_type)
optimizer2 = tf.keras.optimizers.get(optimizer2_type)
grads_and_vars_1 = [(tf.constant(grad1_values), tf.Variable(values1))]
grads_and_vars_2 = [(tf.constant(grad2_values), tf.Variable(values2)),
(tf.constant(grad3_values), tf.Variable(values3))]
grads_and_vars = list(zip([grads1, grads2, grads3], [var1, var2, var3]))
for _ in range(10):
      # Test that applying the composite optimizer has the same effect as
      # applying optimizer1 and optimizer2 separately to their respective
      # subsets of gradients/variables.
composite_optimizer.apply_gradients(grads_and_vars)
optimizer1.apply_gradients(grads_and_vars_1)
optimizer2.apply_gradients(grads_and_vars_2)
self.assertAllClose(grads_and_vars[:1], grads_and_vars_1)
self.assertAllClose(grads_and_vars[1:], grads_and_vars_2)
def test_incorrect_inputs(self):
var1 = tf.Variable([0.1, 0.2, 1.0])
var2 = tf.Variable([-5.1, 0.1, 0])
var3 = tf.Variable([-2.1, 1.3, 0/3])
grads1 = tf.constant([0.1, 0.2, 1.0])
grads2 = tf.constant([0.5, 0.0, -2.0])
grads3 = tf.constant([-0.2, 0.0, -1.0])
    # Test using the same variable in two optimizers.
composite_optimizer = CompositeOptimizer([
(tf.keras.optimizers.Adam(), lambda: [var1]),
(tf.keras.optimizers.Adagrad(), lambda: [var1, var2]),
])
grads_and_vars = list(zip([grads1, grads2], [var1, var2]))
with self.assertRaises(ValueError):
composite_optimizer.apply_gradients(grads_and_vars)
# Test missing variable (var3) in optimizers.
composite_optimizer = CompositeOptimizer([
(tf.keras.optimizers.Adam(), lambda: [var1]),
(tf.keras.optimizers.Adagrad(), lambda: [var2]),
])
grads_and_vars = list(zip([grads1, grads2, grads3], [var1, var2, var3]))
with self.assertRaises(ValueError):
composite_optimizer.apply_gradients(grads_and_vars)
  # TODO(agagik): Remove expectedFailure after fixing CompositeOptimizer to
  # restore optimizer.iterations when restoring a model from a checkpoint.
@unittest.expectedFailure
def test_checkpoint_save_restore(self):
# Using a simple Linear model to test checkpoint save/restore.
model = tf.keras.experimental.LinearModel(units=10)
composite_optimizer = CompositeOptimizer([
(tf.keras.optimizers.Adam(), lambda: model.trainable_variables[:1]),
(tf.keras.optimizers.Adagrad(), lambda: model.trainable_variables[1:]),
])
checkpoint = tf.train.Checkpoint(model=model,
optimizer=composite_optimizer)
model.compile(optimizer=composite_optimizer,
loss=tf.keras.losses.MSE)
batch_size = 16
num_of_batches = 8
x = tf.ones((num_of_batches * batch_size, 5))
y = tf.zeros((num_of_batches * batch_size, 1))
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_dataset = training_dataset.batch(batch_size)
model.fit(training_dataset, epochs=1)
    # Check that the optimizer iteration count matches the number of batches.
self.assertEqual(composite_optimizer.iterations.numpy(), num_of_batches)
# Saving checkpoint.
checkpoint_path = self.get_temp_dir()
checkpoint.write(checkpoint_path)
# Loading checkpoint after reinitializing the optimizer and checkpoint.
composite_optimizer = CompositeOptimizer([
(tf.keras.optimizers.Adam(), lambda: model.trainable_variables),
(tf.keras.optimizers.Adagrad(), lambda: []),
])
checkpoint = tf.train.Checkpoint(model=model,
optimizer=composite_optimizer)
checkpoint.read(checkpoint_path).assert_consumed()
# After restoring the checkpoint, optimizer iterations should also be
# restored to its original value. Right now this assertion is failing.
self.assertEqual(composite_optimizer.iterations.numpy(), num_of_batches)
if __name__ == "__main__":
tf.test.main()
| 36.666667 | 98 | 0.697203 |
ffbbd25065fd49e5b4da0b0d1da31ebe3a7da1f9 | 98 | py | Python | era_search/era_search/gunicorn.conf.py | bergonzzi/eracareers | 8956bd21770a72ed466791b53aae8cf0cfc9b6db | [
"Apache-2.0"
] | 1 | 2016-01-05T15:21:13.000Z | 2016-01-05T15:21:13.000Z | era_search/era_search/gunicorn.conf.py | bergonzzi/eracareers | 8956bd21770a72ed466791b53aae8cf0cfc9b6db | [
"Apache-2.0"
] | null | null | null | era_search/era_search/gunicorn.conf.py | bergonzzi/eracareers | 8956bd21770a72ed466791b53aae8cf0cfc9b6db | [
"Apache-2.0"
] | null | null | null | daemon = True
accesslog = "../../../logs/access_era.log"
errorlog = "../../../logs/error_era.log"
| 24.5 | 42 | 0.612245 |
ec66e18b7ae85923a5e16b2e0a0cffcd12c91bb0 | 22 | py | Python | toolkit/__init__.py | bmorris3/trappist1g_spots | 3ee7c5ef523d517bd9e4363fd5292c2bfab3ab77 | [
"MIT"
] | 4 | 2020-05-11T00:07:25.000Z | 2021-10-05T15:46:32.000Z | toolkit/__init__.py | bmorris3/trappist1g_spots | 3ee7c5ef523d517bd9e4363fd5292c2bfab3ab77 | [
"MIT"
] | 4 | 2021-11-17T08:45:57.000Z | 2021-12-21T15:33:22.000Z | examples/__init__.py | AndreyBychkov/QBee | c444d1bc8c76dcf9c2b316354e215bcaa92d072c | [
"MIT"
] | null | null | null | from .systems import * | 22 | 22 | 0.772727 |
348405db68fd368bd4e99a54e49175fdf0e27b9f | 1,973 | py | Python | everyday_wechat/control/bot/tuling123.py | llht/EverydayWechat | 9c46ce10abf2fbbea3c6ea8de4d63e86a0a7e5e5 | [
"MIT"
] | 1 | 2019-06-20T07:25:46.000Z | 2019-06-20T07:25:46.000Z | everyday_wechat/control/bot/tuling123.py | llht/EverydayWechat | 9c46ce10abf2fbbea3c6ea8de4d63e86a0a7e5e5 | [
"MIT"
] | null | null | null | everyday_wechat/control/bot/tuling123.py | llht/EverydayWechat | 9c46ce10abf2fbbea3c6ea8de4d63e86a0a7e5e5 | [
"MIT"
] | null | null | null | # coding=utf-8
'''
Turing robot (tuling123) auto-reply.
Official site: http://www.tuling123.com/
The apiKey and userId must be applied for on the official site.
'''
import requests
from everyday_wechat.utils.common import (
is_json,
md5_encode
)
from everyday_wechat.utils import config
# Set of Turing robot error codes
TULING_ERROR_CODE_LIST = [
5000, 6000, 4000, 4001, 4002,
4003, 4005, 4007, 4100, 4200,
4300, 4400, 4500, 4600, 4602,
7002, 8008, 0]
URL = "http://openapi.tuling123.com/openapi/api/v2"
def get_tuling123(text, userId):
"""
    API docs: (https://www.kancloud.cn/turing/www-tuling123-com/718227)
    Fetch a reply from the Turing robot.
    :param text: the message to send
    :param userId: unique user identifier (ideally the WeChat friend uuid)
    :return: the reply text
"""
try:
# config.init()
info = config.get('auto_relay_info')['turing_conf']
apiKey = info['apiKey']
if not apiKey:
            print('Turing robot apiKey is empty; request failed')
return None
userId = md5_encode(userId if userId else '250')
content = {
'perception': {
'inputText': {
'text': text
}
},
'userInfo': {
'apiKey': apiKey,
'userId': userId
}
}
        # print('Sending message: {}'.format(text))
resp = requests.post(URL, json=content)
if resp.status_code == 200 and is_json(resp):
# print(resp.text)
re_data = resp.json()
if re_data['intent']['code'] not in TULING_ERROR_CODE_LIST:
return_text = re_data['results'][0]['values']['text']
return return_text
else:
error_text = re_data['results'][0]['values']['text']
                print('Turing robot error message: {}'.format(error_text))
        print('Failed to get data from the Turing robot')
except Exception as e:
print(e)
        print('Failed to get data from the Turing robot')
get_auto_reply = get_tuling123
if __name__ == '__main__':
text = '雷军 are you ok?'
reply = get_auto_reply(text,'WE……………………………………')
print(reply)
pass
| 24.974684 | 71 | 0.547897 |
296af1eb179dd05907d24d3d75a830143447aade | 855 | py | Python | osc/users/migrations/0001_initial.py | aqd14/online-shopping | 0c76c24cb277c637cb490ef70fae28f85e469059 | [
"Apache-2.0"
] | null | null | null | osc/users/migrations/0001_initial.py | aqd14/online-shopping | 0c76c24cb277c637cb490ef70fae28f85e469059 | [
"Apache-2.0"
] | null | null | null | osc/users/migrations/0001_initial.py | aqd14/online-shopping | 0c76c24cb277c637cb490ef70fae28f85e469059 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-13 03:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20)),
('password', models.CharField(max_length=30)),
('confirm_pw', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
('birthday', models.DateField()),
('address', models.CharField(max_length=100)),
],
),
]
| 29.482759 | 114 | 0.574269 |
7e4c9cd1e11ac40d9a9d0235bccbc0b12274f421 | 1,447 | py | Python | smallsmilhandler.py | vmartinezf/ptavi-p3 | 9d282502d9a40ab8d075f0d20089391fee08eca1 | [
"Apache-2.0"
] | null | null | null | smallsmilhandler.py | vmartinezf/ptavi-p3 | 9d282502d9a40ab8d075f0d20089391fee08eca1 | [
"Apache-2.0"
] | null | null | null | smallsmilhandler.py | vmartinezf/ptavi-p3 | 9d282502d9a40ab8d075f0d20089391fee08eca1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
class SmallSMILHandler(ContentHandler):
"""
    Class to handle SMIL files
"""
def __init__(self):
"""
        Constructor. Initialize the instance variables
"""
self.lista_etiquetas = []
self.dic = {'root-layout': ['width', 'height', 'background-color'],
'region': ['id', 'top', 'bottom', 'left', 'right'],
'img': ['src', 'region', 'begin', 'dur'],
'audio': ['src', 'begin', 'dur'],
'textstream': ['src', 'region']}
def startElement(self, name, attrs):
"""
        Method called for each start tag to store the tag,
        its attributes and their values
"""
if name in self.dic:
dicc = {}
for item in self.dic[name]:
dicc[item] = attrs.get(item, "")
diccname = {name: dicc}
self.lista_etiquetas.append(diccname)
def get_tags(self):
"""
        Method that returns the stored tags,
        their attributes and values
"""
return self.lista_etiquetas
if __name__ == "__main__":
"""
    Main program
"""
parser = make_parser()
cHandler = SmallSMILHandler()
parser.setContentHandler(cHandler)
parser.parse(open('karaoke.smil'))
    print(cHandler.get_tags())
| 27.301887 | 75 | 0.543193 |
bde920039aa8f82c5b41aa9b5ffb1c1c2bbf3a28 | 600 | py | Python | SmartHelmet/core/migrations/0005_alter_data_options_data_timestamp.py | amanpandey-crypto/PDP_Group_44 | b29ca460dd7f429bf27a0060ea7d377c718a624f | [
"MIT"
] | null | null | null | SmartHelmet/core/migrations/0005_alter_data_options_data_timestamp.py | amanpandey-crypto/PDP_Group_44 | b29ca460dd7f429bf27a0060ea7d377c718a624f | [
"MIT"
] | null | null | null | SmartHelmet/core/migrations/0005_alter_data_options_data_timestamp.py | amanpandey-crypto/PDP_Group_44 | b29ca460dd7f429bf27a0060ea7d377c718a624f | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-04-24 19:13
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0004_data'),
]
operations = [
migrations.AlterModelOptions(
name='data',
options={'ordering': ['-timestamp']},
),
migrations.AddField(
model_name='data',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| 24 | 93 | 0.591667 |
57e46a262a0d6e40d3856e496302d0d057cd903e | 48,254 | py | Python | infra/bots/recipes/test.py | iabro/skia | 33d8fb035a3d2ad0f2dedb5703b5385f8fd15b84 | [
"BSD-3-Clause"
] | null | null | null | infra/bots/recipes/test.py | iabro/skia | 33d8fb035a3d2ad0f2dedb5703b5385f8fd15b84 | [
"BSD-3-Clause"
] | null | null | null | infra/bots/recipes/test.py | iabro/skia | 33d8fb035a3d2ad0f2dedb5703b5385f8fd15b84 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming test.
DEPS = [
'env',
'flavor',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'run',
'vars',
]
def upload_dm_results(buildername):
skip_upload_bots = [
'ASAN',
'Coverage',
'MSAN',
'TSAN',
'UBSAN',
'Valgrind',
]
for s in skip_upload_bots:
if s in buildername:
return False
return True
def dm_flags(api, bot):
args = []
configs = []
blacklisted = []
def blacklist(quad):
config, src, options, name = (
quad.split(' ') if isinstance(quad, str) else quad)
if (config == '_' or
config in configs or
(config[0] == '~' and config[1:] in configs)):
blacklisted.extend([config, src, options, name])
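  # e.g. blacklist('gltestthreading gm _ lcdblendmodes') records one
  # config/source/options/name quad; the accumulated quads are later passed
  # to dm as its blacklist arguments.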
# We've been spending lots of time writing out and especially uploading
# .pdfs, but not doing anything further with them. skia:6821
args.extend(['--dont_write', 'pdf'])
# This enables non-deterministic random seeding of the GPU FP optimization
# test.
# Not Android due to:
# - https://skia.googlesource.com/skia/+/
# 5910ed347a638ded8cd4c06dbfda086695df1112/BUILD.gn#160
# - https://skia.googlesource.com/skia/+/
# ce06e261e68848ae21cac1052abc16bc07b961bf/tests/ProcessorTest.cpp#307
# Not MSAN due to:
# - https://skia.googlesource.com/skia/+/
# 0ac06e47269a40c177747310a613d213c95d1d6d/infra/bots/recipe_modules/
# flavor/gn_flavor.py#80
if 'Android' not in bot and 'MSAN' not in bot:
args.append('--randomProcessorTest')
# 32-bit desktop bots tend to run out of memory, because they have relatively
# far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
  if '-x86-' in bot and 'NexusPlayer' not in bot:
args.extend(['--threads', '4'])
# Nexus7 runs out of memory due to having 4 cores and only 1G RAM.
if 'CPU' in bot and 'Nexus7' in bot:
args.extend(['--threads', '2'])
# MotoG4 occasionally fails when multiple threads read the same image file.
if 'CPU' in bot and 'MotoG4' in bot:
args.extend(['--threads', '0'])
if 'Chromecast' in bot:
args.extend(['--threads', '0'])
# Avoid issues with dynamically exceeding resource cache limits.
if 'Test' in bot and 'DISCARDABLE' in bot:
args.extend(['--threads', '0'])
# See if staying on the main thread helps skia:6748.
if 'Test-iOS' in bot:
args.extend(['--threads', '0'])
# Android's kernel will occasionally attempt to kill our process, using
# SIGINT, in an effort to free up resources. If requested, that signal
# is ignored and dm will keep attempting to proceed until we actually
# exhaust the available resources.
if ('NexusPlayer' in bot or
'Chromecast' in bot):
args.append('--ignoreSigInt')
if 'SwiftShader' in api.vars.extra_tokens:
configs.extend(['gles', 'glesdft'])
args.append('--disableDriverCorrectnessWorkarounds')
elif api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
args.append('--nogpu')
configs.append('8888')
if 'BonusConfigs' in bot or ('SAN' in bot and 'GCE' in bot):
configs.extend([
'pdf',
'g8', '565',
'pic-8888', 'tiles_rt-8888', 'lite-8888', 'serialize-8888',
'gbr-8888',
'f16', 'srgb', 'esrgb', 'narrow', 'enarrow',
'p3', 'ep3', 'rec2020', 'erec2020'])
elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
args.append('--nocpu')
# Add in either gles or gl configs to the canonical set based on OS
sample_count = '8'
gl_prefix = 'gl'
if 'Android' in bot or 'iOS' in bot:
sample_count = '4'
# We want to test the OpenGL config not the GLES config on the Shield
if 'NVIDIA_Shield' not in bot:
gl_prefix = 'gles'
elif 'Intel' in bot:
sample_count = ''
elif 'ChromeOS' in bot:
gl_prefix = 'gles'
if 'NativeFonts' in bot:
configs.append(gl_prefix)
else:
configs.extend([gl_prefix,
gl_prefix + 'dft',
gl_prefix + 'srgb'])
    if sample_count:
configs.append(gl_prefix + 'msaa' + sample_count)
# The NP produces a long error stream when we run with MSAA. The Tegra3 just
# doesn't support it.
if ('NexusPlayer' in bot or
'Tegra3' in bot or
# We aren't interested in fixing msaa bugs on current iOS devices.
'iPad4' in bot or
'iPadPro' in bot or
'iPhone6' in bot or
'iPhone7' in bot or
# skia:5792
'IntelHD530' in bot or
'IntelIris540' in bot):
configs = [x for x in configs if 'msaa' not in x]
# The NP produces different images for dft on every run.
if 'NexusPlayer' in bot:
configs = [x for x in configs if 'dft' not in x]
# We want to test both the OpenGL config and the GLES config on Linux Intel:
# GL is used by Chrome, GLES is used by ChromeOS.
# Also do the Ganesh threading verification test (render with and without
# worker threads, using only the SW path renderer, and compare the results).
if 'Intel' in bot and api.vars.is_linux:
configs.extend(['gles',
'glesdft',
'glessrgb',
'gltestthreading'])
# skbug.com/6333, skbug.com/6419, skbug.com/6702
blacklist('gltestthreading gm _ lcdblendmodes')
blacklist('gltestthreading gm _ lcdoverlap')
blacklist('gltestthreading gm _ textbloblooper')
# All of these GMs are flaky, too:
blacklist('gltestthreading gm _ bleed_alpha_bmp')
blacklist('gltestthreading gm _ bleed_alpha_bmp_shader')
blacklist('gltestthreading gm _ bleed_alpha_image')
blacklist('gltestthreading gm _ bleed_alpha_image_shader')
blacklist('gltestthreading gm _ savelayer_with_backdrop')
blacklist('gltestthreading gm _ persp_shaders_bw')
blacklist('gltestthreading gm _ dftext_blob_persp')
blacklist('gltestthreading gm _ dftext')
# skbug.com/7523 - Flaky on various GPUs
blacklist('gltestthreading gm _ orientation')
# CommandBuffer bot *only* runs the command_buffer config.
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
# ANGLE bot *only* runs the angle configs
if 'ANGLE' in bot:
configs = ['angle_d3d11_es2',
'angle_d3d9_es2',
'angle_gl_es2',
'angle_d3d11_es3']
      if sample_count:
configs.append('angle_d3d11_es2_msaa' + sample_count)
configs.append('angle_d3d11_es3_msaa' + sample_count)
if 'GTX' in bot or 'Quadro' in bot:
# See skia:7823 and chromium:693090.
configs.append('angle_gl_es3')
        if sample_count:
configs.append('angle_gl_es2_msaa' + sample_count)
configs.append('angle_gl_es3_msaa' + sample_count)
if 'NUC5i7RYH' in bot:
# skbug.com/7376
blacklist('_ test _ ProcessorCloneTest')
# Vulkan bot *only* runs the vk config.
if 'Vulkan' in bot:
configs = ['vk']
if 'Metal' in bot:
configs = ['mtl']
# Test 1010102 on our Linux/NVIDIA bots and the persistent cache config
# on the GL bots.
if ('QuadroP400' in bot and 'PreAbandonGpuContext' not in bot and
'TSAN' not in bot and api.vars.is_linux):
if 'Vulkan' in bot:
configs.append('vk1010102')
# Decoding transparent images to 1010102 just looks bad
blacklist('vk1010102 image _ _')
else:
configs.extend(['gl1010102', 'gltestpersistentcache'])
# Decoding transparent images to 1010102 just looks bad
blacklist('gl1010102 image _ _')
# These tests produce slightly different pixels run to run on NV.
blacklist('gltestpersistentcache gm _ atlastext')
blacklist('gltestpersistentcache gm _ dftext')
blacklist('gltestpersistentcache gm _ glyph_pos_h_b')
# Test SkColorSpaceXformCanvas and rendering to wrapped dsts on a few bots
if 'BonusConfigs' in api.vars.extra_tokens:
configs = ['gbr-gl', 'glbetex', 'glbert']
if 'ChromeOS' in bot:
# Just run GLES for now - maybe add gles_msaa4 in the future
configs = ['gles']
if 'Chromecast' in bot:
configs = ['gles']
# Test coverage counting path renderer.
if 'CCPR' in bot:
configs = [c for c in configs if c == 'gl' or c == 'gles']
args.extend(['--pr', 'ccpr', '--cachePathMasks', 'false'])
# DDL is a GPU-only feature
if 'DDL1' in bot:
# This bot generates gl and vk comparison images for the large skps
configs = [c for c in configs if c == 'gl' or c == 'vk']
args.extend(['--skpViewportSize', "2048"])
args.extend(['--pr', '~small'])
if 'DDL3' in bot:
# This bot generates the ddl-gl and ddl-vk images for the
# large skps and the gms
configs = ['ddl-' + c for c in configs if c == 'gl' or c == 'vk']
args.extend(['--skpViewportSize', "2048"])
args.extend(['--gpuThreads', "0"])
if 'Lottie' in bot:
configs = ['gl']
tf = api.vars.builder_cfg.get('test_filter')
if 'All' != tf:
# Expected format: shard_XX_YY
parts = tf.split('_')
if len(parts) == 3:
args.extend(['--shard', parts[1]])
args.extend(['--shards', parts[2]])
else: # pragma: nocover
raise Exception('Invalid task name - bad shards: %s' % tf)
args.append('--config')
args.extend(configs)
# Run tests, gms, and image decoding tests everywhere.
args.extend('--src tests gm image lottie colorImage svg skp'.split(' '))
if api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
# Don't run the 'svgparse_*' svgs on GPU.
blacklist('_ svg _ svgparse_')
elif bot == 'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN':
# Only run the CPU SVGs on 8888.
blacklist('~8888 svg _ _')
else:
# On CPU SVGs we only care about parsing. Only run them on the above bot.
args.remove('svg')
# Eventually I'd like these to pass, but for now just skip 'em.
if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
args.remove('tests')
if 'NativeFonts' in bot: # images won't exercise native font integration :)
args.remove('image')
args.remove('colorImage')
def remove_from_args(arg):
if arg in args:
args.remove(arg)
if 'DDL' in bot:
# The DDL bots just render the large skps and the gms
remove_from_args('tests')
remove_from_args('image')
remove_from_args('colorImage')
remove_from_args('svg')
else:
# Currently, only the DDL bots render skps
remove_from_args('skp')
if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
# Only run the lotties on Lottie bots.
remove_from_args('tests')
remove_from_args('gm')
remove_from_args('image')
remove_from_args('colorImage')
remove_from_args('svg')
remove_from_args('skp')
else:
remove_from_args('lottie')
# TODO: ???
blacklist('f16 _ _ dstreadshuffle')
blacklist('glsrgb image _ _')
blacklist('glessrgb image _ _')
  # No point in running these.
blacklist('gbr-8888 image _ _')
blacklist('gbr-8888 colorImage _ _')
# --src image --config g8 means "decode into Gray8", which isn't supported.
blacklist('g8 image _ _')
blacklist('g8 colorImage _ _')
if 'Valgrind' in bot:
# These take 18+ hours to run.
blacklist('pdf gm _ fontmgr_iter')
blacklist('pdf _ _ PANO_20121023_214540.jpg')
blacklist('pdf skp _ worldjournal')
blacklist('pdf skp _ desk_baidu.skp')
blacklist('pdf skp _ desk_wikipedia.skp')
blacklist('_ svg _ _')
if 'iOS' in bot:
blacklist(gl_prefix + ' skp _ _')
if 'Mac' in bot or 'iOS' in bot:
# CG fails on questionable bmps
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
# CG has unpredictable behavior on this questionable gif
# It's probably using uninitialized memory
blacklist('_ image gen_platf frame_larger_than_image.gif')
# CG has unpredictable behavior on incomplete pngs
# skbug.com/5774
blacklist('_ image gen_platf inc0.png')
blacklist('_ image gen_platf inc1.png')
blacklist('_ image gen_platf inc2.png')
blacklist('_ image gen_platf inc3.png')
blacklist('_ image gen_platf inc4.png')
blacklist('_ image gen_platf inc5.png')
blacklist('_ image gen_platf inc6.png')
blacklist('_ image gen_platf inc7.png')
blacklist('_ image gen_platf inc8.png')
blacklist('_ image gen_platf inc9.png')
blacklist('_ image gen_platf inc10.png')
blacklist('_ image gen_platf inc11.png')
blacklist('_ image gen_platf inc12.png')
blacklist('_ image gen_platf inc13.png')
blacklist('_ image gen_platf inc14.png')
blacklist('_ image gen_platf incInterlaced.png')
# These images fail after Mac 10.13.1 upgrade.
blacklist('_ image gen_platf incInterlaced.gif')
blacklist('_ image gen_platf inc1.gif')
blacklist('_ image gen_platf inc0.gif')
blacklist('_ image gen_platf butterfly.gif')
# WIC fails on questionable bmps
if 'Win' in bot:
blacklist('_ image gen_platf pal8os2v2.bmp')
blacklist('_ image gen_platf pal8os2v2-16.bmp')
blacklist('_ image gen_platf rgba32abf.bmp')
blacklist('_ image gen_platf rgb24prof.bmp')
blacklist('_ image gen_platf rgb24lprof.bmp')
blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
if 'x86_64' in bot and 'CPU' in bot:
# This GM triggers a SkSmallAllocator assert.
blacklist('_ gm _ composeshader_bitmap')
if 'Win' in bot or 'Mac' in bot:
# WIC and CG fail on arithmetic jpegs
blacklist('_ image gen_platf testimgari.jpg')
# More questionable bmps that fail on Mac, too. skbug.com/6984
blacklist('_ image gen_platf rle8-height-negative.bmp')
blacklist('_ image gen_platf rle4-height-negative.bmp')
# These PNGs have CRC errors. The platform generators seem to draw
# uninitialized memory without reporting an error, so skip them to
# avoid lots of images on Gold.
blacklist('_ image gen_platf error')
if 'Android' in bot or 'iOS' in bot or 'Chromecast' in bot:
    # This test crashes the N9 (perhaps because of large malloc/frees). It is
    # also fairly slow and not platform-specific, so we just disable it on all
    # of Android and iOS. skia:5438
blacklist('_ test _ GrShape')
if api.vars.internal_hardware_label == '2':
# skia:7160
blacklist('_ test _ SRGBReadWritePixels')
blacklist('_ test _ SRGBMipMap')
if api.vars.internal_hardware_label == '5':
# skia:8470
blacklist('_ test _ SRGBReadWritePixels')
blacklist('_ test _ ES2BlendWithNoTexture')
# skia:4095
bad_serialize_gms = ['bleed_image',
'c_gms',
'colortype',
'colortype_xfermodes',
'drawfilter',
'fontmgr_bounds_0.75_0',
'fontmgr_bounds_1_-0.25',
'fontmgr_bounds',
'fontmgr_match',
'fontmgr_iter',
'imagemasksubset']
# skia:5589
bad_serialize_gms.extend(['bitmapfilters',
'bitmapshaders',
'bleed',
'bleed_alpha_bmp',
'bleed_alpha_bmp_shader',
'convex_poly_clip',
'extractalpha',
'filterbitmap_checkerboard_32_32_g8',
'filterbitmap_image_mandrill_64',
'shadows',
'simpleaaclip_aaclip'])
# skia:5595
bad_serialize_gms.extend(['composeshader_bitmap',
'scaled_tilemodes_npot',
'scaled_tilemodes'])
# skia:5778
bad_serialize_gms.append('typefacerendering_pfaMac')
# skia:5942
bad_serialize_gms.append('parsedpaths')
# these use a custom image generator which doesn't serialize
bad_serialize_gms.append('ImageGeneratorExternal_rect')
bad_serialize_gms.append('ImageGeneratorExternal_shader')
# skia:6189
bad_serialize_gms.append('shadow_utils')
# skia:7938
bad_serialize_gms.append('persp_images')
# Not expected to round trip encoding/decoding.
bad_serialize_gms.append('all_bitmap_configs')
bad_serialize_gms.append('makecolorspace')
bad_serialize_gms.append('readpixels')
bad_serialize_gms.append('draw_image_set_rect_to_rect')
# This GM forces a path to be convex. That property doesn't survive
# serialization.
bad_serialize_gms.append('analytic_antialias_convex')
for test in bad_serialize_gms:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Mac' not in bot:
for test in ['bleed_alpha_image', 'bleed_alpha_image_shader']:
blacklist(['serialize-8888', 'gm', '_', test])
# It looks like we skip these only for out-of-memory concerns.
if 'Win' in bot or 'Android' in bot:
for test in ['verylargebitmap', 'verylarge_picture_image']:
blacklist(['serialize-8888', 'gm', '_', test])
if 'Mac' in bot and 'CPU' in bot:
# skia:6992
blacklist(['pic-8888', 'gm', '_', 'encode-platform'])
blacklist(['serialize-8888', 'gm', '_', 'encode-platform'])
# skia:4769
for test in ['drawfilter']:
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist([ 'lite-8888', 'gm', '_', test])
# skia:4703
for test in ['image-cacherator-from-picture',
'image-cacherator-from-raster',
'image-cacherator-from-ctable']:
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
  # GMs that require a raster-backed canvas
for test in ['gamut', 'complexclip4_bw', 'complexclip4_aa', 'p3']:
blacklist([ 'pic-8888', 'gm', '_', test])
blacklist([ 'lite-8888', 'gm', '_', test])
blacklist(['serialize-8888', 'gm', '_', test])
  # GMs that do not support tiles_rt
for test in ['complexclip4_bw', 'complexclip4_aa']:
blacklist([ 'tiles_rt-8888', 'gm', '_', test])
# Extensions for RAW images
r = ['arw', 'cr2', 'dng', 'nef', 'nrw', 'orf', 'raf', 'rw2', 'pef', 'srw',
'ARW', 'CR2', 'DNG', 'NEF', 'NRW', 'ORF', 'RAF', 'RW2', 'PEF', 'SRW']
# skbug.com/4888
# Blacklist RAW images (and a few large PNGs) on GPU bots
# until we can resolve failures.
if 'GPU' in bot:
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
# Blacklist memory intensive tests on 32-bit bots.
if ('Win8' in bot or 'Win2016' in bot) and 'x86-' in bot:
blacklist('_ image f16 _')
blacklist('_ image _ abnormal.wbmp')
blacklist('_ image _ interlaced1.png')
blacklist('_ image _ interlaced2.png')
blacklist('_ image _ interlaced3.png')
for raw_ext in r:
blacklist('_ image _ .%s' % raw_ext)
if 'Nexus5' in bot and 'GPU' in bot:
# skia:5876
blacklist(['_', 'gm', '_', 'encode-platform'])
if 'AndroidOne-GPU' in bot: # skia:4697, skia:4704, skia:4694, skia:4705
blacklist(['_', 'gm', '_', 'bigblurs'])
blacklist(['_', 'gm', '_', 'bleed'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp'])
blacklist(['_', 'gm', '_', 'bleed_alpha_bmp_shader'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image'])
blacklist(['_', 'gm', '_', 'bleed_alpha_image_shader'])
blacklist(['_', 'gm', '_', 'bleed_image'])
blacklist(['_', 'gm', '_', 'dropshadowimagefilter'])
blacklist(['_', 'gm', '_', 'filterfastbounds'])
blacklist([gl_prefix, 'gm', '_', 'imageblurtiled'])
blacklist(['_', 'gm', '_', 'imagefiltersclipped'])
blacklist(['_', 'gm', '_', 'imagefiltersscaled'])
blacklist(['_', 'gm', '_', 'imageresizetiled'])
blacklist(['_', 'gm', '_', 'matrixconvolution'])
blacklist(['_', 'gm', '_', 'strokedlines'])
    if sample_count != '':
gl_msaa_config = gl_prefix + 'msaa' + sample_count
blacklist([gl_msaa_config, 'gm', '_', 'imageblurtiled'])
blacklist([gl_msaa_config, 'gm', '_', 'imagefiltersbase'])
match = []
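  # Entries in 'match' feed DM's --match flag; a leading '~' excludes tests
  # whose names match the rest of the pattern.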
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
# skia:6575
match.append('~multipicturedraw_')
if 'CommandBuffer' in bot:
# https://crbug.com/697030
match.append('~HalfFloatAlphaTextureTest')
if 'AndroidOne' in bot:
match.append('~WritePixels') # skia:4711
match.append('~PremulAlphaRoundTrip_Gpu') # skia:7501
match.append('~ReimportImageTextureWithMipLevels') # skia:8090
if 'Chromecast' in bot:
if 'GPU' in bot:
# skia:6687
match.append('~animated-image-blurs')
match.append('~blur_0.01')
match.append('~blur_image_filter')
match.append('~check_small_sigma_offset')
match.append('~imageblur2')
match.append('~lighting')
match.append('~longpathdash')
match.append('~matrixconvolution')
match.append('~textblobmixedsizes_df')
match.append('~textblobrandomfont')
# Blacklisted to avoid OOM (we see DM just end with "broken pipe")
match.append('~bigbitmaprect_')
match.append('~DrawBitmapRect')
match.append('~drawbitmaprect')
match.append('~GM_animated-image-blurs')
match.append('~ImageFilterBlurLargeImage')
match.append('~savelayer_clipmask')
match.append('~TextBlobCache')
match.append('~verylarge')
if 'GalaxyS6' in bot:
match.append('~SpecialImage') # skia:6338
match.append('~skbug6653') # skia:6653
if 'MSAN' in bot:
match.extend(['~Once', '~Shared']) # Not sure what's up with these tests.
if 'TSAN' in bot:
    match.extend(['~ReadWriteAlpha']) # Flaky on TSAN-covered NVIDIA bots.
match.extend(['~RGBA4444TextureTest', # Flakier than they are important.
'~RGB565TextureTest'])
# By default, we test with GPU threading enabled, unless specifically
# disabled.
if 'NoGPUThreads' in bot:
args.extend(['--gpuThreads', '0'])
if 'Vulkan' in bot and 'Adreno530' in bot:
# skia:5777
match.extend(['~CopySurface'])
if 'Vulkan' in bot and 'Adreno' in bot:
# skia:7663
match.extend(['~WritePixelsNonTextureMSAA_Gpu'])
match.extend(['~WritePixelsMSAA_Gpu'])
if 'Vulkan' in bot and api.vars.is_linux and 'IntelIris640' in bot:
match.extend(['~VkHeapTests']) # skia:6245
if api.vars.is_linux and 'IntelIris640' in bot:
match.extend(['~GLPrograms']) # skia:7849
if 'Vulkan' in bot and api.vars.is_linux and 'IntelHD405' in bot:
# skia:7322
blacklist(['vk', 'gm', '_', 'skbug_257'])
blacklist(['vk', 'gm', '_', 'filltypespersp'])
match.append('~^ClearOp$')
match.append('~^CopySurface$')
match.append('~^ImageNewShader_GPU$')
match.append('~^InitialTextureClear$')
match.append('~^PinnedImageTest$')
match.append('~^ReadPixels_Gpu$')
match.append('~^ReadPixels_Texture$')
match.append('~^SRGBReadWritePixels$')
match.append('~^VkUploadPixelsTests$')
match.append('~^WritePixelsNonTexture_Gpu$')
match.append('~^WritePixelsNonTextureMSAA_Gpu$')
match.append('~^WritePixels_Gpu$')
match.append('~^WritePixelsMSAA_Gpu$')
if 'Vulkan' in bot and 'IntelIris540' in bot and 'Win' in bot:
# skia:6398
blacklist(['vk', 'gm', '_', 'aarectmodes'])
blacklist(['vk', 'gm', '_', 'aaxfermodes'])
blacklist(['vk', 'gm', '_', 'dont_clip_to_layer'])
blacklist(['vk', 'gm', '_', 'dftext'])
blacklist(['vk', 'gm', '_', 'dftext_blob_persp'])
blacklist(['vk', 'gm', '_', 'drawregionmodes'])
blacklist(['vk', 'gm', '_', 'filterfastbounds'])
blacklist(['vk', 'gm', '_', 'fontmgr_iter'])
blacklist(['vk', 'gm', '_', 'fontmgr_match'])
blacklist(['vk', 'gm', '_', 'fontscaler'])
blacklist(['vk', 'gm', '_', 'fontscalerdistortable'])
blacklist(['vk', 'gm', '_', 'gammagradienttext'])
blacklist(['vk', 'gm', '_', 'gammatext'])
blacklist(['vk', 'gm', '_', 'gradtext'])
blacklist(['vk', 'gm', '_', 'hairmodes'])
blacklist(['vk', 'gm', '_', 'imagefilters_xfermodes'])
blacklist(['vk', 'gm', '_', 'imagefiltersclipped'])
blacklist(['vk', 'gm', '_', 'imagefiltersscaled'])
blacklist(['vk', 'gm', '_', 'imagefiltersstroked'])
blacklist(['vk', 'gm', '_', 'imagefilterstransformed'])
blacklist(['vk', 'gm', '_', 'imageresizetiled'])
blacklist(['vk', 'gm', '_', 'lcdblendmodes'])
blacklist(['vk', 'gm', '_', 'lcdoverlap'])
blacklist(['vk', 'gm', '_', 'lcdtext'])
blacklist(['vk', 'gm', '_', 'lcdtextsize'])
blacklist(['vk', 'gm', '_', 'matriximagefilter'])
blacklist(['vk', 'gm', '_', 'mixedtextblobs'])
blacklist(['vk', 'gm', '_', 'resizeimagefilter'])
blacklist(['vk', 'gm', '_', 'rotate_imagefilter'])
blacklist(['vk', 'gm', '_', 'savelayer_lcdtext'])
blacklist(['vk', 'gm', '_', 'shadermaskfilter_image'])
blacklist(['vk', 'gm', '_', 'srcmode'])
blacklist(['vk', 'gm', '_', 'surfaceprops'])
blacklist(['vk', 'gm', '_', 'textblobgeometrychange'])
blacklist(['vk', 'gm', '_', 'textbloblooper'])
blacklist(['vk', 'gm', '_', 'textblobmixedsizes'])
blacklist(['vk', 'gm', '_', 'textblobrandomfont'])
blacklist(['vk', 'gm', '_', 'textfilter_color'])
blacklist(['vk', 'gm', '_', 'textfilter_image'])
blacklist(['vk', 'gm', '_', 'varied_text_clipped_lcd'])
blacklist(['vk', 'gm', '_', 'varied_text_ignorable_clip_lcd'])
if 'Vulkan' in bot and 'GTX660' in bot and 'Win' in bot:
# skbug.com/8047
match.append('~FloatingPointTextureTest$')
if 'MoltenVK' in bot:
# skbug.com/7959
blacklist(['_', 'gm', '_', 'vertices_scaled_shader'])
blacklist(['_', 'gm', '_', 'vertices'])
match.append('~^InitialTextureClear$')
match.append('~^RGB565TextureTest$')
match.append('~^RGBA4444TextureTest$')
match.append('~^WritePixelsNonTextureMSAA_Gpu$')
if 'ANGLE' in bot:
# skia:7835
match.append('~BlurMaskBiggerThanDest')
if 'IntelIris6100' in bot and 'ANGLE' in bot and 'Release' in bot:
# skia:7376
match.append('~^ProcessorOptimizationValidationTest$')
if ('IntelIris6100' in bot or 'IntelHD4400' in bot) and 'ANGLE' in bot:
# skia:6857
blacklist(['angle_d3d9_es2', 'gm', '_', 'lighting'])
if 'PowerVRGX6250' in bot:
    match.append('~gradients_view_perspective_nodither') # skia:6972
if '-arm-' in bot and 'ASAN' in bot:
# TODO: can we run with env allocator_may_return_null=1 instead?
match.append('~BadImage')
if 'Mac' in bot and 'IntelHD6000' in bot:
# skia:7574
match.append('~^ProcessorCloneTest$')
match.append('~^GrMeshTest$')
if 'Mac' in bot and 'IntelHD615' in bot:
# skia:7603
match.append('~^GrMeshTest$')
if 'Metal' in bot:
# skia:8243
match.append('~^ClearOp$')
match.append('~^DDLSurfaceCharacterizationTest$')
match.append('~^DDLOperatorEqTest$')
match.append('~^DeferredProxyTest$')
match.append('~^GPUMemorySize$')
match.append('~^GrContext_colorTypeSupportedAsImage$')
match.append('~^GrContext_colorTypeSupportedAsSurface$')
match.append('~^GrContext_maxSurfaceSamplesForColorType$')
match.append('~^GrContextFactory_sharedContexts$')
match.append('~^GrPipelineDynamicStateTest$')
match.append('~^InitialTextureClear$')
match.append('~^PromiseImageTest$')
match.append('~^ResourceAllocatorTest$')
match.append('~^RGB565TextureTest$')
match.append('~^RGBA4444TextureTest$')
match.append('~^TransferPixelsTest$')
match.append('~^SurfaceSemaphores$')
match.append('~^VertexAttributeCount$')
match.append('~^WrappedProxyTest$')
if blacklisted:
args.append('--blacklist')
args.extend(blacklisted)
if match:
args.append('--match')
args.extend(match)
  # These bots run out of memory running RAW codec tests. Do not run them in
  # parallel.
if 'NexusPlayer' in bot or 'Nexus5' in bot or 'Nexus9' in bot:
args.append('--noRAW_threading')
if 'FSAA' in bot:
args.extend(['--analyticAA', 'false', '--deltaAA', 'false'])
if 'FAAA' in bot:
args.extend(['--deltaAA', 'false', '--forceAnalyticAA'])
if 'FDAA' in bot:
args.extend(['--deltaAA', '--forceDeltaAA'])
if 'NativeFonts' not in bot:
args.append('--nonativeFonts')
if 'GDI' in bot:
args.append('--gdi')
if ('QuadroP400' in bot or
'Adreno540' in bot or
'IntelHD2000' in bot or # gen 6 - sandy bridge
'IntelHD4400' in bot or # gen 7 - haswell
'IntelHD405' in bot or # gen 8 - cherryview braswell
'IntelIris6100' in bot or # gen 8 - broadwell
'IntelIris540' in bot or # gen 9 - skylake
'IntelIris640' in bot or # gen 9 - kaby lake
'MaliT760' in bot or
'MaliT860' in bot or
'MaliT880' in bot):
args.extend(['--reduceOpListSplitting'])
# Let's make all bots produce verbose output by default.
args.append('--verbose')
return args
def key_params(api):
"""Build a unique key from the builder name (as a list).
E.g. arch x86 gpu GeForce320M mode MacMini4.1 os Mac10.6
"""
# Don't bother to include role, which is always Test.
blacklist = ['role', 'test_filter']
flat = []
for k in sorted(api.vars.builder_cfg.keys()):
if k not in blacklist:
flat.append(k)
flat.append(api.vars.builder_cfg[k])
return flat
def test_steps(api):
"""Run the DM test."""
b = api.properties['buildername']
use_hash_file = False
if upload_dm_results(b):
host_dm_dir = str(api.flavor.host_dirs.dm_dir)
api.flavor.create_clean_host_dir(api.path['start_dir'].join('test'))
device_dm_dir = str(api.flavor.device_dirs.dm_dir)
if host_dm_dir != device_dm_dir:
api.flavor.create_clean_device_dir(device_dm_dir)
# Obtain the list of already-generated hashes.
hash_filename = 'uninteresting_hashes.txt'
host_hashes_file = api.vars.tmp_dir.join(hash_filename)
hashes_file = api.flavor.device_path_join(
api.flavor.device_dirs.tmp_dir, hash_filename)
api.run(
api.python.inline,
'get uninteresting hashes',
program="""
import contextlib
import math
import socket
import sys
import time
import urllib2
HASHES_URL = sys.argv[1]
RETRIES = 5
TIMEOUT = 60
WAIT_BASE = 15
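      # Exponential backoff: sleep WAIT_BASE * 2**retry seconds between tries.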
socket.setdefaulttimeout(TIMEOUT)
for retry in range(RETRIES):
try:
with contextlib.closing(
urllib2.urlopen(HASHES_URL, timeout=TIMEOUT)) as w:
hashes = w.read()
with open(sys.argv[2], 'w') as f:
f.write(hashes)
break
except Exception as e:
print 'Failed to get uninteresting hashes from %s:' % HASHES_URL
print e
if retry == RETRIES:
raise
waittime = WAIT_BASE * math.pow(2, retry)
print 'Retry in %d seconds.' % waittime
time.sleep(waittime)
""",
args=[api.properties['gold_hashes_url'], host_hashes_file],
abort_on_failure=False,
fail_build_on_failure=False,
infra_step=True)
if api.path.exists(host_hashes_file):
api.flavor.copy_file_to_device(host_hashes_file, hashes_file)
use_hash_file = True
# Run DM.
properties = [
'gitHash', api.properties['revision'],
'builder', api.vars.builder_name,
'buildbucket_build_id', api.properties.get('buildbucket_build_id', ''),
]
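  # 'properties' is a flat key/value list; DM consumes it pairwise via the
  # --properties flag ('--key' below works the same way).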
if api.vars.is_trybot:
properties.extend([
'issue', api.vars.issue,
'patchset', api.vars.patchset,
'patch_storage', api.vars.patch_storage,
])
properties.extend(['swarming_bot_id', api.vars.swarming_bot_id])
properties.extend(['swarming_task_id', api.vars.swarming_task_id])
if 'Chromecast' in api.vars.builder_cfg.get('os', ''):
# Due to limited disk space, we only deal with skps and one image.
args = [
'dm',
'--resourcePath', api.flavor.device_dirs.resource_dir,
'--skps', api.flavor.device_dirs.skp_dir,
'--images', api.flavor.device_path_join(
api.flavor.device_dirs.resource_dir, 'images', 'color_wheel.jpg'),
'--nameByHash',
'--properties'
] + properties
else:
args = [
'dm',
'--resourcePath', api.flavor.device_dirs.resource_dir,
'--skps', api.flavor.device_dirs.skp_dir,
'--images', api.flavor.device_path_join(
api.flavor.device_dirs.images_dir, 'dm'),
'--colorImages', api.flavor.device_path_join(
api.flavor.device_dirs.images_dir, 'colorspace'),
'--nameByHash',
'--properties'
] + properties
args.extend(['--svgs', api.flavor.device_dirs.svg_dir])
if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
args.extend(['--lotties', api.flavor.device_dirs.lotties_dir])
args.append('--key')
keys = key_params(api)
if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
keys.extend(['renderer', 'skottie'])
args.extend(keys)
if use_hash_file:
args.extend(['--uninterestingHashesFile', hashes_file])
if upload_dm_results(b):
args.extend(['--writePath', api.flavor.device_dirs.dm_dir])
args.extend(dm_flags(api, api.vars.builder_name))
# See skia:2789.
if 'AbandonGpuContext' in api.vars.extra_tokens:
args.append('--abandonGpuContext')
if 'PreAbandonGpuContext' in api.vars.extra_tokens:
args.append('--preAbandonGpuContext')
if 'ReleaseAndAbandonGpuContext' in api.vars.extra_tokens:
args.append('--releaseAndAbandonGpuContext')
api.run(api.flavor.step, 'dm', cmd=args, abort_on_failure=False)
if upload_dm_results(b):
# Copy images and JSON to host machine if needed.
api.flavor.copy_directory_contents_to_host(
api.flavor.device_dirs.dm_dir, api.flavor.host_dirs.dm_dir)
def RunSteps(api):
api.vars.setup()
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
api.flavor.setup()
env = {}
if 'iOS' in api.vars.builder_name:
env['IOS_BUNDLE_ID'] = 'com.google.dm'
env['IOS_MOUNT_POINT'] = api.vars.slave_dir.join('mnt_iosdevice')
with api.context(env=env):
try:
if 'Chromecast' in api.vars.builder_name:
api.flavor.install(resources=True, skps=True)
elif 'Lottie' in api.vars.builder_name:
api.flavor.install(resources=True, lotties=True)
else:
api.flavor.install(skps=True, images=True, svgs=True, resources=True)
test_steps(api)
finally:
api.flavor.cleanup_steps()
api.run.check_failure()
TEST_BUILDERS = [
'Test-Android-Clang-AndroidOne-GPU-Mali400MP2-arm-Release-All-Android',
'Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-All-Android',
('Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-All'
'-Android_NoGPUThreads'),
('Test-Android-Clang-GalaxyS7_G930FD-GPU-MaliT880-arm64-Release-All'
'-Android_Vulkan'),
'Test-Android-Clang-MotoG4-CPU-Snapdragon617-arm-Release-All-Android',
'Test-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Debug-All-Android_CCPR',
'Test-Android-Clang-Nexus5-GPU-Adreno330-arm-Release-All-Android',
'Test-Android-Clang-Nexus7-CPU-Tegra3-arm-Release-All-Android',
'Test-Android-Clang-NexusPlayer-GPU-PowerVRG6430-x86-Release-All-Android',
'Test-Android-Clang-Pixel-GPU-Adreno530-arm64-Debug-All-Android_Vulkan',
'Test-Android-Clang-Pixel-GPU-Adreno530-arm-Debug-All-Android_ASAN',
('Test-ChromeOS-Clang-AcerChromebookR13Convertible-GPU-PowerVRGX6250-'
'arm-Debug-All'),
'Test-Chromecast-Clang-Chorizo-CPU-Cortex_A7-arm-Release-All',
'Test-Chromecast-Clang-Chorizo-GPU-Cortex_A7-arm-Release-All',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-BonusConfigs',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-shard_00_10-Coverage',
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-MSAN',
('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All'
'-SK_USE_DISCARDABLE_SCALEDIMAGECACHE'),
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-Lottie',
('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All'
'-SK_FORCE_RASTER_PIPELINE_BLITTER'),
'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-TSAN',
'Test-Debian9-Clang-GCE-GPU-SwiftShader-x86_64-Release-All-SwiftShader',
'Test-Debian9-Clang-NUC5PPYH-GPU-IntelHD405-x86_64-Release-All-Vulkan',
'Test-Debian9-Clang-NUC7i5BNK-GPU-IntelIris640-x86_64-Debug-All-Vulkan',
'Test-Mac-Clang-MacBook10.1-GPU-IntelHD615-x86_64-Release-All-NativeFonts',
'Test-Mac-Clang-MacBookAir7.2-GPU-IntelHD6000-x86_64-Debug-All',
'Test-Mac-Clang-MacBookPro11.5-CPU-AVX2-x86_64-Release-All',
'Test-Mac-Clang-MacBookPro11.5-GPU-RadeonHD8870M-x86_64-Debug-All-Metal',
('Test-Mac-Clang-MacBookPro11.5-GPU-RadeonHD8870M-x86_64-Release-All-'
'MoltenVK_Vulkan'),
'Test-Mac-Clang-MacMini7.1-GPU-IntelIris5100-x86_64-Debug-All-CommandBuffer',
'Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-Vulkan_Coverage',
('Test-Ubuntu17-GCC-Golo-GPU-QuadroP400-x86_64-Release-All'
'-Valgrind_AbandonGpuContext_SK_CPU_LIMIT_SSE41'),
('Test-Ubuntu17-GCC-Golo-GPU-QuadroP400-x86_64-Release-All'
'-Valgrind_PreAbandonGpuContext_SK_CPU_LIMIT_SSE41'),
'Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL1',
'Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL3',
'Test-Ubuntu17-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-Lottie',
'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-BonusConfigs',
('Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
'-ReleaseAndAbandonGpuContext'),
'Test-Win10-Clang-NUC5i7RYH-CPU-AVX2-x86_64-Debug-All-NativeFonts_GDI',
'Test-Win10-Clang-NUC5i7RYH-GPU-IntelIris6100-x86_64-Release-All-ANGLE',
'Test-Win10-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-All-ANGLE',
'Test-Win10-Clang-NUC6i5SYK-GPU-IntelIris540-x86_64-Debug-All-Vulkan',
'Test-Win10-Clang-NUCD34010WYKH-GPU-IntelHD4400-x86_64-Release-All-ANGLE',
'Test-Win10-Clang-ShuttleA-GPU-GTX660-x86_64-Release-All-Vulkan',
'Test-Win10-Clang-ShuttleC-GPU-GTX960-x86_64-Debug-All-ANGLE',
'Test-Win2016-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FAAA',
'Test-Win2016-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FDAA',
'Test-Win2016-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FSAA',
'Test-iOS-Clang-iPadPro-GPU-PowerVRGT7800-arm64-Release-All',
]
def GenTests(api):
for builder in TEST_BUILDERS:
test = (
api.test(builder) +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get swarming bot id',
stdout=api.raw_io.output('skia-bot-123')) +
api.step_data('get swarming task id',
stdout=api.raw_io.output('123456'))
)
if 'Win' in builder:
test += api.platform('win', 64)
if 'Chromecast' in builder:
test += api.step_data(
'read chromecast ip',
stdout=api.raw_io.output('192.168.1.2:5555'))
if 'ChromeOS' in builder:
test += api.step_data(
'read chromeos ip',
stdout=api.raw_io.output('{"user_ip":"foo@127.0.0.1"}'))
yield test
builder = 'Test-Win8-Clang-Golo-CPU-AVX-x86-Debug-All'
yield (
api.test('trybot') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.properties(patch_storage='gerrit') +
api.properties.tryserver(
buildername=builder,
gerrit_project='skia',
gerrit_url='https://skia-review.googlesource.com/',
)+
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
builder = 'Test-Debian9-GCC-GCE-CPU-AVX2-x86_64-Debug-All'
yield (
api.test('failed_dm') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('symbolized dm', retcode=1)
)
builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Release-All-Android'
yield (
api.test('failed_get_hashes') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get uninteresting hashes', retcode=1)
)
builder = ('Test-Android-Clang-NexusPlayer-CPU-Moorefield-x86-'
'Debug-All-Android')
yield (
api.test('failed_push') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get swarming bot id',
stdout=api.raw_io.output('build123-m2--device5')) +
api.step_data('push [START_DIR]/skia/resources/* '+
'/sdcard/revenge_of_the_skiabot/resources', retcode=1)
)
builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-All-Android'
retry_step_name = 'adb pull.pull /sdcard/revenge_of_the_skiabot/dm_out'
yield (
api.test('failed_pull') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
gold_hashes_url='https://example.com/hashes.txt',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('dm', retcode=1) +
api.step_data(retry_step_name, retcode=1) +
api.step_data(retry_step_name + ' (attempt 2)', retcode=1) +
api.step_data(retry_step_name + ' (attempt 3)', retcode=1)
)
yield (
api.test('internal_bot_2') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
gold_hashes_url='https://example.com/hashes.txt',
internal_hardware_label='2') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
yield (
api.test('internal_bot_5') +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]',
gold_hashes_url='https://example.com/hashes.txt',
internal_hardware_label='5') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'svg', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
)
)
| 38.388226 | 80 | 0.625772 |
43de6d701fd333c80e55f1bc9019f3c9f41a8558 | 24,580 | py | Python | anuga/damage_modelling/tests/test_inundation_damage.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | anuga/damage_modelling/tests/test_inundation_damage.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | anuga/damage_modelling/tests/test_inundation_damage.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | #!/usr/bin/env python
#
from __future__ import print_function
from builtins import str
from builtins import range
import unittest
import tempfile
import os, sys
import time
import csv
from random import seed
seed(17) #, version=1) # Make probabilistic tests reproducible
#from anuga.damage.inundation_damage import _calc_collapse_structures
from anuga.damage_modelling.inundation_damage import *
from anuga.geospatial_data.geospatial_data import Geospatial_data
from anuga.pmesh.mesh import Mesh
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.utilities.numerical_tools import mean
from anuga.utilities import system_tools
from anuga.file.sww import SWW_file
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions\
import Transmissive_boundary
import numpy as num
from pprint import pprint
def elevation_function(x, y):
return -x
class Test_inundation_damage(unittest.TestCase):
# Class variable
verbose = False
def set_verbose(self):
        Test_inundation_damage.verbose = True
def setUp(self):
#print "****set up****"
# Create an sww file
# Set up an sww that has a geo ref.
# have it cover an area in Australia. 'gong maybe
#Don't have many triangles though!
#Site Name: GDA-MGA: (UTM with GRS80 ellipsoid)
#Zone: 56
#Easting: 222908.705 Northing: 6233785.284
#Latitude: -34 0 ' 0.00000 '' Longitude: 150 0 ' 0.00000 ''
#Grid Convergence: -1 40 ' 43.13 '' Point Scale: 1.00054660
#geo-ref
#Zone: 56
#Easting: 220000 Northing: 6230000
#have a big area covered.
mesh_file = tempfile.mktemp(".tsh")
points_lat_long = [[-33,152],[-35,152],[-35,150],[-33,150]]
spat = Geospatial_data(data_points=points_lat_long,
points_are_lats_longs=True)
points_ab = spat.get_data_points( absolute = True)
geo = Geo_reference(56,400000,6000000)
spat.set_geo_reference(geo)
m = Mesh()
m.add_vertices(spat)
m.auto_segment()
m.generate_mesh(verbose=False)
m.export_mesh_file(mesh_file)
#Create shallow water domain
domain = Domain(mesh_file)
os.remove(mesh_file)
domain.default_order=2
#Set some field values
#domain.set_quantity('stage', 1.0)
domain.set_quantity('elevation', -0.5)
domain.set_quantity('friction', 0.03)
######################
# Boundary conditions
B = Transmissive_boundary(domain)
domain.set_boundary( {'exterior': B})
######################
#Initial condition - with jumps
bed = domain.quantities['elevation'].vertex_values
stage = num.zeros(bed.shape, float)
h = 0.3
for i in range(stage.shape[0]):
if i % 2 == 0:
stage[i,:] = bed[i,:] + h
else:
stage[i,:] = bed[i,:]
domain.set_quantity('stage', stage)
domain.set_quantity('xmomentum', stage*22.0)
domain.set_quantity('ymomentum', stage*55.0)
domain.distribute_to_vertices_and_edges()
self.domain = domain
C = domain.get_vertex_coordinates()
self.X = C[:,0:6:2].copy()
self.Y = C[:,1:6:2].copy()
self.F = bed
#sww_file = tempfile.mktemp("")
self.domain.set_name('tid_P0')
self.domain.format = 'sww'
self.domain.smooth = True
self.domain.reduction = mean
sww = SWW_file(self.domain)
sww.store_connectivity()
sww.store_timestep()
self.domain.set_time(2.)
sww.store_timestep()
self.sww = sww # so it can be deleted
#Create another sww file
mesh_file = tempfile.mktemp(".tsh")
points_lat_long = [[-35,152],[-36,152],[-36,150],[-35,150]]
spat = Geospatial_data(data_points=points_lat_long,
points_are_lats_longs=True)
points_ab = spat.get_data_points( absolute = True)
geo = Geo_reference(56,400000,6000000)
spat.set_geo_reference(geo)
m = Mesh()
m.add_vertices(spat)
m.auto_segment()
m.generate_mesh(verbose=False)
m.export_mesh_file(mesh_file)
#Create shallow water domain
domain = Domain(mesh_file)
os.remove(mesh_file)
domain.default_order=2
#Set some field values
#domain.set_quantity('stage', 1.0)
domain.set_quantity('elevation', -40)
domain.set_quantity('friction', 0.03)
######################
# Boundary conditions
B = Transmissive_boundary(domain)
domain.set_boundary( {'exterior': B})
######################
#Initial condition - with jumps
bed = domain.quantities['elevation'].vertex_values
stage = num.zeros(bed.shape, float)
h = 30.
for i in range(stage.shape[0]):
if i % 2 == 0:
stage[i,:] = bed[i,:] + h
else:
stage[i,:] = bed[i,:]
domain.set_quantity('stage', stage)
domain.set_quantity('xmomentum', stage*22.0)
domain.set_quantity('ymomentum', stage*55.0)
domain.distribute_to_vertices_and_edges()
self.domain2 = domain
C = domain.get_vertex_coordinates()
self.X2 = C[:,0:6:2].copy()
self.Y2 = C[:,1:6:2].copy()
self.F2 = bed
#sww_file = tempfile.mktemp("")
domain.set_name('tid_P1')
domain.format = 'sww'
domain.smooth = True
domain.reduction = mean
sww = SWW_file(domain)
sww.store_connectivity()
sww.store_timestep()
domain.set_time(2.0)
sww.store_timestep()
self.swwII = sww # so it can be deleted
# print "sww.filename", sww.filename
#Create a csv file
self.csv_file = tempfile.mktemp(".csv")
fd = open(self.csv_file,'w',newline="")
writer = csv.writer(fd)
writer.writerow(['LONGITUDE','LATITUDE',STR_VALUE_LABEL,CONT_VALUE_LABEL,'ROOF_TYPE',WALL_TYPE_LABEL, SHORE_DIST_LABEL])
writer.writerow(['151.5','-34','199770','130000','Metal','Timber',20.])
writer.writerow(['151','-34.5','150000','76000','Metal','Double Brick',200.])
writer.writerow(['151','-34.25','150000','76000','Metal','Brick Veneer',200.])
fd.close()
#Create a csv file
self.csv_fileII = tempfile.mktemp(".csv")
fd = open(self.csv_fileII,'w',newline="")
writer = csv.writer(fd)
writer.writerow(['LONGITUDE','LATITUDE',STR_VALUE_LABEL,CONT_VALUE_LABEL,'ROOF_TYPE',WALL_TYPE_LABEL, SHORE_DIST_LABEL])
writer.writerow(['151.5','-34','199770','130000','Metal','Timber',20.])
writer.writerow(['151','-34.5','150000','76000','Metal','Double Brick',200.])
writer.writerow(['151','-34.25','150000','76000','Metal','Brick Veneer',200.])
fd.close()
def tearDown(self):
#print "***** tearDown ********"
# FIXME (Ole): Sometimes this fails - is the file open or is it sometimes not created?
try:
# Sometimes this fails - don't know why.
# Seems to be that the file is not created, since after it
# fails there are no sww files in the anuga directory
os.remove(self.sww.filename)
os.remove(self.swwII.filename)
except OSError:
pass
os.remove(self.csv_file)
os.remove(self.csv_fileII)
def test_inundation_damage1(self):
# Note, this isn't testing the results,
        # just that it all runs
sww_file = self.domain.get_name() + "." + self.domain.format
#print "sww_file",sww_file
inundation_damage(sww_file, self.csv_file, verbose=False)
def test_inundation_damage_list_as_input(self):
# Note, this isn't testing the results,
        # just that it all runs
sww_file = self.domain.get_name() + "." + self.domain.format
#print "sww_file",sww_file
inundation_damage(sww_file,
[self.csv_file, self.csv_fileII], verbose=False)
def test_inundation_damage2(self):
# create mesh
mesh_file = tempfile.mktemp(".tsh")
points = [[0.0,0.0],[6.0,0.0],[6.0,6.0],[0.0,6.0]]
m = Mesh()
m.add_vertices(points)
m.auto_segment()
m.generate_mesh(verbose=False)
m.export_mesh_file(mesh_file)
#Create shallow water domain
domain = Domain(mesh_file)
os.remove(mesh_file)
domain.default_order=2
#Set some field values
domain.set_quantity('elevation', elevation_function)
domain.set_quantity('friction', 0.03)
domain.set_quantity('xmomentum', 22.0)
domain.set_quantity('ymomentum', 55.0)
######################
# Boundary conditions
B = Transmissive_boundary(domain)
domain.set_boundary( {'exterior': B})
# This call mangles the stage values.
domain.distribute_to_vertices_and_edges()
domain.set_quantity('stage', 0.3)
#sww_file = tempfile.mktemp("")
domain.set_name('datatest' + str(time.time()))
domain.format = 'sww'
domain.smooth = True
domain.reduction = mean
sww = SWW_file(domain)
sww.store_connectivity()
sww.store_timestep()
domain.set_quantity('stage', -0.3)
domain.set_time(2.)
sww.store_timestep()
#Create a csv file
csv_file = tempfile.mktemp(".csv")
fd = open(csv_file,'w',newline="")
writer = csv.writer(fd)
writer.writerow(['x', 'y', STR_VALUE_LABEL, CONT_VALUE_LABEL, \
'ROOF_TYPE', WALL_TYPE_LABEL, SHORE_DIST_LABEL])
writer.writerow([5.5,0.5,'10','130000','Metal','Timber',20])
writer.writerow([4.5,1.0,'150','76000','Metal','Double Brick',20])
writer.writerow([0.1,1.5,'100','76000','Metal','Brick Veneer',300])
writer.writerow([6.1,1.5,'100','76000','Metal','Brick Veneer',300])
fd.close()
sww_file = domain.get_name() + "." + domain.format
#print "sww_file",sww_file
inundation_damage(sww_file, csv_file, verbose=False)
csv_handle = Exposure(csv_file)
struct_loss = csv_handle.get_column(EventDamageModel.STRUCT_LOSS_TITLE)
#print "struct_loss",struct_loss
struct_loss = [float(x) for x in struct_loss]
assert num.allclose(struct_loss, [10.0, 150.0, 66.553333478768664, 0.0])
depth = csv_handle.get_column(EventDamageModel.MAX_DEPTH_TITLE)
#print "depth",depth
depth = [float(x) for x in depth]
assert num.allclose(depth,[3.00000001192092, 2.9166666785875957, 2.2666666785875957, -0.3])
os.remove(sww.filename)
os.remove(csv_file)
def test_inundation_damage_list(self):
# create mesh
mesh_file = tempfile.mktemp(".tsh")
points = [[0.0,0.0],[6.0,0.0],[6.0,6.0],[0.0,6.0]]
m = Mesh()
m.add_vertices(points)
m.auto_segment()
m.generate_mesh(verbose=False)
m.export_mesh_file(mesh_file)
#Create shallow water domain
domain = Domain(mesh_file)
os.remove(mesh_file)
domain.default_order=2
#Set some field values
domain.set_quantity('elevation', elevation_function)
domain.set_quantity('friction', 0.03)
domain.set_quantity('xmomentum', 22.0)
domain.set_quantity('ymomentum', 55.0)
######################
# Boundary conditions
B = Transmissive_boundary(domain)
domain.set_boundary( {'exterior': B})
# This call mangles the stage values.
domain.distribute_to_vertices_and_edges()
domain.set_quantity('stage', 0.3)
#sww_file = tempfile.mktemp("")
domain.set_name('datatest' + str(time.time()))
domain.format = 'sww'
domain.smooth = True
domain.reduction = mean
sww = SWW_file(domain)
sww.store_connectivity()
sww.store_timestep()
domain.set_quantity('stage', -0.3)
domain.set_time(2.)
sww.store_timestep()
#Create a csv file
csv_file = tempfile.mktemp(".csv")
fd = open(csv_file,'w',newline="")
writer = csv.writer(fd)
writer.writerow(['x','y',STR_VALUE_LABEL,CONT_VALUE_LABEL,'ROOF_TYPE',WALL_TYPE_LABEL, SHORE_DIST_LABEL])
writer.writerow([5.5,0.5,'10','130000','Metal','Timber',20])
writer.writerow([4.5,1.0,'150','76000','Metal','Double Brick',20])
writer.writerow([0.1,1.5,'100','76000','Metal','Brick Veneer',300])
writer.writerow([6.1,1.5,'100','76000','Metal','Brick Veneer',300])
fd.close()
extension = ".csv"
csv_fileII = tempfile.mktemp(extension)
fd = open(csv_fileII,'w',newline="")
writer = csv.writer(fd)
writer.writerow(['x','y',STR_VALUE_LABEL,CONT_VALUE_LABEL,'ROOF_TYPE',WALL_TYPE_LABEL, SHORE_DIST_LABEL])
writer.writerow([5.5,0.5,'10','130000','Metal','Timber',20])
writer.writerow([4.5,1.0,'150','76000','Metal','Double Brick',20])
writer.writerow([0.1,1.5,'100','76000','Metal','Brick Veneer',300])
writer.writerow([6.1,1.5,'100','76000','Metal','Brick Veneer',300])
fd.close()
sww_file = domain.get_name() + "." + domain.format
#print "sww_file",sww_file
marker='_gosh'
inundation_damage(sww_file, [csv_file, csv_fileII],
exposure_file_out_marker=marker,
verbose=False)
# Test one file
csv_handle = Exposure(csv_file[:-4]+marker+extension)
struct_loss = csv_handle.get_column(EventDamageModel.STRUCT_LOSS_TITLE)
#print "struct_loss",struct_loss
struct_loss = [float(x) for x in struct_loss]
#pprint(struct_loss)
assert num.allclose(struct_loss,[10.0, 150.0, 66.55333347876866, 0.0])
depth = csv_handle.get_column(EventDamageModel.MAX_DEPTH_TITLE)
#print "depth",depth
depth = [float(x) for x in depth]
assert num.allclose(depth, [3.000000011920929, 2.9166666785875957, 2.2666666785875957, -0.3])
# Test another file
csv_handle = Exposure(csv_fileII[:-4]+marker+extension)
struct_loss = csv_handle.get_column(EventDamageModel.STRUCT_LOSS_TITLE)
#print "struct_loss",struct_loss
struct_loss = [float(x) for x in struct_loss]
#pprint(struct_loss)
assert num.allclose(struct_loss, [10.0, 150.0, 66.553333478768664, 0.0])
depth = csv_handle.get_column(EventDamageModel.MAX_DEPTH_TITLE)
#print "depth",depth
depth = [float(x) for x in depth]
assert num.allclose(depth,[3.000000011920929, 2.9166666785875957, 2.2666666785875957, -0.3])
os.remove(sww.filename)
os.remove(csv_file)
os.remove(csv_fileII)
def ztest_add_depth_and_momentum2csv(self):
sww_file = self.domain.get_name() + "." + self.domain.format
#print "sww_file",sww_file
out_csv = tempfile.mktemp(".csv")
print("out_csv",out_csv)
add_depth_and_momentum2csv(sww_file, self.csv_file,
out_csv, verbose=False)
def test_calc_damage_percentages(self):
max_depths = [-0.3, 0.0, 1.0,-0.3, 0.0, 1.0,-0.3, 0.0, 1.0]
shore_distances = [100, 100, 100, 100, 100, 100, 100, 100, 100]
walls = ['Double Brick',
'Double Brick',
'Double Brick',
'Timber',
'Timber',
'Timber',
'Brick Veneer',
'Brick Veneer',
'Brick Veneer']
struct_costs = [10,
10,
10,
10,
10,
10,
1,
1,
1]
content_costs = [100,
100,
100,
100,
100,
100,
10,
10,
10]
edm = EventDamageModel(max_depths, shore_distances, walls,
struct_costs, content_costs)
edm.calc_damage_percentages()
assert num.allclose(edm.struct_damage,[0.0,0.016,0.572,
0.0,0.016,0.618,
0.0,0.016,0.618])
assert num.allclose(edm.contents_damage,[0.0,0.013,0.970,
0.0,0.013,0.970,
0.0,0.013,0.970])
edm.calc_cost()
assert num.allclose(edm.struct_loss,[0.0,.16,5.72,
0.0,.16,6.18,
0.0,0.016,0.618])
assert num.allclose(edm.contents_loss,[0.0,1.3,97,
0.0,1.3,97,
0.0,0.13,9.7])
def test_calc_collapse_structures1(self):
edm = EventDamageModel([0.0]*17, [0.0]*17, [0.0]*17,
[0.0]*17, [0.0]*17)
edm.struct_damage = num.zeros(17,float)
edm.contents_damage = num.zeros(17,float)
collapse_probability = {0.4:[0], #0
0.6:[1], #1
0.5:[2], #1
0.25:[3,4], #1
0.1:[5,6,7,8], #0
0.2:[9,10,11,12,13,14,15,16]} #2
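        # The trailing #N comments record how many collapses are expected
        # from each probability bucket under the module-level seed(17).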
assert num.allclose(edm.max_depths, 0.0)
assert num.allclose(edm.shore_distances, 0.0)
assert num.allclose(edm.walls, 0.0)
assert num.allclose(edm.struct_costs, 0.0)
assert num.allclose(edm.content_costs, 0.0)
edm._calc_collapse_structures(collapse_probability, verbose_csv=True)
# Random numbers are not stable between Python2 and Python3 - even with the same seed seed(17, version=1)
# See https://stackoverflow.com/questions/11929701/why-is-seeding-the-random-generator-not-stable-between-versions-of-python
if system_tools.major_version == 2:
assert num.allclose(edm.struct_damage, [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), 'Expected %s' % edm.struct_damage
assert num.allclose(edm.contents_damage, [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), 'Expected %s' % edm.contents_damage
self.assertTrue(edm.struct_damage[0] == 0.0 and
edm.contents_damage[0] == 0.0,
'Error!')
self.assertTrue(edm.struct_damage[1] == 1.0 and
edm.contents_damage[1] == 1.0,
'Error!')
self.assertTrue(edm.struct_damage[2] == 1.0 and
edm.contents_damage[2] == 1.0,
'Error!')
self.assertTrue(edm.struct_damage[3] + edm.struct_damage[4] == 1.0 and
edm.contents_damage[3] + edm.contents_damage[4] ==1.0,
'Error!')
elif system_tools.major_version == 3:
assert num.allclose(edm.struct_damage, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0]), 'Expected %s' % edm.struct_damage
assert num.allclose(edm.contents_damage, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0]), 'Expected %s' % edm.contents_damage
self.assertTrue(edm.struct_damage[0] == 0.0 and
edm.contents_damage[0] == 0.0,
'Error!')
self.assertTrue(edm.struct_damage[1] == 1.0 and
edm.contents_damage[1] == 1.0,
'Error!')
self.assertTrue(edm.struct_damage[2] == 0.0 and
edm.contents_damage[2] == 0.0,
'Error!')
self.assertTrue(edm.struct_damage[3] + edm.struct_damage[4] == 0 and
edm.contents_damage[3] + edm.contents_damage[4] ==0,
'Error!')
else:
raise Exception('Unknown python version: %s' % system_tools.version)
sum_struct = 0.0
sum_contents = 0.0
for i in [5,6,7,8]:
sum_struct += edm.struct_damage[i]
sum_contents += edm.contents_damage[i]
#print("", end=' ')
self.assertTrue(sum_struct == 0.0 and sum_contents == 0.0,
'Error!')
sum_struct = 0.0
sum_contents = 0.0
for i in [9,10,11,12,13,14,15,16]:
sum_struct += edm.struct_damage[i]
sum_contents += edm.contents_damage[i]
self.assertTrue( sum_struct == 2.0 and sum_contents == 2.0,
'Error!')
def test_calc_collapse_probability(self):
depth = [0.0, 0.5, 0.5 , 1.5, 2.5, 4.5, 10000, 2.0]
shore_distance = [0.0, 125, 250.1, 0.0, 150, 225, 10000, 251]
dummy = depth
edm = EventDamageModel(depth, shore_distance, dummy, dummy, dummy)
struct_coll_prob = edm.calc_collapse_probability()
answer = {0.05:[1,7],
0.6:[3],
0.4:[4],
0.5:[5],
0.45:[6]}
#print "struct_coll_prob",struct_coll_prob
#print "answer",answer
self.assertTrue( struct_coll_prob == answer,
'Error!')
def test_calc_damage_and_costs(self):
max_depths = [-0.3, 0.0, 1.0,-0.3, 0.0, 1.0,-0.3, 0.0, 10.0]
shore_distances = [100, 100, 100, 100, 100, 100, 100, 100, 100]
walls = ['Double Brick',
'Double Brick',
'Double Brick',
'Timber',
'Timber',
'Timber',
'Brick Veneer',
'Brick Veneer',
'Brick Veneer']
struct_costs = [10,
10,
10,
10,
10,
10,
1,
1,
1]
content_costs = [100,
100,
100,
100,
100,
100,
10,
10,
10]
edm = EventDamageModel(max_depths, shore_distances, walls,
struct_costs, content_costs)
results_dic = edm.calc_damage_and_costs(verbose_csv=True)
#print "results_dic",results_dic
def test_calc_max_depth_and_momentum(self):
sww_file = "tid" # self.domain.get_name() + "." + self.domain.format
points_lat_long = [[-34, 151.5],[-35.5, 151.5],[-50, 151]]
spat = Geospatial_data(data_points=points_lat_long,
points_are_lats_longs=True)
points_ab = spat.get_data_points( absolute = True)
deps, _ = calc_max_depth_and_momentum(sww_file,
points_ab,
verbose=self.verbose,
use_cache = False)
# Test values based on returned results, so not an excellent test
assert num.allclose(deps[0],0.113204555211)
assert num.allclose(deps[1],11.3215)
assert num.allclose(deps[2],0.0) # this value is outside both sww files
#-------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1][0].upper() == 'V':
Test_inundation_damage.verbose=True
saveout = sys.stdout
filename = ".temp_verbose"
fid = open(filename, 'w',newline="")
sys.stdout = fid
else:
pass
suite = unittest.makeSuite(Test_inundation_damage,'test')
runner = unittest.TextTestRunner()
runner.run(suite)
# Cleaning up
if len(sys.argv) > 1 and sys.argv[1][0].upper() == 'V':
sys.stdout = saveout
fid.close()
os.remove(filename)
| 37.641654 | 142 | 0.547315 |
595fc4560c600bc335113e6e337290f1a542385c | 3,531 | py | Python | problems/template-problem.py | beratyenilen/qc-ga | 8c94fe6a89a627fc65a64d3102823c8466f50649 | [
"Apache-2.0"
] | null | null | null | problems/template-problem.py | beratyenilen/qc-ga | 8c94fe6a89a627fc65a64d3102823c8466f50649 | [
"Apache-2.0"
] | 14 | 2021-03-01T11:43:05.000Z | 2021-06-16T17:03:59.000Z | problems/template-problem.py | beratyenilen/qc-ga | 8c94fe6a89a627fc65a64d3102823c8466f50649 | [
"Apache-2.0"
] | null | null | null | # Importing the necessary modules.
import projectq
from projectq.ops import H, X, Y, Z, T, Tdagger, S, Sdagger, CNOT, Measure, All, Rx, Ry, Rz, SqrtX
import numpy as np
import copy
from constants import *
from deap import creator, base, tools
from candidate import Candidate
from constants import *
from evolution import crossoverInd, mutateInd, selectAndEvolve, geneticAlgorithm
# Import additional modules you want to use in here
def desiredState():
'''
    This function returns the state vector of the desired state as a list
    whose ith element is the ith coefficient of the state vector.
'''
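    # A minimal sketch (hypothetical target): for a 2-qubit Bell state
    # (|00> + |11>)/sqrt(2) this could simply be
    #   return [1/np.sqrt(2), 0, 0, 1/np.sqrt(2)]
    # Replace it with the coefficients of the state you actually want.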
return None
def evaluateInd(individual, verbose=False):
'''
    This function should take an individual, possibly an instance of the Candidate class,
and return a tuple where each element of the tuple is an objective.
An example objective would be (error,circuitLen) where:
        error = |1 - <createdState|wantedState>|
circuitLen = len(individual.circuit) / MAX_CIRCUIT_LENGTH
MAX_CIRCUIT_LENGTH is the expected circuit length for the problem.
'''
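    # A hedged sketch, assuming the individual can be simulated to a state
    # vector (getStateVector() is an illustrative name, not part of this
    # template):
    #   desired = desiredState()
    #   created = individual.getStateVector()
    #   error = abs(1 - abs(np.vdot(desired, created)))
    #   circuitLen = len(individual.circuit) / MAX_CIRCUIT_LENGTH
    #   return (error, circuitLen)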
return (None, None)
# Your main function
if __name__ == "__main__":
'''
You should initialize:
numberOfQubits : number of qubits to be used for the problem
allowedGates : allowed set of gates. Default is [Rz,SX,X,CX]
problemName : output of the problem will be stored at ./outputs/problemName.txt
problemDescription : A small header describing the problem.
fitnessWeights : A tuple describing the weight of each objective. A negative
weight means that objective should be minimized, a positive weight means
that objective should be maximized. For example, if you want to represent
your weights as (error,circuitLen) and want to minimize both with equal
weight you can just define fitnessWeights = (-1.0,-1.0). Only the relative
values of the weights have meaning. BEWARE that they are multiplied and summed
up while calculating the total fitness, so you might want to normalize them.
'''
# Initialize your variables
numberOfQubits = 5
allowedGates = [Rz,SqrtX,X,CNOT]
problemName = "template"
problemDescription = "Template Problem\nnumberOfQubits=" + str(numberOfQubits)+"\nallowedGates="+str(allowedGates)+"\n"
fitnessWeights = (-1.0, -1.0)
# Create the type of the individual
creator.create("FitnessMin", base.Fitness, weights=fitnessWeights)
creator.create("Individual", Candidate, fitness=creator.FitnessMin)
# Initialize your toolbox and population
toolbox = base.Toolbox()
toolbox.register("individual", creator.Individual, numberOfQubits=numberOfQubits, allowedGates=allowedGates)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Register the necessary functions to the toolbox
toolbox.register("mate", crossoverInd, toolbox=toolbox)
toolbox.register("mutate", mutateInd)
toolbox.register("select", tools.selNSGA2)
toolbox.register("selectAndEvolve", selectAndEvolve)
toolbox.register("evaluate", evaluateInd)
# Get it running
NGEN = 100 # For how many generations should the algorithm run ?
POPSIZE = 1000 # How many individuals should be in the population ?
verbose = False # Do you want functions to print out information.
# Note that they will print out a lot of things.
# Initialize a random population
pop = toolbox.population(n=POPSIZE)
# Run the genetic algorithm
geneticAlgorithm(pop, toolbox, NGEN, problemName, problemDescription, epsilon, verbose=verbose) | 44.696203 | 121 | 0.745681 |
ff07e26a80b7fe412bd0ed9a28b94488072a3691 | 28 | py | Python | python/quicktest.py | elsampsa/darknet-python | 6c62a5934082157154087809d67d0ee43384cc7a | [
"MIT"
] | 10 | 2019-05-10T07:26:56.000Z | 2021-04-22T18:59:12.000Z | python/quicktest.py | elsampsa/darknet-python | 6c62a5934082157154087809d67d0ee43384cc7a | [
"MIT"
] | null | null | null | python/quicktest.py | elsampsa/darknet-python | 6c62a5934082157154087809d67d0ee43384cc7a | [
"MIT"
] | 4 | 2018-11-16T00:55:41.000Z | 2020-09-29T03:44:28.000Z | from darknet.core import *
| 9.333333 | 26 | 0.75 |
80f9204e1fbb385e86dfd99daa1ffd07e231c5ec | 4,725 | py | Python | mac_vendor_lookup.py | Terrarasa/mac_vendor_lookup | 994847656bf44ae90c41e73e6f365172280e2465 | [
"Apache-2.0"
] | null | null | null | mac_vendor_lookup.py | Terrarasa/mac_vendor_lookup | 994847656bf44ae90c41e73e6f365172280e2465 | [
"Apache-2.0"
] | null | null | null | mac_vendor_lookup.py | Terrarasa/mac_vendor_lookup | 994847656bf44ae90c41e73e6f365172280e2465 | [
"Apache-2.0"
] | null | null | null | ##-----------------------------------------------##
# MAC Lookup basic functions
# @author Mike https://github.com/terrarasa
# Based on work by Johann Bauer https://github.com/bauerj
# Based on @project https://github.com/bauerj/mac_vendor_lookup
##-----------------------------------------------##
import os, logging, sys, urllib.request, urllib.error
from datetime import datetime
OUI_URL = "https://standards-oui.ieee.org/oui.txt"
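# Example usage (a sketch; the first lookup downloads and caches the IEEE list,
# so it needs network access):
#   vendor = MacLookup().lookup("00:1A:2B:3C:4D:5E")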
class InvalidMacError(Exception):
pass
class VendorNotFoundError(KeyError):
def __init__(self, mac):
self.mac = mac
def __str__(self):
return f"The vendor for MAC {self.mac} could not be found. " \
f"Either it's not registered or the local list is out of date. Try MacLookup().update_vendors()"
class BaseMacLookup(object):
cache_path = f"{os.path.curdir}{os.path.sep}mac-vendors.txt"
@staticmethod
def sanitise(_mac):
mac = _mac.replace(":", "").replace("-", "").replace(".", "").upper()
try:
int(mac, 16)
except ValueError:
raise InvalidMacError("{} contains unexpected character".format(_mac))
if len(mac) > 12:
raise InvalidMacError("{} is not a valid MAC address (too long)".format(_mac))
return mac
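    # e.g. BaseMacLookup.sanitise("00:1a-2b.3c4d5e") -> "001A2B3C4D5E"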
def get_last_updated(self):
vendors_location = self.find_vendors_list()
if vendors_location:
return datetime.fromtimestamp(os.path.getmtime(vendors_location))
def find_vendors_list(self):
possible_locations = [
self.cache_path,
sys.prefix + os.path.sep + "cache" + os.path.sep + "mac-vendors.txt"
]
for location in possible_locations:
if os.path.exists(location):
return location
class MacLookup(BaseMacLookup):
def __init__(self):
self.prefixes = {}
#Download Function
def download_vendors(self):
logging.log(logging.DEBUG, "Downloading MAC vendor list")
# Connect to the IEEE server and obtain MAC vendor list
# Try/except here to handle errors (context manager looks ugly)
try:
response = urllib.request.urlopen(OUI_URL)
#Handle HTTP errors first as it's a subclass of URL error (and otherwise causes everything to be handled as URL errors)
except urllib.error.HTTPError as e:
sys.exit(f"IEEE server returned an error\nUnable to obtain updated MAC vendor list\n{e.code}: {e.reason}")
        except urllib.error.URLError as e:
            sys.exit(f"Unable to connect to the server\n{e.reason}\nCheck your internet connection, firewall, or proxy")
file = response.read().decode("utf-8")
split_request = file.splitlines(False)
with open(self.cache_path, mode='w', encoding="utf-8") as out_file:
""" The OUI file is very long
it contains the Hex and Base16 variants of the MAC identifier along with full addresses of manufacturers
to save on cache space, we cut out the data we need and discard the remainder
Cache file is saved as list of base16:vendor separated by \n
"""
            for line in split_request:
                # Does the line relate to the base16 version of the MAC identifier?
                if not line:
                    continue
                if line.find("(base 16)") == 11:
                    # Split the line into the identifier and vendor
                    prefix, vendor = line.split("(base 16)", 1)
                    # Strip out white space and write to cache file
                    out_file.write(f"{prefix.strip()}:{vendor.strip()}\n")
#Load prefixes into memory
def load_prefixes(self):
logging.log(logging.DEBUG, "Loading prefixes into memory")
#Does the cache already exist or does it need to be downloaded?
        if not os.path.isfile(self.cache_path):
            self.download_vendors()
with open(self.cache_path, mode='r', encoding="utf-8") as f:
split_file = f.read().splitlines(False)
        for line in split_file:
            prefix, vendor = line.split(":", 1)
            self.prefixes[prefix] = vendor
#Lookup a MAC
def lookup(self, mac):
mac = BaseMacLookup.sanitise(mac)
self.load_prefixes()
try:
return self.prefixes[mac[:6]]
except KeyError:
raise VendorNotFoundError(mac) | 40.042373 | 128 | 0.582434 |
15a6ca4381a2d22d3c8f997214722bead4b3c3ca | 27,901 | py | Python | xclim/sdba/utils.py | Ouranosinc/dcvar | 0737c66a36f8969e7a17276990bc7e76f7b410c4 | [
"Apache-2.0"
] | 1 | 2018-08-20T16:36:40.000Z | 2018-08-20T16:36:40.000Z | xclim/sdba/utils.py | Ouranosinc/dcvar | 0737c66a36f8969e7a17276990bc7e76f7b410c4 | [
"Apache-2.0"
] | 3 | 2018-08-23T13:25:47.000Z | 2018-08-23T15:59:45.000Z | xclim/sdba/utils.py | Ouranosinc/hailstorm | 494c850164a9f553eeeba66c6cc90fe398eb2094 | [
"Apache-2.0"
] | null | null | null | # noqa: D205,D400
"""
Statistical Downscaling and Bias Adjustment Utilities
=====================================================
"""
from __future__ import annotations
import itertools
from typing import Callable, Mapping, Union
from warnings import warn
import numpy as np
import xarray as xr
from boltons.funcutils import wraps
from dask import array as dsk
from scipy.interpolate import griddata, interp1d
from scipy.stats import spearmanr
from xclim.core.calendar import _interpolate_doy_calendar # noqa
from xclim.core.utils import ensure_chunk_size
from .base import Grouper, parse_group
from .nbutils import _extrapolate_on_quantiles
MULTIPLICATIVE = "*"
ADDITIVE = "+"
loffsets = {"MS": "14d", "M": "15d", "YS": "181d", "Y": "182d", "QS": "45d", "Q": "46d"}
def _ecdf_1d(x, value):
sx = np.r_[-np.inf, np.sort(x, axis=None)]
return np.searchsorted(sx, value, side="right") / np.sum(~np.isnan(sx))
def map_cdf_1d(x, y, y_value):
"""Return the value in `x` with the same CDF as `y_value` in `y`."""
q = _ecdf_1d(y, y_value)
_func = np.nanquantile
return _func(x, q=q)
def map_cdf(
ds: xr.Dataset,
*,
y_value: xr.DataArray,
dim,
):
"""Return the value in `x` with the same CDF as `y_value` in `y`.
This function is meant to be wrapped in a `Grouper.apply`.
Parameters
----------
ds : xr.Dataset
Variables: x, Values from which to pick,
y, Reference values giving the ranking
y_value : float, array
Value within the support of `y`.
dim : str
Dimension along which to compute quantile.
Returns
-------
array
Quantile of `x` with the same CDF as `y_value` in `y`.
"""
return xr.apply_ufunc(
map_cdf_1d,
ds.x,
ds.y,
input_core_dims=[dim] * 2,
output_core_dims=[["x"]],
vectorize=True,
keep_attrs=True,
kwargs={"y_value": np.atleast_1d(y_value)},
output_dtypes=[ds.x.dtype],
)
def ecdf(x: xr.DataArray, value: float, dim: str = "time") -> xr.DataArray:
"""Return the empirical CDF of a sample at a given value.
Parameters
----------
x : array
Sample.
value : float
The value within the support of `x` for which to compute the CDF value.
dim : str
Dimension name.
Returns
-------
xr.DataArray
Empirical CDF.
"""
return (x <= value).sum(dim) / x.notnull().sum(dim)
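# e.g. with x = xr.DataArray([1.0, 2.0, 3.0, 4.0], dims="time"),
# ecdf(x, 2.5) evaluates to 0.5, since two of the four values are <= 2.5.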
def ensure_longest_doy(func: Callable) -> Callable:
"""Ensure that selected day is the longest day of year for x and y dims."""
@wraps(func)
def _ensure_longest_doy(x, y, *args, **kwargs):
if (
hasattr(x, "dims")
and hasattr(y, "dims")
and "dayofyear" in x.dims
and "dayofyear" in y.dims
and x.dayofyear.max() != y.dayofyear.max()
):
warn(
(
"get_correction received inputs defined on different dayofyear ranges. "
"Interpolating to the longest range. Results could be strange."
),
stacklevel=4,
)
if x.dayofyear.max() < y.dayofyear.max():
x = _interpolate_doy_calendar(
x, int(y.dayofyear.max()), int(y.dayofyear.min())
)
else:
y = _interpolate_doy_calendar(
y, int(x.dayofyear.max()), int(x.dayofyear.min())
)
return func(x, y, *args, **kwargs)
return _ensure_longest_doy
@ensure_longest_doy
def get_correction(x: xr.DataArray, y: xr.DataArray, kind: str) -> xr.DataArray:
"""Return the additive or multiplicative correction/adjustment factors."""
with xr.set_options(keep_attrs=True):
if kind == ADDITIVE:
out = y - x
elif kind == MULTIPLICATIVE:
out = y / x
else:
raise ValueError("kind must be + or *.")
if isinstance(out, xr.DataArray):
out.attrs["kind"] = kind
return out
@ensure_longest_doy
def apply_correction(
x: xr.DataArray, factor: xr.DataArray, kind: str | None = None
) -> xr.DataArray:
"""Apply the additive or multiplicative correction/adjustment factors.
If kind is not given, default to the one stored in the "kind" attribute of factor.
"""
kind = kind or factor.get("kind", None)
with xr.set_options(keep_attrs=True):
if kind == ADDITIVE:
out = x + factor
elif kind == MULTIPLICATIVE:
out = x * factor
else:
raise ValueError
return out
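# Round-trip sketch: apply_correction(x, get_correction(x, y, "+"), "+")
# recovers y, since y = x + (y - x); the multiplicative case is analogous.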
def invert(x: xr.DataArray, kind: str | None = None) -> xr.DataArray:
"""Invert a DataArray either additively (-x) or multiplicatively (1/x).
If kind is not given, default to the one stored in the "kind" attribute of x.
"""
kind = kind or x.get("kind", None)
with xr.set_options(keep_attrs=True):
if kind == ADDITIVE:
return -x
if kind == MULTIPLICATIVE:
return 1 / x # type: ignore
raise ValueError
@parse_group
def broadcast(
grouped: xr.DataArray,
x: xr.DataArray,
*,
group: str | Grouper = "time",
interp: str = "nearest",
sel: Mapping[str, xr.DataArray] | None = None,
) -> xr.DataArray:
"""Broadcast a grouped array back to the same shape as a given array.
Parameters
----------
grouped : xr.DataArray
The grouped array to broadcast like `x`.
x : xr.DataArray
The array to broadcast grouped to.
group : Union[str, Grouper]
Grouping information. See :py:class:`xclim.sdba.base.Grouper` for details.
interp : {'nearest', 'linear', 'cubic'}
The interpolation method to use,
sel : Mapping[str, xr.DataArray]
Mapping of grouped coordinates to x coordinates (other than the grouping one).
Returns
-------
xr.DataArray
"""
if sel is None:
sel = {}
if group.prop != "group" and group.prop not in sel:
sel.update({group.prop: group.get_index(x, interp=interp != "nearest")})
if sel:
# Extract the correct mean factor for each time step.
if interp == "nearest": # Interpolate both the time group and the quantile.
grouped = grouped.sel(sel, method="nearest")
else: # Find quantile for nearest time group and quantile.
# For `.interp` we need to explicitly pass the shared dims
# (see pydata/xarray#4463 and Ouranosinc/xclim#449,567)
sel.update(
{dim: x[dim] for dim in set(grouped.dims).intersection(set(x.dims))}
)
if group.prop != "group":
grouped = add_cyclic_bounds(grouped, group.prop, cyclic_coords=False)
if interp == "cubic" and len(sel.keys()) > 1:
interp = "linear"
warn(
"Broadcasting operations in multiple dimensions can only be done with linear and nearest-neighbor"
" interpolation, not cubic. Using linear."
)
grouped = grouped.interp(sel, method=interp).astype(grouped.dtype)
for var in sel.keys():
if var in grouped.coords and var not in grouped.dims:
grouped = grouped.drop_vars(var)
if group.prop == "group" and "group" in grouped.dims:
grouped = grouped.squeeze("group", drop=True)
return grouped
def equally_spaced_nodes(n: int, eps: float | None = None) -> np.array:
"""Return nodes with `n` equally spaced points within [0, 1], optionally adding two end-points.
Parameters
----------
n : int
Number of equally spaced nodes.
eps : float, optional
Distance from 0 and 1 of added end nodes. If None (default), do not add endpoints.
Returns
-------
np.array
Nodes between 0 and 1. Nodes can be seen as the middle points of `n` equal bins.
Warnings
--------
Passing a small `eps` will effectively clip the scenario to the bounds of the reference
on the historical period in most cases. With normal quantile mapping algorithms, this can
give strange result when the reference does not show as many extremes as the simulation does.
Notes
-----
For n=4, eps=0 : 0---x------x------x------x---1
"""
dq = 1 / n / 2
q = np.linspace(dq, 1 - dq, n)
if eps is None:
return q
return np.insert(np.append(q, 1 - eps), 0, eps)
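# e.g. equally_spaced_nodes(4) -> array([0.125, 0.375, 0.625, 0.875]);
# with eps=1e-6, the end nodes 1e-6 and 1 - 1e-6 are prepended and appended.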
def add_cyclic_bounds(
da: xr.DataArray, att: str, cyclic_coords: bool = True
) -> xr.DataArray | xr.Dataset:
"""Reindex an array to include the last slice at the beginning and the first at the end.
This is done to allow interpolation near the end-points.
Parameters
----------
da : Union[xr.DataArray, xr.Dataset]
An array
att : str
The name of the coordinate to make cyclic
cyclic_coords : bool
If True, the coordinates are made cyclic as well,
if False, the new values are guessed using the same step as their neighbour.
Returns
-------
Union[xr.DataArray, xr.Dataset]
da but with the last element along att prepended and the last one appended.
"""
qmf = da.pad({att: (1, 1)}, mode="wrap")
if not cyclic_coords:
vals = qmf.coords[att].values
diff = da.coords[att].diff(att)
vals[0] = vals[1] - diff[0]
vals[-1] = vals[-2] + diff[-1]
qmf = qmf.assign_coords({att: vals})
qmf[att].attrs.update(da.coords[att].attrs)
return ensure_chunk_size(qmf, **{att: -1})
def _interp_on_quantiles_1D(newx, oldx, oldy, method, extrap):
mask_new = np.isnan(newx)
mask_old = np.isnan(oldy) | np.isnan(oldx)
out = np.full_like(newx, np.NaN, dtype=f"float{oldy.dtype.itemsize * 8}")
if np.all(mask_new) or np.all(mask_old):
warn(
"All-NaN slice encountered in interp_on_quantiles",
category=RuntimeWarning,
)
return out
if extrap == "constant":
fill_value = (
oldy[~np.isnan(oldy)][0],
oldy[~np.isnan(oldy)][-1],
)
else: # extrap == 'nan'
fill_value = np.NaN
out[~mask_new] = interp1d(
oldx[~mask_old],
oldy[~mask_old],
kind=method,
bounds_error=False,
fill_value=fill_value,
)(newx[~mask_new])
return out
def _interp_on_quantiles_2D(newx, newg, oldx, oldy, oldg, method, extrap): # noqa
mask_new = np.isnan(newx) | np.isnan(newg)
mask_old = np.isnan(oldy) | np.isnan(oldx) | np.isnan(oldg)
out = np.full_like(newx, np.NaN, dtype=f"float{oldy.dtype.itemsize * 8}")
if np.all(mask_new) or np.all(mask_old):
warn(
"All-NaN slice encountered in interp_on_quantiles",
category=RuntimeWarning,
)
return out
out[~mask_new] = griddata(
(oldx[~mask_old], oldg[~mask_old]),
oldy[~mask_old],
(newx[~mask_new], newg[~mask_new]),
method=method,
)
if method == "nearest" or extrap != "nan":
# 'nan' extrapolation implicit for cubic and linear interpolation.
out = _extrapolate_on_quantiles(out, oldx, oldg, oldy, newx, newg, extrap)
return out
@parse_group
def interp_on_quantiles(
newx: xr.DataArray,
xq: xr.DataArray,
yq: xr.DataArray,
*,
group: str | Grouper = "time",
method: str = "linear",
extrapolation: str = "constant",
):
"""Interpolate values of yq on new values of x.
Interpolate in 2D with :py:func:`~scipy.interpolate.griddata` if grouping is used,
in 1D otherwise, with :py:class:`~scipy.interpolate.interp1d`. Any NaNs in xq
or yq are removed from the input map. Similarly, NaNs in newx are left NaNs.
Parameters
----------
newx : xr.DataArray
The values at which to evaluate `yq`. If `group` has group information,
`new` should have a coordinate with the same name as the group name
In that case, 2D interpolation is used.
xq, yq : xr.DataArray
Coordinates and values on which to interpolate. The interpolation is done
along the "quantiles" dimension if `group` has no group information.
If it does, interpolation is done in 2D on "quantiles" and on the group dimension.
group : Union[str, Grouper]
The dimension and grouping information. (ex: "time" or "time.month").
Defaults to "time".
method : {'nearest', 'linear', 'cubic'}
The interpolation method.
extrapolation : {'constant', 'nan'}
The extrapolation method used for values of `newx` outside the range of `xq`.
See notes.
Notes
-----
Extrapolation methods:
- 'nan' : Any value of `newx` outside the range of `xq` is set to NaN.
- 'constant' : Values of `newx` smaller than the minimum of `xq` are set to the first
value of `yq` and those larger than the maximum, set to the last one (first and
last non-nan values along the "quantiles" dimension). When the grouping is "time.month",
these limits are linearly interpolated along the month dimension.
"""
dim = group.dim
prop = group.prop
if prop == "group":
if "group" in xq.dims:
xq = xq.squeeze("group", drop=True)
if "group" in yq.dims:
yq = yq.squeeze("group", drop=True)
out = xr.apply_ufunc(
_interp_on_quantiles_1D,
newx,
xq,
yq,
kwargs={"method": method, "extrap": extrapolation},
input_core_dims=[[dim], ["quantiles"], ["quantiles"]],
output_core_dims=[[dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[yq.dtype],
)
return out
# else:
if prop not in xq.dims:
xq = xq.expand_dims({prop: group.get_coordinate()})
if prop not in yq.dims:
yq = yq.expand_dims({prop: group.get_coordinate()})
xq = add_cyclic_bounds(xq, prop, cyclic_coords=False)
yq = add_cyclic_bounds(yq, prop, cyclic_coords=False)
newg = group.get_index(newx, interp=method != "nearest")
oldg = xq[prop].expand_dims(quantiles=xq.coords["quantiles"])
return xr.apply_ufunc(
_interp_on_quantiles_2D,
newx,
newg,
xq,
yq,
oldg,
kwargs={"method": method, "extrap": extrapolation},
input_core_dims=[
[dim],
[dim],
[prop, "quantiles"],
[prop, "quantiles"],
[prop, "quantiles"],
],
output_core_dims=[[dim]],
vectorize=True,
dask="parallelized",
output_dtypes=[yq.dtype],
)
# TODO is this useless?
def rank(da: xr.DataArray, dim: str = "time", pct: bool = False) -> xr.DataArray:
"""Ranks data along a dimension.
Replicates `xr.DataArray.rank` but as a function usable in a Grouper.apply().
Xarray's docstring is below:
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all the values within that
set. Ranks begin at 1, not 0. If pct, computes percentage ranks.
Parameters
----------
da: xr.DataArray
Source array.
dim : str, hashable
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
DataArray
DataArray with the same coordinates and dtype 'float64'.
Notes
-----
The `bottleneck` library is required.
NaNs in the input array are returned as NaNs.
"""
return da.rank(dim, pct=pct)
def pc_matrix(arr: np.ndarray | dsk.Array) -> np.ndarray | dsk.Array:
"""Construct a Principal Component matrix.
This matrix can be used to transform points in arr to principal components
coordinates. Note that this function does not manage NaNs; if a single observation is null, all elements
of the transformation matrix involving that variable will be NaN.
Parameters
----------
arr : numpy.ndarray or dask.array.Array
2D array (M, N) of the M coordinates of N points.
Returns
-------
numpy.ndarray or dask.array.Array
MxM Array of the same type as arr.
"""
# Get appropriate math module
mod = dsk if isinstance(arr, dsk.Array) else np
# Covariance matrix
cov = mod.cov(arr)
# Get eigenvalues and eigenvectors
# There are no such method yet in dask, but we are lucky:
# the SVD decomposition of a symmetric matrix gives the eigen stuff.
# And covariance matrices are by definition symmetric!
# Numpy has a hermitian=True option to accelerate, but not dask...
kwargs = {} if mod is dsk else {"hermitian": True}
eig_vec, eig_vals, _ = mod.linalg.svd(cov, **kwargs)
# The PC matrix is the eigen vectors matrix scaled by the square root of the eigen values
return eig_vec * mod.sqrt(eig_vals)
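# Sanity-check sketch: with B = pc_matrix(arr), B @ B.T reconstructs np.cov(arr),
# because B = V @ sqrt(diag(lambda)) implies B @ B.T = V @ diag(lambda) @ V.T.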
def best_pc_orientation_simple(
R: np.ndarray, Hinv: np.ndarray, val: float = 1000
) -> np.ndarray:
"""Return best orientation vector according to a simple test.
Eigenvectors returned by `pc_matrix` do not have a defined orientation.
Given an inverse transform Hinv and a transform R, this returns the orientation
minimizing the projected distance for a test point far from the origin.
This trick is inspired by the one exposed in [hnilica2017]_.
For each possible orientation vector, the test point is reprojected
and the distance from the original point is computed. The orientation
minimizing that distance is chosen.
See documentation of `sdba.adjustment.PrincipalComponentAdjustment`.
Parameters
----------
R : np.ndarray
MxM Matrix defining the final transformation.
Hinv : np.ndarray
MxM Matrix defining the (inverse) first transformation.
val : float
The coordinate of the test point (same for all axes). It should be much
greater than the largest furthest point in the array used to define B.
Returns
-------
np.ndarray
Mx1 vector of orientation correction (1 or -1).
References
----------
    .. [hnilica2017] Hnilica, J., Hanel, M. and Puš, V. (2017), Multisite bias correction of precipitation data from regional climate models. Int. J. Climatol., 37: 2934-2946. https://doi.org/10.1002/joc.4890
"""
m = R.shape[0]
P = np.diag(val * np.ones(m))
signes = dict(itertools.zip_longest(itertools.product(*[[1, -1]] * m), [None]))
for orient in list(signes.keys()):
# Compute new error
signes[orient] = np.linalg.norm(P - ((orient * R) @ Hinv) @ P)
return np.array(min(signes, key=lambda o: signes[o]))
def best_pc_orientation_full(
R: np.ndarray,
Hinv: np.ndarray,
Rmean: np.ndarray,
Hmean: np.ndarray,
hist: np.ndarray,
) -> np.ndarray:
"""Return best orientation vector for A according to the method of Alavoine et al. (2021, preprint).
Eigenvectors returned by `pc_matrix` do not have a defined orientation.
Given an inverse transform Hinv, a transform R, the actual and target origins
Hmean and Rmean and the matrix of training observations hist, this computes
a scenario for all possible orientations and return the orientation that
maximizes the Spearman correlation coefficient of all variables. The
correlation is computed for each variable individually, then averaged.
This trick is explained in [alavoine2021]_.
See documentation of :py:func:`sdba.adjustment.PrincipalComponentAdjustment`.
Parameters
----------
R : np.ndarray
MxM Matrix defining the final transformation.
Hinv : np.ndarray
MxM Matrix defining the (inverse) first transformation.
Rmean : np.ndarray
M vector defining the target distribution center point.
Hmean : np.ndarray
M vector defining the original distribution center point.
hist : np.ndarray
MxN matrix of all training observations of the M variables/sites.
Returns
-------
np.ndarray
M vector of orientation correction (1 or -1).
References
----------
.. [alavoine2021] Alavoine, M., & Grenier, P. (2021). The distinct problems of physical inconsistency and of multivariate bias potentially involved in the statistical adjustment of climate simulations. https://eartharxiv.org/repository/view/2876/
"""
# All possible orientation vectors
m = R.shape[0]
signes = dict(itertools.zip_longest(itertools.product(*[[1, -1]] * m), [None]))
for orient in list(signes.keys()):
# Calculate scen for hist
scen = np.atleast_2d(Rmean).T + ((orient * R) @ Hinv) @ (
hist - np.atleast_2d(Hmean).T
)
# Correlation for each variable
corr = [spearmanr(hist[i, :], scen[i, :])[0] for i in range(hist.shape[0])]
# Store mean correlation
signes[orient] = np.mean(corr)
# Return orientation that maximizes the correlation
return np.array(max(signes, key=lambda o: signes[o]))
def get_clusters_1d(
data: np.ndarray, u1: float, u2: float
) -> tuple[np.array, np.array, np.array, np.array]:
"""Get clusters of a 1D array.
A cluster is defined as a sequence of values larger than u2 with at least one value larger than u1.
Parameters
----------
data: 1D ndarray
Values to get clusters from.
u1 : float
Extreme value threshold, at least one value in the cluster must exceed this.
u2 : float
Cluster threshold, values above this can be part of a cluster.
Returns
-------
(np.array, np.array, np.array, np.array)
References
----------
`getcluster` of Extremes.jl (read on 2021-04-20) https://github.com/jojal5/Extremes.jl
"""
# Boolean array, True where data is over u2
# We pad with values under u2, so that clusters never start or end at boundaries.
exce = np.concatenate(([u2 - 1], data, [u2 - 1])) > u2
# 1 just before the start of the cluster
# -1 on the last element of the cluster
bounds = np.diff(exce.astype(np.int32))
# We add 1 to get the first element and sub 1 to get the same index as in data
starts = np.where(bounds == 1)[0]
# We sub 1 to get the same index as in data and add 1 to get the element after (for python slicing)
ends = np.where(bounds == -1)[0]
cl_maxpos = []
cl_maxval = []
cl_start = []
cl_end = []
for start, end in zip(starts, ends):
cluster_max = data[start:end].max()
if cluster_max > u1:
cl_maxval.append(cluster_max)
cl_maxpos.append(start + np.argmax(data[start:end]))
cl_start.append(start)
cl_end.append(end - 1)
return (
np.array(cl_start),
np.array(cl_end),
np.array(cl_maxpos),
np.array(cl_maxval),
)
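# e.g. get_clusters_1d(np.array([0.0, 3.0, 1.0, 0.0, 4.0, 0.0]), u1=2, u2=0.5)
# -> (array([1, 4]), array([2, 4]), array([1, 4]), array([3., 4.]))
# i.e. two clusters (indices 1-2 and 4-4) whose maxima 3 and 4 exceed u1.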
def get_clusters(data: xr.DataArray, u1, u2, dim: str = "time") -> xr.Dataset:
"""Get cluster count, maximum and position along a given dim.
See `get_clusters_1d`. Used by `adjustment.ExtremeValues`.
Parameters
----------
data: 1D ndarray
Values to get clusters from.
u1 : float
Extreme value threshold, at least one value in the cluster must exceed this.
u2 : float
Cluster threshold, values above this can be part of a cluster.
dim : str
Dimension name.
Returns
-------
xr.Dataset
With variables,
- `nclusters` : Number of clusters for each point (with `dim` reduced), int
- `start` : First index in the cluster (`dim` reduced, new `cluster`), int
- `end` : Last index in the cluster, inclusive (`dim` reduced, new `cluster`), int
- `maxpos` : Index of the maximal value within the cluster (`dim` reduced, new `cluster`), int
- `maximum` : Maximal value within the cluster (`dim` reduced, new `cluster`), same dtype as data.
For `start`, `end` and `maxpos`, -1 means NaN and should always correspond to a `NaN` in `maximum`.
The length along `cluster` is half the size of "dim", the maximal theoretical number of clusters.
"""
def _get_clusters(arr, u1, u2, N):
st, ed, mp, mv = get_clusters_1d(arr, u1, u2)
count = len(st)
pad = [-1] * (N - count)
return (
np.append(st, pad),
np.append(ed, pad),
np.append(mp, pad),
np.append(mv, [np.NaN] * (N - count)),
count,
)
# The largest possible number of clusters. Ex: odd positions are < u2, even positions are > u1.
N = data[dim].size // 2
starts, ends, maxpos, maxval, nclusters = xr.apply_ufunc(
_get_clusters,
data,
u1,
u2,
input_core_dims=[[dim], [], []],
output_core_dims=[["cluster"], ["cluster"], ["cluster"], ["cluster"], []],
kwargs={"N": N},
dask="parallelized",
vectorize=True,
dask_gufunc_kwargs={
"meta": (
np.array((), dtype=int),
np.array((), dtype=int),
np.array((), dtype=int),
np.array((), dtype=data.dtype),
np.array((), dtype=int),
),
"output_sizes": {"cluster": N},
},
)
ds = xr.Dataset(
{
"start": starts,
"end": ends,
"maxpos": maxpos,
"maximum": maxval,
"nclusters": nclusters,
}
)
return ds
def rand_rot_matrix(
crd: xr.DataArray, num: int = 1, new_dim: str | None = None
) -> xr.DataArray:
r"""Generate random rotation matrices.
Rotation matrices are members of the SO(n) group, where n is the matrix size (`crd.size`).
They can be characterized as orthogonal matrices with determinant 1. A square matrix :math:`R`
is a rotation matrix if and only if :math:`R^t = R^{−1}` and :math:`\mathrm{det} R = 1`.
Parameters
----------
crd: xr.DataArray
1D coordinate DataArray along which the rotation occurs.
The output will be square with the same coordinate replicated,
the second renamed to `new_dim`.
num : int
If larger than 1 (default), the number of matrices to generate, stacked along a "matrices" dimension.
new_dim : str
Name of the new "prime" dimension, defaults to the same name as `crd` + "_prime".
Returns
-------
xr.DataArray
float, NxN if num = 1, numxNxN otherwise, where N is the length of crd.
References
----------
Mezzadri, F. (2006). How to generate random matrices from the classical compact groups. arXiv preprint math-ph/0609050.
"""
if num > 1:
return xr.concat([rand_rot_matrix(crd, num=1) for i in range(num)], "matrices")
N = crd.size
dim = crd.dims[0]
# Rename and rebuild second coordinate : "prime" axis.
if new_dim is None:
new_dim = dim + "_prime"
crd2 = xr.DataArray(crd.values, dims=new_dim, name=new_dim, attrs=crd.attrs)
# Random floats from the standardized normal distribution
Z = np.random.standard_normal((N, N))
# QR decomposition and manipulation from Mezzadri 2006
Q, R = np.linalg.qr(Z)
num = np.diag(R)
denum = np.abs(num)
lam = np.diag(num / denum) # "lambda"
return xr.DataArray(
Q @ lam, dims=(dim, new_dim), coords={dim: crd, new_dim: crd2}
).astype("float32")
def copy_all_attrs(ds: xr.Dataset | xr.DataArray, ref: xr.Dataset | xr.DataArray):
"""Copy all attributes of ds to ref, including attributes of shared coordinates, and variables in the case of Datasets."""
ds.attrs.update(ref.attrs)
extras = ds.variables if isinstance(ds, xr.Dataset) else ds.coords
others = ref.variables if isinstance(ref, xr.Dataset) else ref.coords
for name, var in extras.items():
if name in others:
var.attrs.update(ref[name].attrs)
| 33.69686 | 250 | 0.617505 |
f9df2aa2f332a0c987dfd2e2421bdf8b81331a76 | 529 | py | Python | tests/check_behaviour_with_minimal_settings.py | pakal/django-compat-patcher | 62c1a766807f2be11b03ea481fbb4c9f9e6529ba | [
"MIT"
] | 12 | 2017-05-21T10:52:45.000Z | 2022-03-04T09:52:58.000Z | tests/check_behaviour_with_minimal_settings.py | pakal/django-compat-patcher | 62c1a766807f2be11b03ea481fbb4c9f9e6529ba | [
"MIT"
] | 18 | 2019-04-18T12:42:18.000Z | 2022-02-23T09:38:45.000Z | tests/check_behaviour_with_minimal_settings.py | pakal/django-compat-patcher | 62c1a766807f2be11b03ea481fbb4c9f9e6529ba | [
"MIT"
] | 2 | 2019-05-07T20:28:25.000Z | 2022-03-03T22:13:15.000Z | from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
os.environ["DJANGO_SETTINGS_MODULE"] = "test_project.minimal_settings"
import django
import django_compat_patcher
django_compat_patcher.patch() # fixers which can't apply should be skipped, not crash
django.setup() # idempotent
from django.conf import settings
assert settings.INSTALLED_APPS == [] # we ensure that we haven't mixed django confs
| 26.45 | 87 | 0.773157 |
ccbc3a0e09f687d0e2efd3a61e621a0cf259062c | 690 | py | Python | api/services/q1.py | kazishuvo22/Health-Data-Analytics | d93b863429ee360f58d24dffcac21ebfed29d36c | [
"Apache-2.0"
] | null | null | null | api/services/q1.py | kazishuvo22/Health-Data-Analytics | d93b863429ee360f58d24dffcac21ebfed29d36c | [
"Apache-2.0"
] | null | null | null | api/services/q1.py | kazishuvo22/Health-Data-Analytics | d93b863429ee360f58d24dffcac21ebfed29d36c | [
"Apache-2.0"
] | null | null | null | from flask import jsonify
from flask.views import MethodView
from api.querycontroller.q1 import Query1
class Query1API(MethodView):
def __init__(self):
self.q1 = Query1()
def get(self):
        '''
        Get the data of query 1
        :return: [{
            'division': "Dhaka",
            'total_sales': 1000
        },
        {
            'division': "Rangpur",
            'total_sales': 1000
        }, ...
        ]
        '''
result = self.q1.execute() ## Dataframe
# print(jsonify(result))
return jsonify(result)
# def post(self):
# def delete(self):
| 23 | 48 | 0.478261 |
e07a34a79802da2a53dd138d8316c6a3f4be0fe1 | 235 | py | Python | design_patterns/creational/object_pool/resource_module.py | gsingh79/obi_explorations | bcae6bb4c691b8806fcbb2bc35ea1bac604f7dd9 | [
"Apache-2.0"
] | null | null | null | design_patterns/creational/object_pool/resource_module.py | gsingh79/obi_explorations | bcae6bb4c691b8806fcbb2bc35ea1bac604f7dd9 | [
"Apache-2.0"
] | null | null | null | design_patterns/creational/object_pool/resource_module.py | gsingh79/obi_explorations | bcae6bb4c691b8806fcbb2bc35ea1bac604f7dd9 | [
"Apache-2.0"
] | 1 | 2021-11-15T22:32:40.000Z | 2021-11-15T22:32:40.000Z | class ResourceModule:
_value = 0
def get_value(self):
return self._value
def set_obj_value(self, number):
print('Number', number)
self._value = number
def reset(self):
self._value = 0 | 18.076923 | 36 | 0.595745 |
691f697295c66262fddb35c30e2cece72bfddbd6 | 4,227 | py | Python | flask_resources/resources.py | ppanero/flask-resources | f0b99f7ceb31054ac3c4b1da705e9a009476996c | [
"MIT"
] | null | null | null | flask_resources/resources.py | ppanero/flask-resources | f0b99f7ceb31054ac3c4b1da705e9a009476996c | [
"MIT"
] | null | null | null | flask_resources/resources.py | ppanero/flask-resources | f0b99f7ceb31054ac3c4b1da705e9a009476996c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Flask-Resources is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Resource view."""
from flask import Blueprint
from marshmallow import ValidationError
from .deserializers import JSONDeserializer
from .errors import create_errormap_handler
from .loaders import RequestLoader
from .parsers import HeadersParser, URLArgsParser
from .responses import Response
from .serializers import JSONSerializer
from .views import ItemView, ListView, SingletonView
ITEM_VIEW_SUFFIX = "_item"
LIST_VIEW_SUFFIX = "_list"
class ResourceConfig:
"""Base resource configuration."""
request_loaders = {
"application/json": RequestLoader(deserializer=JSONDeserializer())
}
response_handlers = {"application/json": Response(JSONSerializer())}
item_route = "/resources/<id>"
list_route = "/resources/"
request_url_args_parser = URLArgsParser()
request_headers_parser = HeadersParser()
default_content_type = "application/json"
default_accept_mimetype = "application/json"
class Resource:
"""Resource controller interface."""
default_config = ResourceConfig
def __init__(self, config=None):
"""Initialize the base resource."""
# TODO: The config should be checked to see that it is consistent. See issue #57
self.config = config or self.default_config
self.bp_name = None
# Primary interface
def search(self):
"""Perform a search over the items."""
return [], 200
def create(self):
"""Create an item."""
return {}, 200
def read(self):
"""Read an item."""
return {}, 200
def update(self):
"""Update an item."""
return {}, 200
def partial_update(self):
"""Partial update an item."""
return {}, 200
def delete(self):
"""Delete an item."""
return {}, 200
# Secondary interface
def as_blueprint(self, name, **bp_kwargs):
"""Create blueprint and register rules only for the RecordResource."""
self.bp_name = name
blueprint = Blueprint(name, __name__, **bp_kwargs)
for rule in self.create_url_rules(name):
blueprint.add_url_rule(**rule)
for exc_or_code, error_handler in self.create_errormap_handlers():
blueprint.register_error_handler(exc_or_code, error_handler)
return blueprint
def create_url_rules(self, bp_name):
"""Create url rules."""
return [
{
"rule": self.config.item_route,
"view_func": ItemView.as_view(
name=f"{bp_name}",
resource=self,
),
}
]
def create_errormap_handlers(self):
"""Create error handlers."""
error_map = getattr(self.config, "error_map", {})
return error_map.items()
class CollectionResource(Resource):
"""CollectionResource."""
def update_all(self):
"""Delete an item."""
return [], 200
def delete_all(self):
"""Delete an item."""
return [], 200
def create_url_rules(self, bp_name):
"""Create url rules."""
return [
{
"rule": self.config.item_route,
"view_func": ItemView.as_view(
name=f"{bp_name}{ITEM_VIEW_SUFFIX}",
resource=self,
),
},
{
"rule": self.config.list_route,
"view_func": ListView.as_view(
name=f"{bp_name}{LIST_VIEW_SUFFIX}",
resource=self,
),
},
]
class SingletonResource(Resource):
"""SingletonResource."""
def create_url_rules(self, bp_name):
"""Create url rules."""
return [
{
"rule": self.config.list_route,
"view_func": SingletonView.as_view(
name=f"{bp_name}",
resource=self,
),
}
]
| 27.448052 | 88 | 0.585758 |
31374d5208258f5bd270f1286029fd440dbcda8d | 7,179 | py | Python | nntoolbox/sequence/components/cells/multiplicative.py | nhatsmrt/nn-toolbox | 689b9924d3c88a433f8f350b89c13a878ac7d7c3 | [
"Apache-2.0"
] | 16 | 2019-07-11T15:57:41.000Z | 2020-09-08T13:52:45.000Z | nntoolbox/sequence/components/cells/multiplicative.py | nhatsmrt/nn-toolbox | 689b9924d3c88a433f8f350b89c13a878ac7d7c3 | [
"Apache-2.0"
] | 1 | 2022-01-18T22:21:57.000Z | 2022-01-18T22:21:57.000Z | nntoolbox/sequence/components/cells/multiplicative.py | nhatsmrt/nn-toolbox | 689b9924d3c88a433f8f350b89c13a878ac7d7c3 | [
"Apache-2.0"
] | 1 | 2019-08-07T10:07:09.000Z | 2019-08-07T10:07:09.000Z | import torch
from torch import nn, Tensor
from typing import Tuple, Optional
from ....init import sqrt_uniform_init
from torch import jit
__all__ = ['MultiplicativeRNNCell', 'MILSTMCell', 'MIGRUCell']
class MultiplicativeRNNCell(jit.ScriptModule):
"""
    Multiplicative RNN. Makes it easier for the input to change the hidden state by introducing a multiplicative interaction:
m_t = (W_mx x_t) * (W_mh h_{t-1})
h_t = tanh(W_hm m_t + W_hx x_t)
Note that the implementation is based on the re-formulation from the second reference.
References:
Ilya Sutskever, James Martens, and Geoffrey Hinton. "Generating Text with Recurrent Neural Networks."
https://www.cs.utoronto.ca/~ilya/pubs/2011/LANG-RNN.pdf
Ben Krause, Iain Murray, Steve Renals, and Liang Lu. "MULTIPLICATIVE LSTM FOR SEQUENCE MODELLING."
https://arxiv.org/pdf/1609.07959.pdf
"""
__constants__ = ['intermediate_size', 'hidden_size', 'bias']
def __init__(self, input_size: int, hidden_size: int, intermediate_size: int, bias: bool=True):
super().__init__()
self.hidden_size, self.intermediate_size, self.bias = hidden_size, intermediate_size, bias
self.weight_i = nn.Parameter(torch.rand(intermediate_size + hidden_size, input_size))
self.weight_h = nn.Parameter(torch.rand(intermediate_size, hidden_size))
self.weight_m = nn.Parameter(torch.rand(hidden_size, intermediate_size))
if bias:
self.bias_i = nn.Parameter(torch.rand(intermediate_size + hidden_size))
self.bias_h = nn.Parameter(torch.rand(intermediate_size))
self.bias_op = nn.Parameter(torch.rand(hidden_size))
sqrt_uniform_init(self)
@jit.script_method
def forward(self, input: Tensor, state: Optional[Tensor]=None) -> Tensor:
if state is None: state = torch.zeros((input.shape[0], self.hidden_size)).to(input.device).to(input.dtype)
input_trans = torch.matmul(input, self.weight_i.t())
if self.bias: input_trans += self.bias_i
intermediate = input_trans[:, :self.intermediate_size] * torch.matmul(state, self.weight_h.t())
if self.bias: intermediate += self.bias_h
op = torch.matmul(intermediate, self.weight_m.t()) + input_trans[:, self.intermediate_size:]
if self.bias: op += self.bias_op
return torch.tanh(op)
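# Usage sketch (shapes only): with a batch of 4 examples,
#   cell = MultiplicativeRNNCell(input_size=10, hidden_size=20, intermediate_size=16)
#   h = cell(torch.randn(4, 10))  # h.shape == (4, 20); pass h back in as `state`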
class MILSTMCell(jit.ScriptModule):
"""
Multiplicative Integration LSTM Cell
References:
Yuhuai Wu, Saizheng Zhang, Ying Zhang, Yoshua Bengio, Ruslan Salakhutdinov.
"On Multiplicative Integration with Recurrent Neural Networks."
https://arxiv.org/abs/1606.06630
The PyTorch Team. "Optimizing CUDA Recurrent Neural Networks with TorchScript."
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
__constants__ = ['hidden_size']
def __init__(self, input_size: int, hidden_size: int):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ih = nn.Parameter(torch.randn(4 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.randn(4 * hidden_size, hidden_size))
self.bias_ih = nn.Parameter(torch.randn(4 * hidden_size))
self.bias_hh = nn.Parameter(torch.randn(4 * hidden_size))
self.bias_mult = nn.Parameter(torch.randn(4 * hidden_size))
self.bias_ind = nn.Parameter(torch.randn(4 * hidden_size))
sqrt_uniform_init(self)
@jit.script_method
def forward(
self, input: Tensor, state: Optional[Tuple[Tensor, Tensor]]=None
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
if state is None: state = (
torch.zeros((input.shape[0], self.hidden_size)).to(input.device).to(input.dtype),
torch.zeros((input.shape[0], self.hidden_size)).to(input.device).to(input.dtype)
)
hx, cx = state
input_transform = torch.mm(input, self.weight_ih.t())
hidden_transform = torch.mm(hx, self.weight_hh.t())
gates = (
input_transform * hidden_transform * self.bias_mult
+ input_transform * self.bias_ih
+ hidden_transform * self.bias_hh
+ self.bias_ind
)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate, forgetgate, cellgate, outgate = \
torch.sigmoid(ingate), torch.sigmoid(forgetgate), torch.tanh(cellgate), torch.sigmoid(outgate)
cy = forgetgate * cx + ingate * cellgate
hy = outgate * torch.tanh(cy)
return hy, (hy, cy)
class MIGRUCell(jit.ScriptModule):
"""
Multiplicative Integration GRU Cell
References:
Yuhuai Wu, Saizheng Zhang, Ying Zhang, Yoshua Bengio, Ruslan Salakhutdinov.
"On Multiplicative Integration with Recurrent Neural Networks."
https://arxiv.org/abs/1606.06630
The PyTorch Team. "Optimizing CUDA Recurrent Neural Networks with TorchScript."
https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
"""
__constants__ = ['hidden_size']
def __init__(self, input_size: int, hidden_size: int):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ig = nn.Parameter(torch.randn(2 * hidden_size, input_size))
self.weight_hg = nn.Parameter(torch.randn(2 * hidden_size, hidden_size))
self.bias_ig = nn.Parameter(torch.randn(2 * hidden_size))
self.bias_hg = nn.Parameter(torch.randn(2 * hidden_size))
self.bias_mult_g = nn.Parameter(torch.randn(2 * hidden_size))
self.bias_ind_g = nn.Parameter(torch.randn(2 * hidden_size))
self.weight_ic = nn.Parameter(torch.randn(hidden_size, input_size))
self.weight_hc = nn.Parameter(torch.randn(hidden_size, hidden_size))
self.bias_ic = nn.Parameter(torch.randn(hidden_size))
self.bias_hc = nn.Parameter(torch.randn(hidden_size))
self.bias_mult_c = nn.Parameter(torch.randn(hidden_size))
self.bias_ind_c = nn.Parameter(torch.randn(hidden_size))
sqrt_uniform_init(self)
@jit.script_method
def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:
if hx is None: hx = torch.zeros((input.shape[0], self.hidden_size)).to(input.device).to(input.dtype)
input_transform = torch.mm(input, self.weight_ig.t())
hidden_transform = torch.mm(hx, self.weight_hg.t())
gates = (
input_transform * hidden_transform * self.bias_mult_g
+ input_transform * self.bias_ig
+ hidden_transform * self.bias_hg
+ self.bias_ind_g
)
update_gate, reset_gate = gates.chunk(2, 1)
input_transform_c = torch.mm(input, self.weight_ic.t())
hidden_transform_c = torch.mm(reset_gate * hx, self.weight_hc.t())
candidate = torch.tanh(
input_transform_c * hidden_transform_c * self.bias_mult_c
+ input_transform_c * self.bias_ic
+ hidden_transform_c * self.bias_hc
+ self.bias_ind_c
)
return (1.0 - update_gate) * hx + update_gate * candidate
| 41.022857 | 115 | 0.667502 |
40e930d5c7e90e135b493745a8b79515798613c3 | 4,232 | py | Python | pyASH/exceptions.py | dhrone/pyASH | 85da060d135fb8be6475d58d4dc33acf88a3a9b2 | [
"MIT"
] | null | null | null | pyASH/exceptions.py | dhrone/pyASH | 85da060d135fb8be6475d58d4dc33acf88a3a9b2 | [
"MIT"
] | null | null | null | pyASH/exceptions.py | dhrone/pyASH | 85da060d135fb8be6475d58d4dc33acf88a3a9b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 by dhrone. All Rights Reserved.
#
# Credit for the concepts behind this module goes entirely to Matěj Hlaváček (https://github.com/mathead)
# NOTE (assumption): Temperature and Duration are used below but were not
# imported here; they are assumed to live in pyASH's utility module.
from .utility import Temperature, Duration
class pyASH_EXCEPTION(Exception):
def __init__(self, message, *args, **kwargs):
if hasattr(self, 'payload'):
self.payload['type'] = type(self).__name__
self.payload['message'] = message
else:
self.payload = { 'type': type(self).__name__, 'message': message }
super(pyASH_EXCEPTION, self).__init__(message, *args, **kwargs)
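# Payload sketch: e.g. raising ENDPOINT_LOW_POWER("Battery low", percentageState=5)
# (defined below) produces
#   payload == {'percentageState': 5, 'type': 'ENDPOINT_LOW_POWER', 'message': 'Battery low'}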
class ACCEPT_GRANT_FAILED(pyASH_EXCEPTION):
pass
class BRIDGE_UNREACHABLE(pyASH_EXCEPTION):
pass
class ENDPOINT_BUSY(pyASH_EXCEPTION):
pass
class ENDPOINT_LOW_POWER(pyASH_EXCEPTION):
def __init__(self, message, percentageState=None, *args, **kwargs):
if percentageState:
self.payload = { 'percentageState': percentageState }
super(ENDPOINT_LOW_POWER, self).__init__(message, *args, **kwargs)
class ENDPOINT_UNREACHABLE(pyASH_EXCEPTION):
pass
class EXPIRED_AUTHORIZATION_CREDENTIAL(pyASH_EXCEPTION):
pass
class FIRMWARE_OUT_OF_DATE(pyASH_EXCEPTION):
pass
class HARDWARE_MALFUNCTION(pyASH_EXCEPTION):
pass
class INTERNAL_ERROR(pyASH_EXCEPTION):
def __init__(self, message, *args, **kwargs):
super(INTERNAL_ERROR, self).__init__(message, *args, **kwargs)
self.payload['type'] = 'INTERNAL_ERROR'
class INVALID_AUTHORIZATION_CREDENTIAL(pyASH_EXCEPTION):
pass
class INVALID_DIRECTIVE(pyASH_EXCEPTION):
pass
class INVALID_VALUE(pyASH_EXCEPTION):
pass
class NO_SUCH_ENDPOINT(pyASH_EXCEPTION):
pass
class NOT_SUPPORTED_IN_CURRENT_MODE(pyASH_EXCEPTION):
def __init__(self, message, mode, *args, **kwargs):
self.payload = { 'currentDeviceMode': mode }
super(NOT_SUPPORTED_IN_CURRENT_MODE, self).__init__(message, *args, **kwargs)
class RATE_LIMIT_EXCEEDED(pyASH_EXCEPTION):
pass
class TEMPERATURE_VALUE_OUT_OF_RANGE(pyASH_EXCEPTION):
def __init__(self, message, minv=None, maxv=None, scale='FAHRENHEIT', *args, **kwargs):
minv = minv if isinstance(minv, Temperature) or not minv else Temperature(minv, scale)
maxv = maxv if isinstance(maxv, Temperature) or not maxv else Temperature(maxv, scale)
if minv and maxv:
self.payload= {
'validRange': {
'minimumValue': minv.json,
'maximumValue': maxv.json
}
}
super(TEMPERATURE_VALUE_OUT_OF_RANGE, self).__init__(message, *args, **kwargs)
class VALUE_OUT_OF_RANGE(pyASH_EXCEPTION):
def __init__(self, message, minv=None, maxv=None, *args, **kwargs):
if minv and maxv:
self.payload = {
'validRange': {
'minimumValue': minv,
'maximumValue': maxv
}
}
super(VALUE_OUT_OF_RANGE, self).__init__(message, *args, **kwargs)
# Additional Exceptions
# These are not directly related to the ASH errors so get reported as INTERNAL_ERROR
class OAUTH2_EXCEPTION(INTERNAL_ERROR):
pass
class USER_NOT_FOUND_EXCEPTION(INTERNAL_ERROR):
pass
class MISCELLANIOUS_EXCEPTION(INTERNAL_ERROR):
pass
#############################################################
# In Alexa Smart Home documentation but missing from schema #
#############################################################
class ALREADY_IN_OPERATION(pyASH_EXCEPTION):
pass
class NOT_IN_OPERATION(pyASH_EXCEPTION):
pass
class POWER_LEVEL_NOT_SUPPORTED(pyASH_EXCEPTION):
pass
# COOKING Interface specific Exceptions
class DOOR_OPEN(pyASH_EXCEPTION):
pass
class DOOR_CLOSED_TOO_LONG(pyASH_EXCEPTION):
pass
class COOK_DURATION_TOO_LONG(pyASH_EXCEPTION):
def __init__(self, message, maxCookTime=None, *args, **kwargs):
self.payload = { 'maxCookTime': Duration(maxCookTime) }
super(COOK_DURATION_TOO_LONG, self).__init__(message, *args, **kwargs)
class REMOTE_START_NOT_SUPPORTED(pyASH_EXCEPTION):
pass
class REMOTE_START_DISABLED(pyASH_EXCEPTION):
pass
# Video interface specific exceptions
class NOT_SUBSCRIBED(pyASH_EXCEPTION):
pass
| 30.228571 | 103 | 0.678166 |
0107e628367d9dc9f4a06dbd5ccd917717068c2b | 990 | py | Python | bangoo/navigation/urlresolvers.py | slapec/bangoo | 34facf122f15943a4368d5c2f45fe178ff01edaa | [
"MIT"
] | null | null | null | bangoo/navigation/urlresolvers.py | slapec/bangoo | 34facf122f15943a4368d5c2f45fe178ff01edaa | [
"MIT"
] | null | null | null | bangoo/navigation/urlresolvers.py | slapec/bangoo | 34facf122f15943a4368d5c2f45fe178ff01edaa | [
"MIT"
] | null | null | null | from .models import Menu
from django.core.urlresolvers import reverse as rev, NoReverseMatch
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
url = None
try:
### Try static url first
url = rev(viewname=viewname, args=args, kwargs=kwargs, prefix=prefix, current_app=current_app)
except NoReverseMatch:
urlconf = kwargs.pop('urlconf', '') ## If the urlconf is explicitly setted, then try that first
if urlconf:
urlconfs = [urlconf] + list(Menu.objects.exclude(urlconf=urlconf).values_list("urlconf", flat=True).distinct())
else:
urlconfs = Menu.objects.all().values_list("urlconf", flat=True).distinct()
for uconf in urlconfs:
try:
url = rev(viewname, uconf, args, kwargs, prefix, current_app)
break
except NoReverseMatch:
pass
if not url:
raise NoReverseMatch()
return url
| 38.076923 | 123 | 0.634343 |
2186d7e015c631ff7cac875c8673008ab5d72e46 | 875 | py | Python | processing.py | dremovd/data-fusion-1-baseline | 0e4b2055b49bd52b08ccd5ac3562e416fa4d17ae | [
"MIT"
] | 41 | 2021-01-30T18:56:28.000Z | 2021-04-02T13:51:17.000Z | processing.py | dremovd/data-fusion-1-baseline | 0e4b2055b49bd52b08ccd5ac3562e416fa4d17ae | [
"MIT"
] | null | null | null | processing.py | dremovd/data-fusion-1-baseline | 0e4b2055b49bd52b08ccd5ac3562e416fa4d17ae | [
"MIT"
] | 6 | 2021-01-30T23:04:58.000Z | 2021-11-18T09:58:50.000Z | import numpy as np
import pandas as pd
def load_dataset(dataset_name, drop_unlabeled=True):
data = pd.read_parquet(dataset_name)
data['weight'] = 1
receipt_item_count = data.groupby('receipt_id').agg(
{'weight': 'count'}).rename({'weight': 'receipt_item_count'}, axis=1)
data = data.merge(receipt_item_count, on='receipt_id')
if drop_unlabeled:
data = data[data.category_id != -1]
return data
def unique_item_name(data):
grouping = {
'receipt_dayofweek': 'first',
'item_nds_rate': 'first',
'receipt_item_count': 'first',
'item_quantity': 'first',
'item_price': 'first',
'receipt_id': 'first',
}
if 'category_id' in data.columns:
grouping['category_id'] = 'first'
data_unique = data.groupby(['item_name']).agg(grouping).reset_index()
return data_unique
| 27.34375 | 77 | 0.64 |
fe979ba9631aec07b8f8fddaf035b7cad26cec47 | 6,590 | py | Python | scripts/addRing.py | ekraka/EDHB | a6aa3ab4f41448894b821c2a0944d37d441a7d96 | [
"MIT"
] | 1 | 2021-03-07T17:19:09.000Z | 2021-03-07T17:19:09.000Z | scripts/addRing.py | ekraka/EDHB | a6aa3ab4f41448894b821c2a0944d37d441a7d96 | [
"MIT"
] | null | null | null | scripts/addRing.py | ekraka/EDHB | a6aa3ab4f41448894b821c2a0944d37d441a7d96 | [
"MIT"
] | 1 | 2021-03-04T18:44:33.000Z | 2021-03-04T18:44:33.000Z | import sys
import hb_connections as hb
import os
def get_ids(path,suffix='_ah'):
f=open(path,'r')
lines=f.readlines()
f.close()
i=-1
i2=0
ref=0
lm={}
length=0
kr_st=None
for line in lines:
i+=1
if ref==5:
break
if ref==1 and not kr_st:
kr=line.strip().split()
if kr[0]!='DonarSymbol':
kr_st=(43,67)
else:
kr_st=(11,36)
if 'File format' in line:
ref+=1
if len(line.strip().split())==0 and ref>0:
ref+=1
if ref==2:
lines[i-1]=' '.join(lines[i-1].strip().split())+', IntramolecularRing(size) \n'
if ref==2 or ref==3:
ref+=1
if ref==4:
s,e=kr_st # 10,28
#print line[s:e]
l=line[s:e].split(',')
st=l
lm[str(i2+1)+'.']=st
#lines[i]=lines[i].strip()+' '+lmodes[i2]+'\n'
i2+=1
return lm
def addRing(path,p_id):
f=open(path,'r')
lines=f.readlines()
f.close()
i=-1
i2=0
ref=0
lm={}
length=0
for line in lines:
i+=1
if ref==5:
break
if 'File format' in line:
ref+=1
if len(line.strip().split())==0 and ref>0:
ref+=1
if ref==2:
lines[i-1]=' '.join(lines[i-1].strip().split())+', IntramolecularRing(size) \n'
if ref==2 or ref==3:
ref+=1
if ref==4:
if i2==0:
if length<len(line):
length=len(line)
lm[str(i2+1)+'.']=p_id[str(i2+1)+'.']
i2+=1
j=0
length+=1
for line in lines:
if len(line.strip().split())==0:
j+=1
continue
if line.strip().split()[0] in lm:
string=lm[line.strip().split()[0]]
lines[j]=lines[j][:-1]+' '*(length-len(lines[j])+2)+string+'\n'
j+=1
g=open(path,'w')
g.write(''.join(lines))
g.close()
return lm
def job(path):
dic=get_ids(path,suffix='_ah')
p_id={}
con=hb.connections(path[:-4]+'.xyz')
for i in dic:
a,b,c=dic[i]
#print a,b,c
refe_lis=con.bfs(int(a)-1,int(c)-1)
if len(refe_lis)==0:
p_id[i]='None'
elif 0 and len(refe_lis)==4:
# for writing dihedral in bracket for 5 memberd rings
p_id[i]='I('+str(len(refe_lis)+1)+')'+'('+str(con.dihedral(refe_lis))+')'
else:
p_id[i]='I('+str(len(refe_lis)+1)+')'
return addRing(path,p_id)
def make_input_pucker(lis,d,count):
st0 = ''
for i in lis:
st0+=' '.join(list(map(str,d[i][1:])))+'\n'
f = open('P'+str(d[lis[0]][0])+'-'+str(d[lis[0]][0])+'_'+str(count)+'.dat','w')
f.write(st0)
f.close()
def test(path):
def lmode_id(path):
f=open(path,'r')
lines=f.readlines()
f.close()
i=-1
i2=0
ref=0
lm={}
length=0
kr_st=None
for line in lines:
i+=1
if ref==5:
break
if ref==1 and not kr_st:
kr=line.strip().split()
if kr[0]!='DonarSymbol':
kr_st=(43,67)
else:
kr_st=(11,36)
if 'File format' in line:
ref+=1
if len(line.strip().split())==0 and ref>0:
ref+=1
if ref==2:
lines[i-1]=' '.join(lines[i-1].strip().split())+', IntramolecularRing(size)(dihedral for C5 type) \n'
if ref==2 or ref==3:
ref+=1
if ref==4:
s,e=kr_st
#print line[s:e]
l=line.strip().split()
st=l[-3]
lm[str(i2+1)+'.']=st
#lines[i]=lines[i].strip()+' '+lmodes[i2]+'\n'
i2+=1
return lm
dic=get_ids(path,suffix='_ah')
l_d=lmode_id(path)
#print l_d
p_id={}
con=hb.connections(path[:-4]+'.xyz')
st_d = {}
count = 0
for i in dic:
a,b,c=dic[i]
#print a,b,c
a,b,c = list(map(int,[a,b,c]))
refe_lis=con.bfs(int(a)-1,int(c)-1)
dist=con.distance(int(a),int(c))
hbl = con.distance(int(b),int(c))
if len(refe_lis)==0:
p_id[i]='None'
elif len(refe_lis)==4:
make_input_pucker(refe_lis+[b],con.d,count)
count+=1
p_id[i]='I('+str(len(refe_lis)+1)+')'+'('+str(con.dihedral(refe_lis))+')'
bond = con.atom_name(int(a))+'-'+con.atom_name(int(c))
if bond in st_d:
st_d[bond]+=bond+' '+str(dist)+' '+str(con.dihedral(refe_lis))+' '+l_d[i]+' '+str(hbl)+' '+str(a)+' '+str(b)+' '+str(c)+'\n'
else:
st_d[bond] = bond+' '+str(dist)+' '+str(con.dihedral(refe_lis))+' '+l_d[i]+' '+str(hbl)+' '+str(a)+' '+str(b)+' '+str(c)+'\n'
else:
p_id[i]='I('+str(len(refe_lis)+1)+')'
return st_d
if __name__=='__main__':
d_c = {}
for i in os.listdir('.'):
if i[-4:]=='.txt':
print (i)
d = test(i)
#print d
for i in d:
if i in d_c:
d_c[i]+=d[i]
else:
d_c[i]=d[i]
for i in d_c:
print (d_c[i]+'\n\n')
| 30.651163 | 145 | 0.355994 |
968f3fa973045054b20d6f040a284809161df57b | 6,343 | py | Python | CTL/causal_tree/sig_diff/sig.py | Youngyi/CTL | 3dc578b17adf7ddc4eecb5630ed96b3693c53f68 | [
"MIT"
] | 38 | 2019-08-03T08:06:44.000Z | 2022-03-27T17:24:35.000Z | CTL/causal_tree/sig_diff/sig.py | springbarley/CTL | 3dc578b17adf7ddc4eecb5630ed96b3693c53f68 | [
"MIT"
] | 6 | 2019-09-19T02:43:43.000Z | 2021-07-29T03:40:23.000Z | CTL/causal_tree/sig_diff/sig.py | springbarley/CTL | 3dc578b17adf7ddc4eecb5630ed96b3693c53f68 | [
"MIT"
] | 6 | 2019-10-24T18:31:36.000Z | 2022-02-25T01:31:14.000Z | # from CTL.causal_tree.util import *
try:
from CTL.causal_tree.util_c import *
except:
from CTL.causal_tree.util import *
from CTL.causal_tree.ct import *
import numpy as np
from scipy.stats import ttest_ind_from_stats
class SigNode(CTNode):
def __init__(self, p_val=1.0, effect=0.0, node_depth=0, control_mean=0.0, treatment_mean=0.0, col=-1, value=-1,
is_leaf=False, leaf_num=-1, num_samples=0.0, obj=0.0):
super().__init__()
# not tree specific features (most likely added at creation)
self.p_val = p_val
self.effect = effect
self.node_depth = node_depth
self.control_mean = control_mean
self.treatment_mean = treatment_mean
# during tree building
self.obj = obj
self.num_samples = num_samples
# after building tree
self.col = col
self.value = value
self.is_leaf = is_leaf
self.leaf_num = leaf_num
self.true_branch = None
self.false_branch = None
# after calling functions
self.column_name = ""
self.decision = ""
class SigTree(CausalTree):
def __init__(self, alpha=0.05, max_depth=-1, min_size=2, seed=724, max_values=None, verbose=False):
super().__init__()
        self.alpha = alpha
self.max_depth = max_depth
self.min_size = min_size
self.seed = seed
self.max_values = max_values
self.verbose = verbose
self.max_effect = 0.0
self.min_effect = 0.0
self.features = None
self.root = SigNode()
@abstractmethod
def fit(self, x, y, t):
pass
def _eval_util(self, train_y, train_t):
var_t, var_c = variance(train_y, train_t)
std = np.sqrt(var_t) + np.sqrt(var_c)
effect = ace(train_y, train_t)
return effect, std
def _eval(self, y_train1, t_train1, y_train2, t_train2):
total1 = y_train1.shape[0]
total2 = y_train2.shape[0]
return_val = (1, 1)
if total1 < 1 or total2 < 1:
return return_val
effect1, std1 = self._eval_util(y_train1, t_train1)
effect2, std2 = self._eval_util(y_train2, t_train2)
stat, p_val = ttest_ind_from_stats(effect1, std1, total1, effect2, std2, total2)
return stat, p_val
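    # A small p_val from this two-sample t-test on summary statistics
    # (scipy's ttest_ind_from_stats) means the estimated treatment effects of
    # the two partitions differ significantly, which justifies keeping the split.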
def predict(self, x):
def _predict(node: SigNode, observation):
if node.is_leaf:
return node.effect
else:
v = observation[node.col]
if v >= node.value:
branch = node.true_branch
else:
branch = node.false_branch
return _predict(branch, observation)
if len(x.shape) == 1:
prediction = _predict(self.root, x)
return prediction
num_test = x.shape[0]
prediction = np.zeros(num_test)
for i in range(num_test):
test_example = x[i, :]
prediction[i] = _predict(self.root, test_example)
return prediction
def get_groups(self, x):
def _get_group(node: SigNode, observation):
if node.is_leaf:
return node.leaf_num
else:
v = observation[node.col]
if v >= node.value:
branch = node.true_branch
else:
branch = node.false_branch
return _get_group(branch, observation)
if len(x.shape) == 1:
return _get_group(self.root, x)
num_test = x.shape[0]
leaf_results = np.zeros(num_test)
for i in range(num_test):
test_example = x[i, :]
leaf_results[i] = _get_group(self.root, test_example)
return leaf_results
def get_features(self, x):
def _get_features(node: SigNode, observation, features):
if node.is_leaf:
return features
else:
v = observation[node.col]
if v >= node.value:
branch = node.true_branch
else:
branch = node.false_branch
features.append(node.decision)
return _get_features(branch, observation, features)
if len(x.shape) == 1:
features = []
return _get_features(self.root, x, features)
num_test = x.shape[0]
leaf_features = []
for i in range(num_test):
features = []
test_example = x[i, :]
leaf_features.append(_get_features(self.root, test_example, features))
return leaf_features
def prune(self, alpha=0.05):
def _prune(node: SigNode):
if node.true_branch is None or node.false_branch is None:
return
# recursive call for each branch
if not node.true_branch.is_leaf:
_prune(node.true_branch)
if not node.false_branch.is_leaf:
_prune(node.false_branch)
# merge leaves (potentially)
if node.true_branch.is_leaf and node.false_branch.is_leaf:
# Get branches
tb = node.true_branch
fb = node.false_branch
tb_pval = tb.p_val
fb_pval = fb.p_val
if tb_pval > alpha and fb_pval > alpha:
node.leaf_num = node.true_branch.leaf_num
node.true_branch = None
node.false_branch = None
self.num_leaves = self.num_leaves - 1
node.is_leaf = True
# ----------------------------------------------------------------
# Something about obj/mse? if that is added
#
# - can do a self function so that tree references itself/it's own type of node?
# ----------------------------------------------------------------
if tb.node_depth == self.tree_depth:
self.tree_depth = self.tree_depth - 1
_prune(self.root)
def get_triggers(self, x):
pass
def save(self, filename):
import pickle as pkl
check_dir(filename)
with open(filename, "wb") as file:
pkl.dump(self, file) | 29.919811 | 115 | 0.539335 |
1f2eb09d98c9984b79308aaee685cb79f8ed5953 | 3,840 | py | Python | docs/conf.py | ihsan1852/edge | 6f15a5a3f19f6b7dfd8ba0641ae5deddbb7d145d | [
"MIT"
] | 56 | 2021-01-22T17:05:21.000Z | 2022-03-31T11:09:56.000Z | docs/conf.py | ihsan1852/edge | 6f15a5a3f19f6b7dfd8ba0641ae5deddbb7d145d | [
"MIT"
] | null | null | null | docs/conf.py | ihsan1852/edge | 6f15a5a3f19f6b7dfd8ba0641ae5deddbb7d145d | [
"MIT"
] | 7 | 2021-05-25T00:31:36.000Z | 2022-02-01T16:52:42.000Z | import os
import sys
from datetime import datetime
import alabaster
sys.path.insert(0, os.path.abspath('../leaf'))
# -- Project information -----------------------------------------------------
project = "LEAF"
author = "Philipp Wiesner"
copyright = f"{datetime.now().year} {author}"
# The short X.Y version
try:
import importlib.metadata
version = importlib.metadata.version('leafsim')
except ModuleNotFoundError: # Python <3.8
import pkg_resources
version = pkg_resources.get_distribution('leaf').version
# The full version, including alpha/beta/rc tags
release = version
html_theme_path = [alabaster.get_path()]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"alabaster",
]
html_static_path = ["_static"]
html_theme_options = {
"logo": "logo.svg",
"logo_name": True,
"logo_text_align": "center",
"description": "Simulator for modeling energy consumption in cloud, fog and edge computing environments.",
"github_user": "dos-group",
"github_repo": "leaf",
"github_banner": True,
"github_button": True,
#"travis_button": True,
#"codecov_button": True,
#"tidelift_url": "https://tidelift.com/subscription/pkg/pypi-fabric?utm_source=pypi-fabric&utm_medium=referral&utm_campaign=docs",
#"analytics_id": "UA-18486793-1",
"link": "#3782BE",
"link_hover": "#3782BE",
# Wide enough that 80-col code snippets aren't truncated on default font
# settings (at least for bitprophet's Chrome-on-OSX-Yosemite setup)
"page_width": "1024px",
}
html_theme = 'alabaster'
# Both the class’ and the __init__ method’s docstring are concatenated and inserted.
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'LEAF.tex', 'LEAF Documentation',
     author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'LEAF', 'LEAF Documentation', [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LEAF', 'LEAF Documentation', author, 'leaf', '-', 'Simulator'),
]
| 29.767442 | 134 | 0.665625 |
c83531fe55fad97a3eb341db1ea94b6926be0d04 | 806 | py | Python | src/importTerminology.py | aws-samples/aim317-uncover-insights-customer-conversations | 663ba08fe9ad1cf1e520e77a88dc6a96b611619b | [
"MIT-0"
] | null | null | null | src/importTerminology.py | aws-samples/aim317-uncover-insights-customer-conversations | 663ba08fe9ad1cf1e520e77a88dc6a96b611619b | [
"MIT-0"
] | null | null | null | src/importTerminology.py | aws-samples/aim317-uncover-insights-customer-conversations | 663ba08fe9ad1cf1e520e77a88dc6a96b611619b | [
"MIT-0"
] | 1 | 2021-12-20T16:40:55.000Z | 2021-12-20T16:40:55.000Z | import boto3
def lambda_handler(event, context):
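    # Triggered by an S3 ObjectCreated event: read the uploaded CSV terminology
    # file and import it into Amazon Translate as a custom terminology.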
record = event['Records'][0]
print("Record: " + str(record))
s3bucket = record['s3']['bucket']['name']
s3object = record['s3']['object']['key']
s3Path = "s3://" + s3bucket + "/" + s3object
s3_resource = boto3.resource('s3')
temp = s3_resource.Object(s3bucket, s3object)
term_file = temp.get()['Body'].read().decode('utf-8')
client = boto3.client('translate')
print("S3 Path:" + s3Path)
response = client.import_terminology(
Name="aim317-custom-terminology",
MergeStrategy='OVERWRITE',
TerminologyData={
'File': term_file,
'Format': 'CSV'
},
)
return {
'TerminologyName': response['TerminologyProperties']['Name']
} | 23.705882 | 68 | 0.578164 |
233736af717ba5172be48d9c0bde225177a5a352 | 1,381 | py | Python | KTU/Programavimo Kalbu Teorija/L1 PYTHON/Lab1.py | sandybridge9/KTU-darbai-ir-ataskaitos | 4d2e9874efdae5d1e55725c089946ecac3532c3a | [
"MIT"
] | null | null | null | KTU/Programavimo Kalbu Teorija/L1 PYTHON/Lab1.py | sandybridge9/KTU-darbai-ir-ataskaitos | 4d2e9874efdae5d1e55725c089946ecac3532c3a | [
"MIT"
] | null | null | null | KTU/Programavimo Kalbu Teorija/L1 PYTHON/Lab1.py | sandybridge9/KTU-darbai-ir-ataskaitos | 4d2e9874efdae5d1e55725c089946ecac3532c3a | [
"MIT"
] | null | null | null | #Number class used to store a number and a number which shows a cumulative sum of each numbers divisors from 1 to number
class Number:
def __init__(self, number, cumulativeSum):
self.number = number
self.cumulativeSum = cumulativeSum
def get_number(self):
return self.number
#finds sum of all viable divisors of number n
def findSumOfDivisors(n):
sum = 0
for x in range(2, int(n)):
z = n / x #temporary result of division
if z == int(z):
sum = sum + z
return sum
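#e.g. findSumOfDivisors(12) sums the proper divisors greater than 1: 6 + 4 + 3 + 2 = 15.0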
#finds cumulative sum of divisors for numbers 1 to Number.number
def findCumulativeSumOfDivisors(Number):
for x in range(0, Number.number + 1):
Number.cumulativeSum = Number.cumulativeSum + findSumOfDivisors(x)
print("Cumulative sum of divisors of number n: " + str(Number.number) + " is: " + str(Number.cumulativeSum))
return Number
#reads data from file into integer array
def readIntoArray(fileName):
array = []
    with open(fileName) as f:
for line in f: # read all lines
array.append(int(line))
return array
#finds results for all integers in array
def findResults(array):
numberArray = []
for x in array:
temp = Number(x, 0)
temp = findCumulativeSumOfDivisors(temp)
        numberArray.append(temp)
    return numberArray
array = readIntoArray("data.txt")
findResults(array) | 32.116279 | 120 | 0.666908 |
b4a153898455863facf5d726011513fd94cd18bc | 4,107 | py | Python | models/blocks.py | PRBonn/segcontrast | 3dbdb19aecfe1d78f88637b0ecb3b453daac8d97 | [
"MIT"
] | 22 | 2022-01-03T22:51:18.000Z | 2022-03-31T10:11:41.000Z | models/blocks.py | PRBonn/segcontrast | 3dbdb19aecfe1d78f88637b0ecb3b453daac8d97 | [
"MIT"
] | 3 | 2022-03-07T08:42:25.000Z | 2022-03-31T10:09:54.000Z | models/blocks.py | PRBonn/segcontrast | 3dbdb19aecfe1d78f88637b0ecb3b453daac8d97 | [
"MIT"
] | 2 | 2022-03-29T12:06:18.000Z | 2022-03-31T07:02:46.000Z | import torch.nn as nn
import MinkowskiEngine as ME
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1):
super(BasicBlock, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, dimension=dimension)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes, planes, kernel_size=3, stride=1, dilation=dilation, dimension=dimension)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1):
super(Bottleneck, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=1, dimension=dimension)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, dimension=dimension)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv3 = ME.MinkowskiConvolution(
planes, planes * self.expansion, kernel_size=1, dimension=dimension)
self.norm3 = ME.MinkowskiBatchNorm(
planes * self.expansion, momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ProjectionHead(nn.Module):
def __init__(self, in_channels, out_channels):
nn.Module.__init__(self)
self.projection_head = nn.Sequential(
nn.Linear(in_channels, out_channels),
nn.ReLU(inplace=True),
nn.Linear(out_channels, out_channels),
)
self.dropout = ME.MinkowskiDropout(p=0.4)
self.glob_pool = ME.MinkowskiGlobalMaxPooling()
def forward(self, x):
# from input points dropout some (increase randomness)
x = self.dropout(x)
# global max pooling over the remaining points
x = self.glob_pool(x)
# project the max pooled features
out = self.projection_head(x.F)
return out
class SegmentationClassifierHead(nn.Module):
def __init__(self, in_channels=512, out_channels=26):
nn.Module.__init__(self)
self.fc = nn.Sequential(nn.Linear(in_channels, out_channels))
def forward(self, x):
return self.fc(x.F)
class ClassifierHead(nn.Module):
def __init__(self, in_channels=512, out_channels=40):
nn.Module.__init__(self)
self.fc = ME.MinkowskiLinear(in_channels, out_channels, bias=True)
self.glob_pool = ME.MinkowskiGlobalMaxPooling()
def forward(self, x):
return self.fc(self.glob_pool(x))
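# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of running ProjectionHead on a toy sparse tensor. It assumes
# MinkowskiEngine >= 0.5 (SparseTensor built from features/coordinates) and uses
# made-up shapes: 96-dim features on 8 random 3-D points in a single batch.
if __name__ == "__main__":
    import torch

    feats = torch.randn(8, 96)
    # first column is the batch index, remaining three are spatial coordinates
    coords = torch.cat([torch.zeros(8, 1), torch.randint(0, 10, (8, 3))], dim=1).int()
    x = ME.SparseTensor(features=feats, coordinates=coords)
    head = ProjectionHead(in_channels=96, out_channels=128)
    print(head(x).shape)  # torch.Size([1, 128]): one projected vector per batch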
| 29.335714 | 99 | 0.602873 |
fc55e1e64b3a634b6468d3bf95b3f6e141092509 | 35,446 | py | Python | c7n/resources/account.py | fadiguezel/cloud-custodian | 147fffcf9a109ffd4248a746775b55def5a727c5 | [
"Apache-2.0"
] | null | null | null | c7n/resources/account.py | fadiguezel/cloud-custodian | 147fffcf9a109ffd4248a746775b55def5a727c5 | [
"Apache-2.0"
] | null | null | null | c7n/resources/account.py | fadiguezel/cloud-custodian | 147fffcf9a109ffd4248a746775b55def5a727c5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWS Account as a custodian resource.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from dateutil.tz import tzutc
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import Filter, FilterRegistry, ValueFilter, FilterValidationError
from c7n.manager import ResourceManager, resources
from c7n.utils import local_session, type_schema
from c7n.resources.iam import CredentialReport
filters = FilterRegistry('aws.account.filters')
actions = ActionRegistry('aws.account.actions')
def get_account(session_factory, config):
session = local_session(session_factory)
client = session.client('iam')
aliases = client.list_account_aliases().get(
'AccountAliases', ('',))
name = aliases and aliases[0] or ""
return {'account_id': config.account_id,
'account_name': name}
@resources.register('account')
class Account(ResourceManager):
filter_registry = filters
action_registry = actions
class resource_type(object):
id = 'account_id'
name = 'account_name'
filter_name = None
@classmethod
def get_permissions(cls):
return ('iam:ListAccountAliases',)
def get_model(self):
return self.resource_type
def resources(self):
return self.filter_resources([get_account(self.session_factory, self.config)])
def get_resources(self, resource_ids):
return [get_account(self.session_factory, self.config)]
@filters.register('credential')
class AccountCredentialReport(CredentialReport):
def process(self, resources, event=None):
super(AccountCredentialReport, self).process(resources, event)
report = self.get_credential_report()
if report is None:
return []
results = []
info = report.get('<root_account>')
for r in resources:
if self.match(info):
r['c7n:credential-report'] = info
results.append(r)
return results
@filters.register('check-cloudtrail')
class CloudTrailEnabled(Filter):
"""Verify cloud trail enabled for this account per specifications.
Returns an annotated account resource if trail is not enabled.
Of particular note, the current-region option will evaluate whether cloudtrail is available
in the current region, either as a multi region trail or as a trail with it as the home region.
:example:
.. code-block:: yaml
policies:
- name: account-cloudtrail-enabled
resource: account
region: us-east-1
filters:
- type: check-cloudtrail
global-events: true
multi-region: true
running: true
"""
schema = type_schema(
'check-cloudtrail',
**{'multi-region': {'type': 'boolean'},
'global-events': {'type': 'boolean'},
'current-region': {'type': 'boolean'},
'running': {'type': 'boolean'},
'notifies': {'type': 'boolean'},
'file-digest': {'type': 'boolean'},
'kms': {'type': 'boolean'},
'kms-key': {'type': 'string'}})
permissions = ('cloudtrail:DescribeTrails', 'cloudtrail:GetTrailStatus')
def process(self, resources, event=None):
session = local_session(self.manager.session_factory)
client = session.client('cloudtrail')
trails = client.describe_trails()['trailList']
resources[0]['c7n:cloudtrails'] = trails
if self.data.get('global-events'):
trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
if self.data.get('current-region'):
current_region = session.region_name
trails = [t for t in trails if t.get(
'HomeRegion') == current_region or t.get('IsMultiRegionTrail')]
if self.data.get('kms'):
trails = [t for t in trails if t.get('KmsKeyId')]
if self.data.get('kms-key'):
trails = [t for t in trails
if t.get('KmsKeyId', '') == self.data['kms-key']]
if self.data.get('file-digest'):
trails = [t for t in trails
if t.get('LogFileValidationEnabled')]
if self.data.get('multi-region'):
trails = [t for t in trails if t.get('IsMultiRegionTrail')]
if self.data.get('notifies'):
trails = [t for t in trails if t.get('SNSTopicArn')]
if self.data.get('running', True):
running = []
for t in list(trails):
t['Status'] = status = client.get_trail_status(
Name=t['TrailARN'])
if status['IsLogging'] and not status.get(
'LatestDeliveryError'):
running.append(t)
trails = running
if trails:
return []
return resources
@filters.register('check-config')
class ConfigEnabled(Filter):
"""Is config service enabled for this account
:example:
.. code-block:: yaml
policies:
- name: account-check-config-services
resource: account
region: us-east-1
filters:
- type: check-config
all-resources: true
global-resources: true
running: true
"""
schema = type_schema(
'check-config', **{
'all-resources': {'type': 'boolean'},
'running': {'type': 'boolean'},
'global-resources': {'type': 'boolean'}})
permissions = ('config:DescribeDeliveryChannels',
'config:DescribeConfigurationRecorders',
'config:DescribeConfigurationRecorderStatus')
def process(self, resources, event=None):
client = local_session(
self.manager.session_factory).client('config')
channels = client.describe_delivery_channels()[
'DeliveryChannels']
recorders = client.describe_configuration_recorders()[
'ConfigurationRecorders']
resources[0]['c7n:config_recorders'] = recorders
resources[0]['c7n:config_channels'] = channels
if self.data.get('global-resources'):
recorders = [
r for r in recorders
if r['recordingGroup'].get('includeGlobalResourceTypes')]
if self.data.get('all-resources'):
recorders = [r for r in recorders
if r['recordingGroup'].get('allSupported')]
if self.data.get('running', True) and recorders:
status = {s['name']: s for
s in client.describe_configuration_recorder_status(
)['ConfigurationRecordersStatus']}
resources[0]['c7n:config_status'] = status
recorders = [r for r in recorders if status[r['name']]['recording'] and
status[r['name']]['lastStatus'].lower() in ('pending', 'success')]
if channels and recorders:
return []
return resources
@filters.register('iam-summary')
class IAMSummary(ValueFilter):
"""Return annotated account resource if iam summary filter matches.
Some use cases include, detecting root api keys or mfa usage.
Example iam summary wrt to matchable fields::
{
"AccessKeysPerUserQuota": 2,
"AccountAccessKeysPresent": 0,
"AccountMFAEnabled": 1,
"AccountSigningCertificatesPresent": 0,
"AssumeRolePolicySizeQuota": 2048,
"AttachedPoliciesPerGroupQuota": 10,
"AttachedPoliciesPerRoleQuota": 10,
"AttachedPoliciesPerUserQuota": 10,
"GroupPolicySizeQuota": 5120,
"Groups": 1,
"GroupsPerUserQuota": 10,
"GroupsQuota": 100,
"InstanceProfiles": 0,
"InstanceProfilesQuota": 100,
"MFADevices": 3,
"MFADevicesInUse": 2,
"Policies": 3,
"PoliciesQuota": 1000,
"PolicySizeQuota": 5120,
"PolicyVersionsInUse": 5,
"PolicyVersionsInUseQuota": 10000,
"Providers": 0,
"RolePolicySizeQuota": 10240,
"Roles": 4,
"RolesQuota": 250,
"ServerCertificates": 0,
"ServerCertificatesQuota": 20,
"SigningCertificatesPerUserQuota": 2,
"UserPolicySizeQuota": 2048,
"Users": 5,
"UsersQuota": 5000,
"VersionsPerPolicyQuota": 5,
}
For example to determine if an account has either not been
enabled with root mfa or has root api keys.
.. code-block:: yaml
policies:
- name: root-keys-or-no-mfa
resource: account
filters:
- type: iam-summary
key: AccountMFAEnabled
value: true
op: eq
value_type: swap
"""
schema = type_schema('iam-summary', rinherit=ValueFilter.schema)
permissions = ('iam:GetAccountSummary',)
def process(self, resources, event=None):
if not resources[0].get('c7n:iam_summary'):
client = local_session(
self.manager.session_factory).client('iam')
resources[0]['c7n:iam_summary'] = client.get_account_summary(
)['SummaryMap']
if self.match(resources[0]['c7n:iam_summary']):
return resources
return []
@filters.register('password-policy')
class AccountPasswordPolicy(ValueFilter):
"""Check an account's password policy.
Note that on top of the default password policy fields, we also add an extra key,
PasswordPolicyConfigured which will be set to true or false to signify if the given
account has attempted to set a policy at all.
:example:
.. code-block:: yaml
policies:
- name: password-policy-check
resource: account
region: us-east-1
filters:
- type: password-policy
key: MinimumPasswordLength
value: 10
op: ge
- type: password-policy
key: RequireSymbols
value: true
"""
schema = type_schema('password-policy', rinherit=ValueFilter.schema)
permissions = ('iam:GetAccountPasswordPolicy',)
def process(self, resources, event=None):
account = resources[0]
if not account.get('c7n:password_policy'):
client = local_session(self.manager.session_factory).client('iam')
policy = {}
try:
policy = client.get_account_password_policy().get('PasswordPolicy', {})
policy['PasswordPolicyConfigured'] = True
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
policy['PasswordPolicyConfigured'] = False
else:
raise
account['c7n:password_policy'] = policy
if self.match(account['c7n:password_policy']):
return resources
return []
@filters.register('service-limit')
class ServiceLimit(Filter):
"""Check if account's service limits are past a given threshold.
Supported limits are per trusted advisor, which is variable based
on usage in the account and support level enabled on the account.
- service: AutoScaling limit: Auto Scaling groups
- service: AutoScaling limit: Launch configurations
- service: EBS limit: Active snapshots
- service: EBS limit: Active volumes
- service: EBS limit: General Purpose (SSD) volume storage (GiB)
- service: EBS limit: Magnetic volume storage (GiB)
- service: EBS limit: Provisioned IOPS
- service: EBS limit: Provisioned IOPS (SSD) storage (GiB)
- service: EC2 limit: Elastic IP addresses (EIPs)
# Note this is extant for each active instance type in the account
# however the total value is against sum of all instance types.
# see issue https://github.com/capitalone/cloud-custodian/issues/516
- service: EC2 limit: On-Demand instances - m3.medium
- service: EC2 limit: Reserved Instances - purchase limit (monthly)
- service: ELB limit: Active load balancers
- service: IAM limit: Groups
- service: IAM limit: Instance profiles
- service: IAM limit: Roles
- service: IAM limit: Server certificates
- service: IAM limit: Users
- service: RDS limit: DB instances
- service: RDS limit: DB parameter groups
- service: RDS limit: DB security groups
- service: RDS limit: DB snapshots per user
- service: RDS limit: Storage quota (GB)
- service: RDS limit: Internet gateways
- service: SES limit: Daily sending quota
- service: VPC limit: VPCs
- service: VPC limit: VPC Elastic IP addresses (EIPs)
:example:
.. code-block:: yaml
policies:
- name: account-service-limits
resource: account
filters:
- type: service-limit
services:
- EC2
threshold: 1.0
- name: specify-region-for-global-service
region: us-east-1
resource: account
filters:
- type: service-limit
services:
- IAM
limits:
- Roles
"""
schema = type_schema(
'service-limit',
threshold={'type': 'number'},
refresh_period={'type': 'integer'},
limits={'type': 'array', 'items': {'type': 'string'}},
services={'type': 'array', 'items': {
'enum': ['EC2', 'ELB', 'VPC', 'AutoScaling',
'RDS', 'EBS', 'SES', 'IAM']}})
permissions = ('support:DescribeTrustedAdvisorCheckResult',)
check_id = 'eW7HH0l7J9'
check_limit = ('region', 'service', 'check', 'limit', 'extant', 'color')
global_services = set(['IAM'])
def validate(self):
region = self.manager.data.get('region', '')
if len(self.global_services.intersection(self.data.get('services', []))):
if region != 'us-east-1':
raise FilterValidationError(
"Global services: %s must be targeted in us-east-1 on the policy"
% ', '.join(self.global_services))
return self
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client(
'support', region_name='us-east-1')
checks = client.describe_trusted_advisor_check_result(
checkId=self.check_id, language='en')['result']
region = self.manager.config.region
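        # Trusted Advisor reports global services with region '-'; keep those
        # results only when the policy is run against us-east-1.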
checks['flaggedResources'] = [r for r in checks['flaggedResources']
if r['metadata'][0] == region or (r['metadata'][0] == '-' and region == 'us-east-1')]
resources[0]['c7n:ServiceLimits'] = checks
delta = timedelta(self.data.get('refresh_period', 1))
check_date = parse_date(checks['timestamp'])
if datetime.now(tz=tzutc()) - delta > check_date:
client.refresh_trusted_advisor_check(checkId=self.check_id)
threshold = self.data.get('threshold')
services = self.data.get('services')
limits = self.data.get('limits')
exceeded = []
for resource in checks['flaggedResources']:
if threshold is None and resource['status'] == 'ok':
continue
limit = dict(zip(self.check_limit, resource['metadata']))
if services and limit['service'] not in services:
continue
if limits and limit['check'] not in limits:
continue
limit['status'] = resource['status']
limit['percentage'] = float(limit['extant'] or 0) / float(
limit['limit']) * 100
if threshold and limit['percentage'] < threshold:
continue
exceeded.append(limit)
if exceeded:
resources[0]['c7n:ServiceLimitsExceeded'] = exceeded
return resources
return []
@actions.register('request-limit-increase')
class RequestLimitIncrease(BaseAction):
r"""File support ticket to raise limit.
:Example:
.. code-block:: yaml
policies:
- name: account-service-limits
resource: account
filters:
- type: service-limit
services:
- EBS
limits:
- Provisioned IOPS (SSD) storage (GiB)
threshold: 60.5
actions:
- type: request-limit-increase
notify: [email, email2]
                ## Use either percent-increase or amount-increase (exactly one).
percent-increase: 50
message: "Please raise the below account limit(s); \n {limits}"
"""
schema = {
'type': 'object',
'notify': {'type': 'array'},
'properties': {
'type': {'enum': ['request-limit-increase']},
'percent-increase': {'type': 'number', 'minimum': 1},
'amount-increase': {'type': 'number', 'minimum': 1},
'subject': {'type': 'string'},
'message': {'type': 'string'},
'severity': {'type': 'string', 'enum': ['urgent', 'high', 'normal', 'low']}
},
'oneOf': [
{'required': ['type', 'percent-increase']},
{'required': ['type', 'amount-increase']}
]
}
permissions = ('support:CreateCase',)
default_subject = '[Account:{account}]Raise the following limit(s) of {service} in {region}'
default_template = 'Please raise the below account limit(s); \n {limits}'
default_severity = 'normal'
service_code_mapping = {
'AutoScaling': 'auto-scaling',
'ELB': 'elastic-load-balancing',
'EBS': 'amazon-elastic-block-store',
'EC2': 'amazon-elastic-compute-cloud-linux',
'RDS': 'amazon-relational-database-service-aurora',
'VPC': 'amazon-virtual-private-cloud',
}
def process(self, resources):
session = local_session(self.manager.session_factory)
client = session.client('support', region_name='us-east-1')
account_id = self.manager.config.account_id
service_map = {}
region_map = {}
limit_exceeded = resources[0].get('c7n:ServiceLimitsExceeded', [])
percent_increase = self.data.get('percent-increase')
amount_increase = self.data.get('amount-increase')
for s in limit_exceeded:
current_limit = int(s['limit'])
if percent_increase:
increase_by = current_limit * float(percent_increase) / 100
increase_by = max(increase_by, 1)
else:
increase_by = amount_increase
increase_by = round(increase_by)
msg = '\nIncrease %s by %d in %s \n\t Current Limit: %s\n\t Current Usage: %s\n\t ' \
'Set New Limit to: %d' % (
s['check'], increase_by, s['region'], s['limit'], s['extant'],
(current_limit + increase_by))
service_map.setdefault(s['service'], []).append(msg)
region_map.setdefault(s['service'], s['region'])
for service in service_map:
subject = self.data.get('subject', self.default_subject).format(
service=service, region=region_map[service], account=account_id)
service_code = self.service_code_mapping.get(service)
body = self.data.get('message', self.default_template)
body = body.format(**{
'service': service,
'limits': '\n\t'.join(service_map[service]),
})
client.create_case(
subject=subject,
communicationBody=body,
serviceCode=service_code,
categoryCode='general-guidance',
severityCode=self.data.get('severity', self.default_severity),
ccEmailAddresses=self.data.get('notify', []))
def cloudtrail_policy(original, bucket_name, account_id):
'''add CloudTrail permissions to an S3 policy, preserving existing'''
ct_actions = [
{
'Action': 's3:GetBucketAcl',
'Effect': 'Allow',
'Principal': {'Service': 'cloudtrail.amazonaws.com'},
'Resource': 'arn:aws:s3:::' + bucket_name,
'Sid': 'AWSCloudTrailAclCheck20150319',
},
{
'Action': 's3:PutObject',
'Condition': {
'StringEquals':
{'s3:x-amz-acl': 'bucket-owner-full-control'},
},
'Effect': 'Allow',
'Principal': {'Service': 'cloudtrail.amazonaws.com'},
'Resource': 'arn:aws:s3:::%s/AWSLogs/%s/*' % (
bucket_name, account_id
),
'Sid': 'AWSCloudTrailWrite20150319',
},
]
# parse original policy
if original is None:
policy = {
'Statement': [],
'Version': '2012-10-17',
}
else:
policy = json.loads(original['Policy'])
original_actions = [a.get('Action') for a in policy['Statement']]
for cta in ct_actions:
if cta['Action'] not in original_actions:
policy['Statement'].append(cta)
return json.dumps(policy)
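# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the merge behaviour, with a made-up bucket and account id:
# starting from no policy both CloudTrail statements are added, and re-running
# against the result is a no-op because the Actions are already present.
#
#   merged = cloudtrail_policy(None, 'my-trail-bucket', '123456789012')
#   again = cloudtrail_policy({'Policy': merged}, 'my-trail-bucket', '123456789012')
#   assert json.loads(merged) == json.loads(again)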
@actions.register('enable-cloudtrail')
class EnableTrail(BaseAction):
"""Enables logging on the trail(s) named in the policy
:Example:
.. code-block:: yaml
policies:
- name: trail-test
description: Ensure CloudTrail logging is enabled
resource: account
actions:
- type: enable-cloudtrail
trail: mytrail
bucket: trails
"""
permissions = (
'cloudtrail:CreateTrail',
'cloudtrail:DescribeTrails',
'cloudtrail:GetTrailStatus',
'cloudtrail:StartLogging',
'cloudtrail:UpdateTrail',
's3:CreateBucket',
's3:GetBucketPolicy',
's3:PutBucketPolicy',
)
schema = type_schema(
'enable-cloudtrail',
**{
'trail': {'type': 'string'},
'bucket': {'type': 'string'},
'bucket-region': {'type': 'string'},
'multi-region': {'type': 'boolean'},
'global-events': {'type': 'boolean'},
'notify': {'type': 'string'},
'file-digest': {'type': 'boolean'},
'kms': {'type': 'boolean'},
'kms-key': {'type': 'string'},
'required': ('bucket',),
}
)
def process(self, accounts):
"""Create or enable CloudTrail"""
session = local_session(self.manager.session_factory)
client = session.client('cloudtrail')
bucket_name = self.data['bucket']
bucket_region = self.data.get('bucket-region', 'us-east-1')
trail_name = self.data.get('trail', 'default-trail')
multi_region = self.data.get('multi-region', True)
global_events = self.data.get('global-events', True)
notify = self.data.get('notify', '')
file_digest = self.data.get('file-digest', False)
kms = self.data.get('kms', False)
kms_key = self.data.get('kms-key', '')
s3client = session.client('s3')
try:
s3client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={'LocationConstraint': bucket_region}
)
except ClientError as ce:
if not ('Error' in ce.response and
ce.response['Error']['Code'] == 'BucketAlreadyOwnedByYou'):
raise ce
try:
current_policy = s3client.get_bucket_policy(Bucket=bucket_name)
except ClientError:
current_policy = None
policy_json = cloudtrail_policy(
current_policy, bucket_name, self.manager.config.account_id)
s3client.put_bucket_policy(Bucket=bucket_name, Policy=policy_json)
trails = client.describe_trails().get('trailList', ())
if trail_name not in [t.get('Name') for t in trails]:
new_trail = client.create_trail(
Name=trail_name,
S3BucketName=bucket_name,
)
if new_trail:
trails.append(new_trail)
# the loop below will configure the new trail
for trail in trails:
if trail.get('Name') != trail_name:
continue
# enable
arn = trail['TrailARN']
status = client.get_trail_status(Name=arn)
if not status['IsLogging']:
client.start_logging(Name=arn)
# apply configuration changes (if any)
update_args = {}
if multi_region != trail.get('IsMultiRegionTrail'):
update_args['IsMultiRegionTrail'] = multi_region
if global_events != trail.get('IncludeGlobalServiceEvents'):
update_args['IncludeGlobalServiceEvents'] = global_events
if notify != trail.get('SNSTopicArn'):
update_args['SnsTopicName'] = notify
if file_digest != trail.get('LogFileValidationEnabled'):
update_args['EnableLogFileValidation'] = file_digest
if kms_key != trail.get('KmsKeyId'):
if not kms and 'KmsKeyId' in trail:
kms_key = ''
update_args['KmsKeyId'] = kms_key
if update_args:
update_args['Name'] = trail_name
client.update_trail(**update_args)
@filters.register('has-virtual-mfa')
class HasVirtualMFA(Filter):
"""Is the account configured with a virtual MFA device?
:example:
.. code-block:: yaml
policies:
- name: account-with-virtual-mfa
resource: account
region: us-east-1
filters:
- type: has-virtual-mfa
value: true
"""
schema = type_schema('has-virtual-mfa', **{'value': {'type': 'boolean'}})
permissions = ('iam:ListVirtualMFADevices',)
def mfa_belongs_to_root_account(self, mfa):
return mfa['SerialNumber'].endswith(':mfa/root-account-mfa-device')
def account_has_virtual_mfa(self, account):
if not account.get('c7n:VirtualMFADevices'):
client = local_session(self.manager.session_factory).client('iam')
paginator = client.get_paginator('list_virtual_mfa_devices')
raw_list = paginator.paginate().build_full_result()['VirtualMFADevices']
account['c7n:VirtualMFADevices'] = list(filter(
self.mfa_belongs_to_root_account, raw_list))
expect_virtual_mfa = self.data.get('value', True)
has_virtual_mfa = any(account['c7n:VirtualMFADevices'])
return expect_virtual_mfa == has_virtual_mfa
def process(self, resources, event=None):
return list(filter(self.account_has_virtual_mfa, resources))
@actions.register('enable-data-events')
class EnableDataEvents(BaseAction):
"""Ensure all buckets in account are setup to log data events.
Note this works via a single trail for data events per
(https://goo.gl/1ux7RG).
    This trail should NOT be used for API management events; the
    configuration here is solely for data events. If directed to create
a trail this will do so without management events.
:example:
.. code-block:: yaml
policies:
              - name: s3-enable-data-events
                resource: account
actions:
- type: enable-data-events
data-trail:
name: s3-events
multi-region: us-east-1
"""
schema = type_schema(
'enable-data-events', required=['data-trail'], **{
'data-trail': {
'type': 'object',
'additionalProperties': False,
'required': ['name'],
'properties': {
'create': {
'title': 'Should we create trail if needed for events?',
'type': 'boolean'},
'type': {'enum': ['ReadOnly', 'WriteOnly', 'All']},
'name': {
'title': 'The name of the event trail',
'type': 'string'},
'topic': {
'title': 'If creating, the sns topic for the trail to send updates',
'type': 'string'},
's3-bucket': {
'title': 'If creating, the bucket to store trail event data',
'type': 'string'},
's3-prefix': {'type': 'string'},
'key-id': {
'title': 'If creating, Enable kms on the trail',
'type': 'string'},
# region that we're aggregating via trails.
'multi-region': {
'title': 'If creating, use this region for all data trails',
'type': 'string'}}}})
def validate(self):
if self.data['data-trail'].get('create'):
if 's3-bucket' not in self.data['data-trail']:
raise FilterValidationError(
"If creating data trails, an s3-bucket is required")
return self
def get_permissions(self):
perms = [
'cloudtrail:DescribeTrails',
'cloudtrail:GetEventSelectors',
'cloudtrail:PutEventSelectors']
if self.data.get('data-trail', {}).get('create'):
perms.extend([
'cloudtrail:CreateTrail', 'cloudtrail:StartLogging'])
return perms
def add_data_trail(self, client, trail_cfg):
if not trail_cfg.get('create'):
raise ValueError(
"s3 data event trail missing and not configured to create")
params = dict(
Name=trail_cfg['name'],
S3BucketName=trail_cfg['s3-bucket'],
EnableLogFileValidation=True)
if 'key-id' in trail_cfg:
params['KmsKeyId'] = trail_cfg['key-id']
if 's3-prefix' in trail_cfg:
params['S3KeyPrefix'] = trail_cfg['s3-prefix']
if 'topic' in trail_cfg:
params['SnsTopicName'] = trail_cfg['topic']
if 'multi-region' in trail_cfg:
params['IsMultiRegionTrail'] = True
client.create_trail(**params)
return {'Name': trail_cfg['name']}
def process(self, resources):
session = local_session(self.manager.session_factory)
region = self.data['data-trail'].get('multi-region')
if region:
client = session.client('cloudtrail', region_name=region)
else:
client = session.client('cloudtrail')
added = False
tconfig = self.data['data-trail']
trails = client.describe_trails(
trailNameList=[tconfig['name']]).get('trailList', ())
if not trails:
trail = self.add_data_trail(client, tconfig)
added = True
else:
trail = trails[0]
events = client.get_event_selectors(
TrailName=trail['Name']).get('EventSelectors', [])
for e in events:
found = False
if not e.get('DataResources'):
continue
for data_events in e['DataResources']:
if data_events['Type'] != 'AWS::S3::Object':
continue
for b in data_events['Values']:
if b.rsplit(':')[-1].strip('/') == '':
found = True
break
if found:
resources[0]['c7n_data_trail'] = trail
return
# Opinionated choice, separate api and data events.
event_count = len(events)
events = [e for e in events if not e.get('IncludeManagementEvents')]
if len(events) != event_count:
self.log.warning("removing api trail from data trail")
# future proof'd for other data events, for s3 this trail
# encompasses all the buckets in the account.
events.append({
'IncludeManagementEvents': False,
'ReadWriteType': tconfig.get('type', 'All'),
'DataResources': [{
'Type': 'AWS::S3::Object',
'Values': ['arn:aws:s3:::']}]})
client.put_event_selectors(
TrailName=trail['Name'],
EventSelectors=events)
if added:
client.start_logging(Name=tconfig['name'])
resources[0]['c7n_data_trail'] = trail
@filters.register('shield-enabled')
class ShieldEnabled(Filter):
permissions = ('shield:DescribeSubscription',)
schema = type_schema(
'shield-enabled',
state={'type': 'boolean'})
def process(self, resources, event=None):
state = self.data.get('state', False)
client = self.manager.session_factory().client('shield')
try:
subscription = client.describe_subscription().get(
'Subscription', None)
except ClientError as e:
if e.response['Error']['Code'] != 'ResourceNotFoundException':
raise
subscription = None
resources[0]['c7n:ShieldSubscription'] = subscription
if state and subscription:
return resources
elif not state and not subscription:
return resources
return []
@actions.register('set-shield-advanced')
class SetShieldAdvanced(BaseAction):
"""Enable/disable Shield Advanced on an account."""
permissions = (
'shield:CreateSubscription', 'shield:DeleteSubscription')
schema = type_schema(
'set-shield-advanced',
state={'type': 'boolean'})
def process(self, resources):
client = self.manager.session_factory().client('shield')
state = self.data.get('state', True)
if state:
client.create_subscription()
else:
try:
client.delete_subscription()
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return
raise
| 36.655636 | 99 | 0.567906 |
daad082596d259673fe4bba215bf2cd9e3b91096 | 1,587 | py | Python | src/backend/expungeservice/models/disposition.py | KentShikama/recordexpungPDX | 4d3d14339e9f408ae466b867615101bdee42e98b | [
"MIT"
] | null | null | null | src/backend/expungeservice/models/disposition.py | KentShikama/recordexpungPDX | 4d3d14339e9f408ae466b867615101bdee42e98b | [
"MIT"
] | null | null | null | src/backend/expungeservice/models/disposition.py | KentShikama/recordexpungPDX | 4d3d14339e9f408ae466b867615101bdee42e98b | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from datetime import date
from enum import Enum
class DispositionStatus(str, Enum):
CONVICTED = "Convicted"
DISMISSED = "Dismissed"
NO_COMPLAINT = "No Complaint"
DIVERTED = "Diverted"
UNRECOGNIZED = "Unrecognized"
@dataclass(frozen=True)
class Disposition:
date: date
ruling: str
status: DispositionStatus
amended: bool = False
class DispositionCreator:
@staticmethod
def create(date: date, ruling: str, amended: bool = False) -> Disposition:
status = DispositionCreator.__build_status(ruling)
return Disposition(date, ruling, status, amended)
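    # e.g. DispositionCreator.create(date(2020, 1, 1), "Dismissed") returns a
    # Disposition whose status is DispositionStatus.DISMISSED.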
@staticmethod
def __build_status(ruling_string):
ruling = ruling_string.lower()
conviction_rulings = ["convicted", "conviction", "reduced", "finding - guilty", "conversion", "converted"]
dismissal_rulings = [
"acquitted",
"acquittal",
"dismissed",
"dismissal",
"finding - not guilty",
"accusatory instrument filed",
"removed from charging instrument",
]
if any([rule in ruling for rule in conviction_rulings]):
return DispositionStatus.CONVICTED
elif any([rule in ruling for rule in dismissal_rulings]):
return DispositionStatus.DISMISSED
elif "diverted" in ruling:
return DispositionStatus.DIVERTED
elif "no complaint" in ruling:
return DispositionStatus.NO_COMPLAINT
else:
return DispositionStatus.UNRECOGNIZED
| 28.339286 | 114 | 0.647133 |
6eef6dcb38a57d37b3dc4cbf4df3476989300c43 | 6,913 | py | Python | sdk/python/pulumi_azure/apimanagement/named_value.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/apimanagement/named_value.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/apimanagement/named_value.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class NamedValue(pulumi.CustomResource):
api_management_name: pulumi.Output[str]
"""
The name of the API Management Service in which the API Management Named Value should exist. Changing this forces a new resource to be created.
"""
display_name: pulumi.Output[str]
"""
The display name of this API Management Named Value.
"""
name: pulumi.Output[str]
"""
The name of the API Management Named Value. Changing this forces a new resource to be created.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the Resource Group in which the API Management Named Value should exist. Changing this forces a new resource to be created.
"""
secret: pulumi.Output[bool]
"""
Specifies whether the API Management Named Value is secret. Valid values are `true` or `false`. The default value is `false`.
"""
tags: pulumi.Output[list]
"""
A list of tags to be applied to the API Management Named Value.
"""
value: pulumi.Output[str]
"""
The value of this API Management Named Value.
"""
def __init__(__self__, resource_name, opts=None, api_management_name=None, display_name=None, name=None, resource_group_name=None, secret=None, tags=None, value=None, __props__=None, __name__=None, __opts__=None):
"""
Manages an API Management Named Value.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service in which the API Management Named Value should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] display_name: The display name of this API Management Named Value.
:param pulumi.Input[str] name: The name of the API Management Named Value. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Named Value should exist. Changing this forces a new resource to be created.
:param pulumi.Input[bool] secret: Specifies whether the API Management Named Value is secret. Valid values are `true` or `false`. The default value is `false`.
:param pulumi.Input[list] tags: A list of tags to be applied to the API Management Named Value.
:param pulumi.Input[str] value: The value of this API Management Named Value.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_management_name is None:
raise TypeError("Missing required property 'api_management_name'")
__props__['api_management_name'] = api_management_name
if display_name is None:
raise TypeError("Missing required property 'display_name'")
__props__['display_name'] = display_name
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['secret'] = secret
__props__['tags'] = tags
if value is None:
raise TypeError("Missing required property 'value'")
__props__['value'] = value
super(NamedValue, __self__).__init__(
'azure:apimanagement/namedValue:NamedValue',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, api_management_name=None, display_name=None, name=None, resource_group_name=None, secret=None, tags=None, value=None):
"""
Get an existing NamedValue resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service in which the API Management Named Value should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] display_name: The display name of this API Management Named Value.
:param pulumi.Input[str] name: The name of the API Management Named Value. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Named Value should exist. Changing this forces a new resource to be created.
:param pulumi.Input[bool] secret: Specifies whether the API Management Named Value is secret. Valid values are `true` or `false`. The default value is `false`.
:param pulumi.Input[list] tags: A list of tags to be applied to the API Management Named Value.
:param pulumi.Input[str] value: The value of this API Management Named Value.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_management_name"] = api_management_name
__props__["display_name"] = display_name
__props__["name"] = name
__props__["resource_group_name"] = resource_group_name
__props__["secret"] = secret
__props__["tags"] = tags
__props__["value"] = value
return NamedValue(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 52.770992 | 217 | 0.689426 |
9c8c46e6fa2672175fd7631b2fa6d376ddea0c3b | 5,010 | py | Python | pose_estimation/configs/fashion/udp/deepfashion/hrnet_w32_deepfashion_full_256x192_udp.py | AK391/UniFormer | 22c6b3b98b68236dda6a8fa7152a32af1af62a20 | [
"MIT"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | pose_estimation/configs/fashion/udp/deepfashion/hrnet_w32_deepfashion_full_256x192_udp.py | hadlang/UniFormer | e8024703bffb89cb7c7d09e0d774a0d2a9f96c25 | [
"MIT"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | pose_estimation/configs/fashion/udp/deepfashion/hrnet_w32_deepfashion_full_256x192_udp.py | hadlang/UniFormer | e8024703bffb89cb7c7d09e0d774a0d2a9f96c25 | [
"MIT"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=5, create_symlink=False)
evaluation = dict(interval=10, metric='PCK', key_indicator='PCK')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
target_type = 'GaussianHeatMap'
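# UDP (Unbiased Data Processing) is enabled consistently below: TopDownAffine in
# both pipelines, TopDownGenerateTarget (encoding='UDP') and test_cfg all opt in.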
channel_cfg = dict(
num_output_channels=8,
dataset_joints=8,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=False,
target_type=target_type,
modulate_kernel=11,
use_udp=True))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='../../../../dataset/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='TopDownGenerateTarget',
sigma=2,
encoding='UDP',
target_type=target_type),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/fld'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=256),
test_dataloader=dict(samples_per_gpu=256),
train=dict(
type='DeepFashionDataset',
ann_file=f'{data_root}/annotations/fld_full_train.json',
img_prefix=f'{data_root}/img/',
subset='full',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='DeepFashionDataset',
ann_file=f'{data_root}/annotations/fld_full_val.json',
img_prefix=f'{data_root}/img/',
subset='full',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='DeepFashionDataset',
ann_file=f'{data_root}/annotations/fld_full_test.json',
img_prefix=f'{data_root}/img/',
subset='full',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 28.305085 | 79 | 0.586028 |
bdf269ce2a956875e13721438968639668e5e7bd | 5,531 | py | Python | py/bitbox02/u2fhid/u2fhid.py | jstrnbrg/bitbox02-firmware | 173a1fe340daf1a839b888286a654fb75745102d | [
"Apache-2.0"
] | 1 | 2019-11-29T07:06:59.000Z | 2019-11-29T07:06:59.000Z | py/bitbox02/u2fhid/u2fhid.py | jstrnbrg/bitbox02-firmware | 173a1fe340daf1a839b888286a654fb75745102d | [
"Apache-2.0"
] | null | null | null | py/bitbox02/u2fhid/u2fhid.py | jstrnbrg/bitbox02-firmware | 173a1fe340daf1a839b888286a654fb75745102d | [
"Apache-2.0"
] | 1 | 2019-11-29T07:07:01.000Z | 2019-11-29T07:07:01.000Z | # Copyright 2019 Shift Cryptosecurity AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations"""
import struct
import random
from typing_extensions import Protocol
USB_REPORT_SIZE = 64
ERR_NONE = 0x00
ERR_INVALID_CMD = 0x01
ERR_INVALID_PAR = 0x02
ERR_INVALID_LEN = 0x03
ERR_INVALID_SEQ = 0x04
ERR_MSG_TIMEOUT = 0x05
ERR_CHANNEL_BUSY = 0x06
ERR_LOCK_REQUIRED = 0x0A
ERR_INVALID_CID = 0x0B
ERR_OTHER = 0x7F
PING = 0x80 | 0x01
MSG = 0x80 | 0x03
LOCK = 0x80 | 0x04
INIT = 0x80 | 0x06
WINK = 0x80 | 0x08
SYNC = 0x80 | 0x3C
ERROR = 0x80 | 0x3F
CID_BROADCAST = 0xFFFFFFFF
class SupportsReadWrite(Protocol):
# pylint: disable=unused-argument,no-self-use
def write(self, msg: bytes) -> None:
...
def read(self, size: int, timeout_ms: int) -> bytes:
...
def generate_cid() -> int:
"""Generate a valid CID"""
# Exclude 0 and u32_max (0xffff_ffff)
return random.randrange(1, 0xFFFFFFFF)
# TODO: Create exceptions
def _throw_error(error_code: int) -> None:
if error_code == ERR_INVALID_CMD:
raise Exception("Received error: invalid command")
if error_code == ERR_INVALID_LEN:
raise Exception("Received error: invalid length")
if error_code == ERR_INVALID_SEQ:
raise Exception("Received error: invalid sequence")
if error_code == ERR_CHANNEL_BUSY:
raise Exception("Received error: channel busy")
if error_code == 0x7E:
raise Exception("Received error: encryption failed")
if error_code == ERR_OTHER:
raise Exception("Received unknown error")
raise Exception("Received error: %d" % error_code)
def write(hid_device: SupportsReadWrite, data: bytes, cmd: int, cid: int) -> None:
"""
Send data to the device.
Args:
hid_device: An object that implements read/write functions
data: Data to send
cmd: U2F HID command
cid: U2F HID channel ID
Throws:
ValueError: In case any value is out of range
"""
if cmd < 0 or cmd > 0xFF:
raise ValueError("Channel command is out of range '0 < cmd <= 0xFF'")
if cid < 0 or cid > 0xFFFFFFFF:
raise ValueError("Channel id is out of range '0 < cid <= 0xFFFFFFFF'")
data = bytearray(data)
data_len = len(data)
if data_len > 0xFFFF:
raise ValueError("Data is too large 'size <= 0xFFFF'")
seq = 0
idx = 0
buf = b""
# Allow to write an empty packet
single_empty_write = data_len == 0
while idx < data_len or single_empty_write:
if idx == 0:
# INIT frame
buf = data[idx : idx + min(data_len, USB_REPORT_SIZE - 7)]
hid_device.write(
b"\0"
+ struct.pack(">IBH", cid, cmd, data_len)
+ buf
+ b"\xEE" * (USB_REPORT_SIZE - 7 - len(buf))
)
else:
# CONT frame
buf = data[idx : idx + min(data_len, USB_REPORT_SIZE - 5)]
hid_device.write(
b"\0"
+ struct.pack(">IB", cid, seq)
+ buf
+ b"\xEE" * (USB_REPORT_SIZE - 5 - len(buf))
)
seq += 1
idx += len(buf)
single_empty_write = False
def read(hid_device: SupportsReadWrite, cmd: int, cid: int, timeout: int = 5000) -> bytes:
"""
Receive data from the device.
Args:
hid_device: An object that implements read/write functions
cmd: The expected returned U2F HID command
cid: The expected returned U2F HID channel ID
        timeout: How long to wait (in milliseconds) for more bytes from hid_device
Returns:
The read message combined from the u2fhid packets
Throws:
ValueError: In case any value is out of range
Exception: In case of USB communication issues
"""
    if cmd < 0 or cmd > 0xFF:
        raise ValueError("Channel command is out of range '0 <= cmd <= 0xFF'")
    if cid < 0 or cid > 0xFFFFFFFF:
        raise ValueError("Channel id is out of range '0 <= cid <= 0xFFFFFFFF'")
    # `timeout` and hid-style read timeouts are both expressed in milliseconds
    timeout_ms = timeout
buf = hid_device.read(USB_REPORT_SIZE, timeout_ms)
    if len(buf) >= 7:  # need a full INIT header: 4-byte CID, 1-byte CMD, 2-byte length
reply_cid = ((buf[0] * 256 + buf[1]) * 256 + buf[2]) * 256 + buf[3]
reply_cmd = buf[4]
data_len = buf[5] * 256 + buf[6]
data = buf[7:]
idx = len(buf) - 7
if reply_cmd == ERROR:
_throw_error(data[0])
while idx < data_len:
# CONT response
buf = hid_device.read(USB_REPORT_SIZE, timeout_ms)
            if len(buf) < 5:  # too short for a CONT header (4-byte CID, 1-byte SEQ)
                raise Exception("Did not receive a continuation frame after %d ms" % timeout)
data += buf[5:]
idx += len(buf) - 5
        if reply_cid != cid:
            raise Exception(f"USB channel ID mismatch {reply_cid:x} != {cid:x}")
        if reply_cmd != cmd:
            raise Exception(f"USB command mismatch {reply_cmd:x} != {cmd:x}")
return data[:data_len]
raise Exception("Did not read anything after %d seconds" % timeout)
| 32.727811 | 98 | 0.619056 |
5b96c0befe2a9434560bdd357f3d8e8dc2a92bc5 | 3,144 | py | Python | nixos/modules/tests/stripe-api-double.py | PrivateStorageio/PrivateStorageio | fc3cf0fd0a3ccb31780984b99f4c04d96d1c3820 | [
"Apache-2.0"
] | 1 | 2020-12-22T23:40:34.000Z | 2020-12-22T23:40:34.000Z | nixos/modules/tests/stripe-api-double.py | PrivateStorageio/PrivateStorageio | fc3cf0fd0a3ccb31780984b99f4c04d96d1c3820 | [
"Apache-2.0"
] | 25 | 2019-08-08T15:54:29.000Z | 2021-03-08T18:06:10.000Z | nixos/modules/tests/stripe-api-double.py | PrivateStorageio/PrivateStorageio | fc3cf0fd0a3ccb31780984b99f4c04d96d1c3820 | [
"Apache-2.0"
] | 2 | 2019-10-24T20:08:17.000Z | 2020-05-19T21:09:48.000Z | #!/usr/bin/env python3
from sys import stdout, argv
from json import dumps
from twisted.internet.defer import Deferred
from twisted.internet.endpoints import serverFromString
from twisted.internet.task import react
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.python.log import startLogging
class Charges(Resource):
    """Test double for Stripe's /v1/charges endpoint: echoes the posted fields in a canned Charge object."""

    def render_POST(self, request):
voucher = request.args[b"metadata[Voucher]"][0].decode("utf-8")
card = request.args[b"card"][0].decode("utf-8")
amount = int(request.args[b"amount"][0])
currency = request.args[b"currency"][0].decode("utf-8")
response = dumps(charge(card, amount, currency, {u"Voucher": voucher}))
return response.encode("utf-8")
def main(reactor, listenEndpoint):
charges = Charges()
v1 = Resource()
v1.putChild(b"charges", charges)
root = Resource()
root.putChild(b"v1", v1)
return serverFromString(reactor, listenEndpoint).listen(
Site(root),
).addCallback(
lambda ignored: Deferred()
)
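# main() above wires the resource tree so the double answers POSTs to /v1/charges,
# mirroring the path shape of Stripe's real charges API.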
def charge(source, amount, currency, metadata):
    """Build a canned Stripe Charge object around the request's source, amount, currency, and metadata."""
return {
"id": "ch_1Fj8frBHXBAMm9bPkekylvAq",
"object": "charge",
"amount": amount,
"amount_refunded": 0,
"application": None,
"application_fee": None,
"application_fee_amount": None,
"balance_transaction": "txn_1Fj8fr2eZvKYlo2CC5JzIGj5",
"billing_details": {
"address": {
"city": None,
"country": None,
"line1": None,
"line2": None,
"postal_code": None,
"state": None
},
"email": None,
"name": None,
"phone": None
},
"captured": False,
"created": 1574792527,
"currency": currency,
"customer": None,
"description": None,
"dispute": None,
"disputed": False,
"failure_code": None,
"failure_message": None,
"fraud_details": {},
"invoice": None,
"livemode": False,
"metadata": metadata,
"on_behalf_of": None,
"order": None,
"outcome": None,
"paid": True,
"payment_intent": None,
"payment_method": source,
"payment_method_details": {},
"receipt_email": None,
"receipt_number": None,
"receipt_url": "https://pay.stripe.com/receipts/acct_1FhhxTBHXBAMm9bP/ch_1Fj8frBHXBAMm9bPkekylvAq/rcpt_GFdxYuDoGKfYgokh9YA11XhnYC7Gnxp",
"refunded": False,
"refunds": {
"object": "list",
"data": [],
"has_more": False,
"url": "/v1/charges/ch_1Fj8frBHXBAMm9bPkekylvAq/refunds"
},
"review": None,
"shipping": None,
"source_transfer": None,
"statement_descriptor": None,
"statement_descriptor_suffix": None,
"status": "succeeded",
"transfer_data": None,
"transfer_group": None,
"source": source,
}
if __name__ == '__main__':
startLogging(stdout)
react(main, argv[1:])
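# --- Usage sketch (illustrative; the endpoint string and port are assumptions) ---
# Serve the double on TCP port 8888:
#   python3 stripe-api-double.py tcp:8888
# and exercise it with a form-encoded POST, e.g.:
#   curl -s http://localhost:8888/v1/charges \
#     -d card=card_visa -d amount=650 -d currency=usd -d 'metadata[Voucher]=abc123'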
| 30.823529 | 144 | 0.577608 |
8bd11f4af6e22c1087e3d5260f9d58249b97c553 | 3,029 | py | Python | app.py | bkhanale/SlackContestWatcherBot | 744dac74aecfedeeee8fa41b781d454fe105e6a1 | [
"MIT"
] | 12 | 2018-07-28T19:39:25.000Z | 2021-08-29T17:39:35.000Z | app.py | bkhanale/slackcontestwatcherbot | 744dac74aecfedeeee8fa41b781d454fe105e6a1 | [
"MIT"
] | 8 | 2018-07-28T19:41:03.000Z | 2018-08-20T17:14:56.000Z | app.py | bkhanale/slackcontestwatcherbot | 744dac74aecfedeeee8fa41b781d454fe105e6a1 | [
"MIT"
] | 2 | 2018-07-29T09:38:29.000Z | 2018-07-29T11:11:37.000Z | import datetime
from dateutil import tz
import json
import requests
from os import environ
base_url = "https://clist.by/api/v1/contest/"
header = {"Authorization": environ["CLIST_API_TOKEN"]}
def convert_time(utc):
return str(
utc.replace(tzinfo=tz.gettz('UTC'))
.astimezone(tz.gettz('Asia/Calcutta'))
.replace(microsecond=0)
.replace(tzinfo=None))
def convert_dt(string):
return datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
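# Example (sketch): convert_time(convert_dt("2018-07-29T10:00:00")) returns
# "2018-07-29 15:30:00", the same instant rendered in Asia/Calcutta (UTC+5:30).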
def watchcontest(now):
cur_time = now.replace(microsecond=0) + datetime.timedelta(hours=1)
para = {"start": cur_time.isoformat()}
resp = requests.get(base_url, params=para, headers=header)
flag = False
res = ""
    if resp.status_code == 200:
contests = json.loads(resp.content.decode("utf-8"))
if(len(contests["objects"]) >= 1):
flag = True
for con in contests["objects"]:
lcls = convert_time(convert_dt(con["start"]))
lcle = convert_time(convert_dt(con["end"]))
res += con["event"] + " will start in 1 hour!\n"
res += con["href"] + "\n"
res += "Start: " + lcls + "\n"
res += "End: " + lcle + "\n"
res += "Duration: " + str(
datetime.timedelta(seconds=con["duration"])) + "\n\n"
return flag, res
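# Sketch: watchcontest returns (found, text); a caller would post `text` to a
# channel whenever `found` is True, e.g. on a scheduler tick:
#   found, text = watchcontest(datetime.datetime.utcnow())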
def upcoming(site, now):
now = now.replace(microsecond=0)
then = now + datetime.timedelta(days=7)
para = {
"start__gte": now.isoformat(),
"start__lte": then.isoformat(),
"resource__name__contains": site,
"order_by": "start"}
resp = requests.get(base_url, params=para, headers=header)
    if resp.status_code == 200:
return (
"Following are the upcoming contests within a week:\n\n" +
build_string(json.loads(resp.content.decode("utf-8"))))
else:
return "Error " + str(resp.status_code)
def ongoing(site, now):
now = now.replace(microsecond=0)
para = {
"start__lte": now.isoformat(),
"end__gt": now.isoformat(),
"resource__name__contains": site,
"order_by": "start"}
resp = requests.get(base_url, params=para, headers=header)
    if resp.status_code == 200:
return (
"Following are the ongoing contests:\n\n" +
build_string(json.loads(resp.content.decode("utf-8"))))
else:
return "Error " + str(resp.status_code)
def build_string(contests):
res = ""
con_ind = 1
for con in contests["objects"]:
res += str(con_ind) + ". " + con["event"] + "\n"
res += con["href"] + "\n"
res += "Start: " + convert_time(convert_dt(con["start"])) + "\n"
res += "End: " + convert_time(convert_dt(con["end"])) + "\n"
res += "Duration: " + str(datetime.timedelta(seconds=con["duration"]))
res += "\n\n"
con_ind += 1
return res
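# --- Usage sketch (illustrative; requires CLIST_API_TOKEN in the environment) ---
#   print(upcoming("codeforces.com", datetime.datetime.utcnow()))
#   print(ongoing("atcoder.jp", datetime.datetime.utcnow()))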
| 33.655556 | 79 | 0.555629 |
801729b67723302bafcc0f41bcf618c63a1abcea | 1,763 | py | Python | job_scripts/cori-haswell/slack_job_start.py | dwillcox/workflow | ff4a0e98eda5dd8ac002f6c0b518c5c9aa94bd8d | [
"MIT"
] | 3 | 2018-11-27T20:19:47.000Z | 2021-03-11T02:14:02.000Z | job_scripts/cori-haswell/slack_job_start.py | dwillcox/workflow | ff4a0e98eda5dd8ac002f6c0b518c5c9aa94bd8d | [
"MIT"
] | 6 | 2018-04-09T23:24:48.000Z | 2021-09-28T18:21:31.000Z | job_scripts/cori-haswell/slack_job_start.py | dwillcox/workflow | ff4a0e98eda5dd8ac002f6c0b518c5c9aa94bd8d | [
"MIT"
] | 6 | 2018-12-25T01:06:17.000Z | 2022-03-25T22:40:24.000Z | #!/usr/bin/env python
import json
import sys
import subprocess
import shlex
def slack_post(name, channel, message, webhook):
payload = {}
payload["channel"] = channel
payload["username"] = name
payload["text"] = message
s = json.dumps(payload)
cmd = "curl -X POST --data-urlencode 'payload={}' {}".format(s, webhook)
so = run(cmd)
def run(string, stdin=False, outfile=None, store_command=False, env=None, outfile_mode="a", log=None):
    """Run a command and return (stdout, stderr, returncode).

    stderr is merged into stdout by Popen, so the returned stderr is always None.
    """
    # shlex.split will preserve inner quotes
    prog = shlex.split(string)
if stdin:
p0 = subprocess.Popen(prog, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
else:
p0 = subprocess.Popen(prog, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
stdout0, stderr0 = p0.communicate()
if stdin: p0.stdin.close()
rc = p0.returncode
p0.stdout.close()
    if outfile is not None:
        try:
            cf = open(outfile, outfile_mode)
        except IOError:
            if log is not None:
                log.fail(" ERROR: unable to open file for writing")
        else:
            if store_command:
                cf.write(string)
            # stdout0 is bytes in Python 3; decode it before writing to the text-mode file
            cf.write(stdout0.decode(errors="replace"))
            cf.close()
return stdout0, stderr0, rc
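# Example (sketch): capture merged stdout/stderr and append it to a log file.
#   out, _, rc = run("echo hello", outfile="run.log")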
if __name__ == "__main__":
try:
f = open("/global/homes/z/zingale/.slack.webhook")
    except OSError:
sys.exit("ERROR: unable to open webhook file")
else:
        webhook = f.readline().strip()
f.close()
message = sys.argv[1]
channel = sys.argv[2]
if not channel.startswith("#"):
channel = "#{}".format(channel)
slack_post("bender", channel, message, webhook)
| 25.550725 | 102 | 0.581963 |
1bdb6d2dd320d7c51a7c0bcf01377c345a20ccdf | 1,110 | py | Python | apps/account/views.py | hakancelik96/eatingword | 722e0c5ce2c92812b79db106eb9df7bd90a83143 | [
"MIT"
] | 10 | 2020-09-24T12:25:59.000Z | 2020-09-24T12:28:11.000Z | apps/account/views.py | hakancelik96/eatingword | 722e0c5ce2c92812b79db106eb9df7bd90a83143 | [
"MIT"
] | 4 | 2021-06-04T23:52:28.000Z | 2021-09-22T19:34:29.000Z | apps/account/views.py | hakancelik96/eatingword | 722e0c5ce2c92812b79db106eb9df7bd90a83143 | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from django.views import generic
from apps.account.forms import AuthenticationForm, RegisterForm
from apps.views import MessageMixin
class RegisterView(MessageMixin, generic.CreateView):
template_name = "registration/register.html"
success_url = reverse_lazy("wordapp:index")
form_class = RegisterForm
success_message = "You have successfully registered %(username)s"
def form_valid(self, form):
response = super().form_valid(form)
user = authenticate(
username=form.cleaned_data["username"],
password=form.cleaned_data["password1"],
)
if user is not None:
login(self.request, user)
else:
            messages.error(self.request, "Could not log in")
return response
class LoginView(MessageMixin, auth_views.LoginView):
form_class = AuthenticationForm
success_message = "You have successfully logged in %(username)s"
| 33.636364 | 69 | 0.722523 |
65977c52122caa1854d2df3c5485a7e08c0d074f | 96 | py | Python | python3-cgdk/model/WeatherType.py | leloykun/russian-ai-cup | aa06dcd594f8a33aa5d1accc128b931812de7297 | [
"MIT"
] | null | null | null | python3-cgdk/model/WeatherType.py | leloykun/russian-ai-cup | aa06dcd594f8a33aa5d1accc128b931812de7297 | [
"MIT"
] | null | null | null | python3-cgdk/model/WeatherType.py | leloykun/russian-ai-cup | aa06dcd594f8a33aa5d1accc128b931812de7297 | [
"MIT"
] | null | null | null | from enum import IntEnum
class WeatherType(IntEnum):
CLEAR = 0
CLOUD = 1
RAIN = 2
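# IntEnum members compare as ints, e.g. WeatherType.CLOUD == 1 is True.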
| 12 | 27 | 0.635417 |