Dataset schema, one row per source file:
hexsha: string (length 40)
size: int64 (24 to 287k)
ext: string (2 classes)
lang: string (1 class)
max_stars_repo_path: string (length 7 to 126)
max_stars_repo_name: string (length 8 to 97)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: sequence
max_stars_count: float64 (1 to 15.9k, may be null)
max_stars_repo_stars_event_min_datetime: string (length 24, may be null)
max_stars_repo_stars_event_max_datetime: string (length 24, may be null)
max_issues_repo_path: string (length 7 to 126)
max_issues_repo_name: string (length 8 to 97)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: sequence
max_issues_count: float64 (1 to 14.6k, may be null)
max_issues_repo_issues_event_min_datetime: string (length 24, may be null)
max_issues_repo_issues_event_max_datetime: string (length 24, may be null)
max_forks_repo_path: string (length 7 to 126)
max_forks_repo_name: string (length 8 to 97)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: sequence
max_forks_count: float64 (1 to 8.43k, may be null)
max_forks_repo_forks_event_min_datetime: string (length 24, may be null)
max_forks_repo_forks_event_max_datetime: string (length 24, may be null)
content: string (length 24 to 287k)
avg_line_length: float64 (12.3 to 530)
max_line_length: int64 (24 to 10.2k)
alphanum_fraction: float64 (0.41 to 0.88)
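Read together, these columns describe one source file per row: the file text sits in the content column, while the repository path, name, head commit and licenses are repeated for the most-starred, most-issue and most-forked copies of the file, each with its own count and event datetimes. The sketch below shows one way such a schema is typically consumed with the Hugging Face datasets library; it is only an illustration, and the dataset identifier is a placeholder, since this dump does not name the dataset.

from datasets import load_dataset

# Placeholder dataset id -- substitute the real identifier or a local path to the data files.
rows = load_dataset("example-org/python-source-files", split="train", streaming=True)

for row in rows:
    # Counts are float64 and may be None when the repo has no recorded stars/issues/forks.
    stars = row["max_stars_count"] or 0
    if row["lang"] == "Python" and stars >= 1:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], int(row["size"]))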
hexsha: 4f55f74a8fce041ad020d5273d8604fc0d889df6 | size: 626 | ext: py | lang: Python | repo: wedavey/atnlp | path: atnlp/__init__.py | head_hexsha: 002497f27abfcdac9701aa324301d482dbf4df0e | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null

"""Common configuration for the atnlp package
"""
import os
# globals
SRC_DIR = os.path.abspath(os.path.dirname(__file__))
TOP_DIR = os.path.abspath(os.path.join(SRC_DIR, '..'))
DATA_DIR = os.path.abspath(os.path.join(TOP_DIR,'data'))
RAW_DATA_DIR = os.path.abspath(os.path.join(DATA_DIR, 'raw'))
EXT_DATA_DIR = os.path.abspath(os.path.join(DATA_DIR, 'external'))
INT_DATA_DIR = os.path.abspath(os.path.join(DATA_DIR, 'interim'))
PRO_DATA_DIR = os.path.abspath(os.path.join(DATA_DIR, 'processed'))
NLTK_DIR = os.path.abspath(os.path.join(RAW_DATA_DIR, 'nltk'))
# set environment variable
os.environ["NLTK_DATA"] = NLTK_DIR
avg_line_length: 34.777778 | max_line_length: 67 | alphanum_fraction: 0.744409

hexsha: 4f51d6220a544f965294f38e33117ad8456f5427 | size: 3,097 | ext: py | lang: Python | repo: kstrempel/pactor | path: src/pactor.py | head_hexsha: bc12dd6253bec7c08f691697108dcabd2a1c0e00 | licenses: ["MIT"] | max_stars_count: 1 (2021-03-19T21:36:35.000Z) | max_issues_count: null | max_forks_count: null

#! /usr/bin/env python
import argparse
import sys
import logging
import importlib # needed for runtime
from pactor.compiler import load_file, load_script
from pactor.vm import VM
from pactor.ast import Ast
from pactor.repl import repl
from pactor.runtime_exceptions import InnerPactorRuntimeError
from pactor.error_listener import SyntaxException
__author__ = "kstrempel"
__copyright__ = "kstrempel"
__license__ = "mit"
__version__ = 0.2
_logger = logging.getLogger(__name__)
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Pactor Language")
parser.add_argument(
'file',
metavar='FILE',
nargs='?',
type=str,
default=None,
        help='pactor source file to run')
parser.add_argument(
"--version",
action="version",
version="pactor {ver}".format(ver=__version__))
parser.add_argument(
"-v",
"--verbose",
dest="loglevel",
help="set loglevel to INFO",
action="store_const",
const=logging.INFO)
parser.add_argument(
"-vv",
"--very-verbose",
dest="loglevel",
help="set loglevel to DEBUG",
action="store_const",
const=logging.DEBUG)
parser.add_argument(
"-s",
"--stack",
dest="stack",
help="prints the stack when script finised",
action="store_const",
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
if args.file:
try:
ast = load_file(args.file)
vm = VM(ast)
vm.run()
except InnerPactorRuntimeError as e:
print(f"Runtime error in {args.file} at [{e.line}:{e.column}]")
with open(args.file) as f:
line = f.readline()
for _ in range(1, e.line):
line = f.readline()
print("> " + line[:-1])
print("> " + e.error_arrow)
print("> " + e.message)
except SyntaxException as e:
print(f"Syntax Error: {e.message}")
print(f"{e.error_arrow}")
except Exception as e:
print(f"Error: {e}")
        if args.stack:
print(vm.stack)
else:
repl()
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
avg_line_length: 25.385246 | max_line_length: 75 | alphanum_fraction: 0.586051

hexsha: 4f543e57d437c96d1f786e3fc5975e5fec8b9042 | size: 5,668 | ext: py | lang: Python | repo: nvllsvm/imbi-api | path: tests/endpoints/test_fact_types.py | head_hexsha: 1a5bd8894ac14c1026f33297994aa10782b0eea0 | licenses: ["BSD-3-Clause"] | max_stars_count: 1 (2021-06-23T18:34:47.000Z) | max_issues_count: null | max_forks_count: 2 (2022-02-11T22:06:18.000Z to 2022-02-21T19:35:06.000Z)

import json
import uuid
import jsonpatch
from imbi.endpoints import project_types
from tests import base
class AsyncHTTPTestCase(base.TestCaseWithReset):
ADMIN_ACCESS = True
TRUNCATE_TABLES = ['v1.project_types', 'v1.project_fact_types']
def setUp(self) -> None:
super().setUp()
self.project_type = self.create_project_type()
def test_project_fact_type_lifecycle(self):
record = {
'project_type_ids': [self.project_type['id']],
'name': str(uuid.uuid4()),
'fact_type': 'free-form',
'data_type': 'string',
'description': 'Test description',
'ui_options': ['hidden'],
'weight': 100
}
# Create
result = self.fetch(
'/project-fact-types', method='POST',
body=json.dumps(record).encode('utf-8'), headers=self.headers)
self.assertEqual(result.code, 200)
response = json.loads(result.body.decode('utf-8'))
record['id'] = response['id']
url = self.get_url(
'/project-fact-types/{}'.format(response['id']))
self.assert_link_header_equals(result, url)
self.assertIsNotNone(result.headers['Date'])
self.assertIsNone(result.headers.get('Last-Modified', None))
self.assertEqual(
result.headers['Cache-Control'], 'public, max-age={}'.format(
project_types.RecordRequestHandler.TTL))
record.update({
'id': response['id'],
'created_by': self.USERNAME[self.ADMIN_ACCESS],
'last_modified_by': None
})
self.assertDictEqual(response, record)
# PATCH
updated = dict(record)
updated['weight'] = 25
patch = jsonpatch.make_patch(record, updated)
patch_value = patch.to_string().encode('utf-8')
record.update({
'weight': updated['weight'],
'last_modified_by': self.USERNAME[self.ADMIN_ACCESS]
})
result = self.fetch(
url, method='PATCH', body=patch_value, headers=self.headers)
self.assertEqual(result.code, 200)
self.assert_link_header_equals(result, url)
new_value = json.loads(result.body.decode('utf-8'))
self.assertDictEqual(new_value, record)
# Patch no change
result = self.fetch(
url, method='PATCH', body=patch_value, headers=self.headers)
self.assertEqual(result.code, 304)
# GET
result = self.fetch(url, headers=self.headers)
self.assertEqual(result.code, 200)
self.assert_link_header_equals(result, url)
self.assertIsNotNone(result.headers['Date'])
self.assertIsNotNone(result.headers['Last-Modified'])
self.assertEqual(
result.headers['Cache-Control'], 'public, max-age={}'.format(
project_types.RecordRequestHandler.TTL))
new_value = json.loads(result.body.decode('utf-8'))
self.assertDictEqual(new_value, record)
# Collection
result = self.fetch('/project-fact-types', headers=self.headers)
self.assertEqual(result.code, 200)
self.assertListEqual(
json.loads(result.body.decode('utf-8')),
[{k: v for k, v in record.items()
if k not in ['created_by', 'last_modified_by']}])
# DELETE
result = self.fetch(url, method='DELETE', headers=self.headers)
self.assertEqual(result.code, 204)
# GET record should not exist
result = self.fetch(url, headers=self.headers)
self.assertEqual(result.code, 404)
# DELETE should fail as record should not exist
result = self.fetch(url, method='DELETE', headers=self.headers)
self.assertEqual(result.code, 404)
def test_create_with_missing_fields(self):
record = {
'project_type_ids': [self.project_type['id']],
'name': str(uuid.uuid4())
}
result = self.fetch(
'/project-fact-types', method='POST', headers=self.headers,
body=json.dumps(record).encode('utf-8'))
self.assertEqual(result.code, 400)
def test_method_not_implemented(self):
for method in {'DELETE', 'PATCH'}:
result = self.fetch(
'/project-fact-types', method=method,
allow_nonstandard_methods=True, headers=self.headers)
self.assertEqual(result.code, 405)
result = self.fetch(
'/project-fact-types/99999', method='POST',
allow_nonstandard_methods=True, headers=self.headers)
self.assertEqual(result.code, 405)
def test_missing_project_type_id(self):
result = self.fetch(
'/project-fact-types', method='POST',
body=json.dumps({
'name': str(uuid.uuid4()),
'fact_type': 'free-form',
'data_type': 'string',
'description': 'Test description',
'ui_options': ['hidden'],
'weight': 100
}).encode('utf-8'), headers=self.headers)
self.assertEqual(result.code, 200)
def test_empty_ui_options(self):
result = self.fetch(
'/project-fact-types', method='POST',
body=json.dumps({
'project_type_ids': [],
'name': str(uuid.uuid4()),
'fact_type': 'free-form',
'data_type': 'string',
'description': 'Test description',
'ui_options': [],
'weight': 100
}).encode('utf-8'), headers=self.headers)
self.assertEqual(result.code, 200)
avg_line_length: 37.045752 | max_line_length: 74 | alphanum_fraction: 0.582569

hexsha: 4f4fb1e44fceedef4f07a631eff1653499a44aef | size: 3,659 | ext: py | lang: Python | repo: lameinthebox/cid-backend | path: chatbot/models.py | head_hexsha: 1258000ab7801ebe2f6aef2a4006d3b35c9c88d8 | licenses: ["MIT"] | max_stars_count: null | max_issues_count: 14 (2018-10-08T19:49:25.000Z to 2022-03-11T23:36:30.000Z) | max_forks_count: 2 (2018-09-19T20:49:17.000Z to 2018-10-08T08:12:50.000Z)

import os
from django.core.validators import int_list_validator
from django.db import models
from django.contrib.auth import models as auth_models
class Muser(auth_models.User):
objects = auth_models.UserManager()
gender = models.PositiveSmallIntegerField(blank=True, default=0) # 1: male, 2: female
birthdate = models.DateField(blank=True, null=True)
push_token = models.CharField(max_length=200, blank=True, null=True)
cluster = models.PositiveSmallIntegerField(blank=True, null=True)
recommended = models.ManyToManyField('chatbot.Music')
class Meta:
verbose_name = 'Muser'
def __str__(self):
return self.username
class Artist(models.Model):
original_id = models.IntegerField()
name = models.CharField(max_length=255, blank=True, default='')
debut = models.DateField(blank=True, null=True)
agent = models.CharField(max_length=255, blank=True, null=True)
def __str__(self):
return self.name
class SoloArtist(Artist):
gender = models.BooleanField(blank=True, null=True) # True: male, False: female
birthday = models.DateField(blank=True, null=True)
def __str__(self):
return self.name
class GroupArtist(Artist):
members = models.ManyToManyField(Artist, related_name='group_set')
def __str__(self):
return self.name
def album_image_path(album, filename):
return os.path.join('album_image', f"{album.title.replace('/', '-').replace(' ', '_')}.jpg")
class Album(models.Model):
original_id = models.IntegerField()
title = models.CharField(max_length=500, blank=True)
genre = models.CharField(max_length=255, blank=True, null=True)
artists = models.ManyToManyField(Artist, related_name='albums')
release = models.DateField(blank=True, null=True)
image = models.ImageField(null=True, upload_to=album_image_path, max_length=500)
def __str__(self):
return self.title
class Music(models.Model):
original_id = models.IntegerField()
title = models.CharField(max_length=500)
album = models.ForeignKey('chatbot.Album', related_name='music', on_delete=models.CASCADE, blank=True, null=True)
genre = models.CharField(max_length=255, blank=True, null=True)
artists = models.ManyToManyField(Artist, related_name='music')
release = models.DateField(blank=True, null=True)
length = models.PositiveSmallIntegerField(blank=True, default=0)
original_rating = models.PositiveIntegerField(default=0)
def __str__(self):
return self.title
class Evaluation(models.Model):
user = models.ForeignKey('chatbot.Muser', related_name='evaluations', on_delete=models.CASCADE)
music = models.ForeignKey('chatbot.Music', related_name='evaluations', on_delete=models.CASCADE)
rating = models.PositiveSmallIntegerField(blank=False)
def __str__(self):
return f'{self.music.title}-{self.user.username}'
class Message(models.Model):
created = models.DateTimeField(auto_now_add=True)
sender = models.ForeignKey('chatbot.Muser', related_name='sent_messages', on_delete=models.CASCADE, blank=True, null=True)
receiver = models.ForeignKey('chatbot.Muser', related_name='received_messages', on_delete=models.CASCADE, blank=True, null=True)
text = models.TextField(blank=True)
music = models.ForeignKey('chatbot.Music', on_delete=models.CASCADE, blank=True, null=True)
    chips = models.CharField(validators=[int_list_validator()], max_length=255, default='')  # comma-separated list of ints
def __str__(self):
if not self.sender:
return f'mu-bot -> {self.receiver.username}'
else:
return f'{self.sender.username} -> mu-bot'
avg_line_length: 35.524272 | max_line_length: 132 | alphanum_fraction: 0.720689

hexsha: 4f55819c7fde2558b6072ee4e0544797f9dd0ca1 | size: 6,037 | ext: py | lang: Python | repo: mikchaos/whoville | path: whoville/cloudbreak/models/user_profile_response.py | head_hexsha: 6eabaea4b74ac0b632c03db8252590131c6ce63b | licenses: ["Apache-2.0"] | max_stars_count: null | max_issues_count: null | max_forks_count: null

# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UserProfileResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'credential': 'CredentialResponse',
'owner': 'str',
'account': 'str',
'ui_properties': 'dict(str, object)'
}
attribute_map = {
'credential': 'credential',
'owner': 'owner',
'account': 'account',
'ui_properties': 'uiProperties'
}
def __init__(self, credential=None, owner=None, account=None, ui_properties=None):
"""
UserProfileResponse - a model defined in Swagger
"""
self._credential = None
self._owner = None
self._account = None
self._ui_properties = None
if credential is not None:
self.credential = credential
if owner is not None:
self.owner = owner
if account is not None:
self.account = account
if ui_properties is not None:
self.ui_properties = ui_properties
@property
def credential(self):
"""
Gets the credential of this UserProfileResponse.
:return: The credential of this UserProfileResponse.
:rtype: CredentialResponse
"""
return self._credential
@credential.setter
def credential(self, credential):
"""
Sets the credential of this UserProfileResponse.
:param credential: The credential of this UserProfileResponse.
:type: CredentialResponse
"""
self._credential = credential
@property
def owner(self):
"""
Gets the owner of this UserProfileResponse.
:return: The owner of this UserProfileResponse.
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this UserProfileResponse.
:param owner: The owner of this UserProfileResponse.
:type: str
"""
self._owner = owner
@property
def account(self):
"""
Gets the account of this UserProfileResponse.
:return: The account of this UserProfileResponse.
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""
Sets the account of this UserProfileResponse.
:param account: The account of this UserProfileResponse.
:type: str
"""
self._account = account
@property
def ui_properties(self):
"""
Gets the ui_properties of this UserProfileResponse.
:return: The ui_properties of this UserProfileResponse.
:rtype: dict(str, object)
"""
return self._ui_properties
@ui_properties.setter
def ui_properties(self, ui_properties):
"""
Sets the ui_properties of this UserProfileResponse.
:param ui_properties: The ui_properties of this UserProfileResponse.
:type: dict(str, object)
"""
self._ui_properties = ui_properties
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, UserProfileResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
avg_line_length: 29.886139 | max_line_length: 984 | alphanum_fraction: 0.604274

hexsha: 4f55dbd324c21f9786cae0bab2e9d64a5fe74bdb | size: 6,300 | ext: py | lang: Python | repo: shentanyue/Paddle | path: python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py | head_hexsha: e0c61d88c4351876c3442447655bae52d26a9b2f | licenses: ["Apache-2.0"] | max_stars_count: 8 (2016-08-15T07:02:27.000Z to 2016-08-24T09:34:00.000Z) | max_issues_count: null | max_forks_count: null

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.utils import flatten
from paddle.fluid.dygraph import declarative, ProgramTranslator
from test_fetch_feed import Linear
import unittest
SEED = 2020
def nested_input(x, y):
sum_res = x + y[0]
z_elem = y[3]['z']
sub_res = z_elem[0] - z_elem[1]
mul_res = y[-1]['d']['da'] * y[-1]['d']['dc']
mean_func = fluid.layers.mean
out = mean_func(sub_res) + mean_func(sum_res) + mean_func(mul_res)
return out
def nested_output(x, y):
sum_res = x + y
sub_res = x - y
mul_res = x * y
out = {}
out['z'] = sum_res
out['a'] = [sub_res, 64, [mul_res, "cmd"]]
return out
def fake_data(shape):
x_data = np.random.random(shape).astype('float32')
return fluid.dygraph.to_variable(x_data)
class TestWithNestedInput(unittest.TestCase):
def setUp(self):
self.x = None
self.y = None
def fake_input(self):
self.x = fake_data([10, 16])
self.y = [
fake_data([10, 16]), "preprocess_cmd", 64, {
'z': [fake_data([10, 12]), fake_data([10, 12])],
'c': fake_data([10, 10]),
'd': {
'da': 12,
'dc': fake_data([10, 10])
}
}
]
def _run(self, to_static):
with fluid.dygraph.guard():
if self.x is None or self.y is None:
self.fake_input()
if to_static:
out = declarative(nested_input)(self.x, self.y)
else:
out = nested_input(self.x, self.y)
return out.numpy()
def test_nest(self):
dygraph_res = self._run(to_static=False)
static_res = self._run(to_static=True)
self.assertTrue(np.allclose(dygraph_res, static_res))
class TestWithNestedOutput(unittest.TestCase):
def setUp(self):
self.x = None
self.y = None
def _run(self, to_static):
with fluid.dygraph.guard():
if self.x is None or self.y is None:
self.x = fake_data([10, 16])
self.y = fake_data([10, 16])
if to_static:
out = declarative(nested_output)(self.x, self.y)
else:
out = nested_output(self.x, self.y)
return out
def test_nest(self):
dygraph_res = self._run(to_static=False)
dygraph_res = flatten(dygraph_res)
static_res = self._run(to_static=True)
static_res = flatten(static_res)
self.assertTrue(len(dygraph_res) == len(static_res))
for dy_var, st_var in zip(dygraph_res, static_res):
if isinstance(dy_var,
(fluid.core.VarBase, fluid.core.eager.Tensor)):
self.assertTrue(np.allclose(dy_var.numpy(), st_var.numpy()))
else:
self.assertTrue(dy_var, st_var)
class TestWithTrainAndEval(unittest.TestCase):
def test_switch_eval_and_train(self):
program_translator = ProgramTranslator()
with fluid.dygraph.guard():
linear_net = Linear()
x_data = np.random.random((4, 10)).astype('float32')
x = fluid.dygraph.to_variable(x_data)
linear_net(x)
_, train_partial_layer = linear_net.forward.program_cache.last()[-1]
# check default mode is for training
self.assertEqual(train_partial_layer.program,
train_partial_layer._train_program)
# switch to run test program after `eval()`
linear_net.eval()
linear_net(x)
_, eval_partial_layer = linear_net.forward.program_cache.last()[-1]
self.assertEqual(eval_partial_layer.program,
eval_partial_layer._infer_program)
# switch back into training
linear_net.train()
linear_net(x)
self.assertEqual(train_partial_layer.program,
train_partial_layer._train_program)
class TestWithNoGrad(unittest.TestCase):
def test_with_no_grad(self):
with fluid.dygraph.guard():
linear_net = Linear()
x_data = np.random.random((5, 10)).astype('float32')
x = fluid.dygraph.to_variable(x_data)
with paddle.no_grad():
linear_net.train()
linear_net(x)
_, partial_layer = linear_net.forward.program_cache.last()[-1]
self.assertEqual(partial_layer.program,
partial_layer._train_program)
class GPT2LMHeadModel(fluid.dygraph.Layer):
def __init__(self):
super(GPT2LMHeadModel, self).__init__()
self.embedding0 = paddle.nn.Embedding(20, 16)
self.embedding1 = paddle.nn.Embedding(20, 32)
self.lm_head_weight = paddle.to_tensor(
np.random.rand(2, 3).astype('float32'))
@declarative
def forward(self, x):
x = fluid.layers.reshape(x, shape=[-1, 6])
x1, x2, x3 = fluid.layers.split(input=x, dim=1, num_or_sections=3)
return x1
class TestPruneUnusedParamInProgram(unittest.TestCase):
def test_prune(self):
input_ids = np.array([[15, 11, 6, 3, 18, 13]]).astype("float32")
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
model = GPT2LMHeadModel()
model.eval()
input_ids = paddle.to_tensor(input_ids)
out = model(input_ids)
self.assertTrue(np.array_equal(out.numpy(), [[15, 11]]))
if __name__ == '__main__':
unittest.main()
avg_line_length: 31.188119 | max_line_length: 80 | alphanum_fraction: 0.597619

hexsha: 4f51596787d06c4179d93b7febeecfe07c78f71d | size: 2,856 | ext: py | lang: Python | repo: stevenkbennett/stk | path: tests/molecular/molecules/molecule/get_direction/test_get_direction.py | head_hexsha: a8311fa6110adc0ea593a24d9a0c064597b1b174 | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null

import numpy as np
from ...utilities import get_num_atom_ids, normalize_ids
from ..utilities import get_direction
def test_get_direction(case_data, get_atom_ids):
"""
Test :meth:`.Molecule.get_direction`.
Parameters
----------
case_data : :class:`.CaseData`
A test case. Holds the molecule to test and its correct
atomic positions.
get_atom_ids : :class:`callable`
Takes a single parameter, `molecule`, and returns a valid
`atom_ids` parameter for :meth:`.Molecule.get_direction`. This
allows the testing of different values of this parameter.
Returns
-------
None : :class:`NoneType`
Notes
-----
This test compares the result of :meth:`.Molecule.get_direction`
to the result of :func:`.get_direction`, which is a utility
function defined for the purposes of this test. Because
:func:`.get_direction` is tested independently, in
:mod:`.test_get_direction_helper`, if its tests pass, then
it can be assumed, that :func:`.get_direction` gives correct
results.
Now, assuming that :func:`.get_direction` passed all of its tests,
this test compares the results of :meth:`.Molecule.get_direction`
to the results of :func:`.get_direction`. If the results do not
match, the fault can be placed on :meth:`.Molecule.get_direction`,
because :func:`.get_direction` has already been verified to be
correct by its own tests.
"""
_test_get_direction(
molecule=case_data.molecule,
direction=get_direction(
position_matrix=case_data.position_matrix,
atom_ids=tuple(normalize_ids(
molecule=case_data.molecule,
ids=get_atom_ids(case_data.molecule),
)),
),
get_atom_ids=get_atom_ids,
)
def _test_get_direction(molecule, direction, get_atom_ids):
"""
Test :meth:`.Molecule.get_direction`.
Parameters
----------
molecule : :class:`.Molecule`
The molecule to test.
direction : :class:`.Molecule`
The correct direction of `molecule`.
get_atom_ids : :class:`callable`
Takes a single parameter, `molecule`, and returns a valid
`atom_ids` parameter for :meth:`.Molecule.get_direction`. This
allows the testing of different values of this parameter.
Returns
-------
None : :class:`NoneType`
"""
if get_num_atom_ids(molecule, get_atom_ids) == 1:
# Any non-0 vector is valid in this case.
assert not np.allclose(
a=[0, 0, 0],
b=molecule.get_direction(get_atom_ids(molecule)),
atol=1e-13,
)
return
result = molecule.get_direction(get_atom_ids(molecule))
# The direction may be parallel or anti-parallel.
    assert abs(abs(result @ direction) - 1) < 1e-13
avg_line_length: 30.709677 | max_line_length: 70 | alphanum_fraction: 0.65021

hexsha: 4f53700c746c416431c15700347a05ff64f471e9 | size: 327 | ext: py | lang: Python | repo: danikireev/openvino | path: docs/snippets/ov_extensions.py | head_hexsha: 1bbd92a8f816c3befde78dc1d5aa41645fd0db80 | licenses: ["Apache-2.0"] | max_stars_count: 1 (2022-03-25T10:35:59.000Z) | max_issues_count: null | max_forks_count: null

# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import openvino.runtime as ov
#! [add_extension]
# Not implemented
#! [add_extension]
#! [add_extension_lib]
core = ov.Core()
# Load extensions library to ov::Core
core.add_extension("openvino_template_extension.so")
#! [add_extension_lib]
avg_line_length: 20.4375 | max_line_length: 52 | alphanum_fraction: 0.755352

hexsha: 4f569d308bd01059f0af8ba74c24268181399d62 | size: 14,465 | ext: py | lang: Python | repo: majamassarini/packit | path: packit/copr_helper.py | head_hexsha: b319c4ac2321f012c17ab0b2d0ebfb6c8226eff0 | licenses: ["MIT"] | max_stars_count: 81 (2019-02-07T15:38:34.000Z to 2020-07-16T06:33:02.000Z) | max_issues_count: 825 (2019-02-07T15:08:16.000Z to 2020-08-02T08:11:23.000Z) | max_forks_count: 51 (2019-02-08T09:56:29.000Z to 2020-06-17T15:34:00.000Z)

# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import logging
import time
from datetime import datetime, timedelta
from typing import Callable, List, Optional, Dict, Tuple, Any
from cachetools.func import ttl_cache
from copr.v3 import Client as CoprClient
from copr.v3.exceptions import (
CoprNoResultException,
CoprException,
CoprRequestException,
)
from munch import Munch
from packit.constants import COPR2GITHUB_STATE
from packit.exceptions import PackitCoprProjectException, PackitCoprSettingsException
from packit.local_project import LocalProject
logger = logging.getLogger(__name__)
class CoprHelper:
def __init__(self, upstream_local_project: LocalProject) -> None:
self.upstream_local_project = upstream_local_project
self._copr_client = None
def __repr__(self):
return (
"CoprHelper("
f"upstream_local_project='{self.upstream_local_project}', "
f"copr_client='{self.copr_client}')"
)
def get_copr_client(self) -> CoprClient:
"""Not static because of the flex-mocking."""
return CoprClient.create_from_config_file()
@property
def copr_client(self) -> CoprClient:
if self._copr_client is None:
self._copr_client = self.get_copr_client()
return self._copr_client
@property
def configured_owner(self) -> Optional[str]:
return self.copr_client.config.get("username")
def copr_web_build_url(self, build: Munch) -> str:
"""Construct web frontend url because build.repo_url is not much user-friendly."""
copr_url = self.copr_client.config.get("copr_url")
return f"{copr_url}/coprs/build/{build.id}/"
def get_copr_settings_url(
self, owner: str, project: str, section: Optional[str] = None
):
copr_url = self.copr_client.config.get("copr_url")
section = section or "edit"
# COPR groups starts with '@' but url have '/g/owner'
if owner.startswith("@"):
owner = f"g/{owner[1:]}"
return f"{copr_url}/coprs/{owner}/{project}/{section}/"
def create_copr_project_if_not_exists(
self,
project: str,
chroots: List[str],
owner: Optional[str] = None,
description: Optional[str] = None,
instructions: Optional[str] = None,
list_on_homepage: Optional[bool] = False,
preserve_project: Optional[bool] = False,
additional_packages: Optional[List[str]] = None,
additional_repos: Optional[List[str]] = None,
request_admin_if_needed: bool = False,
) -> None:
"""
        Create a project in copr if it does not exist.
Raises PackitCoprException on any problems.
"""
logger.info(
f"Trying to get {owner}/{project} Copr project. "
"The project will be created if it does not exist."
)
try:
copr_proj = self.copr_client.project_proxy.get(
ownername=owner, projectname=project
)
except CoprNoResultException as ex:
if owner != self.configured_owner:
raise PackitCoprProjectException(
f"Copr project {owner}/{project} not found."
) from ex
logger.info(f"Copr project '{owner}/{project}' not found. Creating new.")
self.create_copr_project(
chroots=chroots,
description=description,
instructions=instructions,
owner=owner,
project=project,
list_on_homepage=list_on_homepage,
preserve_project=preserve_project,
additional_packages=additional_packages,
additional_repos=additional_repos,
)
return
except CoprRequestException as ex:
logger.debug(repr(ex))
logger.error(
f"We were not able to get copr project {owner}/{project}: {ex}"
)
raise
delete_after_days: Optional[int] = (
None if preserve_project is None else -1 if preserve_project else 60
)
fields_to_change = self.get_fields_to_change(
copr_proj=copr_proj,
additional_repos=additional_repos,
chroots=chroots,
description=description,
instructions=instructions,
list_on_homepage=list_on_homepage,
delete_after_days=delete_after_days,
)
if fields_to_change:
logger.info(f"Updating copr project '{owner}/{project}'")
for field, (old, new) in fields_to_change.items():
logger.debug(f"{field}: {old} -> {new}")
try:
kwargs: Dict[str, Any] = {
arg_name: new for arg_name, (old, new) in fields_to_change.items()
}
logger.debug(f"Copr edit arguments: {kwargs}")
self.copr_client.project_proxy.edit(
ownername=owner, projectname=project, **kwargs
)
except CoprRequestException as ex:
if "Only owners and admins may update their projects." in str(ex):
if request_admin_if_needed:
logger.info(
f"Admin permissions are required "
f"in order to be able to edit project settings. "
f"Requesting the admin rights for the copr '{owner}/{project}' project."
)
self.copr_client.project_proxy.request_permissions(
ownername=owner,
projectname=project,
permissions={"admin": True},
)
else:
logger.warning(
f"Admin permissions are required for copr '{owner}/{project}' project"
f"in order to be able to edit project settings. "
f"You can make a request by specifying --request-admin-if-needed "
f"when using Packit CLI."
)
raise PackitCoprSettingsException(
f"Copr project update failed for '{owner}/{project}' project.",
fields_to_change=fields_to_change,
) from ex
def get_fields_to_change(
self,
copr_proj,
additional_repos: Optional[List[str]] = None,
chroots: Optional[List[str]] = None,
description: Optional[str] = None,
instructions: Optional[str] = None,
list_on_homepage: Optional[bool] = True,
delete_after_days: Optional[int] = None,
) -> Dict[str, Tuple[Any, Any]]:
fields_to_change: Dict[str, Tuple[Any, Any]] = {}
if chroots is not None:
old_chroots = set(copr_proj.chroot_repos.keys())
new_chroots = None
if not set(chroots).issubset(old_chroots):
new_chroots = list(set(chroots) | old_chroots)
if new_chroots:
new_chroots.sort()
fields_to_change["chroots"] = (
list(old_chroots),
new_chroots,
)
if description and copr_proj.description != description:
fields_to_change["description"] = (copr_proj.description, description)
if instructions:
if "instructions" not in copr_proj:
logger.debug(
"The `instructions` key was not received from Copr. "
"We can't check that value to see if the update is needed."
)
elif copr_proj.instructions != instructions:
fields_to_change["instructions"] = (
copr_proj.instructions,
instructions,
)
if list_on_homepage is not None:
if "unlisted_on_hp" not in copr_proj:
logger.debug(
"The `unlisted_on_hp` key was not received from Copr. "
"We can't check that value to see if the update is needed."
)
elif copr_proj.unlisted_on_hp != (not list_on_homepage):
fields_to_change["unlisted_on_hp"] = (
copr_proj.unlisted_on_hp,
(not list_on_homepage),
)
if delete_after_days is not None:
if "delete_after_days" not in copr_proj:
logger.debug(
"The `delete_after_days` key was not received from Copr. "
"We can't check that value to see if the update is needed."
)
elif copr_proj.delete_after_days != delete_after_days:
fields_to_change["delete_after_days"] = (
copr_proj.delete_after_days,
delete_after_days,
)
if additional_repos is not None and set(copr_proj.additional_repos) != set(
additional_repos
):
fields_to_change["additional_repos"] = (
copr_proj.additional_repos,
additional_repos,
)
return fields_to_change
def create_copr_project(
self,
chroots: List[str],
description: str,
instructions: str,
owner: str,
project: str,
list_on_homepage: bool = False,
preserve_project: bool = False,
additional_packages: Optional[List[str]] = None,
additional_repos: Optional[List[str]] = None,
) -> None:
try:
self.copr_client.project_proxy.add(
ownername=owner,
projectname=project,
chroots=chroots,
description=(
description
or "Continuous builds initiated by packit service.\n"
"For more info check out https://packit.dev/"
),
contact="https://github.com/packit/packit/issues",
# don't show project on Copr homepage by default
unlisted_on_hp=not list_on_homepage,
# delete project after the specified period of time
delete_after_days=60 if not preserve_project else None,
additional_repos=additional_repos,
instructions=instructions
or "You can check out the upstream project "
f"{self.upstream_local_project.git_url} to find out how to consume these builds. "
f"This copr project is created and handled by the packit project "
"(https://packit.dev/).",
)
# TODO: additional_packages
except CoprException as ex:
# TODO: Remove once Copr doesn't throw for existing projects or new
# API endpoint is established.
if "You already have a project named" in ex.result.error:
# race condition between workers
logger.debug(f"Copr project ({owner}/{project}) is already present.")
return
error = (
f"Cannot create a new Copr project "
f"(owner={owner} project={project} chroots={chroots}): {ex}"
)
logger.error(error)
logger.error(ex.result)
raise PackitCoprProjectException(error, ex)
def watch_copr_build(
self, build_id: int, timeout: int, report_func: Callable = None
) -> str:
"""returns copr build state"""
watch_end = datetime.now() + timedelta(seconds=timeout)
logger.debug(f"Watching copr build {build_id}.")
state_reported = ""
while True:
build = self.copr_client.build_proxy.get(build_id)
if build.state == state_reported:
continue
state_reported = build.state
logger.debug(f"COPR build {build_id}, state = {state_reported}")
try:
gh_state, description = COPR2GITHUB_STATE[state_reported]
except KeyError as exc:
logger.error(f"COPR gave us an invalid state: {exc}")
gh_state, description = "error", "Something went wrong."
if report_func:
report_func(
gh_state,
description,
build_id=build.id,
url=self.copr_web_build_url(build),
)
if gh_state != "pending":
logger.debug(f"State is now {gh_state}, ending the watch.")
return state_reported
if datetime.now() > watch_end:
logger.error(f"The build did not finish in time ({timeout}s).")
report_func("error", "Build watch timeout")
return state_reported
time.sleep(10)
def get_copr_builds(self, number_of_builds: int = 5) -> List:
"""
Get the copr builds of this project done by packit.
:return: list of builds
"""
client = CoprClient.create_from_config_file()
projects = [
project.name
for project in reversed(client.project_proxy.get_list(ownername="packit"))
if project.name.startswith(
f"{self.upstream_local_project.namespace}-{self.upstream_local_project.repo_name}-"
)
][:5]
builds: List = []
for project in projects:
builds += client.build_proxy.get_list(
ownername="packit", projectname=project
)
logger.debug("Copr builds fetched.")
return [(build.id, build.projectname, build.state) for build in builds][
:number_of_builds
]
@staticmethod
@ttl_cache(maxsize=1, ttl=timedelta(hours=12).seconds)
def get_available_chroots() -> list:
"""
Gets available copr chroots. Uses cache to avoid repetitive url fetching.
Returns:
List of valid chroots.
"""
client = CoprClient.create_from_config_file()
return list(
filter(
lambda chroot: not chroot.startswith("_"),
client.mock_chroot_proxy.get_list().keys(),
)
)
avg_line_length: 38.573333 | max_line_length: 100 | alphanum_fraction: 0.563222

hexsha: 4f56feb5d68bd08816674227866c888feb955d42 | size: 14,044 | ext: py | lang: Python | repo: geocam/geocamDjangoSiteSkeleton | path: skel/siteSettings.py | head_hexsha: 8efcebfcb3d0a6a03b3ecac7e5305d858ac0fc17 | licenses: ["Apache-2.0"] | max_stars_count: null | max_issues_count: null | max_forks_count: null

#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
# siteSettings.py -- site default settings
#
# This contains the default settings for the site-level django app. This will
# override any application-default settings and define the default set of
# installed applications. This should be a full settings.py file which needs
# minimal overrides by the settings.py file for the application to actually
# function.
#
# As a bare minimum, please edit INSTALLED_APPS!
#
# This file *should* be checked into git.
import sys
import os
import importlib
from django.conf import global_settings
from django.core.urlresolvers import reverse
# apps should be listed from "most specific" to "most general". that
# way, templates in more specific apps override ones from more general
# apps.
INSTALLED_APPS = ('$$$$APP_NAME$$$$',
# TODO uncomment the submodules that you are including
# 'xgds_notes2',
# 'xgds_planner2',
'xgds_map_server',
'xgds_data',
# 'xgds_image',
# 'xgds_video',
# 'xgds_plot',
# 'geocamTrack',
# 'geocamPycroraptor2',
'geocamUtil',
'pipeline',
# 'taggit',
'djangobower',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
)
for app in INSTALLED_APPS:
try:
appSettings = importlib.import_module(app + ".defaultSettings")
for key, val in vars(appSettings).iteritems():
if not key.startswith('_'):
globals()[key] = val
except:
pass
USING_DJANGO_DEV_SERVER = ('runserver' in sys.argv)
USE_STATIC_SERVE = USING_DJANGO_DEV_SERVER
SCRIPT_NAME = os.environ['DJANGO_SCRIPT_NAME'] # set in sourceme.sh
if USING_DJANGO_DEV_SERVER:
# django dev server deployment won't work with other SCRIPT_NAME settings
SCRIPT_NAME = '/'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# APP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
if not PROJ_ROOT.endswith('/'):
PROJ_ROOT += '/'
# Python path is agnostic to what the site-level dir is. It also prefers the
# checked-out version of an app over the standard python install locations.
sys.path.append(PROJ_ROOT)
ADMINS = (
# ('$$$$AUTHOR$$$$', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.mysql', # django.db.backends.mysql',
'NAME': '$$$$SITE_NAME$$$$',
'USER': 'vagrant',
'PASSWORD': 'vagrant',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds static.
# Example: "/home/static/static.lawrence.com/"
STATIC_ROOT = os.path.join(PROJ_ROOT, "build", "static")
# URL that handles the static served from STATIC_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://static.lawrence.com", "http://example.com/static/"
STATIC_URL = SCRIPT_NAME + 'static/'
EXTERNAL_URL = STATIC_URL
# Absolute path to the directory that holds data. This is different than static
# in that it's uploaded/processed data that's not needed for the operation of
# the site, but may need to be network-accessible, or be linked to from the
# database. Examples: images, generate kml files, etc.
# Example: "/data"
DATA_ROOT = os.path.join(PROJ_ROOT, 'data', '')
# URL that handles the data served from DATA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://data.lawrence.com", "http://example.com/data/"
DATA_URL = SCRIPT_NAME + 'data/'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = DATA_ROOT
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = DATA_URL
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS
+ ('django.core.context_processors.request',
'django.core.context_processors.static',
'geocamUtil.context_processors.settings',
'geocamUtil.context_processors.AuthUrlsContextProcessor.AuthUrlsContextProcessor',
'geocamUtil.context_processors.SettingsContextProcessor.SettingsContextProcessor',
))
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$$$$SECRET_KEY$$$$'
# Session Serializer: we use Pickle for backward compatibility and to allow more flexible session storage, but
# be sure to keep the SECRET_KEY secret for security (see:
# https://docs.djangoproject.com/en/1.7/topics/http/sessions/#session-serialization)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = ('django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'geocamUtil.middleware.LogErrorsMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'reversion.middleware.RevisionMiddleware',
'geocamUtil.middleware.SecurityMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJ_ROOT, 'apps/$$$$APP_NAME$$$$/templates'),
os.path.join(PROJ_ROOT, 'apps/$$$$APP_NAME$$$$/templates/$$$$APP_NAME$$$$'),
os.path.join(PROJ_ROOT, 'apps/$$$$APP_NAME$$$$/templates/registration'),
# Templates for utility scripts
os.path.join(PROJ_ROOT, 'bin/templates'),
)
LOGIN_URL = SCRIPT_NAME + 'accounts/login/'
LOGIN_REDIRECT_URL = '/'
GEOCAM_UTIL_INSTALLER_USE_SYMLINKS = True
GEOCAM_UTIL_SECURITY_ENABLED = not USING_DJANGO_DEV_SERVER
GEOCAM_UTIL_SECURITY_SSL_REQUIRED_BY_DEFAULT = False
GEOCAM_UTIL_SECURITY_REQUIRE_ENCRYPTED_PASSWORDS = False
GEOCAM_UTIL_SECURITY_LOGIN_REQUIRED_BY_DEFAULT = 'write'
# This is an optional setting but if you don't have it enabled then the map server and the xgds_data won't work
XGDS_DATA_LOG_ENABLED = True
# email settings
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/xgds_messages'
EMAIL_SUBJECT_PREFIX = '[xGDS] '
SERVER_EMAIL = 'noreply@xgds.org'
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.FileSystemFinder',
'pipeline.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
'pipeline.finders.CachedFileFinder',
'djangobower.finders.BowerFinder',
)
BOWER_COMPONENTS_ROOT = PROJ_ROOT
PIPELINE_JS = {'custom_map': {'source_filenames': ('xgds_map_server/js/showMapCoords.js',
'xgds_map_server/js/initial_layers.js',
),
'output_filename': 'js/custom_map.js',
},
}
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_YUGLIFY_JS_ARGUMENTS = 'mangle:false --terminal'
PIPELINE_CSS = {}
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_DISABLE_WRAPPER = True
COMPRESS_ENABLED = True
COMPRESS_CSSTIDY_BINARY = '/usr/bin/csstidy'
PIPELINE_ENABLED = True
PIPELINE_COMPILERS = ()
DEBUG_TOOLBAR = False
if DEBUG_TOOLBAR:
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1', '::1') # TODO add your virtual machine's IP here
DEBUG_TOOLBAR_PANELS = ['debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS': False,
}
VAR_ROOT = PROJ_ROOT + 'var/'
XGDS_DATA_MASKED_FIELDS = {
'auth': {'User': ['password', 'is_staff', 'is_active', 'is_superuser',
'last_login',
'date_joined', 'groups', 'user_permissions',
],
},
}
XGDS_DATA_EXPAND_RELATED = {'xgds_data': {'RequestLog': [('requestargument_set', 'all', 'Arguments'),
('responselog_set', 'all', 'Response'),
],
'ResponseLog': [('responseargument_set', 'all', 'Arguments'),
('responselist_set', 'all', 'Results'),
],
'Collection': [ (None,'contents','Contents'),
] ,
},
}
BOWER_INSTALLED_APPS = ()
BOWER_INSTALLED_APPS += GEOCAM_UTIL_BOWER_INSTALLED_APPS
BOWER_INSTALLED_APPS += XGDS_MAP_SERVER_BOWER_INSTALLED_APPS
BOWER_INSTALLED_APPS += XGDS_DATA_BOWER_INSTALLED_APPS
# BOWER_INSTALLED_APPS += XGDS_IMAGE_BOWER_INSTALLED_APPS
# BOWER_INSTALLED_APPS += XGDS_VIDEO_BOWER_INSTALLED_APPS
# BOWER_INSTALLED_APPS += XGDS_PLOT_BOWER_INSTALLED_APPS
# BOWER_INSTALLED_APPS += XGDS_NOTES_BOWER_INSTALLED_APPS
# BOWER_INSTALLED_APPS += GEOCAM_TRACK_BOWER_INSTALLED_APPS
# BOWER_INSTALLED_APPS += XGDS_PLANNER2_BOWER_INSTALLED_APPS
PYRAPTORD_SERVICE = False
# TODO replace this with your siteframes
XGDS_SITEFRAMES = {'10S': { # ROVERSCAPE site frame
'east0': 582724.0,
'north0': 4141835.0,
'zone': '10S',
'zoneNumber': 10,
'axes': 'ENU',
'north': 'grid'
},
}
XGDS_CURRENT_SITEFRAME = XGDS_SITEFRAMES['10S']
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
XGDS_MAP_SERVER_DEFAULT_ZOOM = 15
# If you are including imports, connect them to the import framework by uncommenting below
XGDS_DATA_IMPORTS = {
# XGDS_NOTES_MONIKER: '/notes/import',
# "GPS Track": '/geocamTrack/importTrack',
#"Images": '/xgds_image/import',
}
# TODO add useful comment
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 604800,
}
}
avg_line_length: 39.784703 | max_line_length: 116 | alphanum_fraction: 0.646112

hexsha: 4f549b8ac545180bb7898bea23ca915ddf29e793 | size: 1,558 | ext: py | lang: Python | repo: randomtask1155/pylinkchecker | path: setup.py | head_hexsha: 42645112edcf5d9af77c670415c5f69f04b6b8aa | licenses: ["BSD-3-Clause"] | max_stars_count: 27 (2015-04-27T15:52:51.000Z to 2020-04-20T07:19:21.000Z) | max_issues_count: 6 (2015-06-09T20:11:50.000Z to 2017-06-23T07:40:05.000Z) | max_forks_count: 10 (2015-04-17T19:47:40.000Z to 2020-03-01T20:01:03.000Z)

#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
version = __import__('pylinkchecker').__version__
if sys.version_info[0] >= 3:
requires = ['beautifulsoup4>=4.2.0']
else:
requires = []
setup(
name='pylinkchecker',
version=version,
description='Simple crawler that detects link errors such as 404 and 500.',
long_description=
'''
pylinkchecker is a simple crawler that traverses a web sites and reports errors
(e.g., 500 and 404 errors) encountered. The crawler can try to download
resources like images.
''',
author='Evolio.ca, Auto123.com, Xprima.com',
author_email='mtl-infrastructure@auto123.com',
license='BSD License',
url='https://github.com/auto123/pylinkchecker',
packages=['pylinkchecker', 'pylinkchecker.bs4', 'pylinkchecker.bs4.builder'],
scripts = ['pylinkchecker/bin/pylinkcheck.py'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking',
'Topic :: Utilities',
],
install_requires=requires,
)

avg_line_length: 32.458333 | max_line_length: 81 | alphanum_fraction: 0.655327

hexsha: 4f55b8752db8a5de1a9e8eac1b9b4d2b1321e585 | size: 12,122 | ext: py | lang: Python | repo: EmperorYP7/iree | path: integrations/tensorflow/compiler/test/saved_model_adopt_exports.py | head_hexsha: fffa1f3e33bc1031c790583166ce82c91dd8b3e7 | licenses: ["Apache-2.0"] | max_stars_count: 1 (2020-08-13T09:25:59.000Z) | max_issues_count: null | max_forks_count: 1 (2021-01-29T09:30:09.000Z)

# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests supported features of saved models."""
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# pylint: disable=line-too-long
from pyiree.tf.support import tf_test_driver
import tensorflow.compat.v2 as tf
SAVED_MODEL_IMPORT_PASSES = [
"tf-executor-graph-pruning",
"tf-standard-pipeline",
"iree-tf-import-pipeline",
"canonicalize",
]
# Tests that a simple example with flat args and a single result and no
# captures imports properly.
# CHECK-LABEL: RUN_TEST: T0001_FlatArgsResultsNoBoundGlobals
# CHECK: module
# CHECK-NOT: tf_saved_model.semantics
# CHECK: @simple_mul_no_capture
# CHECK: iree.module.export
# CHECK: FINISH_TEST
class T0001_FlatArgsResultsNoBoundGlobals(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul_no_capture(self, a, b):
return a * b
tf_test_driver.add_test(
test_name="T0001_FlatArgsResultsNoBoundGlobals",
tf_module_builder=T0001_FlatArgsResultsNoBoundGlobals,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# T0002: Tests that bound global vars import properly.
# CHECK-LABEL: RUN_TEST: T0002a_SimpleVarRead
# CHECK: flow.variable @v mutable dense<0.000000e+00> : tensor<f32>
# CHECK: func @f() -> tensor<f32>
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I1!R3!_0"}
# CHECK: flow.variable.load @v : tensor<f32>
# CHECK: FINISH_TEST
class T0002a_SimpleVarRead(tf.Module):
def __init__(self):
self.v = tf.Variable(0.)
@tf.function(input_signature=[])
def f(self):
return self.v
# CHECK-LABEL: RUN_TEST: T0002b_SimpleVarWrite
# CHECK: flow.variable @v mutable dense<0.000000e+00> : tensor<f32>
# CHECK: func @f(%arg0: tensor<f32> {tf._user_specified_name = "a"})
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R1!"}
# CHECK: flow.variable.store %arg0, @v : tensor<f32>
# CHECK: FINISH_TEST
class T0002b_SimpleVarWrite(tf.Module):
def __init__(self):
self.v = tf.Variable(0.)
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def f(self, a):
self.v.assign(a)
# CHECK-LABEL: RUN_TEST: T0002c_SimpleConst
# CHECK: flow.variable [[CONST:@.+]] dense<0.000000e+00> : tensor<f32>
# CHECK: func @f() -> tensor<f32>
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I1!R3!_0"}
# NOTE: the constant variable gets inlined:
# CHECK: = constant dense<0.000000e+00> : tensor<f32>
# CHECK: FINISH_TEST
class T0002c_SimpleConst(tf.Module):
def __init__(self):
self.c = tf.constant(0.)
@tf.function(input_signature=[])
def f(self):
return self.c
# CHECK-LABEL: RUN_TEST: T0002d_VarCompatibleShapeChange
# CHECK: flow.variable @v mutable dense<0.000000e+00> : tensor<1xf32>
# CHECK: func @f()
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I1!R1!"}
# CHECK-DAG: [[CONST_2xf32:%.+]] = "tf.Const"() {value = dense<[0.000000e+00, 1.000000e+00]> : tensor<2xf32>} : () -> tensor<2xf32>
# CHECK-DAG: [[CONST_3xf32:%.+]] = "tf.Const"() {value = dense<[0.000000e+00, 1.000000e+00, 2.000000e+00]> : tensor<3xf32>} : () -> tensor<3xf32>
# CHECK-DAG: flow.variable.store [[CONST_2xf32]], @v : tensor<2xf32>
# CHECK-DAG: flow.variable.store [[CONST_3xf32]], @v : tensor<3xf32>
# CHECK: FINISH_TEST
class T0002d_VarCompatibleShapeChange(tf.Module):
def __init__(self):
self.v = tf.Variable([0.], shape=[None])
@tf.function(input_signature=[])
def f(self):
self.v.assign(tf.constant([0., 1.]))
self.v.assign(tf.constant([0., 1., 2.]))
# CHECK-LABEL: RUN_TEST: T0002e_Error_VarMultipleExportedNames
# CHECK: [ERROR]: Multiple exported names for global tensor not supported yet
# CHECK: FINISH_TEST
class T0002e_Error_VarMultipleExportedNames(tf.Module):
def __init__(self):
self.v = tf.Variable(0.)
self.v2 = self.v
# CHECK-LABEL: RUN_TEST: T0002f_Error_UnsupportedResourceOp
# CHECK: [ERROR]: could not lower resource op to flow
# CHECK: FINISH_TEST
class T0002f_Error_UnsupportedResourceOp(tf.Module):
def __init__(self):
self.v = tf.Variable([0.], shape=[None])
@tf.function(input_signature=[])
def f(self):
self.v.assign_add(tf.constant([0., 1.]))
tf_test_driver.add_test(
test_name="T0002a_SimpleVarRead",
tf_module_builder=T0002a_SimpleVarRead,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
tf_test_driver.add_test(
test_name="T0002b_SimpleVarWrite",
tf_module_builder=T0002b_SimpleVarWrite,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
tf_test_driver.add_test(
test_name="T0002c_SimpleConst",
tf_module_builder=T0002c_SimpleConst,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
tf_test_driver.add_test(
test_name="T0002d_VarCompatibleShapeChange",
tf_module_builder=T0002d_VarCompatibleShapeChange,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
tf_test_driver.add_test(
test_name="T0002e_Error_VarMultipleExportedNames",
tf_module_builder=T0002e_Error_VarMultipleExportedNames,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True,
expect_pass_failure=True)
tf_test_driver.add_test(
test_name="T0002f_Error_UnsupportedResourceOp",
tf_module_builder=T0002f_Error_UnsupportedResourceOp,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True,
expect_pass_failure=True)
# Tests that a structured argument is handled properly.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0003a_StructuredArgs
# CHECK: func @simple_mul
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I23!S19!k0D13!K2!x_0K2!y_1R3!_0"}
# CHECK: FINISH_TEST
class T0003a_StructuredArgs(tf.Module):
@tf.function(input_signature=[{
"x": tf.TensorSpec([4], tf.float32),
"y": tf.TensorSpec([4], tf.float32)
}])
def simple_mul(self, d):
return d["x"] * d["y"]
tf_test_driver.add_test(
test_name="T0003a_StructuredArgs",
tf_module_builder=T0003a_StructuredArgs,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# Tests that a dict result with multiple keys is handled properly.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0003b_StructuredMultipleDictResult
# CHECK: func @simple_mul
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R26!D22!K2!x_0K10!x_squared_1"}
# CHECK: FINISH_TEST
class T0003b_StructuredMultipleDictResult(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
product = a * b
return {"x": product, "x_squared": product * product}
tf_test_driver.add_test(
test_name="T0003b_StructuredMultipleDictResult",
tf_module_builder=T0003b_StructuredMultipleDictResult,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# Tests that a dict result with a single key is handled properly.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0003c_StructuredSingleDictResult
# CHECK: func @simple_mul
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R10!D7!K2!x_0"}
# CHECK: FINISH_TEST
class T0003c_StructuredSingleDictResult(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
product = a * b
return {"x": product}
tf_test_driver.add_test(
test_name="T0003c_StructuredSingleDictResult",
tf_module_builder=T0003c_StructuredSingleDictResult,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# Tests that a single unstructured result is handled properly.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0003d_StructuredSingleResult
# CHECK: func @simple_mul
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R3!_0"}
# CHECK: FINISH_TEST
class T0003d_StructuredSingleResult(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
product = a * b
return product
tf_test_driver.add_test(
test_name="T0003d_StructuredSingleResult",
tf_module_builder=T0003d_StructuredSingleResult,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# Tests that a sequence (tuple) result is handled properly.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0003e_StructuredSequenceResult
# CHECK: func @simple_mul
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R17!S13!k0_0k1_1k2_2"}
# CHECK: FINISH_TEST
class T0003e_StructuredSequenceResult(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
product = a * b
return product, a, b
tf_test_driver.add_test(
test_name="T0003e_StructuredSequenceResult",
tf_module_builder=T0003e_StructuredSequenceResult,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# Tests that a nested result (tuple containing a dict) is handled properly.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0003f_StructuredNestedResult
# CHECK: func @simple_mul
# CHECK: attributes
# CHECK-SAME: iree.module.export
# CHECK-SAME: iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I12!S9!k0_0k1_1R27!S23!k0_0k1D13!K2!a_1K2!b_2"}
# CHECK: FINISH_TEST
class T0003f_StructuredNestedResult(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
product = a * b
return product, {"a": a, "b": b}
tf_test_driver.add_test(
test_name="T0003f_StructuredNestedResult",
tf_module_builder=T0003f_StructuredNestedResult,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True)
# Tests that multiple exported names for a single function are reported as unsupported.
# NOTE: This is currently an error and needs to be implemented
# CHECK-LABEL: RUN_TEST: T0005_MultipleExportedFuncNames
# CHECK: [ERROR]: Multiple exported names not supported yet
# CHECK: FINISH_TEST_WITH_EXCEPTION
class T0005_MultipleExportedFuncNames(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def simple_mul(self, a, b):
product = a * b
return {"x": product}
# Force a function alias.
T0005_MultipleExportedFuncNames.another_copy = (
T0005_MultipleExportedFuncNames.simple_mul)
tf_test_driver.add_test(
test_name="T0005_MultipleExportedFuncNames",
tf_module_builder=T0005_MultipleExportedFuncNames,
passes=SAVED_MODEL_IMPORT_PASSES,
print_input_module=True,
expect_pass_failure=True)
if __name__ == "__main__":
tf_test_driver.run_tests(__file__, with_filecheck=True)
| 32.239362 | 147 | 0.732965 |
4f572a9da23cd07fb0358486e32676088d63c810 | 370 | py | Python | apps/logs/migrations/0010_verbose_name_plural.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | [
"MIT"
] | null | null | null | apps/logs/migrations/0010_verbose_name_plural.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | [
"MIT"
] | null | null | null | apps/logs/migrations/0010_verbose_name_plural.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-16 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logs', '0009_accesslog_import_batch'),
]
operations = [
migrations.AlterModelOptions(
name='importbatch',
options={'verbose_name_plural': 'Import batches'},
),
]
| 20.555556 | 62 | 0.618919 |
4f5578681aa56c667d06cc884504f6a16f2261ac | 16,267 | py | Python | pycue/tests/wrappers/layer_test.py | jkellefiel4/OpenCue | b0faee1d49f52dda076fe03467446f0a0c6ef327 | [
"Apache-2.0"
] | 334 | 2019-01-23T13:48:08.000Z | 2019-06-10T06:58:49.000Z | pycue/tests/wrappers/layer_test.py | jkellefiel4/OpenCue | b0faee1d49f52dda076fe03467446f0a0c6ef327 | [
"Apache-2.0"
] | 564 | 2019-06-11T15:31:48.000Z | 2022-03-31T19:53:03.000Z | pycue/tests/wrappers/layer_test.py | jkellefiel4/OpenCue | b0faee1d49f52dda076fe03467446f0a0c6ef327 | [
"Apache-2.0"
] | 155 | 2019-06-13T11:42:00.000Z | 2022-03-16T18:31:24.000Z | #!/usr/bin/env python
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `opencue.wrappers.layer`"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
from opencue.compiled_proto import depend_pb2
from opencue.compiled_proto import job_pb2
import opencue.wrappers.frame
import opencue.wrappers.layer
import opencue.wrappers.job
TEST_LAYER_NAME = 'testLayer'
TEST_OUTPUT_PATH = '/path/to/file.txt'
@mock.patch('opencue.cuebot.Cuebot.getStub')
class LayerTests(unittest.TestCase):
"""Tests for `opencue.wrappers.layer.Layer`."""
def testKill(self, getStubMock):
stubMock = mock.Mock()
stubMock.KillFrames.return_value = job_pb2.LayerKillFramesResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.kill()
stubMock.KillFrames.assert_called_with(
job_pb2.LayerKillFramesRequest(layer=layer.data), timeout=mock.ANY)
def testEat(self, getStubMock):
stubMock = mock.Mock()
stubMock.EatFrames.return_value = job_pb2.LayerEatFramesResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.eat()
stubMock.EatFrames.assert_called_with(
job_pb2.LayerEatFramesRequest(layer=layer.data), timeout=mock.ANY)
def testRetry(self, getStubMock):
stubMock = mock.Mock()
stubMock.RetryFrames.return_value = job_pb2.LayerRetryFramesResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.retry()
stubMock.RetryFrames.assert_called_with(
job_pb2.LayerRetryFramesRequest(layer=layer.data), timeout=mock.ANY)
def testMarkdone(self, getStubMock):
stubMock = mock.Mock()
stubMock.MarkdoneFrames.return_value = job_pb2.LayerMarkdoneFramesResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.markdone()
stubMock.MarkdoneFrames.assert_called_with(
job_pb2.LayerMarkdoneFramesRequest(layer=layer.data), timeout=mock.ANY)
def testAddLimit(self, getStubMock):
test_limit_id = 'lll-llll-lll'
stubMock = mock.Mock()
stubMock.AddLimit.return_value = job_pb2.LayerAddLimitResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.addLimit(test_limit_id)
stubMock.AddLimit.assert_called_with(
job_pb2.LayerAddLimitRequest(layer=layer.data, limit_id=test_limit_id),
timeout=mock.ANY)
def testDropLimit(self, getStubMock):
test_limit_id = 'lll-llll-lll'
stubMock = mock.Mock()
stubMock.DropLimit.return_value = job_pb2.LayerDropLimitResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.dropLimit(test_limit_id)
stubMock.DropLimit.assert_called_with(
job_pb2.LayerDropLimitRequest(layer=layer.data, limit_id=test_limit_id),
timeout=mock.ANY)
def testEnableMemoryOptimizerTrue(self, getStubMock):
stubMock = mock.Mock()
stubMock.EnableMemoryOptimizer.return_value = job_pb2.LayerEnableMemoryOptimizerResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.enableMemoryOptimizer(True)
stubMock.EnableMemoryOptimizer.assert_called_with(
job_pb2.LayerEnableMemoryOptimizerRequest(layer=layer.data, value=True),
timeout=mock.ANY)
def testEnableMemoryOptimizerFalse(self, getStubMock):
stubMock = mock.Mock()
stubMock.EnableMemoryOptimizer.return_value = job_pb2.LayerEnableMemoryOptimizerResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.enableMemoryOptimizer(False)
stubMock.EnableMemoryOptimizer.assert_called_with(
job_pb2.LayerEnableMemoryOptimizerRequest(layer=layer.data, value=False),
timeout=mock.ANY)
def testGetFrames(self, getStubMock):
stubMock = mock.Mock()
stubMock.GetFrames.return_value = job_pb2.LayerGetFramesResponse(
frames=job_pb2.FrameSeq(frames=[job_pb2.Frame(layer_name=TEST_LAYER_NAME)]))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
frames = layer.getFrames()
stubMock.GetFrames.assert_called_with(
job_pb2.LayerGetFramesRequest(
layer=layer.data,
s=opencue.search.FrameSearch.criteriaFromOptions()),
timeout=mock.ANY)
self.assertEqual(len(frames), 1)
self.assertEqual(frames[0].data.layer_name, TEST_LAYER_NAME)
def testGetOutputPaths(self, getStubMock):
stubMock = mock.Mock()
stubMock.GetOutputPaths.return_value = job_pb2.LayerGetOutputPathsResponse(
output_paths=[TEST_OUTPUT_PATH])
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
outputPaths = layer.getOutputPaths()
stubMock.GetOutputPaths.assert_called_with(
job_pb2.LayerGetOutputPathsRequest(layer=layer.data), timeout=mock.ANY)
self.assertEqual(len(outputPaths), 1)
self.assertEqual(outputPaths[0], TEST_OUTPUT_PATH)
def testSetTags(self, getStubMock):
tags = ['cloud', 'local']
stubMock = mock.Mock()
stubMock.SetTags.return_value = job_pb2.LayerSetTagsResponse()
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.setTags(tags)
stubMock.SetTags.assert_called_with(
job_pb2.LayerSetTagsRequest(layer=layer.data, tags=tags), timeout=mock.ANY)
def testSetMaxCores(self, getStubMock):
stubMock = mock.Mock()
stubMock.SetMaxCores.return_value = job_pb2.LayerSetMaxCoresResponse()
getStubMock.return_value = stubMock
testCores = 100
testCoresActual = testCores/100.0
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.setMaxCores(testCores)
stubMock.SetMaxCores.assert_called_with(
job_pb2.LayerSetMaxCoresRequest(layer=layer.data, cores=testCoresActual),
timeout=mock.ANY)
def testSetMinGpuMemory(self, getStubMock):
stubMock = mock.Mock()
stubMock.SetMinGpuMemory.return_value = job_pb2.LayerSetMinGpuResponse()
getStubMock.return_value = stubMock
testCores = 100
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.setMinGpuMemory(testCores)
stubMock.SetMinGpuMemory.assert_called_with(
job_pb2.LayerSetMinGpuMemoryRequest(layer=layer.data, gpu_memory=testCores),
timeout=mock.ANY)
def testSetMinMemory(self, getStubMock):
stubMock = mock.Mock()
stubMock.SetMinMemory.return_value = job_pb2.LayerSetMinMemoryResponse()
getStubMock.return_value = stubMock
memory = 2048
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.setMinMemory(memory)
stubMock.SetMinMemory.assert_called_with(
job_pb2.LayerSetMinMemoryRequest(layer=layer.data, memory=memory),
timeout=mock.ANY)
def testSetThreadable(self, getStubMock):
stubMock = mock.Mock()
stubMock.SetThreadable.return_value = job_pb2.LayerSetThreadableResponse()
getStubMock.return_value = stubMock
value = True
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.setThreadable(value)
stubMock.SetThreadable.assert_called_with(
job_pb2.LayerSetThreadableRequest(layer=layer.data, threadable=value),
timeout=mock.ANY)
def testGetWhatDependsOnThis(self, getStubMock):
dependId = 'dddd-ddd-dddd'
stubMock = mock.Mock()
stubMock.GetWhatDependsOnThis.return_value = job_pb2.LayerGetWhatDependsOnThisResponse(
depends=depend_pb2.DependSeq(depends=[depend_pb2.Depend(id=dependId)]))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
depends = layer.getWhatDependsOnThis()
stubMock.GetWhatDependsOnThis.assert_called_with(
job_pb2.LayerGetWhatDependsOnThisRequest(layer=layer.data),
timeout=mock.ANY)
self.assertEqual(len(depends), 1)
self.assertEqual(depends[0].id(), dependId)
def testGetWhatThisDependsOn(self, getStubMock):
dependId = 'dddd-ddd-dddd'
stubMock = mock.Mock()
stubMock.GetWhatThisDependsOn.return_value = job_pb2.LayerGetWhatThisDependsOnResponse(
depends=depend_pb2.DependSeq(depends=[depend_pb2.Depend(id=dependId)]))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
depends = layer.getWhatThisDependsOn()
stubMock.GetWhatThisDependsOn.assert_called_with(
job_pb2.LayerGetWhatThisDependsOnRequest(layer=layer.data),
timeout=mock.ANY)
self.assertEqual(len(depends), 1)
self.assertEqual(depends[0].id(), dependId)
def testCreateDependencyOnJob(self, getStubMock):
dependId = 'dddd-ddd-dddd'
jobId = 'jjjj-jjj-jjjj'
stubMock = mock.Mock()
stubMock.CreateDependencyOnJob.return_value = job_pb2.LayerCreateDependOnJobResponse(
depend=depend_pb2.Depend(id=dependId))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
job = opencue.wrappers.job.Job(
job_pb2.Job(id=jobId))
depend = layer.createDependencyOnJob(job)
stubMock.CreateDependencyOnJob.assert_called_with(
job_pb2.LayerCreateDependOnJobRequest(layer=layer.data, job=job.data),
timeout=mock.ANY)
self.assertEqual(depend.id(), dependId)
def testCreateDependencyOnLayer(self, getStubMock):
dependId = 'dddd-ddd-dddd'
layerId = 'llll-lll-llll'
stubMock = mock.Mock()
stubMock.CreateDependencyOnLayer.return_value = job_pb2.LayerCreateDependOnLayerResponse(
depend=depend_pb2.Depend(id=dependId))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
dependLayer = opencue.wrappers.layer.Layer(
job_pb2.Layer(id=layerId))
depend = layer.createDependencyOnLayer(dependLayer)
stubMock.CreateDependencyOnLayer.assert_called_with(
job_pb2.LayerCreateDependOnLayerRequest(layer=layer.data,
depend_on_layer=dependLayer.data),
timeout=mock.ANY)
self.assertEqual(depend.id(), dependId)
def testCreateDependencyOnFrame(self, getStubMock):
dependId = 'dddd-ddd-dddd'
frameId = 'ffff-fff-ffff'
stubMock = mock.Mock()
stubMock.CreateDependencyOnFrame.return_value = job_pb2.LayerCreateDependOnFrameResponse(
depend=depend_pb2.Depend(id=dependId))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
frame = opencue.wrappers.frame.Frame(
job_pb2.Frame(id=frameId))
depend = layer.createDependencyOnFrame(frame)
stubMock.CreateDependencyOnFrame.assert_called_with(
job_pb2.LayerCreateDependOnFrameRequest(layer=layer.data, frame=frame.data),
timeout=mock.ANY)
self.assertEqual(depend.id(), dependId)
def testCreateFrameByFrameDependency(self, getStubMock):
dependId = 'dddd-ddd-dddd'
layerId = 'llll-lll-llll'
stubMock = mock.Mock()
stubMock.CreateFrameByFrameDependency.return_value = \
job_pb2.LayerCreateFrameByFrameDependResponse(depend=depend_pb2.Depend(id=dependId))
getStubMock.return_value = stubMock
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
dependLayer = opencue.wrappers.layer.Layer(
job_pb2.Layer(id=layerId))
depend = layer.createFrameByFrameDependency(dependLayer)
stubMock.CreateFrameByFrameDependency.assert_called_with(
job_pb2.LayerCreateFrameByFrameDependRequest(layer=layer.data,
depend_layer=dependLayer.data,
any_frame=False),
timeout=mock.ANY)
self.assertEqual(depend.id(), dependId)
def testRegisterOutputPath(self, getStubMock):
stubMock = mock.Mock()
stubMock.RegisterOutputPath.return_value = job_pb2.LayerRegisterOutputPathResponse()
getStubMock.return_value = stubMock
outputPath = '/test/output/path'
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.registerOutputPath(outputPath)
stubMock.RegisterOutputPath.assert_called_with(
job_pb2.LayerRegisterOutputPathRequest(layer=layer.data, spec=outputPath),
timeout=mock.ANY)
def testReorderFrames(self, getStubMock):
stubMock = mock.Mock()
stubMock.ReorderFrames.return_value = job_pb2.LayerReorderFramesResponse()
getStubMock.return_value = stubMock
frameRange = '1-10'
order = job_pb2.REVERSE
layer = opencue.wrappers.layer.Layer(job_pb2.Layer(name=TEST_LAYER_NAME))
layer.reorderFrames(frameRange, order)
stubMock.ReorderFrames.assert_called_with(
job_pb2.LayerReorderFramesRequest(layer=layer.data, range=frameRange, order=order),
timeout=mock.ANY)
def testStaggerFrames(self, getStubMock):
stubMock = mock.Mock()
stubMock.StaggerFrames.return_value = job_pb2.LayerStaggerFramesResponse()
getStubMock.return_value = stubMock
frameRange = '1-10'
stagger = 4
layer = opencue.wrappers.layer.Layer(
job_pb2.Layer(name=TEST_LAYER_NAME))
layer.staggerFrames(frameRange, stagger)
stubMock.StaggerFrames.assert_called_with(
job_pb2.LayerStaggerFramesRequest(layer=layer.data, range=frameRange, stagger=stagger),
timeout=mock.ANY)
class LayerEnumTests(unittest.TestCase):
    """Tests for the enum aliases exposed on opencue.api.Layer."""
def testLayerType(self):
self.assertEqual(opencue.api.Layer.LayerType.PRE, opencue.compiled_proto.job_pb2.PRE)
self.assertEqual(opencue.api.Layer.LayerType.PRE, 0)
def testOrder(self):
self.assertEqual(opencue.api.Layer.Order.LAST, opencue.compiled_proto.job_pb2.LAST)
self.assertEqual(opencue.api.Layer.Order.LAST, 1)
if __name__ == '__main__':
unittest.main()
| 39.103365 | 99 | 0.686789 |
4f5741695a0f188d00a6adfad3711c9f9eff3f18 | 6,567 | py | Python | src/ghpusher/gh_pusher.py | dbradf/gh-pusher | 6454e41154b61ab3a6beea54019684363740182a | [
"Apache-2.0"
] | null | null | null | src/ghpusher/gh_pusher.py | dbradf/gh-pusher | 6454e41154b61ab3a6beea54019684363740182a | [
"Apache-2.0"
] | null | null | null | src/ghpusher/gh_pusher.py | dbradf/gh-pusher | 6454e41154b61ab3a6beea54019684363740182a | [
"Apache-2.0"
] | null | null | null | """Command line tool for pushing to github pages."""
from glob import glob
import os
import shutil
from typing import Callable, NamedTuple, Any, Dict
import click
from plumbum import local
SEP = ":"
class CommitMetaData(NamedTuple):
"""Details about a commit."""
author: str
email: str
message: str
@classmethod
def from_git_history(cls, commit_str: str) -> "CommitMetaData":
"""
Create commit history metadata from the given string.
:param commit_str: String containing commit metadata.
:return: CommitMetaData based on given string.
"""
(author, email, *message) = commit_str.split(SEP)
return cls(author=author, email=email, message=SEP.join(message))
def author_string(self) -> str:
"""Get a string describing the author."""
return f"{self.author} <{self.email}>"
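# Illustrative sketch (not part of the original tool): how a single git log line produced by
# the "%an:%ae:%s" pretty format used below is parsed. The author, email and message are made up.
def _example_commit_metadata() -> CommitMetaData:
    """Parse a sample git log line into CommitMetaData (example values only)."""
    sample = "Jane Doe:jane@example.com:Fix: handle empty build dirs"
    data = CommitMetaData.from_git_history(sample)
    # Separators embedded in the subject are preserved: message == "Fix: handle empty build dirs".
    return data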
class GitService(object):
"""Service to handle git interactions."""
def __init__(self, git: Any) -> None:
"""
Create a new GitService.
:param git: Git command object.
"""
self.git = git
def get_last_commit(self) -> CommitMetaData:
"""Get the commit metadata for the last commit."""
format = SEP.join([r"%an", r"%ae", r"%s"])
output = self.git("log", "-n", "1", f'--pretty=format:"{format}"')
return CommitMetaData.from_git_history(output.strip('"'))
def git_changes_exist(self) -> bool:
"""Determine if the current directory has any git changes."""
output = self.git("status", "--short")
return len(output.strip()) > 0
def switch_branch(self, branch: str) -> None:
"""
Switch to the specified branch.
:param branch: Branch to switch to.
"""
self.git("checkout", branch)
def commit_all_files(self, commit_data: CommitMetaData) -> None:
"""
Commit all files with the given commit metadata.
:param commit_data: Meta data to use for the commit.
"""
self.git("add", ".")
self.git("commit", "-m", commit_data.message, f"--author={commit_data.author_string()}")
def push_branch(self, branch: str) -> None:
"""
Push changes on the branch to the origin.
:param branch: Branch to push.
"""
self.git("push", "origin", branch)
def get_active_branch(self) -> str:
"""Get the active branch."""
return self.git("rev-parse", "--abbrev-ref", "HEAD").strip()
class FileService(object):
    """Service to orchestrate file operations."""
def __init__(self, shutil: Any, globber: Callable, path_ops: Any, file_ops: Any) -> None:
"""
Create a new FileService.
        :param shutil: Module providing shell utilities (e.g. shutil).
        :param globber: Function to glob directories.
        :param path_ops: Module providing path operations (e.g. os.path).
        :param file_ops: Module providing file operations (e.g. os).
"""
self.shutil = shutil
self.globber = globber
self.path_ops = path_ops
self.file_ops = file_ops
def remove(self, target: str) -> None:
"""
Remove the given file if it exists.
:param target: File or directory to remove.
"""
if self.path_ops.exists(target):
if self.path_ops.isfile(target):
self.file_ops.remove(target)
else:
self.shutil.rmtree(target)
def move_files(self, parent_dir: str, target_dir: str) -> None:
"""
        Move the files under one directory into another directory.
:param parent_dir: Path to directory containing files to move.
:param target_dir: Path to directory to move files to.
"""
files = self.globber(f"{parent_dir}/*")
for f in files:
target_file = f"{target_dir}/{self.path_ops.basename(f)}"
self.remove(target_file)
self.shutil.move(f, target_dir)
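# Usage sketch (directory names are hypothetical): move everything a docs build produced in
# ./build into the repository root, replacing any stale copies already there.
def _example_move_built_docs() -> None:
    """Move files from a hypothetical ./build directory into the current directory."""
    service = FileService(shutil, glob, os.path, os)
    service.move_files("./build", ".")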
class GhPushService(object):
"""Service to orchestrate pushing to gh pages."""
def __init__(self, git_service: GitService, file_service: FileService) -> None:
"""
Create a new github push service.
:param git_service: Git service.
:param file_service: File service.
"""
self.git_service = git_service
self.file_service = file_service
def push_changes(self, repo_base: str, build_dir: str, target_branch: str) -> None:
"""
Move changes to root of repo, commit them and publish them.
:param repo_base: Path to base of git repository.
:param build_dir: Path to directory containing changes to publish.
:param target_branch: Name of branch to publish to.
"""
with local.cwd(repo_base):
active_branch = self.git_service.get_active_branch()
commit_data = self.git_service.get_last_commit()
self.git_service.switch_branch(target_branch)
self.file_service.move_files(build_dir, ".")
if self.git_service.git_changes_exist():
self.git_service.commit_all_files(commit_data)
self.git_service.push_branch(target_branch)
self.git_service.switch_branch(active_branch)
@click.command()
@click.option("--target-branch", default="gh-pages", help="Branch to publish documentation.")
@click.option(
"--build-dir",
type=click.Path(exists=True),
required=True,
help="Directory containing documentation to publish.",
)
@click.option("--git-binary", type=click.Path(exists=True), help="Path to git binary.")
@click.option(
"--repo-base", type=click.Path(exists=True), default=".", help="Path to base of repository."
)
def gh_push(**options: Dict[str, Any]) -> None:
"""
    Publish documentation changes to a GitHub Pages branch.
    Move a directory of built documentation from the build directory to
    the base of the repository on the target GitHub Pages branch. If there
    are any changes to the documentation, they are added in a commit with
    the same author and commit message as the last commit on the active
    branch.
"""
target_branch = str(options["target_branch"])
build_dir = os.path.expanduser(str(options["build_dir"]))
repo_base = os.path.expanduser(str(options["repo_base"]))
git_binary = options["git_binary"] or local.which("git")
git = local[git_binary]
git_service = GitService(git)
file_service = FileService(shutil, glob, os.path, os)
gh_push_service = GhPushService(git_service, file_service)
gh_push_service.push_changes(repo_base, build_dir, target_branch)
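# Invocation sketch (arguments are made up): the command is normally run from a CI step, e.g.
#   gh-pusher --build-dir ./site --target-branch gh-pages
# assuming the package exposes a console script with that name. The same flow can be
# exercised in-process through click's test runner:
def _example_invoke_gh_push():
    """Run gh_push with hypothetical arguments via click's CliRunner (sketch only)."""
    from click.testing import CliRunner
    runner = CliRunner()
    return runner.invoke(gh_push, ["--build-dir", "./site", "--target-branch", "gh-pages"])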
| 33.335025 | 96 | 0.634993 |
4f4fdf7cbc544a35b163d355043483769c6ae69e | 3,897 | py | Python | configs/recognition/timesformer/timesformer_jointST_8x32x1_15e_kinetics400_rgb.py | EquipoVandV/mmactionVandV | a807d9e258d67b7b2d0fabe98da97a801d63ae7d | [
"Apache-2.0"
] | 1,870 | 2020-07-11T09:33:46.000Z | 2022-03-31T13:21:36.000Z | configs/recognition/timesformer/timesformer_jointST_8x32x1_15e_kinetics400_rgb.py | EquipoVandV/mmactionVandV | a807d9e258d67b7b2d0fabe98da97a801d63ae7d | [
"Apache-2.0"
] | 1,285 | 2020-07-11T11:18:57.000Z | 2022-03-31T08:41:17.000Z | configs/recognition/timesformer/timesformer_jointST_8x32x1_15e_kinetics400_rgb.py | EquipoVandV/mmactionVandV | a807d9e258d67b7b2d0fabe98da97a801d63ae7d | [
"Apache-2.0"
] | 557 | 2020-07-11T09:51:57.000Z | 2022-03-31T13:21:35.000Z | _base_ = ['../../_base_/default_runtime.py']
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='TimeSformer',
pretrained= # noqa: E251
'https://download.openmmlab.com/mmaction/recognition/timesformer/vit_base_patch16_224.pth', # noqa: E501
num_frames=8,
img_size=224,
patch_size=16,
embed_dims=768,
in_channels=3,
dropout_ratio=0.,
transformer_layers=None,
attention_type='joint_space_time',
norm_cfg=dict(type='LN', eps=1e-6)),
cls_head=dict(type='TimeSformerHead', num_classes=400, in_channels=768),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=32, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='RandomRescale', scale_range=(256, 320)),
dict(type='RandomCrop', size=224),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=32,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=32,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 224)),
dict(type='ThreeCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
data = dict(
videos_per_gpu=7,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(
type='SGD',
lr=0.004375,
momentum=0.9,
paramwise_cfg=dict(
custom_keys={
'.backbone.cls_token': dict(decay_mult=0.0),
'.backbone.pos_embed': dict(decay_mult=0.0),
'.backbone.time_embed': dict(decay_mult=0.0)
}),
weight_decay=1e-4,
nesterov=True) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[5, 10])
total_epochs = 15
# runtime settings
checkpoint_config = dict(interval=1)
work_dir = './work_dirs/timesformer_jointST_8x32x1_15e_kinetics400_rgb'
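# Usage sketch (paths are illustrative): configs like this are consumed by the mmaction2
# command-line tools rather than imported directly, e.g.
#   python tools/train.py configs/recognition/timesformer/timesformer_jointST_8x32x1_15e_kinetics400_rgb.py
# or loaded programmatically with mmcv:
#   from mmcv import Config
#   cfg = Config.fromfile('configs/recognition/timesformer/timesformer_jointST_8x32x1_15e_kinetics400_rgb.py')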
| 32.475 | 113 | 0.658712 |
4f57559d90288027328bc98fe029f43f4e117793 | 734 | py | Python | ExtentionPackages/pysnmp/proto/secmod/rfc3414/auth/base.py | hongsofwing/PyQYT-master | 9a112d9adbf9885a8b7535b7ef7759b60a0f9a29 | [
"CNRI-Python"
] | null | null | null | ExtentionPackages/pysnmp/proto/secmod/rfc3414/auth/base.py | hongsofwing/PyQYT-master | 9a112d9adbf9885a8b7535b7ef7759b60a0f9a29 | [
"CNRI-Python"
] | null | null | null | ExtentionPackages/pysnmp/proto/secmod/rfc3414/auth/base.py | hongsofwing/PyQYT-master | 9a112d9adbf9885a8b7535b7ef7759b60a0f9a29 | [
"CNRI-Python"
] | null | null | null | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
from pysnmp.proto import errind, error
class AbstractAuthenticationService:
serviceID = None
def hashPassphrase(self, authKey):
raise error.ProtocolError(errind.noAuthentication)
def localizeKey(self, authKey, snmpEngineID):
raise error.ProtocolError(errind.noAuthentication)
# 7.2.4.1
def authenticateOutgoingMsg(self, authKey, wholeMsg):
raise error.ProtocolError(errind.noAuthentication)
# 7.2.4.2
def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
raise error.ProtocolError(errind.noAuthentication)
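# Minimal sketch of a concrete service implementing the interface above (illustrative only;
# the real HMAC-MD5/HMAC-SHA services live in sibling modules of this package).
class _ExampleNoOpAuthService(AbstractAuthenticationService):
    serviceID = (9, 9, 9)  # made-up identifier, not a registered authentication protocol OID
    def hashPassphrase(self, authKey):
        return authKey
    def localizeKey(self, authKey, snmpEngineID):
        return authKey
    def authenticateOutgoingMsg(self, authKey, wholeMsg):
        return wholeMsg
    def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
        return wholeMsg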
| 29.36 | 73 | 0.734332 |
4f56900916622bbd923bb0a28428fac1bf7efcd0 | 2,975 | py | Python | src/the_tale/the_tale/game/persons/storage.py | SilentWrangler/the-tale | a121128edd2a9e36133eb047946ccb9593801ea6 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/game/persons/storage.py | SilentWrangler/the-tale | a121128edd2a9e36133eb047946ccb9593801ea6 | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/game/persons/storage.py | SilentWrangler/the-tale | a121128edd2a9e36133eb047946ccb9593801ea6 | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
class PersonsStorage(utils_storage.Storage):
SETTINGS_KEY = 'persons change time'
EXCEPTION = exceptions.PersonsStorageError
def _construct_object(self, model):
return logic.load_person(person_model=model)
def _save_object(self, person):
return logic.save_person(person)
def _get_all_query(self):
return models.Person.objects.all()
persons = PersonsStorage()
class SocialConnectionsStorage(utils_storage.CachedStorage):
SETTINGS_KEY = 'social-connections-storage'
EXCEPTION = exceptions.PersonsStorageError
def _construct_object(self, model):
return logic.social_connection_from_model(model)
def _get_all_query(self):
return models.SocialConnection.objects.all()
def _reset_cache(self):
self._person_connections = {}
def _update_cached_data(self, item):
if item.person_1_id not in self._person_connections:
self._person_connections[item.person_1_id] = {}
if item.person_2_id not in self._person_connections:
self._person_connections[item.person_2_id] = {}
self._person_connections[item.person_1_id][item.person_2_id] = item
self._person_connections[item.person_2_id][item.person_1_id] = item
def get_person_connections(self, person):
self.sync()
connections = self._person_connections.get(person.id, {})
result = []
for connected_person_id, item in connections.items():
connected_person = persons.get(connected_person_id)
if connected_person is None:
continue
result.append((item.connection, connected_person.id))
return result
def get_connected_persons(self, person):
return [(connection, persons[person_id]) for connection, person_id in self.get_person_connections(person)]
def get_connection(self, person_1, person_2):
self.sync()
for connected_person_id, connection in self._person_connections.get(person_1.id).items():
if person_2.id == connected_person_id:
return connection
def connections_limit_reached(self, person):
self.sync()
return len(self.get_person_connections(person)) >= c.PERSON_SOCIAL_CONNECTIONS_LIMIT
def has_connections(self, person):
self.sync()
return len(self.get_person_connections(person)) > 0
def get_connected_persons_ids(self, person):
self.sync()
return list(self._person_connections.get(person.id, {}).keys())
def is_connected(self, person_1, person_2):
return person_2.id in self.get_connected_persons_ids(person_1)
def get_connection_type(self, person_1, person_2):
self.sync()
if not self.is_connected(person_1, person_2):
return None
return self._person_connections[person_1.id][person_2.id].connection
social_connections = SocialConnectionsStorage()
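# Usage sketch (ids are hypothetical): look up the cached connection type between two persons,
# relying on the storages defined above having been synced by the game logic.
def _example_connection_type(person_id, other_id):
    """Return the connection type between two cached persons, or None if either is unknown."""
    person = persons.get(person_id)
    other = persons.get(other_id)
    if person is None or other is None:
        return None
    return social_connections.get_connection_type(person, other)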
| 31.989247 | 114 | 0.700504 |
4f54ebde85e60192a65649ca0ec7dfe8cfe55fce | 39,871 | py | Python | applications/admin/controllers/default.py | trosa/listool | f175c6e30bcc788a381a92c3a8b818a1f1a88de6 | [
"BSD-3-Clause"
] | 1 | 2017-08-11T20:37:21.000Z | 2017-08-11T20:37:21.000Z | applications/admin/controllers/default.py | trosa/listool | f175c6e30bcc788a381a92c3a8b818a1f1a88de6 | [
"BSD-3-Clause"
] | null | null | null | applications/admin/controllers/default.py | trosa/listool | f175c6e30bcc788a381a92c3a8b818a1f1a88de6 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf8
from gluon.admin import *
from gluon.fileutils import abspath
from glob import glob
import shutil
import platform
if DEMO_MODE and request.function in ['change_password','pack','pack_plugin','upgrade_web2py','uninstall','cleanup','compile_app','remove_compiled_app','delete','delete_plugin','create_file','upload_file','update_languages']:
session.flash = T('disabled in demo mode')
redirect(URL('site'))
if FILTER_APPS and request.args(0) and not request.args(0) in FILTER_APPS:
session.flash = T('disabled in demo mode')
redirect(URL('site'))
def safe_open(a,b):
    """Open file ``a`` with mode ``b``; in DEMO_MODE, writes are silently discarded."""
    if DEMO_MODE and 'w' in b:
class tmp:
def write(self,data): pass
return tmp()
return open(a,b)
def index():
""" Index handler """
send = request.vars.send
if DEMO_MODE:
session.authorized = True
session.last_time = t0
if not send:
send = URL('site')
if session.authorized:
redirect(send)
elif request.vars.password:
if verify_password(request.vars.password):
session.authorized = True
if CHECK_VERSION:
session.check_version = True
else:
session.check_version = False
session.last_time = t0
if isinstance(send, list): # ## why does this happen?
send = str(send[0])
redirect(send)
else:
response.flash = T('invalid password')
# f == file
apps = [f for f in os.listdir(apath(r=request)) if f.find('.') < 0]
return dict(apps=apps, send=send)
def check_version():
""" Checks if web2py is up to date """
session.forget()
session._unlock(response)
new_version, version_number = check_new_version(request.env.web2py_version,
WEB2PY_VERSION_URL)
if new_version == -1:
return A(T('Unable to check for upgrades'), _href=WEB2PY_URL)
elif new_version != True:
return A(T('web2py is up to date'), _href=WEB2PY_URL)
elif platform.system().lower() in ('windows','win32','win64'):
        return SPAN('You should upgrade to version %s' % version_number)
else:
return sp_button(URL('upgrade_web2py'), T('upgrade now')) \
+ XML(' <strong class="upgrade_version">%s</strong>' % version_number)
def logout():
""" Logout handler """
session.authorized = None
redirect(URL('index'))
def change_password():
if session.pam_user:
session.flash = T('PAM authenticated user, cannot change password here')
redirect(URL('site'))
form=SQLFORM.factory(Field('current_admin_password','password'),
Field('new_admin_password','password',requires=IS_STRONG()),
Field('new_admin_password_again','password'))
if form.accepts(request.vars):
if not verify_password(request.vars.current_admin_password):
form.errors.current_admin_password = T('invalid password')
elif form.vars.new_admin_password != form.vars.new_admin_password_again:
form.errors.new_admin_password_again = T('no match')
else:
path = abspath('parameters_%s.py' % request.env.server_port)
safe_open(path,'w').write('password="%s"' % CRYPT()(request.vars.new_admin_password)[0])
session.flash = T('password changed')
redirect(URL('site'))
return dict(form=form)
def site():
""" Site handler """
myversion = request.env.web2py_version
# Shortcut to make the elif statements more legible
file_or_appurl = 'file' in request.vars or 'appurl' in request.vars
if DEMO_MODE:
pass
elif request.vars.filename and not 'file' in request.vars:
# create a new application
appname = cleanpath(request.vars.filename).replace('.', '_')
if app_create(appname, request):
session.flash = T('new application "%s" created', appname)
redirect(URL('design',args=appname))
else:
session.flash = \
T('unable to create application "%s"', request.vars.filename)
redirect(URL(r=request))
elif file_or_appurl and not request.vars.filename:
# can't do anything without an app name
msg = 'you must specify a name for the uploaded application'
response.flash = T(msg)
elif file_or_appurl and request.vars.filename:
# fetch an application via URL or file upload
f = None
if request.vars.appurl is not '':
try:
f = urllib.urlopen(request.vars.appurl)
except Exception, e:
session.flash = DIV(T('Unable to download app because:'),PRE(str(e)))
redirect(URL(r=request))
fname = request.vars.appurl
elif request.vars.file is not '':
f = request.vars.file.file
fname = request.vars.file.filename
if f:
appname = cleanpath(request.vars.filename).replace('.', '_')
installed = app_install(appname, f, request, fname,
overwrite=request.vars.overwrite_check)
if f and installed:
msg = 'application %(appname)s installed with md5sum: %(digest)s'
session.flash = T(msg, dict(appname=appname,
digest=md5_hash(installed)))
elif f and request.vars.overwrite_check:
msg = 'unable to install application "%(appname)s"'
session.flash = T(msg, dict(appname=request.vars.filename))
else:
msg = 'unable to install application "%(appname)s"'
session.flash = T(msg, dict(appname=request.vars.filename))
redirect(URL(r=request))
regex = re.compile('^\w+$')
apps = sorted([(f.upper(), f) for f in os.listdir(apath(r=request)) \
if regex.match(f)])
apps = [item[1] for item in apps]
if FILTER_APPS:
apps = [f for f in apps if f in FILTER_APPS]
return dict(app=None, apps=apps, myversion=myversion)
def pack():
if len(request.args) == 1:
fname = 'web2py.app.%s.w2p' % request.args[0]
filename = app_pack(request.args[0], request)
else:
fname = 'web2py.app.%s.compiled.w2p' % request.args[0]
filename = app_pack_compiled(request.args[0], request)
if filename:
response.headers['Content-Type'] = 'application/w2p'
disposition = 'attachment; filename=%s' % fname
response.headers['Content-Disposition'] = disposition
return safe_open(filename, 'rb').read()
else:
session.flash = T('internal error')
redirect(URL('site'))
def pack_plugin():
if len(request.args) == 2:
fname = 'web2py.plugin.%s.w2p' % request.args[1]
filename = plugin_pack(request.args[0], request.args[1], request)
if filename:
response.headers['Content-Type'] = 'application/w2p'
disposition = 'attachment; filename=%s' % fname
response.headers['Content-Disposition'] = disposition
return safe_open(filename, 'rb').read()
else:
session.flash = T('internal error')
redirect(URL('plugin',args=request.args))
def upgrade_web2py():
if 'upgrade' in request.vars:
(success, error) = upgrade(request)
if success:
session.flash = T('web2py upgraded; please restart it')
else:
session.flash = T('unable to upgrade because "%s"', error)
redirect(URL('site'))
elif 'noupgrade' in request.vars:
redirect(URL('site'))
return dict()
def uninstall():
app = request.args[0]
if 'delete' in request.vars:
deleted = app_uninstall(app, request)
if deleted:
session.flash = T('application "%s" uninstalled', app)
else:
session.flash = T('unable to uninstall "%s"', app)
redirect(URL('site'))
elif 'nodelete' in request.vars:
redirect(URL('site'))
return dict(app=app)
def cleanup():
clean = app_cleanup(request.args[0], request)
if not clean:
session.flash = T("some files could not be removed")
else:
session.flash = T('cache, errors and sessions cleaned')
redirect(URL('site'))
def compile_app():
c = app_compile(request.args[0], request)
if not c:
session.flash = T('application compiled')
else:
        session.flash = DIV(T('Cannot compile: there are errors in your app:'),
                            CODE(c))
redirect(URL('site'))
def remove_compiled_app():
""" Remove the compiled application """
remove_compiled_application(apath(request.args[0], r=request))
session.flash = T('compiled application removed')
redirect(URL('site'))
def delete():
""" Object delete handler """
filename = '/'.join(request.args)
sender = request.vars.sender
if isinstance(sender, list): # ## fix a problem with Vista
sender = sender[0]
if 'nodelete' in request.vars:
redirect(URL(sender))
elif 'delete' in request.vars:
try:
os.unlink(apath(filename, r=request))
session.flash = T('file "%(filename)s" deleted',
dict(filename=filename))
except Exception:
session.flash = T('unable to delete file "%(filename)s"',
dict(filename=filename))
redirect(URL(sender))
return dict(filename=filename, sender=sender)
def peek():
""" Visualize object code """
filename = '/'.join(request.args)
try:
data = safe_open(apath(filename, r=request), 'r').read().replace('\r','')
except IOError:
session.flash = T('file does not exist')
redirect(URL('site'))
extension = filename[filename.rfind('.') + 1:].lower()
return dict(app=request.args[0],
filename=filename,
data=data,
extension=extension)
def test():
""" Execute controller tests """
app = request.args[0]
if len(request.args) > 1:
file = request.args[1]
else:
file = '.*\.py'
controllers = listdir(apath('%s/controllers/' % app, r=request), file + '$')
return dict(app=app, controllers=controllers)
def keepalive():
return ''
def search():
keywords=request.vars.keywords or ''
def match(filename,keywords):
filename=os.path.join(apath(request.args[0], r=request),filename)
if keywords in open(filename,'rb').read():
return True
return False
path=apath(request.args[0], r=request)
files1 = glob(os.path.join(path,'*/*.py'))
files2 = glob(os.path.join(path,'*/*.html'))
files3 = glob(os.path.join(path,'*/*/*.html'))
files=[x[len(path)+1:].replace('\\','/') for x in files1+files2+files3 if match(x,keywords)]
return response.json({'files':files})
def edit():
""" File edit handler """
# Load json only if it is ajax edited...
filename = '/'.join(request.args)
# Try to discover the file type
if filename[-3:] == '.py':
filetype = 'python'
elif filename[-5:] == '.html':
filetype = 'html'
elif filename[-5:] == '.load':
filetype = 'html'
elif filename[-4:] == '.css':
filetype = 'css'
elif filename[-3:] == '.js':
filetype = 'js'
else:
filetype = 'html'
# ## check if file is not there
path = apath(filename, r=request)
if request.vars.revert and os.path.exists(path + '.bak'):
try:
data = safe_open(path + '.bak', 'r').read()
data1 = safe_open(path, 'r').read()
except IOError:
session.flash = T('Invalid action')
if 'from_ajax' in request.vars:
return response.json({'error': str(T('Invalid action'))})
else:
redirect(URL('site'))
safe_open(path, 'w').write(data)
file_hash = md5_hash(data)
saved_on = time.ctime(os.stat(path)[stat.ST_MTIME])
safe_open(path + '.bak', 'w').write(data1)
response.flash = T('file "%s" of %s restored', (filename, saved_on))
else:
try:
data = safe_open(path, 'r').read()
except IOError:
session.flash = T('Invalid action')
if 'from_ajax' in request.vars:
return response.json({'error': str(T('Invalid action'))})
else:
redirect(URL('site'))
file_hash = md5_hash(data)
saved_on = time.ctime(os.stat(path)[stat.ST_MTIME])
if request.vars.file_hash and request.vars.file_hash != file_hash:
session.flash = T('file changed on disk')
data = request.vars.data.replace('\r\n', '\n').strip() + '\n'
safe_open(path + '.1', 'w').write(data)
if 'from_ajax' in request.vars:
return response.json({'error': str(T('file changed on disk')),
'redirect': URL('resolve',
args=request.args)})
else:
redirect(URL('resolve', args=request.args))
elif request.vars.data:
safe_open(path + '.bak', 'w').write(data)
data = request.vars.data.replace('\r\n', '\n').strip() + '\n'
safe_open(path, 'w').write(data)
file_hash = md5_hash(data)
saved_on = time.ctime(os.stat(path)[stat.ST_MTIME])
response.flash = T('file saved on %s', saved_on)
data_or_revert = (request.vars.data or request.vars.revert)
# Check compile errors
highlight = None
if filetype == 'python' and request.vars.data:
import _ast
try:
code = request.vars.data.rstrip().replace('\r\n','\n')+'\n'
compile(code, path, "exec", _ast.PyCF_ONLY_AST)
except Exception, e:
start = sum([len(line)+1 for l, line
in enumerate(request.vars.data.split("\n"))
if l < e.lineno-1])
if e.text and e.offset:
offset = e.offset - (len(e.text) - len(e.text.splitlines()[-1]))
else:
offset = 0
highlight = {'start': start, 'end': start + offset + 1}
try:
ex_name = e.__class__.__name__
except:
ex_name = 'unknown exception!'
response.flash = DIV(T('failed to compile file because:'), BR(),
B(ex_name), T(' at line %s') % e.lineno,
offset and T(' at char %s') % offset or '',
PRE(str(e)))
if data_or_revert and request.args[1] == 'modules':
# Lets try to reload the modules
try:
mopath = '.'.join(request.args[2:])[:-3]
exec 'import applications.%s.modules.%s' % (request.args[0], mopath)
reload(sys.modules['applications.%s.modules.%s'
% (request.args[0], mopath)])
except Exception, e:
response.flash = DIV(T('failed to reload module because:'),PRE(str(e)))
edit_controller = None
editviewlinks = None
view_link = None
if filetype == 'html' and len(request.args) >= 3:
cfilename = os.path.join(request.args[0], 'controllers',
request.args[2] + '.py')
if os.path.exists(apath(cfilename, r=request)):
edit_controller = URL('edit', args=[cfilename])
view = request.args[3].replace('.html','')
view_link = URL(request.args[0],request.args[2],view)
elif filetype == 'python' and request.args[1] == 'controllers':
## it's a controller file.
## Create links to all of the associated view files.
app = request.args[0]
viewname = os.path.splitext(request.args[2])[0]
viewpath = os.path.join(app,'views',viewname)
aviewpath = apath(viewpath, r=request)
viewlist = []
if os.path.exists(aviewpath):
if os.path.isdir(aviewpath):
viewlist = glob(os.path.join(aviewpath,'*.html'))
elif os.path.exists(aviewpath+'.html'):
viewlist.append(aviewpath+'.html')
if len(viewlist):
editviewlinks = []
for v in viewlist:
vf = os.path.split(v)[-1]
vargs = "/".join([viewpath.replace(os.sep,"/"),vf])
editviewlinks.append(A(T(vf.split(".")[0]),\
_href=URL('edit',args=[vargs])))
if len(request.args) > 2 and request.args[1] == 'controllers':
controller = (request.args[2])[:-3]
functions = regex_expose.findall(data)
else:
(controller, functions) = (None, None)
if 'from_ajax' in request.vars:
return response.json({'file_hash': file_hash, 'saved_on': saved_on, 'functions':functions, 'controller': controller, 'application': request.args[0], 'highlight': highlight })
else:
editarea_preferences = {}
editarea_preferences['FONT_SIZE'] = '10'
editarea_preferences['FULL_SCREEN'] = 'false'
editarea_preferences['ALLOW_TOGGLE'] = 'true'
editarea_preferences['REPLACE_TAB_BY_SPACES'] = '4'
editarea_preferences['DISPLAY'] = 'onload'
for key in editarea_preferences:
if globals().has_key(key):
editarea_preferences[key]=globals()[key]
return dict(app=request.args[0],
filename=filename,
filetype=filetype,
data=data,
edit_controller=edit_controller,
file_hash=file_hash,
saved_on=saved_on,
controller=controller,
functions=functions,
view_link=view_link,
editarea_preferences=editarea_preferences,
editviewlinks=editviewlinks)
def resolve():
    """Resolve an edit conflict by merging selected lines from the on-disk and edited versions."""
filename = '/'.join(request.args)
# ## check if file is not there
path = apath(filename, r=request)
a = safe_open(path, 'r').readlines()
try:
b = safe_open(path + '.1', 'r').readlines()
except IOError:
session.flash = 'Other file, no longer there'
redirect(URL('edit', args=request.args))
d = difflib.ndiff(a, b)
    def leading(line):
        """Wrap the leading whitespace of a diff line in XML() so it is rendered verbatim."""
z = ''
for (k, c) in enumerate(line):
if c == ' ':
z += ' '
elif c == ' \t':
z += ' '
elif k == 0 and c == '?':
pass
else:
break
return XML(z)
def getclass(item):
""" Determine item class """
if item[0] == ' ':
return 'normal'
if item[0] == '+':
return 'plus'
if item[0] == '-':
return 'minus'
if request.vars:
c = ''.join([item[2:] for (i, item) in enumerate(d) if item[0] \
== ' ' or 'line%i' % i in request.vars])
safe_open(path, 'w').write(c)
session.flash = 'files merged'
redirect(URL('edit', args=request.args))
else:
# Making the short circuit compatible with <= python2.4
gen_data = lambda index,item: not item[:1] in ['+','-'] and "" \
or INPUT(_type='checkbox',
_name='line%i' % index,
value=item[0] == '+')
diff = TABLE(*[TR(TD(gen_data(i,item)),
TD(item[0]),
TD(leading(item[2:]),
TT(item[2:].rstrip())), _class=getclass(item))
for (i, item) in enumerate(d) if item[0] != '?'])
return dict(diff=diff, filename=filename)
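# Illustrative helper (double-underscored so web2py does not expose it as an action):
# difflib.ndiff yields lines prefixed with "  ", "- ", "+ " or "? ", which is what the
# getclass()/leading() helpers inside resolve() key off when building the merge table.
def __example_ndiff():
    """Show the ndiff prefixes that resolve() renders as selectable rows (sample data)."""
    import difflib
    return list(difflib.ndiff(['alpha\n'], ['beta\n']))  # -> ['- alpha\n', '+ beta\n']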
def edit_language():
""" Edit language file """
filename = '/'.join(request.args)
from gluon.languages import read_dict, write_dict
strings = read_dict(apath(filename, r=request))
keys = sorted(strings.keys(),lambda x,y: cmp(x.lower(), y.lower()))
rows = []
rows.append(H2(T('Original/Translation')))
for key in keys:
name = md5_hash(key)
if key==strings[key]:
_class='untranslated'
else:
_class='translated'
if len(key) <= 40:
elem = INPUT(_type='text', _name=name,value=strings[key],
_size=70,_class=_class)
else:
elem = TEXTAREA(_name=name, value=strings[key], _cols=70,
_rows=5, _class=_class)
# Making the short circuit compatible with <= python2.4
k = (strings[key] != key) and key or B(key)
rows.append(P(k, BR(), elem, TAG.BUTTON(T('delete'),
_onclick='return delkey("%s")' % name), _id=name))
rows.append(INPUT(_type='submit', _value=T('update')))
form = FORM(*rows)
if form.accepts(request.vars, keepvalues=True):
strs = dict()
for key in keys:
name = md5_hash(key)
if form.vars[name]==chr(127): continue
strs[key] = form.vars[name]
write_dict(apath(filename, r=request), strs)
session.flash = T('file saved on %(time)s', dict(time=time.ctime()))
redirect(URL(r=request,args=request.args))
return dict(app=request.args[0], filename=filename, form=form)
def about():
""" Read about info """
app = request.args[0]
# ## check if file is not there
about = safe_open(apath('%s/ABOUT' % app, r=request), 'r').read()
license = safe_open(apath('%s/LICENSE' % app, r=request), 'r').read()
return dict(app=app, about=MARKMIN(about), license=MARKMIN(license))
def design():
""" Application design handler """
app = request.args[0]
if not response.flash and app == request.application:
msg = T('ATTENTION: you cannot edit the running application!')
response.flash = msg
if request.vars.pluginfile!=None:
filename=os.path.basename(request.vars.pluginfile.filename)
if plugin_install(app, request.vars.pluginfile.file,
request, filename):
session.flash = T('new plugin installed')
redirect(URL('design',args=app))
else:
session.flash = \
                T('unable to install plugin "%s"', filename)
redirect(URL(r=request))
# If we have only pyc files it means that
# we cannot design
if os.path.exists(apath('%s/compiled' % app, r=request)):
session.flash = \
T('application is compiled and cannot be designed')
redirect(URL('site'))
# Get all models
models = listdir(apath('%s/models/' % app, r=request), '.*\.py$')
models=[x.replace('\\','/') for x in models]
defines = {}
for m in models:
data = safe_open(apath('%s/models/%s' % (app, m), r=request), 'r').read()
defines[m] = regex_tables.findall(data)
defines[m].sort()
# Get all controllers
controllers = sorted(listdir(apath('%s/controllers/' % app, r=request), '.*\.py$'))
controllers = [x.replace('\\','/') for x in controllers]
functions = {}
for c in controllers:
data = safe_open(apath('%s/controllers/%s' % (app, c), r=request), 'r').read()
items = regex_expose.findall(data)
functions[c] = items
# Get all views
views = sorted(listdir(apath('%s/views/' % app, r=request), '[\w/\-]+\.\w+$'))
views = [x.replace('\\','/') for x in views]
extend = {}
include = {}
for c in views:
data = safe_open(apath('%s/views/%s' % (app, c), r=request), 'r').read()
items = regex_extend.findall(data)
if items:
extend[c] = items[0][1]
items = regex_include.findall(data)
include[c] = [i[1] for i in items]
# Get all modules
modules = listdir(apath('%s/modules/' % app, r=request), '.*\.py$')
    modules = [x.replace('\\','/') for x in modules]
modules.sort()
# Get all static files
statics = listdir(apath('%s/static/' % app, r=request), '[^\.#].*')
statics = [x.replace('\\','/') for x in statics]
statics.sort()
# Get all languages
languages = listdir(apath('%s/languages/' % app, r=request), '[\w-]*\.py')
#Get crontab
cronfolder = apath('%s/cron' % app, r=request)
if not os.path.exists(cronfolder): os.mkdir(cronfolder)
crontab = apath('%s/cron/crontab' % app, r=request)
if not os.path.exists(crontab):
safe_open(crontab,'w').write('#crontab')
plugins=[]
def filter_plugins(items,plugins):
plugins+=[item[7:].split('/')[0].split('.')[0] for item in items if item.startswith('plugin_')]
plugins[:]=list(set(plugins))
plugins.sort()
return [item for item in items if not item.startswith('plugin_')]
return dict(app=app,
models=filter_plugins(models,plugins),
defines=defines,
controllers=filter_plugins(controllers,plugins),
functions=functions,
views=filter_plugins(views,plugins),
modules=filter_plugins(modules,plugins),
extend=extend,
include=include,
statics=filter_plugins(statics,plugins),
languages=languages,
crontab=crontab,
plugins=plugins)
def delete_plugin():
""" Object delete handler """
app=request.args(0)
plugin = request.args(1)
plugin_name='plugin_'+plugin
if 'nodelete' in request.vars:
redirect(URL('design',args=app))
elif 'delete' in request.vars:
try:
for folder in ['models','views','controllers','static','modules']:
path=os.path.join(apath(app,r=request),folder)
for item in os.listdir(path):
if item.startswith(plugin_name):
filename=os.path.join(path,item)
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.unlink(filename)
session.flash = T('plugin "%(plugin)s" deleted',
dict(plugin=plugin))
except Exception:
session.flash = T('unable to delete file plugin "%(plugin)s"',
dict(plugin=plugin))
redirect(URL('design',args=request.args(0)))
return dict(plugin=plugin)
def plugin():
""" Application design handler """
app = request.args(0)
plugin = request.args(1)
if not response.flash and app == request.application:
msg = T('ATTENTION: you cannot edit the running application!')
response.flash = msg
# If we have only pyc files it means that
# we cannot design
if os.path.exists(apath('%s/compiled' % app, r=request)):
session.flash = \
T('application is compiled and cannot be designed')
redirect(URL('site'))
# Get all models
models = listdir(apath('%s/models/' % app, r=request), '.*\.py$')
models=[x.replace('\\','/') for x in models]
defines = {}
for m in models:
data = safe_open(apath('%s/models/%s' % (app, m), r=request), 'r').read()
defines[m] = regex_tables.findall(data)
defines[m].sort()
# Get all controllers
controllers = sorted(listdir(apath('%s/controllers/' % app, r=request), '.*\.py$'))
controllers = [x.replace('\\','/') for x in controllers]
functions = {}
for c in controllers:
data = safe_open(apath('%s/controllers/%s' % (app, c), r=request), 'r').read()
items = regex_expose.findall(data)
functions[c] = items
# Get all views
views = sorted(listdir(apath('%s/views/' % app, r=request), '[\w/\-]+\.\w+$'))
views = [x.replace('\\','/') for x in views]
extend = {}
include = {}
for c in views:
data = safe_open(apath('%s/views/%s' % (app, c), r=request), 'r').read()
items = regex_extend.findall(data)
if items:
extend[c] = items[0][1]
items = regex_include.findall(data)
include[c] = [i[1] for i in items]
# Get all modules
modules = listdir(apath('%s/modules/' % app, r=request), '.*\.py$')
    modules = [x.replace('\\','/') for x in modules]
modules.sort()
# Get all static files
statics = listdir(apath('%s/static/' % app, r=request), '[^\.#].*')
statics = [x.replace('\\','/') for x in statics]
statics.sort()
# Get all languages
languages = listdir(apath('%s/languages/' % app, r=request), '[\w-]*\.py')
#Get crontab
crontab = apath('%s/cron/crontab' % app, r=request)
if not os.path.exists(crontab):
safe_open(crontab,'w').write('#crontab')
def filter_plugins(items):
regex=re.compile('^plugin_'+plugin+'(/.*|\..*)?$')
return [item for item in items if regex.match(item)]
return dict(app=app,
models=filter_plugins(models),
defines=defines,
controllers=filter_plugins(controllers),
functions=functions,
views=filter_plugins(views),
modules=filter_plugins(modules),
extend=extend,
include=include,
statics=filter_plugins(statics),
languages=languages,
crontab=crontab)
def create_file():
""" Create files handler """
try:
path = apath(request.vars.location, r=request)
filename = re.sub('[^\w./-]+', '_', request.vars.filename)
if path[-11:] == '/languages/':
# Handle language files
if len(filename) == 0:
raise SyntaxError
if not filename[-3:] == '.py':
filename += '.py'
app = path.split('/')[-3]
path=os.path.join(apath(app, r=request),'languages',filename)
if not os.path.exists(path):
safe_open(path,'w').write('')
findT(apath(app, r=request), filename[:-3])
session.flash = T('language file "%(filename)s" created/updated',
dict(filename=filename))
redirect(request.vars.sender)
elif path[-8:] == '/models/':
# Handle python models
if not filename[-3:] == '.py':
filename += '.py'
if len(filename) == 3:
raise SyntaxError
text = '# coding: utf8\n'
elif path[-13:] == '/controllers/':
# Handle python controllers
if not filename[-3:] == '.py':
filename += '.py'
if len(filename) == 3:
raise SyntaxError
text = '# coding: utf8\n# %s\ndef index(): return dict(message="hello from %s")'
text = text % (T('try something like'), filename)
elif path[-7:] == '/views/':
if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin):
filename = 'plugin_%s/%s' % (request.vars.plugin, filename)
# Handle template (html) views
if filename.find('.')<0:
filename += '.html'
if len(filename) == 5:
raise SyntaxError
msg = T('This is the %(filename)s template',
dict(filename=filename))
text = dedent("""
{{extend 'layout.html'}}
<h1>%s</h1>
{{=BEAUTIFY(response._vars)}}""" % msg)
elif path[-9:] == '/modules/':
if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin):
filename = 'plugin_%s/%s' % (request.vars.plugin, filename)
# Handle python module files
if not filename[-3:] == '.py':
filename += '.py'
if len(filename) == 3:
raise SyntaxError
text = dedent("""
#!/usr/bin/env python
# coding: utf8
from gluon.html import *
from gluon.http import *
from gluon.validators import *
from gluon.sqlhtml import *
# request, response, session, cache, T, db(s)
# must be passed and cannot be imported!""")
elif path[-8:] == '/static/':
if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin):
filename = 'plugin_%s/%s' % (request.vars.plugin, filename)
text = ''
else:
redirect(request.vars.sender)
full_filename = os.path.join(path, filename)
dirpath = os.path.dirname(full_filename)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.exists(full_filename):
raise SyntaxError
safe_open(full_filename, 'w').write(text)
session.flash = T('file "%(filename)s" created',
dict(filename=full_filename[len(path):]))
redirect(URL('edit',
args=[os.path.join(request.vars.location, filename)]))
except Exception, e:
if not isinstance(e,HTTP):
session.flash = T('cannot create file')
redirect(request.vars.sender)
def upload_file():
""" File uploading handler """
try:
path = apath(request.vars.location, r=request)
if request.vars.filename:
filename = re.sub('[^\w\./]+', '_', request.vars.filename)
else:
filename = os.path.split(request.vars.file.filename)[-1]
if path[-8:] == '/models/' and not filename[-3:] == '.py':
filename += '.py'
if path[-9:] == '/modules/' and not filename[-3:] == '.py':
filename += '.py'
if path[-13:] == '/controllers/' and not filename[-3:] == '.py':
filename += '.py'
if path[-7:] == '/views/' and not filename[-5:] == '.html':
filename += '.html'
if path[-11:] == '/languages/' and not filename[-3:] == '.py':
filename += '.py'
filename = os.path.join(path, filename)
dirpath = os.path.dirname(filename)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
safe_open(filename, 'wb').write(request.vars.file.file.read())
session.flash = T('file "%(filename)s" uploaded',
dict(filename=filename[len(path):]))
except Exception:
session.flash = T('cannot upload file "%(filename)s"',
                          dict(filename=filename[len(path):]))
redirect(request.vars.sender)
def errors():
""" Error handler """
import operator
import os
import pickle
import hashlib
app = request.args[0]
method = request.args(1) or 'new'
if method == 'new':
errors_path = apath('%s/errors' % app, r=request)
delete_hashes = []
for item in request.vars:
if item[:7] == 'delete_':
delete_hashes.append(item[7:])
hash2error = dict()
for fn in listdir(errors_path, '^\w.*'):
fullpath = os.path.join(errors_path, fn)
if not os.path.isfile(fullpath): continue
try:
error = pickle.load(open(fullpath, 'r'))
except IOError:
continue
hash = hashlib.md5(error['traceback']).hexdigest()
if hash in delete_hashes:
os.unlink(fullpath)
else:
try:
hash2error[hash]['count'] += 1
except KeyError:
error_lines = error['traceback'].split("\n")
last_line = error_lines[-2]
error_causer = os.path.split(error['layer'])[1]
hash2error[hash] = dict(count=1, pickel=error,
causer=error_causer,
last_line=last_line,
hash=hash,ticket=fn)
decorated = [(x['count'], x) for x in hash2error.values()]
decorated.sort(key=operator.itemgetter(0), reverse=True)
return dict(errors = [x[1] for x in decorated], app=app, method=method)
else:
for item in request.vars:
if item[:7] == 'delete_':
os.unlink(apath('%s/errors/%s' % (app, item[7:]), r=request))
func = lambda p: os.stat(apath('%s/errors/%s' % \
(app, p), r=request)).st_mtime
tickets = sorted(listdir(apath('%s/errors/' % app, r=request), '^\w.*'),
key=func,
reverse=True)
return dict(app=app, tickets=tickets, method=method)
def make_link(path):
""" Create a link from a path """
tryFile = path.replace('\\', '/')
if os.path.isabs(tryFile) and os.path.isfile(tryFile):
(folder, filename) = os.path.split(tryFile)
(base, ext) = os.path.splitext(filename)
app = request.args[0]
editable = {'controllers': '.py', 'models': '.py', 'views': '.html'}
for key in editable.keys():
check_extension = folder.endswith("%s/%s" % (app,key))
if ext.lower() == editable[key] and check_extension:
return A('"' + tryFile + '"',
_href=URL(r=request,
f='edit/%s/%s/%s' % (app, key, filename))).xml()
return ''
def make_links(traceback):
""" Make links using the given traceback """
lwords = traceback.split('"')
# Making the short circuit compatible with <= python2.4
result = (len(lwords) != 0) and lwords[0] or ''
i = 1
while i < len(lwords):
link = make_link(lwords[i])
if link == '':
result += '"' + lwords[i]
else:
result += link
if i + 1 < len(lwords):
result += lwords[i + 1]
i = i + 1
i = i + 1
return result
class TRACEBACK(object):
""" Generate the traceback """
def __init__(self, text):
""" TRACEBACK constructor """
self.s = make_links(CODE(text).xml())
def xml(self):
""" Returns the xml """
return self.s
def ticket():
""" Ticket handler """
if len(request.args) != 2:
session.flash = T('invalid ticket')
redirect(URL('site'))
myversion = request.env.web2py_version
app = request.args[0]
ticket = request.args[1]
e = RestrictedError()
e.load(request, app, ticket)
return dict(app=app,
ticket=ticket,
output=e.output,
traceback=(e.traceback and TRACEBACK(e.traceback)),
snapshot=e.snapshot,
code=e.code,
layer=e.layer,
myversion=myversion)
def error():
""" Generate a ticket (for testing) """
raise RuntimeError('admin ticket generator at your service')
def update_languages():
""" Update available languages """
app = request.args[0]
update_all_languages(apath(app, r=request))
session.flash = T('Language files (static strings) updated')
redirect(URL('design',args=app))
def twitter():
session.forget()
session._unlock(response)
import gluon.tools
import gluon.contrib.simplejson as sj
try:
page = gluon.tools.fetch('http://twitter.com/web2py?format=json')
return sj.loads(page)['#timeline']
except Exception, e:
return DIV(T('Unable to download because:'),BR(),str(e))
| 34.430915 | 225 | 0.547641 |
4f57b6c6afe520af1cfc0d6a61581f96384d86e1 | 25,568 | py | Python | hops/constants.py | robzor92/hops-util-py | 88540a0c2b4e366fe6d2acb0441cea9378150c01 | [
"Apache-2.0"
] | null | null | null | hops/constants.py | robzor92/hops-util-py | 88540a0c2b4e366fe6d2acb0441cea9378150c01 | [
"Apache-2.0"
] | null | null | null | hops/constants.py | robzor92/hops-util-py | 88540a0c2b4e366fe6d2acb0441cea9378150c01 | [
"Apache-2.0"
] | null | null | null | """
String Constants used in Hops-Util: Environment variables, Kafka Config, SSL Config etc.
"""
class HTTP_CONFIG:
"""
HTTP String constants
"""
HTTP_CONTENT_TYPE = "Content-type"
HTTP_APPLICATION_JSON = "application/json"
HTTP_AUTHORIZATION = "Authorization"
HTTP_POST = "POST"
HTTP_PUT = "PUT"
HTTP_GET = "GET"
HTTP_DELETE = "DELETE"
HTTP_UNAUTHORIZED = 401
class ENV_VARIABLES:
"""
Environment variable names (accessible in os.environ)
"""
KAFKA_BROKERS_ENV_VAR = "KAFKA_BROKERS"
ELASTIC_ENDPOINT_ENV_VAR = "ELASTIC_ENDPOINT"
PWD_ENV_VAR = "PWD"
KAFKA_VERSION_ENV_VAR = "KAFKA_VERSION"
LIVY_VERSION_ENV_VAR = "LIVY_VERSION"
SPARK_VERSION_ENV_VAR = "SPARK_VERSION"
REST_ENDPOINT_END_VAR = "REST_ENDPOINT"
TENSORFLOW_VERSION_ENV_VAR = "TENSORFLOW_VERSION"
CUDA_VERSION_ENV_VAR = "CUDA_VERSION"
HOPSWORKS_VERSION_ENV_VAR = "HOPSWORKS_VERSION"
HADOOP_VERSION_ENV_VAR = "HADOOP_VERSION"
HADOOP_USER_NAME_ENV_VAR = "HADOOP_USER_NAME"
HADOOP_HOME = "HADOOP_HOME"
HADOOP_CLASSPATH_GLOB = "HADOOP_CLASSPATH_GLOB"
HDFS_USER_ENV_VAR = "HDFS_USER"
HOPSWORKS_USER_ENV_VAR = "HOPSWORKS_USER"
PATH_ENV_VAR = "PATH"
PYTHONPATH_ENV_VAR = "PYTHONPATH"
JOB_NAME_ENV_VAR = "HOPSWORKS_JOB_NAME"
KERNEL_ID_ENV_VAR = "HOPSWORKS_KERNEL_ID"
HOPSWORKS_PROJECT_ID_ENV_VAR = "HOPSWORKS_PROJECT_ID"
HOPSWORKS_PROJECT_NAME_ENV_VAR = "HOPSWORKS_PROJECT_NAME"
API_KEY_ENV_VAR = "API_KEY"
REGION_NAME_ENV_VAR = "REGION_NAME"
FLINK_CONF_DIR = "FLINK_CONF_DIR"
FLINK_LIB_DIR = "FLINK_LIB_DIR"
REQUESTS_VERIFY = "REQUESTS_VERIFY"
REQUESTS_VERIFY_ENV_VAR = "REQUESTS_VERIFY"
DOMAIN_CA_TRUSTSTORE_ENV_VAR = "DOMAIN_CA_TRUSTSTORE"
DOMAIN_CA_TRUSTSTORE_PEM_ENV_VAR = "DOMAIN_CA_TRUSTSTORE_PEM"
SECRETS_DIR_ENV_VAR = "SECRETS_DIR"
SPARK_IS_DRIVER = "IS_HOPS_DRIVER"
class KAFKA_SSL_CONFIG:
"""
Kafka SSL constant strings for configuration
"""
SSL = "SSL"
SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"
SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "
SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"
SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. If a password is not set access to the truststore is still available, but integrity checking is disabled."
SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"
SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"
SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"
SECURITY_PROTOCOL_CONFIG = "security.protocol"
SSL_CERTIFICATE_LOCATION_CONFIG = "ssl.certificate.location"
SSL_CA_LOCATION_CONFIG = "ssl.ca.location"
SSL_PRIVATE_KEY_LOCATION_CONFIG = "ssl.key.location"
SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"
# General SSL config properties
class SSL_CONFIG:
"""
General SSL configuration constants for Hops-TLS
"""
KEYSTORE_SUFFIX = "__kstore.jks"
TRUSTSTORE_SUFFIX = "__tstore.jks"
PASSWORD_SUFFIX = "__cert.key"
K_CERTIFICATE_CONFIG = "k_certificate"
T_CERTIFICATE_CONFIG = "t_certificate"
PEM_CLIENT_CERTIFICATE_CONFIG = "client.pem"
PEM_CLIENT_KEY_CONFIG = "client_key.pem"
PEM_CA_CHAIN_CERTIFICATE_CONFIG = "ca_chain.pem"
DOMAIN_CA_TRUSTSTORE = "domain_ca_truststore"
CRYPTO_MATERIAL_PASSWORD = "material_passwd"
PEM_CA_ROOT_CERT = "/srv/hops/kagent/host-certs/hops_root_ca.pem"
SSL_ENABLED = "ipc.server.ssl.enabled"
class KAFKA_PRODUCER_CONFIG:
"""
Constant strings for Kafka producers
"""
BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers"
KEY_SERIALIZER_CLASS_CONFIG = "key.serializer"
VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"
class KAFKA_CONSUMER_CONFIG:
"""
Constant strings for Kafka consumers
"""
GROUP_ID_CONFIG = "group.id"
ENABLE_AUTO_COMMIT_CONFIG = "enable.auto.commit"
AUTO_COMMIT_INTERVAL_MS_CONFIG = "auto.commit.interval.ms"
SESSION_TIMEOUT_MS_CONFIG = "session.timeout.ms"
KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer"
VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"
AUTO_OFFSET_RESET_CONFIG = "auto.offset.reset"
ENABLE_AUTO_COMMIT_CONFIG = "enable.auto.commit"
KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer"
VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"
class SPARK_CONFIG:
"""
Spark string constants
"""
SPARK_SCHEMA_FIELD_METADATA = "metadata"
SPARK_SCHEMA_FIELDS = "fields"
SPARK_SCHEMA_FIELD_NAME = "name"
SPARK_SCHEMA_FIELD_TYPE = "type"
SPARK_SCHEMA_ELEMENT_TYPE = "elementType"
SPARK_OVERWRITE_MODE = "overwrite"
SPARK_APPEND_MODE = "append"
SPARK_WRITE_DELIMITER = "delimiter"
SPARK_INFER_SCHEMA = "inferSchema"
SPARK_WRITE_HEADER = "header"
SPARK_TF_CONNECTOR_RECORD_TYPE = "recordType"
SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE = "Example"
SPARK_LONG_TYPE = "long"
SPARK_SHORT_TYPE = "short"
SPARK_BYTE_TYPE = "byte"
SPARK_INTEGER_TYPE = "integer"
SPARK_INT_TYPE = "int"
SPARK_FLOAT_TYPE = "float"
SPARK_DOUBLE_TYPE = 'double'
SPARK_DECIMAL_TYPE = "decimal"
SPARK_BIGINT_TYPE = "bigint"
SPARK_SMALLINT_TYPE = "smallint"
SPARK_STRING_TYPE = "string"
SPARK_BINARY_TYPE = "binary"
SPARK_NUMERIC_TYPES = [SPARK_BIGINT_TYPE,
SPARK_DECIMAL_TYPE,
SPARK_INTEGER_TYPE,
SPARK_INT_TYPE,
SPARK_DOUBLE_TYPE,
SPARK_LONG_TYPE,
SPARK_FLOAT_TYPE,
SPARK_SHORT_TYPE]
SPARK_STRUCT = "struct"
SPARK_ARRAY = "array"
SPARK_ARRAY_DOUBLE = "array<double>"
SPARK_ARRAY_INTEGER = "array<integer>"
SPARK_ARRAY_INT = "array<int>"
SPARK_ARRAY_BIGINT = "array<bigint>"
SPARK_ARRAY_FLOAT = "array<float>"
SPARK_ARRAY_DECIMAL = "array<decimal>"
SPARK_ARRAY_STRING = "array<string>"
SPARK_ARRAY_LONG = "array<long>"
SPARK_ARRAY_BINARY = "array<binary>"
SPARK_VECTOR = "vector"
SPARK_SQL_CATALOG_IMPLEMENTATION = "spark.sql.catalogImplementation"
SPARK_SQL_CATALOG_HIVE = "hive"
SPARK_JDBC_FORMAT= "jdbc"
SPARK_JDBC_URL= "url"
SPARK_JDBC_DBTABLE= "dbtable"
SPARK_JDBC_USER = "user"
SPARK_JDBC_PW = "password"
class MODEL_SERVING:
MODELS_DATASET = "Models"
MODEL_SERVER_TENSORFLOW_SERVING = "TENSORFLOW_SERVING"
MODEL_SERVER_FLASK = "FLASK"
MODEL_SERVERS = [MODEL_SERVER_TENSORFLOW_SERVING, MODEL_SERVER_FLASK]
SERVING_TOOL_DEFAULT = "DEFAULT"
SERVING_TOOL_KFSERVING = "KFSERVING"
SERVING_TOOLS = [SERVING_TOOL_DEFAULT, SERVING_TOOL_KFSERVING]
SERVING_ACTION_START = "START"
SERVING_ACTION_STOP = "STOP"
    SERVING_ACTIONS = [SERVING_ACTION_START, SERVING_ACTION_STOP]
SERVING_START_OR_STOP_PATH_PARAM = "?action="
class FEATURE_STORE:
"""
Featurestore constants
"""
TRAINING_DATASET_PROVENANCE_FEATUREGROUP = "featuregroup"
TRAINING_DATASET_PROVENANCE_VERSION = "version"
MAX_CORRELATION_MATRIX_COLUMNS = 50
TRAINING_DATASET_CSV_FORMAT = "csv"
TRAINING_DATASET_TSV_FORMAT = "tsv"
TRAINING_DATASET_PARQUET_FORMAT = "parquet"
TRAINING_DATASET_TFRECORDS_FORMAT = "tfrecords"
TRAINING_DATASET_TFRECORD_FORMAT = "tfrecord"
TRAINING_DATASET_AVRO_FORMAT = "avro"
TRAINING_DATASET_ORC_FORMAT = "orc"
TRAINING_DATASET_NPY_FORMAT = "npy"
TRAINING_DATASET_IMAGE_FORMAT = "image"
TRAINING_DATASET_HDF5_FORMAT = "hdf5"
TRAINING_DATASET_PETASTORM_FORMAT = "petastorm"
TRAINING_DATASET_NPY_SUFFIX = ".npy"
TRAINING_DATASET_HDF5_SUFFIX = ".hdf5"
TRAINING_DATASET_CSV_SUFFIX = ".csv"
TRAINING_DATASET_TSV_SUFFIX = ".tsv"
TRAINING_DATASET_PARQUET_SUFFIX = ".parquet"
TRAINING_DATASET_AVRO_SUFFIX = ".avro"
TRAINING_DATASET_ORC_SUFFIX = ".orc"
TRAINING_DATASET_IMAGE_SUFFIX = ".image"
TRAINING_DATASET_TFRECORDS_SUFFIX = ".tfrecords"
TRAINING_DATASET_PETASTORM_SUFFIX = ".petastorm"
TRAINING_DATASET_SUPPORTED_FORMATS = [
TRAINING_DATASET_TSV_FORMAT,
TRAINING_DATASET_CSV_FORMAT,
TRAINING_DATASET_PARQUET_FORMAT,
TRAINING_DATASET_TFRECORDS_FORMAT,
TRAINING_DATASET_TFRECORD_FORMAT,
TRAINING_DATASET_NPY_FORMAT,
TRAINING_DATASET_HDF5_FORMAT,
TRAINING_DATASET_AVRO_FORMAT,
TRAINING_DATASET_ORC_FORMAT,
TRAINING_DATASET_IMAGE_FORMAT,
TRAINING_DATASET_PETASTORM_FORMAT
]
FEATURE_GROUP_INSERT_APPEND_MODE = "append"
FEATURE_GROUP_INSERT_OVERWRITE_MODE = "overwrite"
FEATURESTORE_SUFFIX = "_featurestore"
TRAINING_DATASETS_SUFFIX = "_Training_Datasets"
TRAINING_DATASET_TF_RECORD_SCHEMA_FILE_NAME = "tf_record_schema.txt"
TF_RECORD_SCHEMA_FEATURE = "feature"
TF_RECORD_SCHEMA_FEATURE_FIXED = "fixed_len"
TF_RECORD_SCHEMA_FEATURE_VAR = "var_len"
TF_RECORD_SCHEMA_TYPE = "type"
TF_RECORD_SCHEMA_SHAPE = "shape"
TF_RECORD_INT_TYPE = "int"
TF_RECORD_FLOAT_TYPE = "float"
TF_RECORD_STRING_TYPE = "string"
TF_RECORD_INT_ARRAY_SPARK_TYPES = [SPARK_CONFIG.SPARK_ARRAY_INTEGER, SPARK_CONFIG.SPARK_ARRAY_BIGINT,
SPARK_CONFIG.SPARK_ARRAY_INT, SPARK_CONFIG.SPARK_ARRAY_LONG]
TF_RECORD_INT_SPARK_TYPES = [SPARK_CONFIG.SPARK_INTEGER_TYPE, SPARK_CONFIG.SPARK_BIGINT_TYPE,
SPARK_CONFIG.SPARK_INT_TYPE, SPARK_CONFIG.SPARK_LONG_TYPE]
TF_RECORD_STRING_SPARK_TYPES = [SPARK_CONFIG.SPARK_STRING_TYPE, SPARK_CONFIG.SPARK_BINARY_TYPE]
TF_RECORD_STRING_ARRAY_SPARK_TYPES = [SPARK_CONFIG.SPARK_ARRAY_STRING, SPARK_CONFIG.SPARK_ARRAY_BINARY]
TF_RECORD_FLOAT_SPARK_TYPES = [SPARK_CONFIG.SPARK_FLOAT_TYPE, SPARK_CONFIG.SPARK_DECIMAL_TYPE,
SPARK_CONFIG.SPARK_DOUBLE_TYPE]
TF_RECORD_FLOAT_ARRAY_SPARK_TYPES = [SPARK_CONFIG.SPARK_ARRAY_FLOAT, SPARK_CONFIG.SPARK_ARRAY_DECIMAL,
SPARK_CONFIG.SPARK_ARRAY_DOUBLE, SPARK_CONFIG.SPARK_VECTOR]
RECOGNIZED_TF_RECORD_TYPES = [SPARK_CONFIG.SPARK_VECTOR, SPARK_CONFIG.SPARK_ARRAY_BINARY,
SPARK_CONFIG.SPARK_ARRAY_STRING, SPARK_CONFIG.SPARK_ARRAY_DECIMAL,
SPARK_CONFIG.SPARK_ARRAY_DOUBLE, SPARK_CONFIG.SPARK_ARRAY_FLOAT,
SPARK_CONFIG.SPARK_ARRAY_LONG, SPARK_CONFIG.SPARK_ARRAY_INTEGER,
SPARK_CONFIG.SPARK_BINARY_TYPE, SPARK_CONFIG.SPARK_STRING_TYPE,
SPARK_CONFIG.SPARK_DECIMAL_TYPE, SPARK_CONFIG.SPARK_DOUBLE_TYPE,
SPARK_CONFIG.SPARK_FLOAT_TYPE, SPARK_CONFIG.SPARK_LONG_TYPE,
SPARK_CONFIG.SPARK_INT_TYPE, SPARK_CONFIG.SPARK_INTEGER_TYPE,
SPARK_CONFIG.SPARK_ARRAY_BIGINT, SPARK_CONFIG.SPARK_BIGINT_TYPE,
SPARK_CONFIG.SPARK_ARRAY_INT
]
DATAFRAME_TYPE_SPARK = "spark"
DATAFRAME_TYPE_NUMPY = "numpy"
DATAFRAME_TYPE_PYTHON = "python"
DATAFRAME_TYPE_PANDAS = "pandas"
JDBC_TRUSTSTORE_ARG = "sslTrustStore"
JDBC_TRUSTSTORE_PW_ARG = "trustStorePassword"
JDBC_KEYSTORE_ARG = "sslKeyStore"
JDBC_KEYSTORE_PW_ARG = "keyStorePassword"
IMPORT_HOPS_UTIL_FEATURESTORE_HELPER = "import io.hops.util.featurestore.FeaturestoreHelper"
class PETASTORM_CONFIG:
"""
Petastorm String constants
"""
FILESYSTEM_FACTORY = "pyarrow_filesystem"
SCHEMA = "schema"
LIBHDFS = "libhdfs"
class MYSQL_CONFIG:
""" MYSQL string constants """
MYSQL_DATA_TYPES = [
"None", "INT(11)", "TINYINT(1)", "SMALLINT(5)", "MEDIUMINT(7)", "BIGINT(20)", "FLOAT", "DOUBLE", "DECIMAL",
"DATE", "DATETIME", "TIMESTAMP", "TIME", "YEAR", "CHAR", "VARCHAR(25)", "VARCHAR(125)", "VARCHAR(225)",
"VARCHAR(500)", "VARCHAR(1000)", "VARCHAR(2000)", "VARCHAR(5000)", "VARCHAR(10000)", "BLOB", "TEXT",
"TINYBLOB", "TINYTEXT", "MEDIUMBLOB", "MEDIUMTEXT", "LONGBLOB", "LONGTEXT", "JSON"
]
MYSQL_BIGINT_TYPE = "BIGINT(20)"
MYSQL_SMALLINT_TYPE = "SMALLINT(5)"
MYSQL_CHAR_TYPE = "CHAR"
MYSQL_INTEGER_TYPE = "INT(11)"
MYSQL_VARCHAR_1000_TYPE = "VARCHAR(1000)"
MYSQL_BLOB_TYPE = "BLOB"
class HIVE_CONFIG:
"""
Hive string constants
"""
HIVE_DATA_TYPES = [
"TINYINT", "SMALLINT", "INT", "BIGINT", "FLOAT", "DOUBLE",
"DECIMAL", "TIMESTAMP", "DATE", "INTERVAL", "STRING", "VARCHAR",
"CHAR", "BOOLEAN", "BINARY", "ARRAY", "MAP", "STRUCT", "UNIONTYPE"
]
HIVE_BIGINT_TYPE = "BIGINT"
HIVE_INT_TYPE = "INT"
HIVE_CHAR_TYPE = "CHAR"
class REST_CONFIG:
"""
REST endpoints and JSON properties used for communicating with Hopsworks REST API
"""
JSON_KEYSTOREPWD = "keyStorePwd"
JSON_SCHEMA_CONTENTS = "contents"
JSON_TYPE="type"
JSON_FEATURESTORE_UPDATE_STATS_QUERY_PARAM = "updateStats"
JSON_FEATURESTORE_UPDATE_METADATA_QUERY_PARAM = "updateMetadata"
JSON_FEATURESTORE_UPDATE_JOB_QUERY_PARAM = "updateJob"
JSON_FEATURESTORE_ENABLE_ONLINE_QUERY_PARAM = "enableOnline"
JSON_FEATURESTORE_DISABLE_ONLINE_QUERY_PARAM = "disableOnline"
JSON_FEATURESTORE_UPDATE_STATISTICS_SETTINGS = "updateStatsSettings"
JSON_FEATURESTORE_SETTINGS_ENTITY_NAME_MAX_LENGTH = "featurestoreEntityNameMaxLength"
JSON_FEATURESTORE_SETTINGS_ENTITY_DESCRIPTION_MAX_LENGTH = "featurestoreEntityDescriptionMaxLength"
JSON_FEATURESTORE_SETTINGS_CACHED_FEATUREGROUP_DTO_TYPE = "cachedFeaturegroupDtoType"
JSON_FEATURESTORE_SETTINGS_EXTERNAL_TRAINING_DATASET_TYPE = "externalTrainingDatasetType"
JSON_FEATURESTORE_SETTINGS_FEATURESTORE_REGEX = "featurestoreRegex"
JSON_FEATURESTORE_SETTINGS_HOPSFS_CONNECTOR_DTO_TYPE = "hopsfsConnectorDtoType"
JSON_FEATURESTORE_SETTINGS_HOPSFS_CONNECTOR_TYPE = "hopsfsConnectorType"
JSON_FEATURESTORE_SETTINGS_HOPSFS_TRAINING_DATASET_TYPE = "hopsfsTrainingDatasetType"
JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_DTO_TYPE = "jdbcConnectorDtoType"
JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_TYPE = "jdbcConnectorType"
JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_ARGUMENTS_MAX_LEN = "jdbcStorageConnectorArgumentsMaxLength"
JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_CONNECTION_STRING_MAX_LEN = \
"jdbcStorageConnectorConnectionstringMaxLength"
JSON_FEATURESTORE_SETTINGS_ON_DEMAND_FEATUREGROUP_DTO_TYPE = "onDemandFeaturegroupDtoType"
JSON_FEATURESTORE_SETTINGS_ON_DEMAND_FEATUREGROUP_SQL_QUERY_MAX_LEN = "onDemandFeaturegroupSqlQueryMaxLength"
JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_DTO_TYPE = "s3ConnectorDtoType"
JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_TYPE = "s3ConnectorType"
JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_ACCESS_KEY_MAX_LEN = "s3StorageConnectorAccesskeyMaxLength"
JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_BUCKET_MAX_LEN = "s3StorageConnectorBucketMaxLength"
JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_SECRET_KEY_MAX_LEN = "s3StorageConnectorSecretkeyMaxLength"
JSON_FEATURESTORE_SETTINGS_STORAGE_CONNECTOR_DESCRIPTION_MAX_LEN = "storageConnectorDescriptionMaxLength"
JSON_FEATURESTORE_SETTINGS_STORAGE_CONNECTOR_NAME_MAX_LEN = "storageConnectorDescriptionMaxLength"
JSON_FEATURESTORE_SETTINGS_HIVE_SUGGESTED_FEATURE_TYPES = "suggestedHiveFeatureTypes"
JSON_FEATURESTORE_SETTINGS_MYSQL_SUGGESTED_FEATURE_TYPES = "suggestedMysqlFeatureTypes"
JSON_FEATURESTORE_SETTINGS_TRAINING_DATASET_DATA_FORMATS = "trainingDatasetDataFormats"
JSON_FEATURESTORE_SETTINGS_TRAINING_DATASET_TYPE = "trainingDatasetType"
JSON_FEATURESTORE_SETTINGS = "settings"
JSON_FEATURESTORE_STORAGE_CONNECTORS = "storageConnectors"
JSON_FEATURESTORE_SETTINGS_IMPORT_CONNECTORS = "featureImportConnectors"
JSON_FEATURESTORE_SETTINGS_ONLINE_ENABLED = "onlineFeaturestoreEnabled"
JSON_FEATURESTORE_JOB_FEATUREGROUP_ID = "featuregroupId"
JSON_FEATURESTORE_JOB_TRAINING_DATASET_ID = "trainingDatasetId"
JSON_FEATURESTORE_JOB_LAST_COMPUTED = "lastComputed"
JSON_FEATURESTORE_JOB_STATUS = "jobStatus"
JSON_FEATURESTORE_JOB_NAME = "jobName"
JSON_FEATURESTORE_JOB_ID = "jobId"
JSON_FEATURESTORE_LOCATION = "location"
JSON_FEATUREGROUP_ON_DEMAND_QUERY = "query"
JSON_FEATUREGROUP_JDBC_CONNECTOR_NAME = "jdbcConnectorName"
JSON_FEATUREGROUP_JDBC_CONNECTOR_ID = "jdbcConnectorId"
JSON_FEATUREGROUP_TYPE = "type"
JSON_FEATUREGROUP_NAME = "name"
JSON_FEATUREGROUP_ID = "id"
JSON_FEATUREGROUP_VERSION = "version"
JSON_FEATUREGROUP_JOBS = "jobs"
JSON_FEATUREGROUP_FEATURES = "features"
JSON_FEATUREGROUP_DESCRIPTION = "description"
JSON_FEATUREGROUP_CREATED = "created"
JSON_FEATUREGROUP_CREATOR = "creator"
JSON_FEATUREGROUPS = "featuregroups"
JSON_FEATUREGROUP_ONLINE = "onlineEnabled"
JSON_FEATUREGROUP_HUDI = "hudiEnabled"
JSON_FEATUREGROUP_FEATURE_HISTOGRAM_ENABLED = "featHistEnabled"
JSON_FEATUREGROUP_FEATURE_CORRELATION_ENABLED = "featCorrEnabled"
JSON_FEATUREGROUP_DESCRIPTIVE_STATISTICS_ENABLED = "descStatsEnabled"
JSON_FEATUREGROUP_STATISTIC_COLUMNS = "statisticColumns"
JSON_ONLINE_FEATUREGROUP_ID = "id"
JSON_ONLINE_FEATUREGROUP_DB = "dbName"
JSON_ONLINE_FEATUREGROUP_TABLE = "tableName"
JSON_ONLINE_FEATUREGROUP_TABLE_TYPE = "tableType"
JSON_ONLINE_FEATUREGROUP_TABLE_ROWS = "tableRows"
JSON_ONLINE_FEATUREGROUP_SIZE = "size"
JSON_FEATURESTORE = "featurestore"
JSON_FEATURESTORE_ID = "featurestoreId"
JSON_FEATURESTORE_NAME = "featurestoreName"
JSON_FEATURESTORE_PROJECT_ID = "projectId"
JSON_FEATURESTORE_PROJECT_NAME = "projectName"
JSON_FEATURESTORE_INODE_ID = "inodeId"
JSON_FEATURESTORE_DESCRIPTION = "featurestoreDescription"
JSON_FEATURESTORE_HDFS_PATH = "hdfsStorePath"
JSON_FEATURESTORE_ONLINE_CONNECTOR = "onlineFeaturestoreConnector"
JSON_FEATURESTORE_ONLINE_ENABLED = "onlineEnabled"
JSON_FEATURESTORE_ONLINE_FEATURESTORE_TYPE = "onlineFeaturestoreType"
JSON_FEATURESTORE_OFFLINE_FEATURESTORE_TYPE = "offlineFeaturestoreType"
JSON_FEATURESTORE_ONLINE_FEATURESTORE_NAME = "onlineFeaturestoreName"
JSON_FEATURESTORE_OFFLINE_FEATURESTORE_NAME = "offlineFeaturestoreName"
JSON_FEATURE_NAME = "name"
JSON_FEATURE_TYPE = "type"
JSON_FEATURE_INDEX = "index"
JSON_FEATURE_DESCRIPTION = "description"
JSON_FEATURE_PRIMARY = "primary"
JSON_FEATURE_PARTITION = "partition"
JSON_FEATURE_ONLINE_TYPE = "onlineType"
JSON_FEATURE_FEATUREGROUP = "featuregroup"
JSON_FEATURE_VERSION = "version"
JSON_TRAINING_DATASET_EXTERNAL_TYPE = "EXTERNAL_TRAINING_DATASET"
JSON_TRAINING_DATASET_HOPSFS_TYPE = "HOPSFS_TRAINING_DATASET"
JSON_TRAINING_DATASET_TYPE = "trainingDatasetType"
JSON_TRAINING_DATASET_CONNECTOR_NAME = "storageConnectorName"
JSON_TRAINING_DATASET_CONNECTOR_ID = "storageConnectorId"
JSON_TRAINING_DATASET_SIZE = "size"
JSON_TRAINING_DATASET_ID = "id"
JSON_TRAINING_DATASET_NAME = "name"
JSON_TRAINING_DATASETS = "trainingDatasets"
JSON_TRAINING_DATASET_HDFS_STORE_PATH = "hdfsStorePath"
JSON_TRAINING_DATASET_FORMAT = "dataFormat"
JSON_TRAINING_DATASET_SCHEMA = "features"
JSON_TRAINING_DATASET_VERSION = "version"
JSON_TRAINING_DATASET_CREATOR = "creator"
JSON_TRAINING_DATASET_CREATED = "created"
JSON_TRAINING_DATASET_DESCRIPTION = "description"
JSON_TRAINING_DATASET_JOBNAME = "jobName"
JSON_TRAINING_DATASET_INODE_ID = "inodeId"
JSON_TRAINING_DATASET_FEATURES = "features"
JSON_TRAINING_DATASET_JOBS = "jobs"
JSON_FEATURESTORE_HOPSFS_CONNECTOR_HOPSFS_PATH = "hopsfsPath"
JSON_FEATURESTORE_HOPSFS_CONNECTOR_DATASET_NAME = "datasetName"
JSON_FEATURESTORE_JDBC_CONNECTOR_CONNECTION_STRING = "connectionString"
JSON_FEATURESTORE_JDBC_CONNECTOR_ARGUMENTS = "arguments"
JSON_FEATURESTORE_S3_ACCESS_KEY = "accessKey"
JSON_FEATURESTORE_S3_SECRET_KEY = "secretKey"
JSON_FEATURESTORE_S3_BUCKET = "bucket"
JSON_FEATURESTORE_CONNECTOR_NAME = "name"
JSON_FEATURESTORE_CONNECTOR_DESCRIPTION = "description"
JSON_FEATURESTORE_CONNECTOR_ID = "id"
JSON_FEATURESTORE_CONNECTOR_FEATURESTORE_ID = "featurestoreId"
JSON_FEATURESTORE_CONNECTOR_TYPE = "storageConnectorType"
JSON_SCHEMA_VERSION = "version"
JSON_KEYSTORE = "keyStore"
HOPSWORKS_REST_RESOURCE = "hopsworks-api/api"
HOPSWORKS_SCHEMA_RESOURCE = "schema"
HOPSWORKS_FEATURESTORES_RESOURCE = "featurestores"
HOPSWORKS_FEATURESTORE_METADATA_RESOURCE = "metadata"
HOPSWORKS_FEATUREGROUPS_RESOURCE = "featuregroups"
HOPSWORKS_TRAININGDATASETS_RESOURCE = "trainingdatasets"
HOPSWORKS_FEATUREGROUP_CLEAR_RESOURCE = "clear"
HOPSWORKS_FEATUREGROUPS_SYNC_RESOURCE = "sync"
HOPSWORKS_SERVING_RESOURCE = "serving"
HOPSWORKS_INFERENCE_RESOURCE = "inference"
HOPSWORKS_MODELS_RESOURCE = "models"
HOPSWORKS_USERS_RESOURCE = "users"
HOPSWORKS_ADMIN_RESOURCE = "admin"
HOPSWORKS_FEATURESTORES_STORAGE_CONNECTORS_RESOURCE = "storageconnectors"
HOPSWORKS_ONLINE_FEATURESTORE_STORAGE_CONNECTOR_RESOURCE= "onlinefeaturestore"
HOPSWORKS_FEATURESTORE_TAGS_RESOURCE = "tags"
HOPSWORKS_VARIABLES_RESOURCE = "variables"
HOPSWORKS_ENDPOINT = "hopsworks_endpoint"
HOPSWORKS_EXPERIMENTS_RESOURCE = "experiments"
HOPSWORKS_KAFKA_RESOURCE = "kafka"
HOPSWORKS_TOPICS_RESOURCE = "topics"
HOPSWORKS_SUBJECTS_RESOURCE = "subjects"
HOPSWORKS_AS_SHARED = "asShared"
HOPSWORKS_SHARED = "shared"
HOPSWORKS_PROJECT_RESOURCE = "project"
HOPSWORKS_USER_RESOURCE = "users"
HOPSWORKS_PROJECT_INFO_RESOURCE = "getProjectInfo"
HOPSWORKS_JOBS_RESOURCE = "jobs"
HOPSWORKS_SECRETS_RESOURCE = "secrets"
HOPSWORKS_EXECUTIONS_RESOURCE = "executions"
HOPSWORKS_DATASETS_RESOURCE = "dataset"
HOPSWORKS_PROJECT_CREDENTIALS_RESOURCE = "credentials"
HOPSWORKS_PROJECT_CLIENT = "client"
HOPSWORKS_AUTH_RESOURCE = "auth"
HOPSWORKS_AUTH_RESOURCE_REGISTER = "register"
HOPSWORKS_XATTR_RESOURCE = "xattrs"
HOPSWORKS_ELASTIC_RESOURCE = "elastic"
HOPSWORKS_ELASTIC_JWT_RESOURCE = "jwt"
JSON_ERROR_CODE = "errorCode"
JSON_ERROR_MSG = "errorMsg"
JSON_USR_MSG = "usrMsg"
JWT_TOKEN = "token.jwt"
JSON_SERVING_STATUS = "status"
JSON_SERVING_ARTIFACT_PATH = "artifactPath"
JSON_SERVING_NAME = "name"
JSON_SERVING_CREATOR = "creator"
JSON_MODEL_SERVER = "modelServer"
JSON_SERVING_TOOL = "servingTool"
JSON_SERVING_MODEL_VERSION = "modelVersion"
JSON_SERVING_CREATED = "created"
JSON_SERVING_REQUESTED_INSTANCES = "requestedInstances"
JSON_SERVING_BATCHING_ENABLED = "batchingEnabled"
JSON_SERVING_AVAILABLE_INSTANCES = "availableInstances"
JSON_SERVING_KAFKA_TOPIC_DTO = "kafkaTopicDTO"
JSON_SERVING_ID = "id"
JSON_SERVING_CREATE_KAFKA_TOPIC = "CREATE"
JSON_SERVING_DONT_CREATE_KAFKA_TOPIC = "NONE"
JSON_KAFKA_TOPIC_SCHEMA_VERSION = "schemaVersion"
JSON_KAFKA_TOPIC_NAME = "name"
JSON_KAFKA_NUM_PARTITIONS = "numOfPartitions"
JSON_KAFKA_NUM_REPLICAS = "numOfReplicas"
HOPSWORKS_CLOUD_RESOURCE = "cloud"
HOPSWORKS_AWS_CLOUD_SESSION_TOKEN_RESOURCE = "aws/session-token"
HOPSWORKS_CLOUD_ROLE_MAPPINGS_RESOURCE = "role-mappings"
HOPSWORKS_CLOUD_SESSION_TOKEN_RESOURCE_QUERY_ROLE = "roleARN"
HOPSWORKS_CLOUD_SESSION_TOKEN_RESOURCE_QUERY_SESSION = "roleSessionName"
HOPSWORKS_CLOUD_SESSION_TOKEN_RESOURCE_QUERY_SESSION_DURATION = "durationSeconds"
JSON_ACCESS_KEY_ID = "accessKeyId"
JSON_SECRET_KEY_ID = "secretAccessKey"
JSON_SESSION_TOKEN_ID = "sessionToken"
JSON_ARRAY_ITEMS = "items"
JSON_CLOUD_ROLE = "cloudRole"
class DELIMITERS:
"""
String delimiters constants
"""
SLASH_DELIMITER = "/"
COMMA_DELIMITER = ","
TAB_DELIMITER = "\t"
COLON_DELIMITER = ":"
DOT_DELIMITER = "."
AMPERSAND_DELIMITER = "&"
SEMI_COLON_DELIMITER = ";"
JDBC_CONNECTION_STRING_VALUE_DELIMITER = "="
JDBC_CONNECTION_STRING_DELIMITER = ";"
QUESTION_MARK_DELIMITER = "?"
class S3_CONFIG:
"""
String constants for S3
"""
S3_FILE_PREFIX = "s3a://"
S3_ACCESS_KEY_ENV = "fs.s3a.access.key"
S3_SECRET_KEY_ENV = "fs.s3a.secret.key"
S3_SESSION_KEY_ENV = "fs.s3a.session.token"
S3_CREDENTIAL_PROVIDER_ENV = "fs.s3a.aws.credentials.provider"
S3_TEMPORARY_CREDENTIAL_PROVIDER = "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"
S3_TRAINING_DATASETS_FOLDER = "TRAINING_DATASETS"
AWS_ACCESS_KEY_ID_ENV = "AWS_ACCESS_KEY_ID"
AWS_SECRET_ACCESS_KEY_ENV = "AWS_SECRET_ACCESS_KEY"
AWS_SESSION_TOKEN_ENV = "AWS_SESSION_TOKEN"
class AWS:
DEFAULT_REGION = 'default'
SECRETS_MANAGER = "secretsmanager"
PARAMETER_STORE = "parameterstore"
class LOCAL:
LOCAL_STORE = "local"
class XATTRS:
XATTRS_PARAM_NAME = 'name'
class ELASTICSEARCH_CONFIG:
SSL_CONFIG = "es.net.ssl"
NODES_WAN_ONLY = "es.nodes.wan.only"
NODES = "es.nodes"
SSL_KEYSTORE_LOCATION = "es.net.ssl.keystore.location"
SSL_KEYSTORE_PASSWORD = "es.net.ssl.keystore.pass"
SSL_TRUSTSTORE_LOCATION = "es.net.ssl.truststore.location"
SSL_TRUSTSTORE_PASSWORD = "es.net.ssl.truststore.pass"
HTTP_AUTHORIZATION = "es.net.http.header.Authorization"
INDEX = "es.resource" | 42.827471 | 180 | 0.756375 |
4f574e63835549991e9838f0972e26775617dd0c | 722 | py | Python | Chapter_01/blog/urls.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | 1 | 2021-04-23T16:35:45.000Z | 2021-04-23T16:35:45.000Z | Chapter_01/blog/urls.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | null | null | null | Chapter_01/blog/urls.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | null | null | null | from django.urls import path
from . import views
from .feeds import LatestPostsFeed
# This app-level URLconf is included from the project-level 'mysite/urls.py'
app_name = 'blog'
urlpatterns = [
# path('', views.PostListView.as_view(), name='post_list'),
path('',
views.post_list, name='post_list'),
path('tag/<slug:tag_slug>/',
views.post_list, name='post_list_by_tag'),
path('<int:year>/<int:month>/<int:day>/<slug:post>/',
views.post_detail, name='post_detail'),
path('<int:post_id>/share/',
views.post_share, name='post_share'),
path('feed/',
LatestPostsFeed(), name='post_feed'),
path('search/',
views.post_search, name='post_search'),
] | 25.785714 | 63 | 0.608033 |
4f562f16dad1110232eec9c388d72e2cfbff4791 | 2,944 | py | Python | pluginsmanager/observer/observer_manager.py | SpotlightKid/PluginsManager | 2dcc9f6a79b48e9c9be82efffd855352fa15c5c7 | [
"Apache-2.0"
] | null | null | null | pluginsmanager/observer/observer_manager.py | SpotlightKid/PluginsManager | 2dcc9f6a79b48e9c9be82efffd855352fa15c5c7 | [
"Apache-2.0"
] | null | null | null | pluginsmanager/observer/observer_manager.py | SpotlightKid/PluginsManager | 2dcc9f6a79b48e9c9be82efffd855352fa15c5c7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pluginsmanager.observer.scope import ManagerScopes
from pluginsmanager.observer.updates_observer import UpdatesObserver
class ObserverManager(UpdatesObserver):
def __init__(self):
super(ObserverManager, self).__init__()
self.observers = []
self._scope = ManagerScopes()
def enter_scope(self, observer):
"""
        Open an observer scope.
        Marks ``observer`` as the originator of the changes that follow, so
        those changes are not notified back to ``observer`` itself.
:param UpdatesObserver observer: Observer that causes changes
"""
self._scope.enter(observer)
def exit_scope(self):
"""
Closes the last observer scope added
"""
self._scope.exit()
@property
def scope(self):
return self._scope.current.identifier
def append(self, observer):
self.observers.append(observer)
def on_bank_updated(self, bank, update_type, index, origin, **kwargs):
for observer in self.observers:
if observer != self.scope:
observer.on_bank_updated(bank, update_type, index=index, origin=origin, **kwargs)
def on_pedalboard_updated(self, pedalboard, update_type, index, origin, **kwargs):
for observer in self.observers:
if observer != self.scope:
observer.on_pedalboard_updated(pedalboard, update_type, index=index, origin=origin, **kwargs)
def on_effect_updated(self, effect, update_type, index, origin, **kwargs):
for observer in self.observers:
if observer != self.scope:
observer.on_effect_updated(effect, update_type, index=index, origin=origin, **kwargs)
def on_effect_status_toggled(self, effect, **kwargs):
for observer in self.observers:
if observer != self.scope:
observer.on_effect_status_toggled(effect, **kwargs)
def on_param_value_changed(self, param, **kwargs):
for observer in self.observers:
if observer != self.scope:
observer.on_param_value_changed(param, **kwargs)
def on_connection_updated(self, connection, update_type, pedalboard, **kwargs):
for observer in self.observers:
if observer != self.scope:
observer.on_connection_updated(connection, update_type, pedalboard=pedalboard, **kwargs)
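# Illustrative usage sketch (not part of the upstream module).  ``gui`` and
# ``midi`` stand for two hypothetical UpdatesObserver instances created
# elsewhere; the observer that originates a change enters its own scope so the
# notification is fanned out to every *other* registered observer only.
def _example_notify_other_observers(manager, gui, midi, param):
    manager.append(gui)
    manager.append(midi)
    manager.enter_scope(gui)    # changes below originate from ``gui``
    try:
        # ``midi`` receives the callback, ``gui`` (the current scope) is skipped
        manager.on_param_value_changed(param)
    finally:
        manager.exit_scope()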
| 37.74359 | 109 | 0.681726 |
4f5636e1426f4df691556f298b32c80559e91a72 | 136,266 | py | Python | Lib/test/test_unicode.py | Pixmew/cpython | aa01011003bb855cd52abfd49f2443446590d913 | [
"0BSD"
] | 5 | 2019-04-28T05:24:54.000Z | 2021-05-08T02:04:27.000Z | Lib/test/test_unicode.py | Pixmew/cpython | aa01011003bb855cd52abfd49f2443446590d913 | [
"0BSD"
] | 14 | 2020-12-01T05:38:50.000Z | 2022-03-01T05:00:49.000Z | Lib/test/test_unicode.py | Pixmew/cpython | aa01011003bb855cd52abfd49f2443446590d913 | [
"0BSD"
] | 1 | 2020-10-30T14:25:00.000Z | 2020-10-30T14:25:00.000Z | """ Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import _string
import codecs
import itertools
import operator
import struct
import sys
import textwrap
import unicodedata
import unittest
import warnings
from test.support import import_helper
from test.support import warnings_helper
from test import support, string_tests
from test.support.script_helper import assert_python_failure
# Error handling (bad decoder return)
def search_function(encoding):
def decode1(input, errors="strict"):
return 42 # not a tuple
def encode1(input, errors="strict"):
return 42 # not a tuple
def encode2(input, errors="strict"):
return (42, 42) # no unicode
def decode2(input, errors="strict"):
return (42, 42) # no unicode
if encoding=="test.unicode1":
return (encode1, decode1, None, None)
elif encoding=="test.unicode2":
return (encode2, decode2, None, None)
else:
return None
def duplicate_string(text):
"""
Try to get a fresh clone of the specified text:
new object with a reference count of 1.
This is a best-effort: latin1 single letters and the empty
string ('') are singletons and cannot be cloned.
"""
return text.encode().decode()
class StrSubclass(str):
pass
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest,
unittest.TestCase):
type2test = str
def setUp(self):
codecs.register(search_function)
self.addCleanup(codecs.unregister, search_function)
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
def test_literals(self):
self.assertEqual('\xff', '\u00ff')
self.assertEqual('\uffff', '\U0000ffff')
self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
# raw strings should not have unicode escapes
self.assertNotEqual(r"\u0020", " ")
def test_ascii(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(ascii('abc'), "'abc'")
self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
self.assertEqual(ascii('ab\\'), "'ab\\\\'")
self.assertEqual(ascii('\\c'), "'\\\\c'")
self.assertEqual(ascii('\\'), "'\\\\'")
self.assertEqual(ascii('\n'), "'\\n'")
self.assertEqual(ascii('\r'), "'\\r'")
self.assertEqual(ascii('\t'), "'\\t'")
self.assertEqual(ascii('\b'), "'\\x08'")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'"), '''"'"''')
self.assertEqual(ascii('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = ascii(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test ascii works on wide unicode escapes without overflow.
self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
ascii("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, ascii, WrongRepr())
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr('abc'), "'abc'")
self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
self.assertEqual(repr('ab\\'), "'ab\\\\'")
self.assertEqual(repr('\\c'), "'\\\\c'")
self.assertEqual(repr('\\'), "'\\\\'")
self.assertEqual(repr('\n'), "'\\n'")
self.assertEqual(repr('\r'), "'\\r'")
self.assertEqual(repr('\t'), "'\\t'")
self.assertEqual(repr('\b'), "'\\x08'")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'"), '''"'"''')
self.assertEqual(repr('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
"\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
"\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
"\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
"\xfe\xff'")
testrepr = repr(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
repr("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, repr, WrongRepr())
def test_iterators(self):
# Make sure unicode objects have an __iter__ method
it = "\u1111\u2222\u3333".__iter__()
self.assertEqual(next(it), "\u1111")
self.assertEqual(next(it), "\u2222")
self.assertEqual(next(it), "\u3333")
self.assertRaises(StopIteration, next, it)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, 'aaa', 'count', 'a', 0, -10)
# test mixed kinds
self.checkequal(10, '\u0102' + 'a' * 10, 'count', 'a')
self.checkequal(10, '\U00100304' + 'a' * 10, 'count', 'a')
self.checkequal(10, '\U00100304' + '\u0102' * 10, 'count', '\u0102')
self.checkequal(0, 'a' * 10, 'count', '\u0102')
self.checkequal(0, 'a' * 10, 'count', '\U00100304')
self.checkequal(0, '\u0102' * 10, 'count', '\U00100304')
self.checkequal(10, '\u0102' + 'a_' * 10, 'count', 'a_')
self.checkequal(10, '\U00100304' + 'a_' * 10, 'count', 'a_')
self.checkequal(10, '\U00100304' + '\u0102_' * 10, 'count', '\u0102_')
self.checkequal(0, 'a' * 10, 'count', 'a\u0102')
self.checkequal(0, 'a' * 10, 'count', 'a\U00100304')
self.checkequal(0, '\u0102' * 10, 'count', '\u0102\U00100304')
def test_find(self):
string_tests.CommonTest.test_find(self)
# test implementation details of the memchr fast path
self.checkequal(100, 'a' * 100 + '\u0102', 'find', '\u0102')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0201')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0120')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0220')
self.checkequal(100, 'a' * 100 + '\U00100304', 'find', '\U00100304')
self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00100204')
self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00102004')
# check mixed argument types
self.checkequalnofix(0, 'abcdefghiabc', 'find', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)
self.assertRaises(TypeError, 'hello'.find)
self.assertRaises(TypeError, 'hello'.find, 42)
# test mixed kinds
self.checkequal(100, '\u0102' * 100 + 'a', 'find', 'a')
self.checkequal(100, '\U00100304' * 100 + 'a', 'find', 'a')
self.checkequal(100, '\U00100304' * 100 + '\u0102', 'find', '\u0102')
self.checkequal(-1, 'a' * 100, 'find', '\u0102')
self.checkequal(-1, 'a' * 100, 'find', '\U00100304')
self.checkequal(-1, '\u0102' * 100, 'find', '\U00100304')
self.checkequal(100, '\u0102' * 100 + 'a_', 'find', 'a_')
self.checkequal(100, '\U00100304' * 100 + 'a_', 'find', 'a_')
self.checkequal(100, '\U00100304' * 100 + '\u0102_', 'find', '\u0102_')
self.checkequal(-1, 'a' * 100, 'find', 'a\u0102')
self.checkequal(-1, 'a' * 100, 'find', 'a\U00100304')
self.checkequal(-1, '\u0102' * 100, 'find', '\u0102\U00100304')
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# test implementation details of the memrchr fast path
self.checkequal(0, '\u0102' + 'a' * 100 , 'rfind', '\u0102')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0201')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0120')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0220')
self.checkequal(0, '\U00100304' + 'a' * 100, 'rfind', '\U00100304')
self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00100204')
self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00102004')
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
# test mixed kinds
self.checkequal(0, 'a' + '\u0102' * 100, 'rfind', 'a')
self.checkequal(0, 'a' + '\U00100304' * 100, 'rfind', 'a')
self.checkequal(0, '\u0102' + '\U00100304' * 100, 'rfind', '\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\U00100304')
self.checkequal(-1, '\u0102' * 100, 'rfind', '\U00100304')
self.checkequal(0, '_a' + '\u0102' * 100, 'rfind', '_a')
self.checkequal(0, '_a' + '\U00100304' * 100, 'rfind', '_a')
self.checkequal(0, '_\u0102' + '\U00100304' * 100, 'rfind', '_\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\u0102a')
self.checkequal(-1, 'a' * 100, 'rfind', '\U00100304a')
self.checkequal(-1, '\u0102' * 100, 'rfind', '\U00100304\u0102')
def test_index(self):
string_tests.CommonTest.test_index(self)
self.checkequalnofix(0, 'abcdefghiabc', 'index', '')
self.checkequalnofix(3, 'abcdefghiabc', 'index', 'def')
self.checkequalnofix(0, 'abcdefghiabc', 'index', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'index', 'abc', 1)
self.assertRaises(ValueError, 'abcdefghiabc'.index, 'hib')
self.assertRaises(ValueError, 'abcdefghiab'.index, 'abc', 1)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', 8)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', -1)
# test mixed kinds
self.checkequal(100, '\u0102' * 100 + 'a', 'index', 'a')
self.checkequal(100, '\U00100304' * 100 + 'a', 'index', 'a')
self.checkequal(100, '\U00100304' * 100 + '\u0102', 'index', '\u0102')
self.assertRaises(ValueError, ('a' * 100).index, '\u0102')
self.assertRaises(ValueError, ('a' * 100).index, '\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).index, '\U00100304')
self.checkequal(100, '\u0102' * 100 + 'a_', 'index', 'a_')
self.checkequal(100, '\U00100304' * 100 + 'a_', 'index', 'a_')
self.checkequal(100, '\U00100304' * 100 + '\u0102_', 'index', '\u0102_')
self.assertRaises(ValueError, ('a' * 100).index, 'a\u0102')
self.assertRaises(ValueError, ('a' * 100).index, 'a\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).index, '\u0102\U00100304')
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
self.checkequalnofix(12, 'abcdefghiabc', 'rindex', '')
self.checkequalnofix(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequalnofix(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequalnofix(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghiabc'.rindex, 'hib')
self.assertRaises(ValueError, 'defghiabc'.rindex, 'def', 1)
self.assertRaises(ValueError, 'defghiabc'.rindex, 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, 8)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, -1)
# test mixed kinds
self.checkequal(0, 'a' + '\u0102' * 100, 'rindex', 'a')
self.checkequal(0, 'a' + '\U00100304' * 100, 'rindex', 'a')
self.checkequal(0, '\u0102' + '\U00100304' * 100, 'rindex', '\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).rindex, '\U00100304')
self.checkequal(0, '_a' + '\u0102' * 100, 'rindex', '_a')
self.checkequal(0, '_a' + '\U00100304' * 100, 'rindex', '_a')
self.checkequal(0, '_\u0102' + '\U00100304' * 100, 'rindex', '_\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\u0102a')
self.assertRaises(ValueError, ('a' * 100).rindex, '\U00100304a')
self.assertRaises(ValueError, ('\u0102' * 100).rindex, '\U00100304\u0102')
def test_maketrans_translate(self):
# these work with plain translate()
self.checkequalnofix('bbbc', 'abababc', 'translate',
{ord('a'): None})
self.checkequalnofix('iiic', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i')})
self.checkequalnofix('iiix', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
self.checkequalnofix('c', 'abababc', 'translate',
{ord('a'): None, ord('b'): ''})
self.checkequalnofix('xyyx', 'xzx', 'translate',
{ord('z'): 'yy'})
# this needs maketrans()
self.checkequalnofix('abababc', 'abababc', 'translate',
{'b': '<i>'})
tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
# test alternative way of calling maketrans()
tbl = self.type2test.maketrans('abc', 'xyz', 'd')
self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
# various tests switching from ASCII to latin1 or the opposite;
# same length, remove a letter, or replace with a longer string.
self.assertEqual("[a]".translate(str.maketrans('a', 'X')),
"[X]")
self.assertEqual("[a]".translate(str.maketrans({'a': 'X'})),
"[X]")
self.assertEqual("[a]".translate(str.maketrans({'a': None})),
"[]")
self.assertEqual("[a]".translate(str.maketrans({'a': 'XXX'})),
"[XXX]")
self.assertEqual("[a]".translate(str.maketrans({'a': '\xe9'})),
"[\xe9]")
self.assertEqual('axb'.translate(str.maketrans({'a': None, 'b': '123'})),
"x123")
self.assertEqual('axb'.translate(str.maketrans({'a': None, 'b': '\xe9'})),
"x\xe9")
# test non-ASCII (don't take the fast-path)
self.assertEqual("[a]".translate(str.maketrans({'a': '<\xe9>'})),
"[<\xe9>]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': 'a'})),
"[a]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': None})),
"[]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': '123'})),
"[123]")
self.assertEqual("[a\xe9]".translate(str.maketrans({'a': '<\u20ac>'})),
"[<\u20ac>\xe9]")
# invalid Unicode characters
invalid_char = 0x10ffff+1
for before in "a\xe9\u20ac\U0010ffff":
mapping = str.maketrans({before: invalid_char})
text = "[%s]" % before
self.assertRaises(ValueError, text.translate, mapping)
# errors
self.assertRaises(TypeError, self.type2test.maketrans)
self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})
self.assertRaises(TypeError, 'hello'.translate)
self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
def test_split(self):
string_tests.CommonTest.test_split(self)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal([left + right],
left + right, 'split', delim)
self.checkequal([left, right],
left + delim + right, 'split', delim)
self.checkequal([left + right],
left + right, 'split', delim * 2)
self.checkequal([left, right],
left + delim * 2 + right, 'split', delim *2)
def test_rsplit(self):
string_tests.CommonTest.test_rsplit(self)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal([left + right],
left + right, 'rsplit', delim)
self.checkequal([left, right],
left + delim + right, 'rsplit', delim)
self.checkequal([left + right],
left + right, 'rsplit', delim * 2)
self.checkequal([left, right],
left + delim * 2 + right, 'rsplit', delim *2)
def test_partition(self):
string_tests.MixinStrUnicodeUserStringTest.test_partition(self)
# test mixed kinds
self.checkequal(('ABCDEFGH', '', ''), 'ABCDEFGH', 'partition', '\u4200')
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal((left + right, '', ''),
left + right, 'partition', delim)
self.checkequal((left, delim, right),
left + delim + right, 'partition', delim)
self.checkequal((left + right, '', ''),
left + right, 'partition', delim * 2)
self.checkequal((left, delim * 2, right),
left + delim * 2 + right, 'partition', delim * 2)
def test_rpartition(self):
string_tests.MixinStrUnicodeUserStringTest.test_rpartition(self)
# test mixed kinds
self.checkequal(('', '', 'ABCDEFGH'), 'ABCDEFGH', 'rpartition', '\u4200')
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal(('', '', left + right),
left + right, 'rpartition', delim)
self.checkequal((left, delim, right),
left + delim + right, 'rpartition', delim)
self.checkequal(('', '', left + right),
left + right, 'rpartition', delim * 2)
self.checkequal((left, delim * 2, right),
left + delim * 2 + right, 'rpartition', delim * 2)
def test_join(self):
string_tests.MixinStrUnicodeUserStringTest.test_join(self)
class MyWrapper:
def __init__(self, sval): self.sval = sval
def __str__(self): return self.sval
# mixed arguments
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkraises(TypeError, ' ', 'join', ['1', '2', MyWrapper('foo')])
self.checkraises(TypeError, ' ', 'join', ['1', '2', '3', bytes()])
self.checkraises(TypeError, ' ', 'join', [1, 2, 3])
self.checkraises(TypeError, ' ', 'join', ['1', '2', 3])
@unittest.skipIf(sys.maxsize > 2**32,
'needs too much memory on a 64-bit platform')
def test_join_overflow(self):
size = int(sys.maxsize**0.5) + 1
seq = ('A' * size,) * size
self.assertRaises(OverflowError, ''.join, seq)
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.assertRaises(TypeError, 'replace'.replace, "r", 42)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
for repl in ('d', '\u0103', '\U00010303'):
self.checkequal(left + right,
left + right, 'replace', delim, repl)
self.checkequal(left + repl + right,
left + delim + right,
'replace', delim, repl)
self.checkequal(left + right,
left + right, 'replace', delim * 2, repl)
self.checkequal(left + repl + right,
left + delim * 2 + right,
'replace', delim * 2, repl)
@support.cpython_only
def test_replace_id(self):
pattern = 'abc'
text = 'abc def'
self.assertIs(text.replace(pattern, pattern), text)
def test_bytes_comparison(self):
with warnings_helper.check_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertEqual('abc' == b'abc', False)
self.assertEqual('abc' != b'abc', True)
self.assertEqual('abc' == bytearray(b'abc'), False)
self.assertEqual('abc' != bytearray(b'abc'), True)
def test_comparison(self):
# Comparisons:
self.assertEqual('abc', 'abc')
self.assertTrue('abcd' > 'abc')
self.assertTrue('abc' < 'abcd')
if 0:
# Move these tests to a Unicode collation module test...
# Testing UTF-16 code point order comparisons...
# No surrogates, no fixup required.
self.assertTrue('\u0061' < '\u20ac')
# Non surrogate below surrogate value, no fixup required
self.assertTrue('\u0061' < '\ud800\udc02')
# Non surrogate above surrogate value, fixup required
def test_lecmp(s, s2):
self.assertTrue(s < s2)
def test_fixup(s):
s2 = '\ud800\udc01'
test_lecmp(s, s2)
s2 = '\ud900\udc01'
test_lecmp(s, s2)
s2 = '\uda00\udc01'
test_lecmp(s, s2)
s2 = '\udb00\udc01'
test_lecmp(s, s2)
s2 = '\ud800\udd01'
test_lecmp(s, s2)
s2 = '\ud900\udd01'
test_lecmp(s, s2)
s2 = '\uda00\udd01'
test_lecmp(s, s2)
s2 = '\udb00\udd01'
test_lecmp(s, s2)
s2 = '\ud800\ude01'
test_lecmp(s, s2)
s2 = '\ud900\ude01'
test_lecmp(s, s2)
s2 = '\uda00\ude01'
test_lecmp(s, s2)
s2 = '\udb00\ude01'
test_lecmp(s, s2)
s2 = '\ud800\udfff'
test_lecmp(s, s2)
s2 = '\ud900\udfff'
test_lecmp(s, s2)
s2 = '\uda00\udfff'
test_lecmp(s, s2)
s2 = '\udb00\udfff'
test_lecmp(s, s2)
test_fixup('\ue000')
test_fixup('\uff61')
# Surrogates on both sides, no fixup required
self.assertTrue('\ud800\udc02' < '\ud84d\udc56')
def test_islower(self):
super().test_islower()
self.checkequalnofix(False, '\u1FFc', 'islower')
self.assertFalse('\u2167'.islower())
self.assertTrue('\u2177'.islower())
# non-BMP, uppercase
self.assertFalse('\U00010401'.islower())
self.assertFalse('\U00010427'.islower())
# non-BMP, lowercase
self.assertTrue('\U00010429'.islower())
self.assertTrue('\U0001044E'.islower())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.islower())
self.assertFalse('\U0001F46F'.islower())
def test_isupper(self):
super().test_isupper()
if not sys.platform.startswith('java'):
self.checkequalnofix(False, '\u1FFc', 'isupper')
self.assertTrue('\u2167'.isupper())
self.assertFalse('\u2177'.isupper())
# non-BMP, uppercase
self.assertTrue('\U00010401'.isupper())
self.assertTrue('\U00010427'.isupper())
# non-BMP, lowercase
self.assertFalse('\U00010429'.isupper())
self.assertFalse('\U0001044E'.isupper())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.isupper())
self.assertFalse('\U0001F46F'.isupper())
def test_istitle(self):
super().test_istitle()
self.checkequalnofix(True, '\u1FFc', 'istitle')
self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')
# non-BMP, uppercase + lowercase
self.assertTrue('\U00010401\U00010429'.istitle())
self.assertTrue('\U00010427\U0001044E'.istitle())
# apparently there are no titlecased (Lt) non-BMP chars in Unicode 6
for ch in ['\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.istitle(), '{!a} is not title'.format(ch))
def test_isspace(self):
super().test_isspace()
self.checkequalnofix(True, '\u2000', 'isspace')
self.checkequalnofix(True, '\u200a', 'isspace')
self.checkequalnofix(False, '\u2014', 'isspace')
# There are no non-BMP whitespace chars as of Unicode 12.
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isspace(), '{!a} is not space.'.format(ch))
@support.requires_resource('cpu')
def test_isspace_invariant(self):
for codepoint in range(sys.maxunicode + 1):
char = chr(codepoint)
bidirectional = unicodedata.bidirectional(char)
category = unicodedata.category(char)
self.assertEqual(char.isspace(),
(bidirectional in ('WS', 'B', 'S')
or category == 'Zs'))
def test_isalnum(self):
super().test_isalnum()
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
self.assertTrue(ch.isalnum(), '{!a} is alnum.'.format(ch))
def test_isalpha(self):
super().test_isalpha()
self.checkequalnofix(True, '\u1FFc', 'isalpha')
# non-BMP, cased
self.assertTrue('\U00010401'.isalpha())
self.assertTrue('\U00010427'.isalpha())
self.assertTrue('\U00010429'.isalpha())
self.assertTrue('\U0001044E'.isalpha())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.isalpha())
self.assertFalse('\U0001F46F'.isalpha())
def test_isascii(self):
super().test_isascii()
self.assertFalse("\u20ac".isascii())
self.assertFalse("\U0010ffff".isascii())
def test_isdecimal(self):
self.checkequalnofix(False, '', 'isdecimal')
self.checkequalnofix(False, 'a', 'isdecimal')
self.checkequalnofix(True, '0', 'isdecimal')
self.checkequalnofix(False, '\u2460', 'isdecimal') # CIRCLED DIGIT ONE
self.checkequalnofix(False, '\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
self.checkequalnofix(True, '\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
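        # The three predicates form a hierarchy: every decimal character is
        # a digit and every digit is numeric, but not vice versa -- CIRCLED
        # DIGIT ONE is a digit (and numeric) without being decimal, and
        # VULGAR FRACTION ONE QUARTER is only numeric; test_isdigit and
        # test_isnumeric below exercise the same characters.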
self.checkequalnofix(True, '0123456789', 'isdecimal')
self.checkequalnofix(False, '0123456789a', 'isdecimal')
self.checkraises(TypeError, 'abc', 'isdecimal', 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F', '\U00011065', '\U0001F107']:
self.assertFalse(ch.isdecimal(), '{!a} is not decimal.'.format(ch))
for ch in ['\U0001D7F6', '\U00011066', '\U000104A0']:
self.assertTrue(ch.isdecimal(), '{!a} is decimal.'.format(ch))
def test_isdigit(self):
super().test_isdigit()
self.checkequalnofix(True, '\u2460', 'isdigit')
self.checkequalnofix(False, '\xbc', 'isdigit')
self.checkequalnofix(True, '\u0660', 'isdigit')
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F', '\U00011065']:
self.assertFalse(ch.isdigit(), '{!a} is not a digit.'.format(ch))
for ch in ['\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
self.assertTrue(ch.isdigit(), '{!a} is a digit.'.format(ch))
def test_isnumeric(self):
self.checkequalnofix(False, '', 'isnumeric')
self.checkequalnofix(False, 'a', 'isnumeric')
self.checkequalnofix(True, '0', 'isnumeric')
self.checkequalnofix(True, '\u2460', 'isnumeric')
self.checkequalnofix(True, '\xbc', 'isnumeric')
self.checkequalnofix(True, '\u0660', 'isnumeric')
self.checkequalnofix(True, '0123456789', 'isnumeric')
self.checkequalnofix(False, '0123456789a', 'isnumeric')
self.assertRaises(TypeError, "abc".isnumeric, 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isnumeric(), '{!a} is not numeric.'.format(ch))
for ch in ['\U00011065', '\U0001D7F6', '\U00011066',
'\U000104A0', '\U0001F107']:
self.assertTrue(ch.isnumeric(), '{!a} is numeric.'.format(ch))
def test_isidentifier(self):
self.assertTrue("a".isidentifier())
self.assertTrue("Z".isidentifier())
self.assertTrue("_".isidentifier())
self.assertTrue("b0".isidentifier())
self.assertTrue("bc".isidentifier())
self.assertTrue("b_".isidentifier())
self.assertTrue("µ".isidentifier())
self.assertTrue("𝔘𝔫𝔦𝔠𝔬𝔡𝔢".isidentifier())
self.assertFalse(" ".isidentifier())
self.assertFalse("[".isidentifier())
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
@support.cpython_only
@support.requires_legacy_unicode_capi
def test_isidentifier_legacy(self):
import _testcapi
u = '𝖀𝖓𝖎𝖈𝖔𝖉𝖊'
self.assertTrue(u.isidentifier())
with warnings_helper.check_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertTrue(_testcapi.unicode_legacy_string(u).isidentifier())
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
self.assertTrue("abcdefg".isprintable())
self.assertFalse("abcdefg\n".isprintable())
# some defined Unicode character
self.assertTrue("\u0374".isprintable())
# undefined character
self.assertFalse("\u0378".isprintable())
# single surrogate character
self.assertFalse("\ud800".isprintable())
self.assertTrue('\U0001F46F'.isprintable())
self.assertFalse('\U000E0020'.isprintable())
def test_surrogates(self):
for s in ('a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertTrue(s.islower())
self.assertFalse(s.isupper())
self.assertFalse(s.istitle())
for s in ('A\uD800B\uDFFF', 'A\uDFFFB\uD800',
'A\uD800B\uDFFFA', 'A\uDFFFB\uD800A'):
self.assertFalse(s.islower())
self.assertTrue(s.isupper())
self.assertTrue(s.istitle())
for meth_name in ('islower', 'isupper', 'istitle'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
for meth_name in ('isalpha', 'isalnum', 'isdigit', 'isspace',
'isdecimal', 'isnumeric',
'isidentifier', 'isprintable'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
def test_lower(self):
string_tests.CommonTest.test_lower(self)
self.assertEqual('\U00010427'.lower(), '\U0001044F')
self.assertEqual('\U00010427\U00010427'.lower(),
'\U0001044F\U0001044F')
self.assertEqual('\U00010427\U0001044F'.lower(),
'\U0001044F\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.lower(),
'x\U0001044Fx\U0001044F')
self.assertEqual('fi'.lower(), 'fi')
self.assertEqual('\u0130'.lower(), '\u0069\u0307')
# Special case for GREEK CAPITAL LETTER SIGMA U+03A3
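        # Per the Unicode Final_Sigma rule it lowercases to U+03C2 (final
        # sigma) when it ends a word and to U+03C3 otherwise; case-ignorable
        # marks such as U+0345 do not break the word context, as the
        # assertions below show.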
self.assertEqual('\u03a3'.lower(), '\u03c3')
self.assertEqual('\u0345\u03a3'.lower(), '\u0345\u03c3')
self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
self.assertEqual('A\u0345\u03a3a'.lower(), 'a\u0345\u03c3a')
self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
self.assertEqual('A\u03a3\u0345'.lower(), 'a\u03c2\u0345')
self.assertEqual('\u03a3\u0345 '.lower(), '\u03c3\u0345 ')
self.assertEqual('\U0008fffe'.lower(), '\U0008fffe')
self.assertEqual('\u2177'.lower(), '\u2177')
def test_casefold(self):
self.assertEqual('hello'.casefold(), 'hello')
self.assertEqual('hELlo'.casefold(), 'hello')
self.assertEqual('ß'.casefold(), 'ss')
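        # casefold() applies full Unicode case folding and is more
        # aggressive than lower(): 'ß' folds to 'ss' while 'ß'.lower() is
        # still 'ß', and U+00B5 MICRO SIGN folds to Greek small mu (see the
        # last assertion in this test).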
self.assertEqual('fi'.casefold(), 'fi')
self.assertEqual('\u03a3'.casefold(), '\u03c3')
self.assertEqual('A\u0345\u03a3'.casefold(), 'a\u03b9\u03c3')
self.assertEqual('\u00b5'.casefold(), '\u03bc')
def test_upper(self):
string_tests.CommonTest.test_upper(self)
self.assertEqual('\U0001044F'.upper(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('X\U00010427x\U0001044F'.upper(),
'X\U00010427X\U00010427')
self.assertEqual('fi'.upper(), 'FI')
self.assertEqual('\u0130'.upper(), '\u0130')
self.assertEqual('\u03a3'.upper(), '\u03a3')
self.assertEqual('ß'.upper(), 'SS')
self.assertEqual('\u1fd2'.upper(), '\u0399\u0308\u0300')
self.assertEqual('\U0008fffe'.upper(), '\U0008fffe')
self.assertEqual('\u2177'.upper(), '\u2167')
def test_capitalize(self):
string_tests.CommonTest.test_capitalize(self)
self.assertEqual('\U0001044F'.capitalize(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.capitalize(),
'X\U0001044Fx\U0001044F')
self.assertEqual('h\u0130'.capitalize(), 'H\u0069\u0307')
exp = '\u0399\u0308\u0300\u0069\u0307'
self.assertEqual('\u1fd2\u0130'.capitalize(), exp)
self.assertEqual('finnish'.capitalize(), 'Finnish')
self.assertEqual('A\u0345\u03a3'.capitalize(), 'A\u0345\u03c2')
def test_title(self):
super().test_title()
self.assertEqual('\U0001044F'.title(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.title(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U0001044F \U0001044F\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F \U00010427\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427 \U0001044F\U00010427'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F X\U00010427x\U0001044F'.title(),
'X\U0001044Fx\U0001044F X\U0001044Fx\U0001044F')
self.assertEqual('fiNNISH'.title(), 'Finnish')
self.assertEqual('A\u03a3 \u1fa1xy'.title(), 'A\u03c2 \u1fa9xy')
self.assertEqual('A\u03a3A'.title(), 'A\u03c3a')
def test_swapcase(self):
string_tests.CommonTest.test_swapcase(self)
self.assertEqual('\U0001044F'.swapcase(), '\U00010427')
self.assertEqual('\U00010427'.swapcase(), '\U0001044F')
self.assertEqual('\U0001044F\U0001044F'.swapcase(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.swapcase(),
'\U0001044F\U00010427')
self.assertEqual('\U0001044F\U00010427'.swapcase(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.swapcase(),
'x\U0001044FX\U00010427')
self.assertEqual('fi'.swapcase(), 'FI')
self.assertEqual('\u0130'.swapcase(), '\u0069\u0307')
# Special case for GREEK CAPITAL LETTER SIGMA U+03A3
self.assertEqual('\u03a3'.swapcase(), '\u03c3')
self.assertEqual('\u0345\u03a3'.swapcase(), '\u0399\u03c3')
self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
self.assertEqual('A\u0345\u03a3a'.swapcase(), 'a\u0399\u03c3A')
self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
self.assertEqual('A\u03a3\u0345'.swapcase(), 'a\u03c2\u0399')
self.assertEqual('\u03a3\u0345 '.swapcase(), '\u03c3\u0399 ')
self.assertEqual('\u03a3'.swapcase(), '\u03c3')
self.assertEqual('ß'.swapcase(), 'SS')
self.assertEqual('\u1fd2'.swapcase(), '\u0399\u0308\u0300')
def test_center(self):
string_tests.CommonTest.test_center(self)
self.assertEqual('x'.center(2, '\U0010FFFF'),
'x\U0010FFFF')
self.assertEqual('x'.center(3, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF')
self.assertEqual('x'.center(4, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF\U0010FFFF')
@unittest.skipUnless(sys.maxsize == 2**31 - 1, "requires 32-bit system")
@support.cpython_only
def test_case_operation_overflow(self):
# Issue #22643
size = 2**32//12 + 1
try:
s = "ü" * size
except MemoryError:
            self.skipTest('not enough memory (%.0f MiB required)' % (size / 2**20))
try:
self.assertRaises(OverflowError, s.upper)
finally:
del s
def test_contains(self):
# Testing Unicode contains method
self.assertIn('a', 'abdb')
self.assertIn('a', 'bdab')
self.assertIn('a', 'bdaba')
self.assertIn('a', 'bdba')
self.assertNotIn('a', 'bdb')
self.assertIn('a', 'bdba')
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertNotIn('a', ('x',1,'y'))
self.assertNotIn('a', ('x',1,None))
self.assertNotIn('abcd', 'abcxxxx')
self.assertIn('ab', 'abcd')
self.assertIn('ab', 'abc')
self.assertIn('ab', (1,None,'ab'))
self.assertIn('', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertNotIn('\0', 'abc')
self.assertIn('\0', '\0abc')
self.assertIn('\0', 'abc\0')
self.assertIn('a', '\0abc')
self.assertIn('asdf', 'asdf')
self.assertNotIn('asdf', 'asd')
self.assertNotIn('asdf', '')
self.assertRaises(TypeError, "abc".__contains__)
# test mixed kinds
for fill in ('a', '\u0100', '\U00010300'):
fill *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.assertNotIn(delim, fill)
self.assertIn(delim, fill + delim)
self.assertNotIn(delim * 2, fill)
self.assertIn(delim * 2, fill + delim * 2)
def test_issue18183(self):
'\U00010000\U00100000'.lower()
'\U00010000\U00100000'.casefold()
'\U00010000\U00100000'.upper()
'\U00010000\U00100000'.capitalize()
'\U00010000\U00100000'.title()
'\U00010000\U00100000'.swapcase()
'\U00100000'.center(3, '\U00010000')
'\U00100000'.ljust(3, '\U00010000')
'\U00100000'.rjust(3, '\U00010000')
def test_format(self):
self.assertEqual(''.format(), '')
self.assertEqual('a'.format(), 'a')
self.assertEqual('ab'.format(), 'ab')
self.assertEqual('a{{'.format(), 'a{')
self.assertEqual('a}}'.format(), 'a}')
self.assertEqual('{{b'.format(), '{b')
self.assertEqual('}}b'.format(), '}b')
self.assertEqual('a{{b'.format(), 'a{b')
# examples from the PEP:
import datetime
self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
"My name is Fred")
self.assertEqual("My name is {0} :-{{}}".format('Fred'),
"My name is Fred :-{}")
d = datetime.date(2007, 8, 18)
self.assertEqual("The year is {0.year}".format(d),
"The year is 2007")
# classes we'll use for testing
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
class D:
def __init__(self, x):
self.x = x
def __format__(self, spec):
return str(self.x)
# class with __str__, but no __format__
class E:
def __init__(self, x):
self.x = x
def __str__(self):
return 'E(' + self.x + ')'
# class with __repr__, but no __format__ or __str__
class F:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'F(' + self.x + ')'
# class with __format__ that forwards to string, for some format_spec's
class G:
def __init__(self, x):
self.x = x
def __str__(self):
return "string is " + self.x
def __format__(self, format_spec):
if format_spec == 'd':
return 'G(' + self.x + ')'
return object.__format__(self, format_spec)
class I(datetime.date):
def __format__(self, format_spec):
return self.strftime(format_spec)
class J(int):
def __format__(self, format_spec):
return int.__format__(self * 2, format_spec)
class M:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'M(' + self.x + ')'
__str__ = None
class N:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'N(' + self.x + ')'
__format__ = None
self.assertEqual(''.format(), '')
self.assertEqual('abc'.format(), 'abc')
self.assertEqual('{0}'.format('abc'), 'abc')
self.assertEqual('{0:}'.format('abc'), 'abc')
# self.assertEqual('{ 0 }'.format('abc'), 'abc')
self.assertEqual('X{0}'.format('abc'), 'Xabc')
self.assertEqual('{0}X'.format('abc'), 'abcX')
self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
self.assertEqual('{1}'.format(1, 'abc'), 'abc')
self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
self.assertEqual('{0}'.format(-15), '-15')
self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
self.assertEqual('{{'.format(), '{')
self.assertEqual('}}'.format(), '}')
self.assertEqual('{{}}'.format(), '{}')
self.assertEqual('{{x}}'.format(), '{x}')
self.assertEqual('{{{0}}}'.format(123), '{123}')
self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
self.assertEqual('}}{{'.format(), '}{')
self.assertEqual('}}x{{'.format(), '}x{')
# weird field names
self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
self.assertEqual("{0[ ]}".format({' ':3}), '3')
self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
# strings
self.assertEqual('{0:.3s}'.format('abc'), 'abc')
self.assertEqual('{0:.3s}'.format('ab'), 'ab')
self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
self.assertEqual('{0:.0s}'.format('abcdef'), '')
self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
self.assertEqual('{0:x<0s}'.format('result'), 'result')
self.assertEqual('{0:x<5s}'.format('result'), 'result')
self.assertEqual('{0:x<6s}'.format('result'), 'result')
self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
self.assertEqual('{0: <7s}'.format('result'), 'result ')
self.assertEqual('{0:<7s}'.format('result'), 'result ')
self.assertEqual('{0:>7s}'.format('result'), ' result')
self.assertEqual('{0:>8s}'.format('result'), ' result')
self.assertEqual('{0:^8s}'.format('result'), ' result ')
self.assertEqual('{0:^9s}'.format('result'), ' result ')
self.assertEqual('{0:^10s}'.format('result'), ' result ')
self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
# issue 12546: use \x00 as a fill character
self.assertEqual('{0:\x00<6s}'.format('foo'), 'foo\x00\x00\x00')
self.assertEqual('{0:\x01<6s}'.format('foo'), 'foo\x01\x01\x01')
self.assertEqual('{0:\x00^6s}'.format('foo'), '\x00foo\x00\x00')
self.assertEqual('{0:^6s}'.format('foo'), ' foo ')
self.assertEqual('{0:\x00<6}'.format(3), '3\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<6}'.format(3), '3\x01\x01\x01\x01\x01')
self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
self.assertEqual('{0:<6}'.format(3), '3 ')
self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
self.assertEqual('{0:\x00^12}'.format(3+2.0j), '\x00\x00\x00(3+2j)\x00\x00\x00')
self.assertEqual('{0:^12}'.format(3+2.0j), ' (3+2j) ')
# format specifiers for user defined type
self.assertEqual('{0:abc}'.format(C()), 'abc')
# !r, !s and !a coercions
self.assertEqual('{0!s}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:15}'.format('Hello'), 'Hello ')
self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello ')
self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!r}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!r}'.format('\u0374'), "'\u0374'") # printable
self.assertEqual('{0!r}'.format(F('\u0374')), 'F(\u0374)')
self.assertEqual('{0!a}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!a}'.format('\u0374'), "'\\u0374'") # printable
self.assertEqual('{0!a:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!a}'.format(F('\u0374')), 'F(\\u0374)')
# test fallback to object.__format__
self.assertEqual('{0}'.format({}), '{}')
self.assertEqual('{0}'.format([]), '[]')
self.assertEqual('{0}'.format([1]), '[1]')
self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
self.assertEqual('{0!s}'.format(G('data')), 'string is data')
self.assertRaises(TypeError, '{0:^10}'.format, E('data'))
self.assertRaises(TypeError, '{0:^10s}'.format, E('data'))
self.assertRaises(TypeError, '{0:>15s}'.format, G('data'))
self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
month=8,
day=27)),
"date: 2007-08-27")
# test deriving from a builtin type and overriding __format__
self.assertEqual("{0}".format(J(10)), "20")
# string format specifiers
self.assertEqual('{0:}'.format('a'), 'a')
# computed format specifiers
self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello ')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello ')
# test various errors
self.assertRaises(ValueError, '{'.format)
self.assertRaises(ValueError, '}'.format)
self.assertRaises(ValueError, 'a{'.format)
self.assertRaises(ValueError, 'a}'.format)
self.assertRaises(ValueError, '{a'.format)
self.assertRaises(ValueError, '}a'.format)
self.assertRaises(IndexError, '{0}'.format)
self.assertRaises(IndexError, '{1}'.format, 'abc')
self.assertRaises(KeyError, '{x}'.format)
self.assertRaises(ValueError, "}{".format)
self.assertRaises(ValueError, "abc{0:{}".format)
self.assertRaises(ValueError, "{0".format)
self.assertRaises(IndexError, "{0.}".format)
self.assertRaises(ValueError, "{0.}".format, 0)
self.assertRaises(ValueError, "{0[}".format)
self.assertRaises(ValueError, "{0[}".format, [])
self.assertRaises(KeyError, "{0]}".format)
self.assertRaises(ValueError, "{0.[]}".format, 0)
self.assertRaises(ValueError, "{0..foo}".format, 0)
self.assertRaises(ValueError, "{0[0}".format, 0)
self.assertRaises(ValueError, "{0[0:foo}".format, 0)
self.assertRaises(KeyError, "{c]}".format)
self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
self.assertRaises(ValueError, "{0}}".format, 0)
self.assertRaises(KeyError, "{foo}".format, bar=3)
self.assertRaises(ValueError, "{0!x}".format, 3)
self.assertRaises(ValueError, "{0!}".format, 0)
self.assertRaises(ValueError, "{0!rs}".format, 0)
self.assertRaises(ValueError, "{!}".format)
self.assertRaises(IndexError, "{:}".format)
self.assertRaises(IndexError, "{:s}".format)
self.assertRaises(IndexError, "{}".format)
big = "23098475029384702983476098230754973209482573"
self.assertRaises(ValueError, ("{" + big + "}").format)
self.assertRaises(ValueError, ("{[" + big + "]}").format, [0])
# issue 6089
self.assertRaises(ValueError, "{0[0]x}".format, [None])
self.assertRaises(ValueError, "{0[0](10)}".format, [None])
# can't have a replacement on the field name portion
self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
# exceed maximum recursion depth
self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
0, 1, 2, 3, 4, 5, 6, 7)
# string format spec errors
self.assertRaises(ValueError, "{0:-s}".format, '')
self.assertRaises(ValueError, format, "", "-")
self.assertRaises(ValueError, "{0:=s}".format, '')
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
# Non-ASCII
self.assertEqual("{0:s}{1:s}".format("ABC", "\u0410\u0411\u0412"),
'ABC\u0410\u0411\u0412')
self.assertEqual("{0:.3s}".format("ABC\u0410\u0411\u0412"),
'ABC')
self.assertEqual("{0:.0s}".format("ABC\u0410\u0411\u0412"),
'')
self.assertEqual("{[{}]}".format({"{}": 5}), "5")
self.assertEqual("{[{}]}".format({"{}" : "a"}), "a")
self.assertEqual("{[{]}".format({"{" : "a"}), "a")
self.assertEqual("{[}]}".format({"}" : "a"}), "a")
self.assertEqual("{[[]}".format({"[" : "a"}), "a")
self.assertEqual("{[!]}".format({"!" : "a"}), "a")
self.assertRaises(ValueError, "{a{}b}".format, 42)
self.assertRaises(ValueError, "{a{b}".format, 42)
self.assertRaises(ValueError, "{[}".format, 42)
self.assertEqual("0x{:0{:d}X}".format(0x0,16), "0x0000000000000000")
# Blocking fallback
m = M('data')
self.assertEqual("{!r}".format(m), 'M(data)')
self.assertRaises(TypeError, "{!s}".format, m)
self.assertRaises(TypeError, "{}".format, m)
n = N('data')
self.assertEqual("{!r}".format(n), 'N(data)')
self.assertEqual("{!s}".format(n), 'N(data)')
self.assertRaises(TypeError, "{}".format, n)
def test_format_map(self):
self.assertEqual(''.format_map({}), '')
self.assertEqual('a'.format_map({}), 'a')
self.assertEqual('ab'.format_map({}), 'ab')
self.assertEqual('a{{'.format_map({}), 'a{')
self.assertEqual('a}}'.format_map({}), 'a}')
self.assertEqual('{{b'.format_map({}), '{b')
self.assertEqual('}}b'.format_map({}), '}b')
self.assertEqual('a{{b'.format_map({}), 'a{b')
# using mappings
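        # Unlike format(**mapping), format_map() passes the mapping object
        # through unchanged, so dict subclasses relying on __missing__ (and
        # arbitrary objects implementing __getitem__) work, as the two
        # classes below demonstrate.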
class Mapping(dict):
def __missing__(self, key):
return key
self.assertEqual('{hello}'.format_map(Mapping()), 'hello')
self.assertEqual('{a} {world}'.format_map(Mapping(a='hello')), 'hello world')
class InternalMapping:
def __init__(self):
self.mapping = {'a': 'hello'}
def __getitem__(self, key):
return self.mapping[key]
self.assertEqual('{a}'.format_map(InternalMapping()), 'hello')
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
# test various errors
self.assertRaises(TypeError, ''.format_map)
self.assertRaises(TypeError, 'a'.format_map)
self.assertRaises(ValueError, '{'.format_map, {})
self.assertRaises(ValueError, '}'.format_map, {})
self.assertRaises(ValueError, 'a{'.format_map, {})
self.assertRaises(ValueError, 'a}'.format_map, {})
self.assertRaises(ValueError, '{a'.format_map, {})
self.assertRaises(ValueError, '}a'.format_map, {})
# issue #12579: can't supply positional params to format_map
self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
self.assertRaises(ValueError, '{}'.format_map, 'a')
self.assertRaises(ValueError, '{a} {}'.format_map, {"a" : 2, "b" : 1})
class BadMapping:
def __getitem__(self, key):
return 1/0
self.assertRaises(KeyError, '{a}'.format_map, {})
self.assertRaises(TypeError, '{a}'.format_map, [])
self.assertRaises(ZeroDivisionError, '{a}'.format_map, BadMapping())
def test_format_huge_precision(self):
format_string = ".{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_width(self):
format_string = "{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_item_number(self):
format_string = "{{{}:.6f}}".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string.format(2.34)
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{}'.format(10), '10')
self.assertEqual('{:5}'.format('s'), 's ')
self.assertEqual('{!r}'.format('s'), "'s'")
self.assertEqual('{._x}'.format(C(10)), '10')
self.assertEqual('{[1]}'.format([1, 2]), '2')
self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
self.assertEqual('a{:{}}b'.format('x', '^10'), 'a x b')
self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
# can't mix and match numbering and auto-numbering
self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
# can mix and match auto-numbering and named
self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
self.assertEqual('{}{f}'.format(4, f='test'), '4test')
self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000, 3.50')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000, 3.57')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
if not sys.platform.startswith('java'):
self.assertEqual("%r, %r" % (b"abc", "abc"), "b'abc', 'abc'")
self.assertEqual("%r" % ("\u1234",), "'\u1234'")
self.assertEqual("%a" % ("\u1234",), "'\\u1234'")
self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
self.assertEqual('%c' % 0x1234, '\u1234')
self.assertEqual('%c' % 0x21483, '\U00021483')
self.assertRaises(OverflowError, "%c".__mod__, (0x110000,))
self.assertEqual('%c' % '\U00021483', '\U00021483')
self.assertRaises(TypeError, "%c".__mod__, "aa")
self.assertRaises(ValueError, "%.1\u1032f".__mod__, (1.0/3))
self.assertRaises(TypeError, "%i".__mod__, "aa")
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
self.assertEqual('...%s...' % "abc", '...abc...')
self.assertEqual('%*s' % (5,'abc',), ' abc')
self.assertEqual('%*s' % (-5,'abc',), 'abc ')
self.assertEqual('%*.*s' % (5,2,'abc',), ' ab')
self.assertEqual('%*.*s' % (5,3,'abc',), ' abc')
self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10 abc')
self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103 abc')
self.assertEqual('%c' % 'a', 'a')
class Wrapper:
def __str__(self):
return '\u1234'
self.assertEqual('%s' % Wrapper(), '\u1234')
# issue 3382
NAN = float('nan')
INF = float('inf')
self.assertEqual('%f' % NAN, 'nan')
self.assertEqual('%F' % NAN, 'NAN')
self.assertEqual('%f' % INF, 'inf')
self.assertEqual('%F' % INF, 'INF')
# PEP 393
self.assertEqual('%.1s' % "a\xe9\u20ac", 'a')
self.assertEqual('%.2s' % "a\xe9\u20ac", 'a\xe9')
        # issue 19995
class PseudoInt:
def __init__(self, value):
self.value = int(value)
def __int__(self):
return self.value
def __index__(self):
return self.value
class PseudoFloat:
def __init__(self, value):
self.value = float(value)
def __int__(self):
return int(self.value)
pi = PseudoFloat(3.1415)
letter_m = PseudoInt(109)
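        # %x, %X, %o and %c convert their argument via __index__, so
        # integer-like objects such as PseudoInt are accepted while floats
        # (and __int__-only objects such as PseudoFloat) are rejected with
        # TypeError, as checked below.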
self.assertEqual('%x' % 42, '2a')
self.assertEqual('%X' % 15, 'F')
self.assertEqual('%o' % 9, '11')
self.assertEqual('%c' % 109, 'm')
self.assertEqual('%x' % letter_m, '6d')
self.assertEqual('%X' % letter_m, '6D')
self.assertEqual('%o' % letter_m, '155')
self.assertEqual('%c' % letter_m, 'm')
        self.assertRaisesRegex(TypeError, '%x format: an integer is required, not float', operator.mod, '%x', 3.14)
        self.assertRaisesRegex(TypeError, '%X format: an integer is required, not float', operator.mod, '%X', 2.11)
        self.assertRaisesRegex(TypeError, '%o format: an integer is required, not float', operator.mod, '%o', 1.79)
        self.assertRaisesRegex(TypeError, '%x format: an integer is required, not PseudoFloat', operator.mod, '%x', pi)
        self.assertRaises(TypeError, operator.mod, '%c', pi)
def test_formatting_with_enum(self):
# issue18780
import enum
class Float(float, enum.Enum):
PI = 3.1415926
class Int(enum.IntEnum):
IDES = 15
class Str(str, enum.Enum):
ABC = 'abc'
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % (Str.ABC, Str.ABC),
'Str.ABC, Str.ABC')
self.assertEqual("%s, %s, %d, %i, %u, %f, %5.2f" %
(Str.ABC, Str.ABC,
Int.IDES, Int.IDES, Int.IDES,
Float.PI, Float.PI),
'Str.ABC, Str.ABC, 15, 15, 15, 3.141593, 3.14')
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':Str.ABC},
'...Str.ABC...')
self.assertEqual('...%(foo)s...' % {'foo':Int.IDES},
'...Int.IDES...')
self.assertEqual('...%(foo)i...' % {'foo':Int.IDES},
'...15...')
self.assertEqual('...%(foo)d...' % {'foo':Int.IDES},
'...15...')
self.assertEqual('...%(foo)u...' % {'foo':Int.IDES, 'def':Float.PI},
'...15...')
self.assertEqual('...%(foo)f...' % {'foo':Float.PI,'def':123},
'...3.141593...')
def test_formatting_huge_precision(self):
format_string = "%.{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_issue28598_strsubclass_rhs(self):
# A subclass of str with an __rmod__ method should be able to hook
# into the % operator
class SubclassedStr(str):
def __rmod__(self, other):
return 'Success, self.__rmod__({!r}) was called'.format(other)
self.assertEqual('lhs %% %r' % SubclassedStr('rhs'),
"Success, self.__rmod__('lhs %% %r') was called")
@support.cpython_only
def test_formatting_huge_precision_c_limits(self):
from _testcapi import INT_MAX
format_string = "%.{}f".format(INT_MAX + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_formatting_huge_width(self):
format_string = "%{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_startswith_endswith_errors(self):
for meth in ('foo'.startswith, 'foo'.endswith):
with self.assertRaises(TypeError) as cm:
meth(['f'])
exc = str(cm.exception)
self.assertIn('str', exc)
self.assertIn('tuple', exc)
@support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_format_float(self):
# should not format with a comma, but always with C locale
self.assertEqual('1.0', '%.1f' % 1.0)
def test_constructor(self):
# unicode(obj) tests (this maps to PyObject_Unicode() at C level)
self.assertEqual(
str('unicode remains unicode'),
'unicode remains unicode'
)
for text in ('ascii', '\xe9', '\u20ac', '\U0010FFFF'):
subclass = StrSubclass(text)
self.assertEqual(str(subclass), text)
self.assertEqual(len(subclass), len(text))
if text == 'ascii':
self.assertEqual(subclass.encode('ascii'), b'ascii')
self.assertEqual(subclass.encode('utf-8'), b'ascii')
self.assertEqual(
str('strings are converted to unicode'),
'strings are converted to unicode'
)
class StringCompat:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
self.assertEqual(
str(StringCompat('__str__ compatible objects are recognized')),
'__str__ compatible objects are recognized'
)
# unicode(obj) is compatible to str():
o = StringCompat('unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
for obj in (123, 123.45, 123):
self.assertEqual(str(obj), str(str(obj)))
# unicode(obj, encoding, error) tests (this maps to
# PyUnicode_FromEncodedObject() at C level)
if not sys.platform.startswith('java'):
self.assertRaises(
TypeError,
str,
'decoding unicode is not supported',
'utf-8',
'strict'
)
self.assertEqual(
str(b'strings are decoded to unicode', 'utf-8', 'strict'),
'strings are decoded to unicode'
)
if not sys.platform.startswith('java'):
self.assertEqual(
str(
memoryview(b'character buffers are decoded to unicode'),
'utf-8',
'strict'
),
'character buffers are decoded to unicode'
)
self.assertRaises(TypeError, str, 42, 42, 42)
def test_constructor_keyword_args(self):
"""Pass various keyword argument combinations to the constructor."""
# The object argument can be passed as a keyword.
self.assertEqual(str(object='foo'), 'foo')
self.assertEqual(str(object=b'foo', encoding='utf-8'), 'foo')
# The errors argument without encoding triggers "decode" mode.
self.assertEqual(str(b'foo', errors='strict'), 'foo') # not "b'foo'"
self.assertEqual(str(object=b'foo', errors='strict'), 'foo')
def test_constructor_defaults(self):
"""Check the constructor argument defaults."""
# The object argument defaults to '' or b''.
self.assertEqual(str(), '')
self.assertEqual(str(errors='strict'), '')
utf8_cent = '¢'.encode('utf-8')
# The encoding argument defaults to utf-8.
self.assertEqual(str(utf8_cent, errors='strict'), '¢')
# The errors argument defaults to strict.
self.assertRaises(UnicodeDecodeError, str, utf8_cent, encoding='ascii')
def test_codecs_utf7(self):
utfTests = [
('A\u2262\u0391.', b'A+ImIDkQ.'), # RFC2152 example
('Hi Mom -\u263a-!', b'Hi Mom -+Jjo--!'), # RFC2152 example
('\u65E5\u672C\u8A9E', b'+ZeVnLIqe-'), # RFC2152 example
('Item 3 is \u00a31.', b'Item 3 is +AKM-1.'), # RFC2152 example
('+', b'+-'),
('+-', b'+--'),
('+?', b'+-?'),
(r'\?', b'+AFw?'),
('+?', b'+-?'),
(r'\\?', b'+AFwAXA?'),
(r'\\\?', b'+AFwAXABc?'),
(r'++--', b'+-+---'),
('\U000abcde', b'+2m/c3g-'), # surrogate pairs
('/', b'/'),
]
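        # In UTF-7, characters outside the direct sets are encoded as
        # modified base64 of their UTF-16 form, opened by '+' and closed by
        # '-' (a literal '+' is therefore written '+-'), which is what the
        # pairs above illustrate.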
for (x, y) in utfTests:
self.assertEqual(x.encode('utf-7'), y)
# Unpaired surrogates are passed through
self.assertEqual('\uD801'.encode('utf-7'), b'+2AE-')
self.assertEqual('\uD801x'.encode('utf-7'), b'+2AE-x')
self.assertEqual('\uDC01'.encode('utf-7'), b'+3AE-')
self.assertEqual('\uDC01x'.encode('utf-7'), b'+3AE-x')
self.assertEqual(b'+2AE-'.decode('utf-7'), '\uD801')
self.assertEqual(b'+2AE-x'.decode('utf-7'), '\uD801x')
self.assertEqual(b'+3AE-'.decode('utf-7'), '\uDC01')
self.assertEqual(b'+3AE-x'.decode('utf-7'), '\uDC01x')
self.assertEqual('\uD801\U000abcde'.encode('utf-7'), b'+2AHab9ze-')
self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde')
# Issue #2242: crash on some Windows/MSVC versions
self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '')
# Direct encoded characters
set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
# Optional direct characters
set_o = '!"#$%&*;<=>@[]^_`{|}'
for c in set_d:
self.assertEqual(c.encode('utf7'), c.encode('ascii'))
self.assertEqual(c.encode('ascii').decode('utf7'), c)
for c in set_o:
self.assertEqual(c.encode('ascii').decode('utf7'), c)
with self.assertRaisesRegex(UnicodeDecodeError,
'ill-formed sequence'):
b'+@'.decode('utf-7')
def test_codecs_utf8(self):
self.assertEqual(''.encode('utf-8'), b'')
self.assertEqual('\u20ac'.encode('utf-8'), b'\xe2\x82\xac')
self.assertEqual('\U00010002'.encode('utf-8'), b'\xf0\x90\x80\x82')
self.assertEqual('\U00023456'.encode('utf-8'), b'\xf0\xa3\x91\x96')
self.assertEqual('\ud800'.encode('utf-8', 'surrogatepass'), b'\xed\xa0\x80')
self.assertEqual('\udc00'.encode('utf-8', 'surrogatepass'), b'\xed\xb0\x80')
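        # Strict UTF-8 forbids surrogates; the 'surrogatepass' handler lets
        # lone surrogates round-trip through the usual three-byte pattern
        # (ED A0 80 .. ED BF BF), as encoded above.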
self.assertEqual(('\U00010002'*10).encode('utf-8'),
b'\xf0\x90\x80\x82'*10)
self.assertEqual(
'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
' Nunstuck git und'.encode('utf-8'),
b'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
b'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
b'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
b'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
b'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
b'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
b'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
b'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
b'\xe3\x80\x8cWenn ist das Nunstuck git und'
)
# UTF-8 specific decoding tests
self.assertEqual(str(b'\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
self.assertEqual(str(b'\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
self.assertEqual(str(b'\xe2\x82\xac', 'utf-8'), '\u20ac' )
# Other possible utf-8 test cases:
# * strict decoding testing for all of the
# UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_utf8_decode_valid_sequences(self):
sequences = [
# single byte
(b'\x00', '\x00'), (b'a', 'a'), (b'\x7f', '\x7f'),
# 2 bytes
(b'\xc2\x80', '\x80'), (b'\xdf\xbf', '\u07ff'),
# 3 bytes
(b'\xe0\xa0\x80', '\u0800'), (b'\xed\x9f\xbf', '\ud7ff'),
(b'\xee\x80\x80', '\uE000'), (b'\xef\xbf\xbf', '\uffff'),
# 4 bytes
(b'\xF0\x90\x80\x80', '\U00010000'),
(b'\xf4\x8f\xbf\xbf', '\U0010FFFF')
]
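        # The pairs above sit on the encoding boundaries: one byte covers
        # U+0000..U+007F, two bytes cover U+0080..U+07FF, three bytes cover
        # U+0800..U+FFFF (minus the surrogates), and four bytes cover
        # U+10000..U+10FFFF.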
for seq, res in sequences:
self.assertEqual(seq.decode('utf-8'), res)
def test_utf8_decode_invalid_sequences(self):
# continuation bytes in a sequence of 2, 3, or 4 bytes
continuation_bytes = [bytes([x]) for x in range(0x80, 0xC0)]
# start bytes of a 2-byte sequence equivalent to code points < 0x7F
invalid_2B_seq_start_bytes = [bytes([x]) for x in range(0xC0, 0xC2)]
# start bytes of a 4-byte sequence equivalent to code points > 0x10FFFF
invalid_4B_seq_start_bytes = [bytes([x]) for x in range(0xF5, 0xF8)]
invalid_start_bytes = (
continuation_bytes + invalid_2B_seq_start_bytes +
invalid_4B_seq_start_bytes + [bytes([x]) for x in range(0xF7, 0x100)]
)
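        # 0x80..0xBF are bare continuation bytes, 0xC0/0xC1 could only start
        # overlong two-byte forms, and 0xF5..0xFF would encode code points
        # beyond U+10FFFF, so none of them may begin a well-formed UTF-8
        # sequence.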
for byte in invalid_start_bytes:
self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
for sb in invalid_2B_seq_start_bytes:
for cb in continuation_bytes:
self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
for sb in invalid_4B_seq_start_bytes:
for cb1 in continuation_bytes[:3]:
for cb3 in continuation_bytes[:3]:
self.assertRaises(UnicodeDecodeError,
(sb+cb1+b'\x80'+cb3).decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0xA0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\xBF').decode, 'utf-8')
# surrogates
for cb in [bytes([x]) for x in range(0xA0, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0x90)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\xBF\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x90, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\xBF\xBF').decode, 'utf-8')
def test_issue8271(self):
# Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
# only the start byte and the continuation byte(s) are now considered
# invalid, instead of the number of bytes specified by the start byte.
# See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
# table 3-8, Row 2) for more information about the algorithm used.
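        # With errors='replace' this means each maximal subpart of an
        # ill-formed sequence is replaced by a single U+FFFD, rather than
        # one U+FFFD per expected byte; the table below encodes exactly
        # those substitutions.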
FFFD = '\ufffd'
sequences = [
# invalid start bytes
(b'\x80', FFFD), # continuation byte
(b'\x80\x80', FFFD*2), # 2 continuation bytes
(b'\xc0', FFFD),
(b'\xc0\xc0', FFFD*2),
(b'\xc1', FFFD),
(b'\xc1\xc0', FFFD*2),
(b'\xc0\xc1', FFFD*2),
# with start byte of a 2-byte sequence
(b'\xc2', FFFD), # only the start byte
(b'\xc2\xc2', FFFD*2), # 2 start bytes
(b'\xc2\xc2\xc2', FFFD*3), # 3 start bytes
(b'\xc2\x41', FFFD+'A'), # invalid continuation byte
# with start byte of a 3-byte sequence
(b'\xe1', FFFD), # only the start byte
(b'\xe1\xe1', FFFD*2), # 2 start bytes
(b'\xe1\xe1\xe1', FFFD*3), # 3 start bytes
(b'\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
(b'\xe1\x80', FFFD), # only 1 continuation byte
(b'\xe1\x41', FFFD+'A'), # invalid continuation byte
(b'\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
(b'\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
(b'\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
(b'\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
(b'\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
# with start byte of a 4-byte sequence
(b'\xf1', FFFD), # only the start byte
(b'\xf1\xf1', FFFD*2), # 2 start bytes
(b'\xf1\xf1\xf1', FFFD*3), # 3 start bytes
(b'\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
(b'\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
            (b'\xf1\x80', FFFD), # only 1 continuation byte
(b'\xf1\x80\x80', FFFD), # only 2 continuation bytes
(b'\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
            (b'\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 2 invalid
(b'\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
            (b'\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cb and 1 valid
(b'\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
(b'\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
(b'\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
(b'\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
(b'\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
(b'\xf1\xf1\x80\x41', FFFD*2+'A'),
(b'\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
# with invalid start byte of a 4-byte sequence (rfc2279)
(b'\xf5', FFFD), # only the start byte
(b'\xf5\xf5', FFFD*2), # 2 start bytes
(b'\xf5\x80', FFFD*2), # only 1 continuation byte
            (b'\xf5\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
(b'\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
(b'\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
(b'\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
# with invalid start byte of a 5-byte sequence (rfc2279)
(b'\xf8', FFFD), # only the start byte
(b'\xf8\xf8', FFFD*2), # 2 start bytes
(b'\xf8\x80', FFFD*2), # only one continuation byte
(b'\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
(b'\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
# with invalid start byte of a 6-byte sequence (rfc2279)
(b'\xfc', FFFD), # only the start byte
(b'\xfc\xfc', FFFD*2), # 2 start bytes
(b'\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
# invalid start byte
(b'\xfe', FFFD),
(b'\xfe\x80\x80', FFFD*3),
# other sequences
(b'\xf1\x80\x41\x42\x43', '\ufffd\x41\x42\x43'),
(b'\xf1\x80\xff\x42\x43', '\ufffd\ufffd\x42\x43'),
(b'\xf1\x80\xc2\x81\x43', '\ufffd\x81\x43'),
(b'\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
]
for n, (seq, res) in enumerate(sequences):
self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((seq+b'b').decode('utf-8', 'replace'), res+'b')
self.assertEqual(seq.decode('utf-8', 'ignore'),
res.replace('\uFFFD', ''))
def assertCorrectUTF8Decoding(self, seq, res, err):
"""
        Check that an invalid UTF-8 sequence raises a UnicodeDecodeError when
        'strict' is used, decodes to res when 'replace' is used, and that the
        invalid bytes produce nothing when 'ignore' is used.
"""
with self.assertRaises(UnicodeDecodeError) as cm:
seq.decode('utf-8')
exc = cm.exception
self.assertIn(err, str(exc))
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'replace'),
'aaaa' + res + 'bbbb')
res = res.replace('\ufffd', '')
self.assertEqual(seq.decode('utf-8', 'ignore'), res)
self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'ignore'),
'aaaa' + res + 'bbbb')
def test_invalid_start_byte(self):
"""
        Test that an 'invalid start byte' error is raised when the first byte
        is not in the ASCII range and is not a valid start byte of a 2-, 3-, or
        4-byte sequence. The invalid start byte is replaced with a single
U+FFFD when errors='replace'.
E.g. <80> is a continuation byte and can appear only after a start byte.
"""
FFFD = '\ufffd'
for byte in b'\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF':
self.assertCorrectUTF8Decoding(bytes([byte]), '\ufffd',
'invalid start byte')
def test_unexpected_end_of_data(self):
"""
Test that an 'unexpected end of data' error is raised when the string
        ends after a start byte of a 2-, 3-, or 4-byte sequence without having
        enough continuation bytes. The incomplete sequence is replaced with a
        single U+FFFD when errors='replace'.
        E.g. in the sequence <F3 80 80>, F3 is the start byte of a 4-byte
        sequence, but it's followed by only 2 valid continuation bytes and the
        last continuation byte is missing.
        Note: the continuation bytes must all be valid; if one of them is
        invalid, another error will be raised.
"""
sequences = [
'C2', 'DF',
'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF',
'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF',
'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF',
'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF',
'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF',
'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF'
]
FFFD = '\ufffd'
for seq in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), '\ufffd',
'unexpected end of data')
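        # For illustration (hypothetical interactive check, assuming CPython 3's
        # UTF-8 codec; not part of the original suite):
        #   b'abc\xf3\x80\x80'.decode('utf-8', 'replace') == 'abc' + '\ufffd'
        #   b'abc\xf3\x80\x80'.decode('utf-8', 'ignore')  == 'abc'
        #   b'abc\xf3\x80\x80'.decode('utf-8')            -> raises UnicodeDecodeError
        #                                                    ("unexpected end of data")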
def test_invalid_cb_for_2bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte of a 2-bytes sequence is invalid. The start byte
is replaced by a single U+FFFD and the second byte is handled
separately when errors='replace'.
E.g. in the sequence <C2 41>, C2 is the start byte of a 2-bytes
sequence, but 41 is not a valid continuation byte because it's the
ASCII letter 'A'.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('C2 00', FFFD+'\x00'), ('C2 7F', FFFD+'\x7f'),
('C2 C0', FFFDx2), ('C2 FF', FFFDx2),
('DF 00', FFFD+'\x00'), ('DF 7F', FFFD+'\x7f'),
('DF C0', FFFDx2), ('DF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
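        # For illustration (hypothetical interactive check, assuming CPython 3's
        # UTF-8 codec; not part of the original suite):
        #   b'\xc2\x41'.decode('utf-8', 'replace') == '\ufffd' + 'A'
        #   b'\xc2\xc0'.decode('utf-8', 'replace') == '\ufffd\ufffd'
        #   b'\xc2\x41'.decode('utf-8')            -> raises UnicodeDecodeError
        #                                             ("invalid continuation byte")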
def test_invalid_cb_for_3bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte(s) of a 3-bytes sequence are invalid. When
errors='replace', if the first continuation byte is valid, the first
two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the
third byte is handled separately, otherwise only the start byte is
replaced with a U+FFFD and the other continuation bytes are handled
separately.
E.g. in the sequence <E1 80 41>, E1 is the start byte of a 3-bytes
sequence, 80 is a valid continuation byte, but 41 is not a valid cb
because it's the ASCII letter 'A'.
Note: when the start byte is E0 or ED, the valid ranges for the first
continuation byte are limited to A0..BF and 80..9F respectively.
Python 2 used to consider all the bytes in range 80..BF valid when the
start byte was ED. This is fixed in Python 3.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('E0 00', FFFD+'\x00'), ('E0 7F', FFFD+'\x7f'), ('E0 80', FFFDx2),
('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2),
('E0 A0 00', FFFD+'\x00'), ('E0 A0 7F', FFFD+'\x7f'),
('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2),
('E0 BF 00', FFFD+'\x00'), ('E0 BF 7F', FFFD+'\x7f'),
('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+'\x00'),
('E1 7F', FFFD+'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2),
('E1 80 00', FFFD+'\x00'), ('E1 80 7F', FFFD+'\x7f'),
('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2),
('E1 BF 00', FFFD+'\x00'), ('E1 BF 7F', FFFD+'\x7f'),
('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+'\x00'),
('EC 7F', FFFD+'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2),
('EC 80 00', FFFD+'\x00'), ('EC 80 7F', FFFD+'\x7f'),
('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2),
('EC BF 00', FFFD+'\x00'), ('EC BF 7F', FFFD+'\x7f'),
('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+'\x00'),
('ED 7F', FFFD+'\x7f'),
('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^
('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+'\x00'),
('ED 80 7F', FFFD+'\x7f'), ('ED 80 C0', FFFDx2),
('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+'\x00'),
('ED 9F 7F', FFFD+'\x7f'), ('ED 9F C0', FFFDx2),
('ED 9F FF', FFFDx2), ('EE 00', FFFD+'\x00'),
('EE 7F', FFFD+'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2),
('EE 80 00', FFFD+'\x00'), ('EE 80 7F', FFFD+'\x7f'),
('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2),
('EE BF 00', FFFD+'\x00'), ('EE BF 7F', FFFD+'\x7f'),
('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+'\x00'),
('EF 7F', FFFD+'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2),
('EF 80 00', FFFD+'\x00'), ('EF 80 7F', FFFD+'\x7f'),
('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2),
('EF BF 00', FFFD+'\x00'), ('EF BF 7F', FFFD+'\x7f'),
('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
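        # For illustration (hypothetical interactive check, assuming CPython 3's
        # UTF-8 codec; not part of the original suite):
        #   b'\xe1\x80\x41'.decode('utf-8', 'replace') == '\ufffd' + 'A'   # E1 80 -> one U+FFFD
        #   b'\xe0\x80'.decode('utf-8', 'replace')     == '\ufffd\ufffd'   # 80 invalid after E0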
def test_invalid_cb_for_4bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte(s) of a 4-bytes sequence are invalid. When
        errors='replace', the start byte and all the following valid
        continuation bytes are replaced with a single U+FFFD, and all the bytes
        starting from the first invalid continuation byte (included) are
handled separately.
        E.g. in the sequence <F1 80 41>, F1 is the start byte of a 4-bytes
        sequence, 80 is a valid continuation byte, but 41 is not a valid cb
        because it's the ASCII letter 'A'.
        Note: when the start byte is F0 or F4, the valid ranges for the first
        continuation byte are limited to 90..BF and 80..8F respectively,
        because other values would produce either overlong encodings of code
        points below U+10000 or code points above U+10FFFF; both are rejected.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('F0 00', FFFD+'\x00'), ('F0 7F', FFFD+'\x7f'), ('F0 80', FFFDx2),
('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2),
('F0 90 00', FFFD+'\x00'), ('F0 90 7F', FFFD+'\x7f'),
('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2),
('F0 BF 00', FFFD+'\x00'), ('F0 BF 7F', FFFD+'\x7f'),
('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2),
('F0 90 80 00', FFFD+'\x00'), ('F0 90 80 7F', FFFD+'\x7f'),
('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2),
('F0 90 BF 00', FFFD+'\x00'), ('F0 90 BF 7F', FFFD+'\x7f'),
('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2),
('F0 BF 80 00', FFFD+'\x00'), ('F0 BF 80 7F', FFFD+'\x7f'),
('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2),
('F0 BF BF 00', FFFD+'\x00'), ('F0 BF BF 7F', FFFD+'\x7f'),
('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2),
('F1 00', FFFD+'\x00'), ('F1 7F', FFFD+'\x7f'), ('F1 C0', FFFDx2),
('F1 FF', FFFDx2), ('F1 80 00', FFFD+'\x00'),
('F1 80 7F', FFFD+'\x7f'), ('F1 80 C0', FFFDx2),
('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+'\x00'),
('F1 BF 7F', FFFD+'\x7f'), ('F1 BF C0', FFFDx2),
('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+'\x00'),
('F1 80 80 7F', FFFD+'\x7f'), ('F1 80 80 C0', FFFDx2),
('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+'\x00'),
('F1 80 BF 7F', FFFD+'\x7f'), ('F1 80 BF C0', FFFDx2),
('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+'\x00'),
('F1 BF 80 7F', FFFD+'\x7f'), ('F1 BF 80 C0', FFFDx2),
('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+'\x00'),
('F1 BF BF 7F', FFFD+'\x7f'), ('F1 BF BF C0', FFFDx2),
('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+'\x00'),
('F3 7F', FFFD+'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2),
('F3 80 00', FFFD+'\x00'), ('F3 80 7F', FFFD+'\x7f'),
('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2),
('F3 BF 00', FFFD+'\x00'), ('F3 BF 7F', FFFD+'\x7f'),
('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2),
('F3 80 80 00', FFFD+'\x00'), ('F3 80 80 7F', FFFD+'\x7f'),
('F3 80 80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2),
('F3 80 BF 00', FFFD+'\x00'), ('F3 80 BF 7F', FFFD+'\x7f'),
('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2),
('F3 BF 80 00', FFFD+'\x00'), ('F3 BF 80 7F', FFFD+'\x7f'),
('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2),
('F3 BF BF 00', FFFD+'\x00'), ('F3 BF BF 7F', FFFD+'\x7f'),
('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2),
('F4 00', FFFD+'\x00'), ('F4 7F', FFFD+'\x7f'), ('F4 90', FFFDx2),
('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2),
('F4 80 00', FFFD+'\x00'), ('F4 80 7F', FFFD+'\x7f'),
('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2),
('F4 8F 00', FFFD+'\x00'), ('F4 8F 7F', FFFD+'\x7f'),
('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2),
('F4 80 80 00', FFFD+'\x00'), ('F4 80 80 7F', FFFD+'\x7f'),
('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2),
('F4 80 BF 00', FFFD+'\x00'), ('F4 80 BF 7F', FFFD+'\x7f'),
('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2),
('F4 8F 80 00', FFFD+'\x00'), ('F4 8F 80 7F', FFFD+'\x7f'),
('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2),
('F4 8F BF 00', FFFD+'\x00'), ('F4 8F BF 7F', FFFD+'\x7f'),
('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2)
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
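        # For illustration (hypothetical interactive check, assuming CPython 3's
        # UTF-8 codec; not part of the original suite):
        #   b'\xf1\x80\x80\x41'.decode('utf-8', 'replace') == '\ufffd' + 'A'  # F1 80 80 -> one U+FFFD
        #   b'\xf0\x80'.decode('utf-8', 'replace')         == '\ufffd\ufffd'  # 80 invalid after F0
        #   b'\xf4\x90'.decode('utf-8', 'replace')         == '\ufffd\ufffd'  # 90 invalid after F4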
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual("www.python.org.".encode("idna"), b"www.python.org.")
def test_codecs_errors(self):
# Error handling (encoding)
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
self.assertEqual('Andr\202 x'.encode('ascii','ignore'), b"Andr x")
self.assertEqual('Andr\202 x'.encode('ascii','replace'), b"Andr? x")
self.assertEqual('Andr\202 x'.encode('ascii', 'replace'),
'Andr\202 x'.encode('ascii', errors='replace'))
self.assertEqual('Andr\202 x'.encode('ascii', 'ignore'),
'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
# Error handling (decoding)
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii')
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict')
self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x")
self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x')
self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x')
# Error handling (unknown character names)
self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
# Error handling (truncated escape sequence)
self.assertRaises(UnicodeError, b"\\".decode, "unicode-escape")
self.assertRaises(TypeError, b"hello".decode, "test.unicode1")
self.assertRaises(TypeError, str, b"hello", "test.unicode2")
self.assertRaises(TypeError, "hello".encode, "test.unicode1")
self.assertRaises(TypeError, "hello".encode, "test.unicode2")
# Error handling (wrong arguments)
self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
# Error handling (lone surrogate in
# _PyUnicode_TransformDecimalAndSpaceToASCII())
self.assertRaises(ValueError, int, "\ud800")
self.assertRaises(ValueError, int, "\udf00")
self.assertRaises(ValueError, float, "\ud800")
self.assertRaises(ValueError, float, "\udf00")
self.assertRaises(ValueError, complex, "\ud800")
self.assertRaises(ValueError, complex, "\udf00")
def test_codecs(self):
# Encoding
self.assertEqual('hello'.encode('ascii'), b'hello')
self.assertEqual('hello'.encode('utf-7'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf-16-le'), b'h\000e\000l\000l\000o\000')
self.assertEqual('hello'.encode('utf-16-be'), b'\000h\000e\000l\000l\000o')
self.assertEqual('hello'.encode('latin-1'), b'hello')
# Default encoding is utf-8
self.assertEqual('\u2603'.encode(), b'\xe2\x98\x83')
# Roundtrip safety for BMP (just the first 1024 chars)
for c in range(1024):
u = chr(c)
for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
'utf-16-be', 'raw_unicode_escape',
'unicode_escape'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 256 chars)
for c in range(256):
u = chr(c)
for encoding in ('latin-1',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 128 chars)
for c in range(128):
u = chr(c)
for encoding in ('ascii',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for non-BMP (just a few chars)
with warnings.catch_warnings():
u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
'raw_unicode_escape', 'unicode_escape'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# UTF-8 must be roundtrip safe for all code points
# (except surrogates, which are forbidden).
u = ''.join(map(chr, list(range(0, 0xd800)) +
list(range(0xe000, 0x110000))))
for encoding in ('utf-8',):
self.assertEqual(str(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
# 0-127
s = bytes(range(128))
for encoding in (
'cp037', 'cp1026', 'cp273',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp1125',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_9',
'koi8_r', 'koi8_t', 'koi8_u', 'kz1048', 'latin_1',
'mac_cyrillic', 'mac_latin2',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
'cp1006', 'iso8859_8',
### These have undefined mappings:
#'cp424',
### These fail the round-trip:
#'cp875'
):
self.assertEqual(str(s, encoding).encode(encoding), s)
# 128-255
s = bytes(range(128, 256))
for encoding in (
'cp037', 'cp1026', 'cp273',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp1125',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_4', 'iso8859_5',
'iso8859_9', 'koi8_r', 'koi8_u', 'latin_1',
'mac_cyrillic', 'mac_latin2',
### These have undefined mappings:
#'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
#'cp1256', 'cp1257', 'cp1258',
#'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
#'iso8859_3', 'iso8859_6', 'iso8859_7', 'koi8_t', 'kz1048',
#'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
### These fail the round-trip:
#'cp1006', 'cp875', 'iso8859_8',
):
self.assertEqual(str(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
def test_ucs4(self):
x = '\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00100000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00010000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
try:
br'\U11111111'.decode("raw-unicode-escape")
except UnicodeDecodeError as e:
self.assertEqual(e.start, 0)
self.assertEqual(e.end, 10)
else:
self.fail("Should have raised UnicodeDecodeError")
def test_conversion(self):
# Make sure __str__() works properly
class ObjectToStr:
def __str__(self):
return "foo"
class StrSubclassToStr(str):
def __str__(self):
return "foo"
class StrSubclassToStrSubclass(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
self.assertEqual(str(ObjectToStr()), "foo")
self.assertEqual(str(StrSubclassToStr("bar")), "foo")
s = str(StrSubclassToStrSubclass("foo"))
self.assertEqual(s, "foofoo")
self.assertIs(type(s), StrSubclassToStrSubclass)
s = StrSubclass(StrSubclassToStrSubclass("foo"))
self.assertEqual(s, "foofoo")
self.assertIs(type(s), StrSubclass)
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return '\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_printable_repr(self):
self.assertEqual(repr('\U00010000'), "'%c'" % (0x10000,)) # printable
self.assertEqual(repr('\U00014000'), "'\\U00014000'") # nonprintable
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
@unittest.skipIf(sys.maxsize > (1 << 32) or struct.calcsize('P') != 4,
'only applies to 32-bit platforms')
def test_expandtabs_overflows_gracefully(self):
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxsize)
@support.cpython_only
def test_expandtabs_optimization(self):
s = 'abc'
self.assertIs(s.expandtabs(), s)
def test_raiseMemError(self):
if struct.calcsize('P') == 8:
# 64 bits pointers
ascii_struct_size = 48
compact_struct_size = 72
else:
# 32 bits pointers
ascii_struct_size = 24
compact_struct_size = 36
for char in ('a', '\xe9', '\u20ac', '\U0010ffff'):
code = ord(char)
if code < 0x100:
char_size = 1 # sizeof(Py_UCS1)
struct_size = ascii_struct_size
elif code < 0x10000:
char_size = 2 # sizeof(Py_UCS2)
struct_size = compact_struct_size
else:
char_size = 4 # sizeof(Py_UCS4)
struct_size = compact_struct_size
# Note: sys.maxsize is half of the actual max allocation because of
# the signedness of Py_ssize_t. Strings of maxlen-1 should in principle
# be allocatable, given enough memory.
maxlen = ((sys.maxsize - struct_size) // char_size)
alloc = lambda: char * maxlen
self.assertRaises(MemoryError, alloc)
self.assertRaises(MemoryError, alloc)
def test_format_subclass(self):
class S(str):
def __str__(self):
return '__str__ overridden'
s = S('xxx')
self.assertEqual("%s" % s, '__str__ overridden')
self.assertEqual("{}".format(s), '__str__ overridden')
def test_subclass_add(self):
class S(str):
def __add__(self, o):
return "3"
self.assertEqual(S("4") + S("5"), "3")
class S(str):
def __iadd__(self, o):
return "3"
s = S("1")
s += "4"
self.assertEqual(s, "3")
def test_getnewargs(self):
text = 'abc'
args = text.__getnewargs__()
self.assertIsNot(args[0], text)
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
@support.cpython_only
@support.requires_legacy_unicode_capi
def test_resize(self):
from _testcapi import getargs_u
for length in range(1, 100, 7):
# generate a fresh string (refcount=1)
text = 'a' * length + 'b'
# fill wstr internal field
abc = getargs_u(text)
self.assertEqual(abc, text)
# resize text: wstr field must be cleared and then recomputed
text += 'c'
abcdef = getargs_u(text)
self.assertNotEqual(abc, abcdef)
self.assertEqual(abcdef, text)
def test_compare(self):
# Issue #17615
N = 10
ascii = 'a' * N
ascii2 = 'z' * N
latin = '\x80' * N
latin2 = '\xff' * N
bmp = '\u0100' * N
bmp2 = '\uffff' * N
astral = '\U00100000' * N
astral2 = '\U0010ffff' * N
strings = (
ascii, ascii2,
latin, latin2,
bmp, bmp2,
astral, astral2)
for text1, text2 in itertools.combinations(strings, 2):
equal = (text1 is text2)
self.assertEqual(text1 == text2, equal)
self.assertEqual(text1 != text2, not equal)
if equal:
self.assertTrue(text1 <= text2)
self.assertTrue(text1 >= text2)
# text1 is text2: duplicate strings to skip the "str1 == str2"
# optimization in unicode_compare_eq() and really compare
# character per character
copy1 = duplicate_string(text1)
copy2 = duplicate_string(text2)
self.assertIsNot(copy1, copy2)
self.assertTrue(copy1 == copy2)
self.assertFalse(copy1 != copy2)
self.assertTrue(copy1 <= copy2)
self.assertTrue(copy2 >= copy2)
self.assertTrue(ascii < ascii2)
self.assertTrue(ascii < latin)
self.assertTrue(ascii < bmp)
self.assertTrue(ascii < astral)
self.assertFalse(ascii >= ascii2)
self.assertFalse(ascii >= latin)
self.assertFalse(ascii >= bmp)
self.assertFalse(ascii >= astral)
self.assertFalse(latin < ascii)
self.assertTrue(latin < latin2)
self.assertTrue(latin < bmp)
self.assertTrue(latin < astral)
self.assertTrue(latin >= ascii)
self.assertFalse(latin >= latin2)
self.assertFalse(latin >= bmp)
self.assertFalse(latin >= astral)
self.assertFalse(bmp < ascii)
self.assertFalse(bmp < latin)
self.assertTrue(bmp < bmp2)
self.assertTrue(bmp < astral)
self.assertTrue(bmp >= ascii)
self.assertTrue(bmp >= latin)
self.assertFalse(bmp >= bmp2)
self.assertFalse(bmp >= astral)
self.assertFalse(astral < ascii)
self.assertFalse(astral < latin)
self.assertFalse(astral < bmp2)
self.assertTrue(astral < astral2)
self.assertTrue(astral >= ascii)
self.assertTrue(astral >= latin)
self.assertTrue(astral >= bmp2)
self.assertFalse(astral >= astral2)
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, str)
support.check_free_after_iterating(self, reversed, str)
def test_check_encoding_errors(self):
# bpo-37388: str(bytes) and str.decode() must check encoding and errors
# arguments in dev mode
encodings = ('ascii', 'utf8', 'latin1')
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
encodings = {encodings!r}
for data in (b'', b'short string'):
try:
str(data, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
str(data, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
for encoding in encodings:
try:
str(data, encoding, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
for data in ('', 'short string'):
try:
data.encode(encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
data.encode(errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
for encoding in encodings:
try:
data.encode(encoding, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
class CAPITest(unittest.TestCase):
# Test PyUnicode_FromFormat()
def test_from_format(self):
import_helper.import_module('ctypes')
from ctypes import (
c_char_p,
pythonapi, py_object, sizeof,
c_int, c_long, c_longlong, c_ssize_t,
c_uint, c_ulong, c_ulonglong, c_size_t, c_void_p)
name = "PyUnicode_FromFormat"
_PyUnicode_FromFormat = getattr(pythonapi, name)
_PyUnicode_FromFormat.argtypes = (c_char_p,)
_PyUnicode_FromFormat.restype = py_object
def PyUnicode_FromFormat(format, *args):
cargs = tuple(
py_object(arg) if isinstance(arg, str) else arg
for arg in args)
return _PyUnicode_FromFormat(format, *cargs)
def check_format(expected, format, *args):
text = PyUnicode_FromFormat(format, *args)
self.assertEqual(expected, text)
# ascii format, non-ascii argument
check_format('ascii\x7f=unicode\xe9',
b'ascii\x7f=%U', 'unicode\xe9')
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
# raises an error
self.assertRaisesRegex(ValueError,
r'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
'string, got a non-ASCII byte: 0xe9$',
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
# test "%c"
check_format('\uabcd',
b'%c', c_int(0xabcd))
check_format('\U0010ffff',
b'%c', c_int(0x10ffff))
with self.assertRaises(OverflowError):
PyUnicode_FromFormat(b'%c', c_int(0x110000))
# Issue #18183
check_format('\U00010000\U00100000',
b'%c%c', c_int(0x10000), c_int(0x100000))
# test "%"
check_format('%',
b'%')
check_format('%',
b'%%')
check_format('%s',
b'%%s')
check_format('[%]',
b'[%%]')
check_format('%abc',
b'%%%s', b'abc')
# truncated string
check_format('abc',
b'%.3s', b'abcdef')
check_format('abc[\ufffd',
b'%.5s', 'abc[\u20ac]'.encode('utf8'))
check_format("'\\u20acABC'",
b'%A', '\u20acABC')
check_format("'\\u20",
b'%.5A', '\u20acABCDEF')
check_format("'\u20acABC'",
b'%R', '\u20acABC')
check_format("'\u20acA",
b'%.3R', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3S', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3U', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3V', '\u20acABCDEF', None)
check_format('abc[\ufffd',
b'%.5V', None, 'abc[\u20ac]'.encode('utf8'))
        # the following tests come from #7330
# test width modifier and precision modifier with %S
check_format("repr= abc",
b'repr=%5S', 'abc')
check_format("repr=ab",
b'repr=%.2S', 'abc')
check_format("repr= ab",
b'repr=%5.2S', 'abc')
# test width modifier and precision modifier with %R
check_format("repr= 'abc'",
b'repr=%8R', 'abc')
check_format("repr='ab",
b'repr=%.3R', 'abc')
check_format("repr= 'ab",
b'repr=%5.3R', 'abc')
# test width modifier and precision modifier with %A
check_format("repr= 'abc'",
b'repr=%8A', 'abc')
check_format("repr='ab",
b'repr=%.3A', 'abc')
check_format("repr= 'ab",
b'repr=%5.3A', 'abc')
# test width modifier and precision modifier with %s
check_format("repr= abc",
b'repr=%5s', b'abc')
check_format("repr=ab",
b'repr=%.2s', b'abc')
check_format("repr= ab",
b'repr=%5.2s', b'abc')
# test width modifier and precision modifier with %U
check_format("repr= abc",
b'repr=%5U', 'abc')
check_format("repr=ab",
b'repr=%.2U', 'abc')
check_format("repr= ab",
b'repr=%5.2U', 'abc')
# test width modifier and precision modifier with %V
check_format("repr= abc",
b'repr=%5V', 'abc', b'123')
check_format("repr=ab",
b'repr=%.2V', 'abc', b'123')
check_format("repr= ab",
b'repr=%5.2V', 'abc', b'123')
check_format("repr= 123",
b'repr=%5V', None, b'123')
check_format("repr=12",
b'repr=%.2V', None, b'123')
check_format("repr= 12",
b'repr=%5.2V', None, b'123')
# test integer formats (%i, %d, %u)
check_format('010',
b'%03i', c_int(10))
check_format('0010',
b'%0.4i', c_int(10))
check_format('-123',
b'%i', c_int(-123))
check_format('-123',
b'%li', c_long(-123))
check_format('-123',
b'%lli', c_longlong(-123))
check_format('-123',
b'%zi', c_ssize_t(-123))
check_format('-123',
b'%d', c_int(-123))
check_format('-123',
b'%ld', c_long(-123))
check_format('-123',
b'%lld', c_longlong(-123))
check_format('-123',
b'%zd', c_ssize_t(-123))
check_format('123',
b'%u', c_uint(123))
check_format('123',
b'%lu', c_ulong(123))
check_format('123',
b'%llu', c_ulonglong(123))
check_format('123',
b'%zu', c_size_t(123))
# test long output
min_longlong = -(2 ** (8 * sizeof(c_longlong) - 1))
max_longlong = -min_longlong - 1
check_format(str(min_longlong),
b'%lld', c_longlong(min_longlong))
check_format(str(max_longlong),
b'%lld', c_longlong(max_longlong))
max_ulonglong = 2 ** (8 * sizeof(c_ulonglong)) - 1
check_format(str(max_ulonglong),
b'%llu', c_ulonglong(max_ulonglong))
PyUnicode_FromFormat(b'%p', c_void_p(-1))
# test padding (width and/or precision)
check_format('123'.rjust(10, '0'),
b'%010i', c_int(123))
check_format('123'.rjust(100),
b'%100i', c_int(123))
check_format('123'.rjust(100, '0'),
b'%.100i', c_int(123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80i', c_int(123))
check_format('123'.rjust(10, '0'),
b'%010u', c_uint(123))
check_format('123'.rjust(100),
b'%100u', c_uint(123))
check_format('123'.rjust(100, '0'),
b'%.100u', c_uint(123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80u', c_uint(123))
check_format('123'.rjust(10, '0'),
b'%010x', c_int(0x123))
check_format('123'.rjust(100),
b'%100x', c_int(0x123))
check_format('123'.rjust(100, '0'),
b'%.100x', c_int(0x123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80x', c_int(0x123))
# test %A
check_format(r"%A:'abc\xe9\uabcd\U0010ffff'",
b'%%A:%A', 'abc\xe9\uabcd\U0010ffff')
# test %V
check_format('repr=abc',
b'repr=%V', 'abc', b'xyz')
# Test string decode from parameter of %s using utf-8.
# b'\xe4\xba\xba\xe6\xb0\x91' is utf-8 encoded byte sequence of
# '\u4eba\u6c11'
check_format('repr=\u4eba\u6c11',
b'repr=%V', None, b'\xe4\xba\xba\xe6\xb0\x91')
#Test replace error handler.
check_format('repr=abc\ufffd',
b'repr=%V', None, b'abc\xff')
# not supported: copy the raw format string. these tests are just here
# to check for crashes and should not be considered as specifications
check_format('%s',
b'%1%s', b'abc')
check_format('%1abc',
b'%1abc')
check_format('%+i',
b'%+i', c_int(10))
check_format('%.%s',
b'%.%s', b'abc')
# Issue #33817: empty strings
check_format('',
b'')
check_format('',
b'%s', b'')
# Test PyUnicode_AsWideChar()
@support.cpython_only
def test_aswidechar(self):
from _testcapi import unicode_aswidechar
import_helper.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidechar('abcdef', 2)
self.assertEqual(size, 2)
self.assertEqual(wchar, 'ab')
wchar, size = unicode_aswidechar('abc', 3)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc')
wchar, size = unicode_aswidechar('abc', 4)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc', 10)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc\0def', 20)
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
buflen = 3
nchar = 2
else: # sizeof(c_wchar) == 4
buflen = 2
nchar = 1
wchar, size = unicode_aswidechar(nonbmp, buflen)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsWideCharString()
@support.cpython_only
def test_aswidecharstring(self):
from _testcapi import unicode_aswidecharstring
import_helper.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidecharstring('abc')
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidecharstring('abc\0def')
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
nchar = 2
else: # sizeof(c_wchar) == 4
nchar = 1
wchar, size = unicode_aswidecharstring(nonbmp)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsUCS4()
@support.cpython_only
def test_asucs4(self):
from _testcapi import unicode_asucs4
for s in ['abc', '\xa1\xa2', '\u4f60\u597d', 'a\U0001f600',
'a\ud800b\udfffc', '\ud834\udd1e']:
l = len(s)
self.assertEqual(unicode_asucs4(s, l, True), s+'\0')
self.assertEqual(unicode_asucs4(s, l, False), s+'\uffff')
self.assertEqual(unicode_asucs4(s, l+1, True), s+'\0\uffff')
self.assertEqual(unicode_asucs4(s, l+1, False), s+'\0\uffff')
self.assertRaises(SystemError, unicode_asucs4, s, l-1, True)
self.assertRaises(SystemError, unicode_asucs4, s, l-2, False)
s = '\0'.join([s, s])
self.assertEqual(unicode_asucs4(s, len(s), True), s+'\0')
self.assertEqual(unicode_asucs4(s, len(s), False), s+'\uffff')
# Test PyUnicode_AsUTF8()
@support.cpython_only
def test_asutf8(self):
from _testcapi import unicode_asutf8
bmp = '\u0100'
bmp2 = '\uffff'
nonbmp = chr(0x10ffff)
self.assertEqual(unicode_asutf8(bmp), b'\xc4\x80')
self.assertEqual(unicode_asutf8(bmp2), b'\xef\xbf\xbf')
self.assertEqual(unicode_asutf8(nonbmp), b'\xf4\x8f\xbf\xbf')
self.assertRaises(UnicodeEncodeError, unicode_asutf8, 'a\ud800b\udfffc')
# Test PyUnicode_AsUTF8AndSize()
@support.cpython_only
def test_asutf8andsize(self):
from _testcapi import unicode_asutf8andsize
bmp = '\u0100'
bmp2 = '\uffff'
nonbmp = chr(0x10ffff)
self.assertEqual(unicode_asutf8andsize(bmp), (b'\xc4\x80', 2))
self.assertEqual(unicode_asutf8andsize(bmp2), (b'\xef\xbf\xbf', 3))
self.assertEqual(unicode_asutf8andsize(nonbmp), (b'\xf4\x8f\xbf\xbf', 4))
self.assertRaises(UnicodeEncodeError, unicode_asutf8andsize, 'a\ud800b\udfffc')
# Test PyUnicode_FindChar()
@support.cpython_only
def test_findchar(self):
from _testcapi import unicode_findchar
for str in "\xa1", "\u8000\u8080", "\ud800\udc02", "\U0001f100\U0001f1f1":
for i, ch in enumerate(str):
self.assertEqual(unicode_findchar(str, ord(ch), 0, len(str), 1), i)
self.assertEqual(unicode_findchar(str, ord(ch), 0, len(str), -1), i)
str = "!>_<!"
self.assertEqual(unicode_findchar(str, 0x110000, 0, len(str), 1), -1)
self.assertEqual(unicode_findchar(str, 0x110000, 0, len(str), -1), -1)
# start < end
self.assertEqual(unicode_findchar(str, ord('!'), 1, len(str)+1, 1), 4)
self.assertEqual(unicode_findchar(str, ord('!'), 1, len(str)+1, -1), 4)
# start >= end
self.assertEqual(unicode_findchar(str, ord('!'), 0, 0, 1), -1)
self.assertEqual(unicode_findchar(str, ord('!'), len(str), 0, 1), -1)
# negative
self.assertEqual(unicode_findchar(str, ord('!'), -len(str), -1, 1), 0)
self.assertEqual(unicode_findchar(str, ord('!'), -len(str), -1, -1), 0)
# Test PyUnicode_CopyCharacters()
@support.cpython_only
def test_copycharacters(self):
from _testcapi import unicode_copycharacters
strings = [
'abcde', '\xa1\xa2\xa3\xa4\xa5',
'\u4f60\u597d\u4e16\u754c\uff01',
'\U0001f600\U0001f601\U0001f602\U0001f603\U0001f604'
]
for idx, from_ in enumerate(strings):
# wide -> narrow: exceed maxchar limitation
for to in strings[:idx]:
self.assertRaises(
SystemError,
unicode_copycharacters, to, 0, from_, 0, 5
)
# same kind
for from_start in range(5):
self.assertEqual(
unicode_copycharacters(from_, 0, from_, from_start, 5),
(from_[from_start:from_start+5].ljust(5, '\0'),
5-from_start)
)
for to_start in range(5):
self.assertEqual(
unicode_copycharacters(from_, to_start, from_, to_start, 5),
(from_[to_start:to_start+5].rjust(5, '\0'),
5-to_start)
)
# narrow -> wide
# Tests omitted since this creates invalid strings.
s = strings[0]
self.assertRaises(IndexError, unicode_copycharacters, s, 6, s, 0, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, -1, s, 0, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, 0, s, 6, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, 0, s, -1, 5)
self.assertRaises(SystemError, unicode_copycharacters, s, 1, s, 0, 5)
self.assertRaises(SystemError, unicode_copycharacters, s, 0, s, 0, -1)
self.assertRaises(SystemError, unicode_copycharacters, s, 0, b'', 0, 0)
@support.cpython_only
@support.requires_legacy_unicode_capi
def test_encode_decimal(self):
from _testcapi import unicode_encodedecimal
self.assertEqual(unicode_encodedecimal('123'),
b'123')
self.assertEqual(unicode_encodedecimal('\u0663.\u0661\u0664'),
b'3.14')
self.assertEqual(unicode_encodedecimal("\N{EM SPACE}3.14\N{EN SPACE}"),
b' 3.14 ')
self.assertRaises(UnicodeEncodeError,
unicode_encodedecimal, "123\u20ac", "strict")
self.assertRaisesRegex(
ValueError,
"^'decimal' codec can't encode character",
unicode_encodedecimal, "123\u20ac", "replace")
@support.cpython_only
@support.requires_legacy_unicode_capi
def test_transform_decimal(self):
from _testcapi import unicode_transformdecimaltoascii as transform_decimal
self.assertEqual(transform_decimal('123'),
'123')
self.assertEqual(transform_decimal('\u0663.\u0661\u0664'),
'3.14')
self.assertEqual(transform_decimal("\N{EM SPACE}3.14\N{EN SPACE}"),
"\N{EM SPACE}3.14\N{EN SPACE}")
self.assertEqual(transform_decimal('123\u20ac'),
'123\u20ac')
@support.cpython_only
def test_pep393_utf8_caching_bug(self):
# Issue #25709: Problem with string concatenation and utf-8 cache
from _testcapi import getargs_s_hash
for k in 0x24, 0xa4, 0x20ac, 0x1f40d:
s = ''
for i in range(5):
# Due to CPython specific optimization the 's' string can be
# resized in-place.
s += chr(k)
# Parsing with the "s#" format code calls indirectly
# PyUnicode_AsUTF8AndSize() which creates the UTF-8
# encoded string cached in the Unicode object.
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
# Check that the second call returns the same result
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
class StringModuleTest(unittest.TestCase):
def test_formatter_parser(self):
def parse(format):
return list(_string.formatter_parser(format))
formatter = parse("prefix {2!s}xxx{0:^+10.3f}{obj.attr!s} {z[0]!s:10}")
self.assertEqual(formatter, [
('prefix ', '2', '', 's'),
('xxx', '0', '^+10.3f', None),
('', 'obj.attr', '', 's'),
(' ', 'z[0]', '10', 's'),
])
formatter = parse("prefix {} suffix")
self.assertEqual(formatter, [
('prefix ', '', '', None),
(' suffix', None, None, None),
])
formatter = parse("str")
self.assertEqual(formatter, [
('str', None, None, None),
])
formatter = parse("")
self.assertEqual(formatter, [])
formatter = parse("{0}")
self.assertEqual(formatter, [
('', '0', '', None),
])
self.assertRaises(TypeError, _string.formatter_parser, 1)
def test_formatter_field_name_split(self):
def split(name):
items = list(_string.formatter_field_name_split(name))
items[1] = list(items[1])
return items
self.assertEqual(split("obj"), ["obj", []])
self.assertEqual(split("obj.arg"), ["obj", [(True, 'arg')]])
self.assertEqual(split("obj[key]"), ["obj", [(False, 'key')]])
self.assertEqual(split("obj.arg[key1][key2]"), [
"obj",
[(True, 'arg'),
(False, 'key1'),
(False, 'key2'),
]])
self.assertRaises(TypeError, _string.formatter_field_name_split, 1)
if __name__ == "__main__":
unittest.main()
| 44.987124 | 120 | 0.535416 |
4f53f32265c9a0a8ce7215865c7d21fb325eb38f | 6,309 | py | Python | sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_percentile_operations.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_percentile_operations.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_percentile_operations.py | casperlehmann/azure-sdk-for-python | d57163e25c82e4f53a0a11e6bd777726ce5f3d88 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PercentileOperations(object):
"""PercentileOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_metrics(
self,
resource_group_name, # type: str
account_name, # type: str
filter, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.PercentileMetricListResult"]
"""Retrieves the metrics determined by the given filter for the given database account. This url
is only for PBS and Replication Latency data.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param filter: An OData filter expression that describes a subset of metrics to return. The
parameters that can be filtered are name.value (name of the metric, can have an or of multiple
names), startTime, endTime, and timeGrain. The supported operator is eq.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PercentileMetricListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.PercentileMetricListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PercentileMetricListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_metrics.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PercentileMetricListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/percentile/metrics'} # type: ignore
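# Usage sketch (illustrative only; names and values below are assumptions, not
# taken from this module): the operation group is normally reached through the
# generated management client rather than instantiated directly.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.cosmosdb import CosmosDBManagementClient
#
#   client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   metrics = client.percentile.list_metrics(
#       resource_group_name="my-rg",                 # hypothetical
#       account_name="my-cosmos-account",            # hypothetical
#       filter=("(name.value eq 'Probabilistic Bounded Staleness') and "
#               "timeGrain eq duration'PT5M' and "
#               "startTime eq '2020-01-01T00:00:00Z' and "
#               "endTime eq '2020-01-01T01:00:00Z'"),
#   )
#   for metric in metrics:    # lazily pages through PercentileMetricListResult values
#       print(metric)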
| 48.906977 | 202 | 0.659851 |
4f569b1e7332db4186fec5229af498a49397333f | 4,446 | py | Python | examples/subcommands.py | scartill/cmd2 | 1b4e1e25f84bcc800a5f369783c3c3448a42361e | [
"MIT"
] | 1 | 2021-07-06T23:59:46.000Z | 2021-07-06T23:59:46.000Z | examples/subcommands.py | scartill/cmd2 | 1b4e1e25f84bcc800a5f369783c3c3448a42361e | [
"MIT"
] | null | null | null | examples/subcommands.py | scartill/cmd2 | 1b4e1e25f84bcc800a5f369783c3c3448a42361e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
"""A simple example demonstrating how to use Argparse to support subcommands.
This example shows an easy way for a single command to have many subcommands, each of which takes different arguments
and provides separate contextual help.
"""
import argparse
import cmd2
sport_item_strs = ['Bat', 'Basket', 'Basketball', 'Football', 'Space Ball']
# create the top-level parser for the base command
base_parser = argparse.ArgumentParser()
base_subparsers = base_parser.add_subparsers(title='subcommands', help='subcommand help')
# create the parser for the "foo" subcommand
parser_foo = base_subparsers.add_parser('foo', help='foo help')
parser_foo.add_argument('-x', type=int, default=1, help='integer')
parser_foo.add_argument('y', type=float, help='float')
parser_foo.add_argument('input_file', type=str, help='Input File')
# create the parser for the "bar" subcommand
parser_bar = base_subparsers.add_parser('bar', help='bar help')
bar_subparsers = parser_bar.add_subparsers(title='layer3', help='help for 3rd layer of commands')
parser_bar.add_argument('z', help='string')
bar_subparsers.add_parser('apple', help='apple help')
bar_subparsers.add_parser('artichoke', help='artichoke help')
bar_subparsers.add_parser('cranberries', help='cranberries help')
# create the parser for the "sport" subcommand
parser_sport = base_subparsers.add_parser('sport', help='sport help')
sport_arg = parser_sport.add_argument('sport', help='Enter name of a sport', choices=sport_item_strs)
# create the top-level parser for the alternate command
# The alternate command doesn't provide its own help flag
base2_parser = argparse.ArgumentParser(add_help=False)
base2_subparsers = base2_parser.add_subparsers(title='subcommands', help='subcommand help')
# create the parser for the "foo" subcommand
parser_foo2 = base2_subparsers.add_parser('foo', help='foo help')
parser_foo2.add_argument('-x', type=int, default=1, help='integer')
parser_foo2.add_argument('y', type=float, help='float')
parser_foo2.add_argument('input_file', type=str, help='Input File')
# create the parser for the "bar" subcommand
parser_bar2 = base2_subparsers.add_parser('bar', help='bar help')
bar2_subparsers = parser_bar2.add_subparsers(title='layer3', help='help for 3rd layer of commands')
parser_bar2.add_argument('z', help='string')
bar2_subparsers.add_parser('apple', help='apple help')
bar2_subparsers.add_parser('artichoke', help='artichoke help')
bar2_subparsers.add_parser('cranberries', help='cranberries help')
# create the parser for the "sport" subcommand
parser_sport2 = base2_subparsers.add_parser('sport', help='sport help')
sport2_arg = parser_sport2.add_argument('sport', help='Enter name of a sport', choices=sport_item_strs)
class SubcommandsExample(cmd2.Cmd):
"""
    Example cmd2 application where we have a base command with a couple of subcommands
and the "sport" subcommand has tab completion enabled.
"""
def __init__(self):
super().__init__()
# subcommand functions for the base command
def base_foo(self, args):
"""foo subcommand of base command"""
self.poutput(args.x * args.y)
def base_bar(self, args):
"""bar subcommand of base command"""
self.poutput('((%s))' % args.z)
def base_sport(self, args):
"""sport subcommand of base command"""
self.poutput('Sport is {}'.format(args.sport))
# Set handler functions for the subcommands
parser_foo.set_defaults(func=base_foo)
parser_bar.set_defaults(func=base_bar)
parser_sport.set_defaults(func=base_sport)
@cmd2.with_argparser(base_parser)
def do_base(self, args):
"""Base command help"""
func = getattr(args, 'func', None)
if func is not None:
# Call whatever subcommand function was selected
func(self, args)
else:
# No subcommand was provided, so call help
self.do_help('base')
@cmd2.with_argparser(base2_parser)
def do_alternate(self, args):
"""Alternate command help"""
func = getattr(args, 'func', None)
if func is not None:
# Call whatever subcommand function was selected
func(self, args)
else:
# No subcommand was provided, so call help
self.do_help('alternate')
if __name__ == '__main__':
import sys
app = SubcommandsExample()
sys.exit(app.cmdloop())
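# A hypothetical interactive session (output shown here is assumed, not captured
# from an actual run):
#
#   (Cmd) base foo -x 2 1.5 input.txt
#   3.0
#   (Cmd) base bar hello
#   ((hello))
#   (Cmd) base sport Football
#   Sport is Football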
| 37.677966 | 117 | 0.718399 |
4f571c2f825f39727588421953d914a920b6cbd1 | 2,167 | bzl | Python | src/tools/launcher/win_rules.bzl | alishaIBM/bazel | 6efd96b6e37c64c87cbc95e4c1f5f0f9544aad98 | [
"Apache-2.0"
] | 4 | 2019-06-25T08:16:52.000Z | 2022-01-12T11:35:49.000Z | src/tools/launcher/win_rules.bzl | alishaIBM/bazel | 6efd96b6e37c64c87cbc95e4c1f5f0f9544aad98 | [
"Apache-2.0"
] | 25 | 2019-05-27T17:56:38.000Z | 2020-08-21T01:45:40.000Z | src/tools/launcher/win_rules.bzl | alishaIBM/bazel | 6efd96b6e37c64c87cbc95e4c1f5f0f9544aad98 | [
"Apache-2.0"
] | 4 | 2019-11-22T23:23:41.000Z | 2022-03-01T01:51:48.000Z | """
Copyright 2017 The Bazel Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This is a quick and dirty set of rules to make Bazel compile itself. It
only wraps the C++ rules (cc_library, cc_binary, cc_test).
"""
load("@rules_cc//cc:defs.bzl", macro_cc_bin = "cc_binary", macro_cc_lib = "cc_library", macro_cc_test = "cc_test")
def win_cc_library(srcs = [], deps = [], hdrs = [], **kwargs):
"""Replace srcs and hdrs with a dummy.cc on non-Windows platforms."""
macro_cc_lib(
srcs = select({
"//conditions:default": ["dummy.cc"],
"//src/conditions:windows": srcs,
}),
hdrs = select({
"//conditions:default": [],
"//src/conditions:windows": hdrs,
}),
deps = select({
"//conditions:default": [],
"//src/conditions:windows": deps,
}),
**kwargs
)
def win_cc_binary(srcs = [], deps = [], **kwargs):
"""Replace srcs with a dummy.cc on non-Windows platforms."""
macro_cc_bin(
srcs = select({
"//conditions:default": ["dummy.cc"],
"//src/conditions:windows": srcs,
}),
deps = select({
"//conditions:default": [],
"//src/conditions:windows": deps,
}),
**kwargs
)
def win_cc_test(srcs = [], deps = [], **kwargs):
"""Replace srcs with a dummy.cc on non-Windows platforms."""
macro_cc_test(
srcs = select({
"//conditions:default": ["dummy.cc"],
"//src/conditions:windows": srcs,
}),
deps = select({
"//conditions:default": [],
"//src/conditions:windows": deps,
}),
**kwargs
)
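# A hypothetical BUILD usage sketch (the load label, target and file names are
# assumptions, not taken from this repository):
#
#   load("//src/tools/launcher:win_rules.bzl", "win_cc_binary", "win_cc_library")
#
#   win_cc_library(
#       name = "launcher_base",
#       srcs = ["launcher.cc"],      # compiled only on Windows, dummy.cc elsewhere
#       hdrs = ["launcher.h"],       # dropped on non-Windows platforms
#   )
#
#   win_cc_binary(
#       name = "launcher",
#       srcs = ["launcher_main.cc"],
#       deps = [":launcher_base"],   # deps likewise apply only on Windows
#   )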
| 32.343284 | 114 | 0.588832 |
4f58a0548cc304be5cba3e065720c89aed0da1cf | 7,769 | py | Python | L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | [
"Apache-2.0"
] | null | null | null | L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | [
"Apache-2.0"
] | null | null | null | L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# RAW content
L1TriggerRAW = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*')
)
# RAWDEBUG content
L1TriggerRAWDEBUG = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_l1GtRecord_*_*',
'keep *_l1GtTriggerMenuLite_*_*',
'keep *_conditionsInEdm_*_*',
'keep *_l1extraParticles_*_*')
)
# RECO content
L1TriggerRECO = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_l1GtRecord_*_*',
'keep *_l1GtTriggerMenuLite_*_*',
'keep *_conditionsInEdm_*_*',
'keep *_l1extraParticles_*_*',
'keep *_l1L1GtObjectMap_*_*',
'keep L1MuGMTReadoutCollection_gtDigis_*_*',
'keep L1GctEmCand*_gctDigis_*_*',
'keep L1GctJetCand*_gctDigis_*_*',
'keep L1GctEtHad*_gctDigis_*_*',
'keep L1GctEtMiss*_gctDigis_*_*',
'keep L1GctEtTotal*_gctDigis_*_*',
'keep L1GctHtMiss*_gctDigis_*_*',
'keep L1GctJetCounts*_gctDigis_*_*',
'keep L1GctHFRingEtSums*_gctDigis_*_*',
'keep L1GctHFBitCounts*_gctDigis_*_*',
'keep LumiDetails_lumiProducer_*_*',
'keep LumiSummary_lumiProducer_*_*')
)
# AOD content
L1TriggerAOD = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_l1GtRecord_*_*',
'keep *_l1GtTriggerMenuLite_*_*',
'keep *_conditionsInEdm_*_*',
'keep *_l1extraParticles_*_*',
'keep *_l1L1GtObjectMap_*_*',
'keep LumiSummary_lumiProducer_*_*')
)
L1TriggerFEVTDEBUG = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep *_simCscTriggerPrimitiveDigis_*_*',
'keep *_simDtTriggerPrimitiveDigis_*_*',
'keep *_simRpcTriggerDigis_*_*',
'keep *_simRctDigis_*_*',
'keep *_simCsctfDigis_*_*',
'keep *_simCsctfTrackDigis_*_*',
'keep *_simDttfDigis_*_*',
'keep *_simGctDigis_*_*',
'keep *_simCaloStage1Digis_*_*',
'keep *_simCaloStage1FinalDigis_*_*',
'keep *_simCaloStage2Layer1Digis_*_*',
'keep *_simCaloStage2Digis_*_*',
'keep *_simGmtDigis_*_*',
"keep *_simBmtfDigis_*_*",
"keep *_simKBmtfDigis_*_*",
"keep *_simOmtfDigis_*_*",
"keep *_simEmtfDigis_*_*",
"keep *_simGmtStage2Digis_*_*",
'keep *_simGtDigis_*_*',
"keep *_simGtStage2Digis_*_*",
'keep *_cscTriggerPrimitiveDigis_*_*',
'keep *_dtTriggerPrimitiveDigis_*_*',
'keep *_rpcTriggerDigis_*_*',
'keep *_rctDigis_*_*',
'keep *_csctfDigis_*_*',
'keep *_csctfTrackDigis_*_*',
'keep *_dttfDigis_*_*',
'keep *_gctDigis_*_*',
'keep *_gmtDigis_*_*',
'keep *_gtDigis_*_*',
'keep *_gtEvmDigis_*_*',
'keep *_l1GtRecord_*_*',
'keep *_l1GtTriggerMenuLite_*_*',
'keep *_conditionsInEdm_*_*',
'keep *_l1extraParticles_*_*',
'keep *_l1L1GtObjectMap_*_*',
'keep LumiDetails_lumiProducer_*_*',
'keep LumiSummary_lumiProducer_*_*')
)
def _appendStage2Digis(obj):
l1Stage2Digis = [
'keep *_gtStage2Digis_*_*',
'keep *_gmtStage2Digis_*_*',
'keep *_caloStage2Digis_*_*',
]
obj.outputCommands += l1Stage2Digis
# adding them to all places where we had l1extraParticles
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
stage2L1Trigger.toModify(L1TriggerRAWDEBUG, func=_appendStage2Digis)
stage2L1Trigger.toModify(L1TriggerRECO, func=_appendStage2Digis)
stage2L1Trigger.toModify(L1TriggerAOD, func=_appendStage2Digis)
stage2L1Trigger.toModify(L1TriggerFEVTDEBUG, func=_appendStage2Digis)
# adding HGCal L1 trigger digis
def _appendHGCalDigis(obj):
l1HGCalDigis = [
'keep l1tHGCalTriggerCellBXVector_hgcalVFEProducer_*_*',
'keep l1tHGCalTriggerCellBXVector_hgcalConcentratorProducer_*_*',
'keep l1tHGCalTowerBXVector_hgcalTowerProducer_*_*',
'keep l1tHGCalClusterBXVector_hgcalBackEndLayer1Producer_*_*',
'keep l1tHGCalMulticlusterBXVector_hgcalBackEndLayer2Producer_*_*'
]
obj.outputCommands += l1HGCalDigis
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify(L1TriggerFEVTDEBUG, func=_appendHGCalDigis)
# adding GEM trigger primitives
def _appendGEMDigis(obj):
l1GEMDigis = [
'keep *_simMuonGEMPadDigis_*_*',
'keep *_simMuonGEMPadDigiClusters_*_*',
]
obj.outputCommands += l1GEMDigis
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
run3_GEM.toModify(L1TriggerFEVTDEBUG, func=_appendGEMDigis)
# adding ME0 trigger primitives
def _appendME0Digis(obj):
l1ME0Digis = [
'keep *_simMuonME0PadDigis__*',
'keep *_me0TriggerDigis__*',
'keep *_simMuonME0PseudoReDigisCoarse__*',
'keep *_me0RecHitsCoarse__*',
'keep *_me0TriggerPseudoDigis__*',
'keep *_me0TriggerConvertedPseudoDigis__*',
]
obj.outputCommands += l1ME0Digis
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
phase2_muon.toModify(L1TriggerFEVTDEBUG, func=_appendME0Digis)
# adding phase2 trigger
def _appendPhase2Digis(obj):
l1Phase2Digis = [
"keep *_simKBmtfDigis_*_*",
'keep *_hgcalVFEProducerhgcalConcentratorProducer_*_*',
'keep *_hgcalBackEndLayer1Producer_*_*',
'keep *_hgcalBackEndLayer2Producer_*_*',
'keep *_hgcalTowerMapProducer_*_*',
'keep *_hgcalTowerProducer_*_*',
'keep *_L1EGammaClusterEmuProducer_*_*',
'keep *_l1EGammaEEProducer_*_*',
'keep *_L1TkPrimaryVertex_*_*',
'keep *_L1TkElectronsCrystal_*_*',
'keep *_L1TkElectronsLooseCrystal_*_*',
'keep *_L1TkElectronsEllipticMatchCrystal_*_*',
'keep *_L1TkIsoElectronsCrystal_*_*',
'keep *_L1TkPhotonsCrystal_*_*',
'keep *_L1TkElectronsHGC_*_*',
'keep *_L1TkElectronsEllipticMatchHGC_*_*',
'keep *_L1TkIsoElectronsHGC_*_*',
'keep *_L1TkPhotonsHGC_*_*',
'keep *_L1TkMuons_*_*',
'keep *_pfClustersFromL1EGClusters_*_*',
'keep *_pfClustersFromCombinedCaloHCal_*_*',
'keep *_pfClustersFromCombinedCaloHF_*_*',
'keep *_pfClustersFromHGC3DClusters_*_*',
'keep *_pfTracksFromL1TracksBarrel_*_*',
'keep *_l1pfProducerBarrel_*_*',
'keep *_pfTracksFromL1TracksHGCal_*_*',
'keep *_l1pfProducerHGCal_*_*',
'keep *_l1pfProducerHGCalNoTK_*_*',
'keep *_l1pfProducerHF_*_*',
'keep *_l1pfCandidates_*_*',
'keep *_ak4PFL1Calo_*_*',
'keep *_ak4PFL1PF_*_*',
'keep *_ak4PFL1Puppi_*_*',
'keep *_ak4PFL1CaloCorrected_*_*',
'keep *_ak4PFL1PFCorrected_*_*',
'keep *_ak4PFL1PuppiCorrected_*_*',
'keep *_Phase1L1TJetProducer_*_*',
'keep *_Phase1L1TJetCalibrator_*_*',
'keep *_l1PFMetCalo_*_*',
'keep *_l1PFMetPF_*_*',
'keep *_l1PFMetPuppi_*_*',
'keep *_TTStubsFromPhase2TrackerDigis_*_*',
'keep *_TTClustersFromPhase2TrackerDigis_*_*',
'keep *_TTTracksFromExtendedTrackletEmulation_*_*',
'keep *_TTTracksFromTrackletEmulation_*_*',
]
obj.outputCommands += l1Phase2Digis
from Configuration.Eras.Modifier_phase2_trigger_cff import phase2_trigger
phase2_muon.toModify(L1TriggerFEVTDEBUG, func=_appendPhase2Digis)
| 36.819905 | 75 | 0.673703 |
4f5914318908896792c3cab08c7e174f1a1e5c08 | 8,492 | py | Python | ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py | MacgradyHuang/ApacheAmbari | 961ce825b9e2681bf21819147b0ee72438e0b04a | [
"Apache-2.0"
] | 25 | 2019-12-04T03:09:55.000Z | 2022-03-08T10:52:06.000Z | ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py | MacgradyHuang/ApacheAmbari | 961ce825b9e2681bf21819147b0ee72438e0b04a | [
"Apache-2.0"
] | 29 | 2019-12-04T03:00:39.000Z | 2022-03-02T06:25:44.000Z | ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py | MacgradyHuang/ApacheAmbari | 961ce825b9e2681bf21819147b0ee72438e0b04a | [
"Apache-2.0"
] | 33 | 2019-12-04T02:51:30.000Z | 2022-03-24T02:47:38.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import httplib
import imp
import time
import urllib
from alerts.metric_alert import MetricAlert
import ambari_simplejson as json
import logging
import re
import uuid
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from ambari_commons import inet_utils
logger = logging.getLogger(__name__)
AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
class AmsAlert(MetricAlert):
"""
  Allow alerts to fire based on AMS metrics.
  The alert is triggered if the aggregate of the specified metric has
  grown beyond the specified threshold within the given time interval.
"""
def __init__(self, alert_meta, alert_source_meta, config):
super(AmsAlert, self).__init__(alert_meta, alert_source_meta, config)
self.metric_info = None
if 'ams' in alert_source_meta:
self.metric_info = AmsMetric(alert_source_meta['ams'])
def _collect(self):
"""
Low level function to collect alert data. The result is a tuple as:
res[0] = the result code
res[1] = the list of arguments supplied to the reporting text for the result code
"""
if self.metric_info is None:
raise Exception("Could not determine result. Specific metric collector is not defined.")
if self.uri_property_keys is None:
raise Exception("Could not determine result. URL(s) were not defined.")
# use the URI lookup keys to get a final URI value to query
alert_uri = self._get_uri_from_structure(self.uri_property_keys)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] Calculated metric URI to be {1} (ssl={2})".format(
self.get_name(), alert_uri.uri, str(alert_uri.is_ssl_enabled)))
host = inet_utils.get_host_from_url(alert_uri.uri)
if host is None:
host = self.host_name
try:
port = int(get_port_from_url(alert_uri.uri))
except:
port = 6188
collect_result = None
value_list = []
if isinstance(self.metric_info, AmsMetric):
raw_data_points, http_code = self._load_metric(alert_uri.is_ssl_enabled, host, port, self.metric_info)
if not raw_data_points and http_code not in [200, 307]:
collect_result = self.RESULT_UNKNOWN
value_list.append('HTTP {0} response (metrics unavailable)'.format(str(http_code)))
elif not raw_data_points and http_code in [200, 307]:
raise Exception("[Alert][{0}] Unable to extract JSON from HTTP response".format(self.get_name()))
else:
data_points = self.metric_info.calculate_value(raw_data_points)
compute_result = self.metric_info.calculate_compute(data_points)
value_list.append(compute_result)
collect_result = self._get_result(value_list[0] if compute_result is None else compute_result)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] Computed result = {1}".format(self.get_name(), str(value_list)))
return (collect_result, value_list)
def _load_metric(self, ssl, host, port, ams_metric):
""" creates a AmsMetric object that holds info about ams-based metrics """
if "0.0.0.0" in str(host):
host = self.host_name
current_time = int(time.time()) * 1000
interval = ams_metric.interval
get_metrics_parameters = {
"metricNames": ",".join(ams_metric.metric_list),
"appId": ams_metric.app_id,
"hostname": self.host_name,
"startTime": current_time - 60 * 1000 * interval,
"endTime": current_time,
"precision": "seconds",
"grouped": "true",
}
encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
url = AMS_METRICS_GET_URL % encoded_get_metrics_parameters
try:
# TODO Implement HTTPS support
conn = httplib.HTTPConnection(host, port,
timeout=self.connection_timeout)
conn.request("GET", url)
response = conn.getresponse()
data = response.read()
except Exception, exception:
if logger.isEnabledFor(logging.DEBUG):
logger.exception("[Alert][{0}] Unable to retrieve metrics from AMS: {1}".format(self.get_name(), str(exception)))
status = response.status if 'response' in vars() else None
return (None, status)
finally:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("""
AMS request parameters - {0}
AMS response - {1}
""".format(encoded_get_metrics_parameters, data))
      # explicitly close the connection as we've seen python hold onto these
if conn is not None:
try:
conn.close()
except:
logger.debug("[Alert][{0}] Unable to close URL connection to {1}".format(self.get_name(), url))
json_is_valid = True
try:
data_json = json.loads(data)
except Exception, exception:
json_is_valid = False
if logger.isEnabledFor(logging.DEBUG):
logger.exception("[Alert][{0}] Convert response to json failed or json doesn't contain needed data: {1}".
format(self.get_name(), str(exception)))
metrics = []
if json_is_valid:
metric_dict = {}
for metrics_data in data_json["metrics"]:
metric_dict[metrics_data["metricname"]] = metrics_data["metrics"]
for metric_name in self.metric_info.metric_list:
if metric_name in metric_dict:
          # TODO: sort data points by timestamp
          # (OrderedDict was only introduced in Python 2.7)
sorted_data_points = metric_dict[metric_name]
metrics.append(sorted_data_points)
pass
return (metrics, response.status)
class AmsMetric:
DYNAMIC_CODE_VALUE_TEMPLATE = """
# ensure that division yields a float, use // for integer division
from __future__ import division
def f(args):
l = []
for k in args[0]:
try:
data_point = {0}
l.append(data_point)
except:
continue
return l
"""
DYNAMIC_CODE_COMPUTE_TEMPLATE = """
# ensure that division yields a float, use // for integer division
from __future__ import division
from ambari_commons.aggregate_functions import sample_standard_deviation_percentage
from ambari_commons.aggregate_functions import sample_standard_deviation
from ambari_commons.aggregate_functions import mean
from ambari_commons.aggregate_functions import count
def f(args):
func = {0}
return func(args)
"""
def __init__(self, metric_info):
self.custom_value_module = None
self.custom_compute_module = None
self.metric_list = metric_info['metric_list']
self.interval = metric_info['interval'] # in minutes
self.app_id = metric_info['app_id']
self.minimum_value = metric_info['minimum_value']
if 'value' in metric_info:
realcode = re.sub('(\{(\d+)\})', 'args[\g<2>][k]', metric_info['value'])
self.custom_value_module = imp.new_module(str(uuid.uuid4()))
code = self.DYNAMIC_CODE_VALUE_TEMPLATE.format(realcode)
exec code in self.custom_value_module.__dict__
if 'compute' in metric_info:
realcode = metric_info['compute']
self.custom_compute_module = imp.new_module(str(uuid.uuid4()))
code = self.DYNAMIC_CODE_COMPUTE_TEMPLATE.format(realcode)
exec code in self.custom_compute_module.__dict__
def calculate_value(self, args):
data_points = None
if self.custom_value_module is not None:
data_points = self.custom_value_module.f(args)
if self.minimum_value:
data_points = [data_point for data_point in data_points if data_point > self.minimum_value]
return data_points
def calculate_compute(self, args):
compute_result = None
if self.custom_compute_module is not None:
compute_result = self.custom_compute_module.f(args)
return compute_result
| 35.236515 | 121 | 0.707018 |
4f55dac6d66c6c399cfe64811bcbcf6367495ab3 | 4,625 | py | Python | apps/admin/views/dashboard.py | tuanquanghpvn/flask-intro | 4dbc6bfbbdee13bc601b7ba8f10ede3635a2cfaf | [
"MIT"
] | null | null | null | apps/admin/views/dashboard.py | tuanquanghpvn/flask-intro | 4dbc6bfbbdee13bc601b7ba8f10ede3635a2cfaf | [
"MIT"
] | null | null | null | apps/admin/views/dashboard.py | tuanquanghpvn/flask-intro | 4dbc6bfbbdee13bc601b7ba8f10ede3635a2cfaf | [
"MIT"
] | null | null | null | from flask import render_template, redirect, url_for, session, request
from flask.ext.classy import FlaskView
from flask.ext.login import logout_user
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, EqualTo
from apps import db
from apps.admin import admin_blueprint
from apps.core.views import AdminRequireMixin
from apps.users.models import User
class LoginForm(Form):
username = StringField('username', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.user = None
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.username.data or not self.password.data:
return False
user = User.query.filter_by(username=self.username.data).first()
if user is None:
self.username.errors.append('Unknown username')
return False
if not user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
self.user = user
return True
class LoginView(FlaskView):
route_base = '/login'
def get_context_data(self):
context = {
'info': {
'title': 'Administrator'
},
'form': LoginForm()
}
return context
def index(self):
return render_template('/admin/login.html', **self.get_context_data())
def post(self):
form = LoginForm()
if form.validate_on_submit():
session['user_id'] = form.user.id
return redirect(url_for('admin.DashboardView:index'))
else:
context = self.get_context_data()
context['form'] = form
return render_template('/admin/login.html', **context)
LoginView.register(admin_blueprint)
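# Note on routing (an assumption about Flask-Classy's default conventions, not spelled out in
# this file): with route_base '/login', index() is expected to answer GET requests and post()
# POST requests under the admin blueprint's URL prefix.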
class LogoutView(AdminRequireMixin):
route_base = '/logout'
def index(self):
logout_user()
session.clear()
return redirect(url_for('admin.LoginView:index'))
LogoutView.register(admin_blueprint)
class DashboardView(AdminRequireMixin):
"""
Dashboard View
"""
route_base = '/'
def get_context_data(self):
context = {
'info': {
'title': 'Dashboard',
'sidebar': ['dashboard']
},
}
return context
def index(self):
return render_template('/admin/dashboard.html', **self.get_context_data())
DashboardView.register(admin_blueprint)
class ProfileForm(Form):
"""
Profile Form: Change password form
"""
old_password = PasswordField('old_password', validators=[DataRequired()])
password1 = PasswordField('password1', validators=[DataRequired()])
    password2 = PasswordField('password2', validators=[
        DataRequired(),
        EqualTo('password1', message='Repeated password does not match the password!')])
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.user = None
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter_by(id=session['user_id']).first()
if not user.check_password(self.old_password.data):
self.old_password.errors.append('Invalid old password!')
return False
self.user = user
return True
class ProfileView(AdminRequireMixin):
"""
Profile View: Change password
"""
route_base = '/profile'
def get_context_data(self):
context = {
'info': {
'title': 'Change Profile',
'sidebar': ['dashboard']
},
'form': ProfileForm(),
}
return context
def index(self):
return render_template('/admin/profile.html', **self.get_context_data())
def post(self):
form = ProfileForm()
if form.validate_on_submit():
user = form.user
user.set_password(form.password1.data)
db.session.commit()
# Logout and redirect to login page
logout_user()
session.clear()
return redirect(url_for('admin.LoginView:index'))
else:
context = self.get_context_data()
context['form'] = form
return render_template('/admin/profile.html', **context)
ProfileView.register(admin_blueprint) | 28.549383 | 120 | 0.602811 |
4f585a6ea029a5754afeeaf0f0231520a0090e57 | 4,652 | py | Python | pandas/tests/series/methods/test_update.py | vicentegnz/pandas | 1c1cce74ddbd53c203a3fa3abc11455a383aa93c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-05-07T04:58:36.000Z | 2021-05-07T04:58:59.000Z | pandas/tests/series/methods/test_update.py | muyuguo/pandas | 735f40cb2b820e949e7a0540fc86dba2453a2dbe | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-05-11T00:05:40.000Z | 2021-05-11T00:05:40.000Z | pandas/tests/series/methods/test_update.py | muyuguo/pandas | 735f40cb2b820e949e7a0540fc86dba2453a2dbe | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-06-16T07:19:12.000Z | 2021-12-16T10:24:44.000Z | import numpy as np
import pytest
from pandas import (
CategoricalDtype,
DataFrame,
NaT,
Series,
Timestamp,
)
import pandas._testing as tm
class TestUpdate:
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", Series([10, 61, 12])),
([61, 63], float, Series([10.0, 61.0, 12.0])),
([61, 63], object, Series([10, 61, 12], dtype=object)),
# other is float, but can be cast to int
([61.0, 63.0], "int32", Series([10, 61, 12], dtype="int32")),
([61.0, 63.0], "int64", Series([10, 61, 12])),
([61.0, 63.0], float, Series([10.0, 61.0, 12.0])),
([61.0, 63.0], object, Series([10, 61.0, 12], dtype=object)),
            # other is float, cannot be cast to int
([61.1, 63.1], "int32", Series([10.0, 61.1, 12.0])),
([61.1, 63.1], "int64", Series([10.0, 61.1, 12.0])),
([61.1, 63.1], float, Series([10.0, 61.1, 12.0])),
([61.1, 63.1], object, Series([10, 61.1, 12], dtype=object)),
# other is object, cannot be cast
([(61,), (63,)], "int32", Series([10, (61,), 12])),
([(61,), (63,)], "int64", Series([10, (61,), 12])),
([(61,), (63,)], float, Series([10.0, (61,), 12.0])),
([(61,), (63,)], object, Series([10, (61,), 12])),
],
)
def test_update_dtypes(self, other, dtype, expected):
ser = Series([10, 11, 12], dtype=dtype)
other = Series(other, index=[1, 3])
ser.update(other)
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
"series, other, expected",
[
# update by key
(
Series({"a": 1, "b": 2, "c": 3, "d": 4}),
{"b": 5, "c": np.nan},
Series({"a": 1, "b": 5, "c": 3, "d": 4}),
),
# update by position
(Series([1, 2, 3, 4]), [np.nan, 5, 1], Series([1, 5, 1, 4])),
],
)
def test_update_from_non_series(self, series, other, expected):
# GH 33215
series.update(other)
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize(
"result, target, expected",
[
(
Series(["a", None], dtype="string"),
Series([None, "b"], dtype="string"),
Series(["a", "b"], dtype="string"),
),
(
Series([1, None], dtype="Int64"),
Series([None, 2], dtype="Int64"),
Series([1, 2], dtype="Int64"),
),
(
Series([True, None], dtype="boolean"),
Series([None, False], dtype="boolean"),
Series([True, False], dtype="boolean"),
),
(
Series(["a", None], dtype=CategoricalDtype(categories=["a", "b"])),
Series([None, "b"], dtype=CategoricalDtype(categories=["a", "b"])),
Series(["a", "b"], dtype=CategoricalDtype(categories=["a", "b"])),
),
(
Series([Timestamp(year=2020, month=1, day=1, tz="Europe/London"), NaT]),
Series([NaT, Timestamp(year=2020, month=1, day=1, tz="Europe/London")]),
Series([Timestamp(year=2020, month=1, day=1, tz="Europe/London")] * 2),
),
],
)
def test_update_extension_array_series(self, result, target, expected):
result.update(target)
tm.assert_series_equal(result, expected)
def test_update_with_categorical_type(self):
# GH 25744
dtype = CategoricalDtype(["a", "b", "c", "d"])
s1 = Series(["a", "b", "c"], index=[1, 2, 3], dtype=dtype)
s2 = Series(["b", "a"], index=[1, 2], dtype=dtype)
s1.update(s2)
result = s1
expected = Series(["b", "a", "c"], index=[1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
| 36.629921 | 88 | 0.467326 |
4f586dc00bd0e4ff5256695e0bba06949298155e | 12,393 | py | Python | tests/core/integrations/labelstudio/test_labelstudio.py | hoverinc/lightning-flash | d68dfc104ad114a6e4cfd4a1e1f823a5c18a580d | [
"Apache-2.0"
] | null | null | null | tests/core/integrations/labelstudio/test_labelstudio.py | hoverinc/lightning-flash | d68dfc104ad114a6e4cfd4a1e1f823a5c18a580d | [
"Apache-2.0"
] | null | null | null | tests/core/integrations/labelstudio/test_labelstudio.py | hoverinc/lightning-flash | d68dfc104ad114a6e4cfd4a1e1f823a5c18a580d | [
"Apache-2.0"
] | null | null | null | import pytest
from flash.core.data.data_source import DefaultDataSources
from flash.core.data.utils import download_data
from flash.core.integrations.labelstudio.data_source import (
LabelStudioDataSource,
LabelStudioImageClassificationDataSource,
LabelStudioTextClassificationDataSource,
)
from flash.core.integrations.labelstudio.visualizer import launch_app
from flash.image.classification.data import ImageClassificationData
from flash.text.classification.data import TextClassificationData
from flash.video.classification.data import VideoClassificationData, VideoClassificationPreprocess
from tests.helpers.utils import _IMAGE_TESTING, _TEXT_TESTING, _VIDEO_TESTING
def test_utility_load():
"""Test for label studio json loader."""
data = [
{
"id": 191,
"annotations": [
{
"id": 130,
"completed_by": {"id": 1, "email": "test@heartex.com", "first_name": "", "last_name": ""},
"result": [
{
"id": "dv1Tn-zdez",
"type": "rectanglelabels",
"value": {
"x": 46.5625,
"y": 21.666666666666668,
"width": 8.75,
"height": 12.083333333333334,
"rotation": 0,
"rectanglelabels": ["Car"],
},
"to_name": "image",
"from_name": "label",
"image_rotation": 0,
"original_width": 320,
"original_height": 240,
},
{
"id": "KRa8jEvpK0",
"type": "rectanglelabels",
"value": {
"x": 66.875,
"y": 22.5,
"width": 14.0625,
"height": 17.5,
"rotation": 0,
"rectanglelabels": ["Car"],
},
"to_name": "image",
"from_name": "label",
"image_rotation": 0,
"original_width": 320,
"original_height": 240,
},
{
"id": "kAKaSxNnvH",
"type": "rectanglelabels",
"value": {
"x": 93.4375,
"y": 22.916666666666668,
"width": 6.5625,
"height": 18.75,
"rotation": 0,
"rectanglelabels": ["Car"],
},
"to_name": "image",
"from_name": "label",
"image_rotation": 0,
"original_width": 320,
"original_height": 240,
},
{
"id": "_VXKV2nz14",
"type": "rectanglelabels",
"value": {
"x": 0,
"y": 39.583333333333336,
"width": 100,
"height": 60.416666666666664,
"rotation": 0,
"rectanglelabels": ["Road"],
},
"to_name": "image",
"from_name": "label",
"image_rotation": 0,
"original_width": 320,
"original_height": 240,
},
{
"id": "vCuvi_jLHn",
"type": "rectanglelabels",
"value": {
"x": 0,
"y": 17.5,
"width": 48.125,
"height": 41.66666666666666,
"rotation": 0,
"rectanglelabels": ["Obstacle"],
},
"to_name": "image",
"from_name": "label",
"image_rotation": 0,
"original_width": 320,
"original_height": 240,
},
],
"was_cancelled": False,
"ground_truth": False,
"prediction": {},
"result_count": 0,
"task": 191,
}
],
"file_upload": "Highway20030201_1002591.jpg",
"data": {"image": "/data/upload/Highway20030201_1002591.jpg"},
"meta": {},
"created_at": "2021-05-12T18:43:41.241095Z",
"updated_at": "2021-05-12T19:42:28.156609Z",
"project": 7,
}
]
ds = LabelStudioDataSource._load_json_data(data=data, data_folder=".", multi_label=False)
assert ds[3] == {"image"}
assert ds[2] == {"Road", "Car", "Obstacle"}
assert len(ds[1]) == 0
assert len(ds[0]) == 5
ds_multi = LabelStudioDataSource._load_json_data(data=data, data_folder=".", multi_label=True)
assert ds_multi[3] == {"image"}
assert ds_multi[2] == {"Road", "Car", "Obstacle"}
assert len(ds_multi[1]) == 0
assert len(ds_multi[0]) == 5
def test_datasource_labelstudio():
"""Test creation of LabelStudioDataSource."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/data.zip")
ds = LabelStudioDataSource()
data = {
"data_folder": "data/upload/",
"export_json": "data/project.json",
"split": 0.2,
"multi_label": False,
}
train, val, test, predict = ds.to_datasets(train_data=data)
train_sample = train[0]
val_sample = val[0]
assert train_sample
assert val_sample
assert test
assert not predict
ds_no_split = LabelStudioDataSource()
data = {
"data_folder": "data/upload/",
"export_json": "data/project.json",
"multi_label": True,
}
train, val, test, predict = ds_no_split.to_datasets(train_data=data)
sample = train[0]
assert sample
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_datasource_labelstudio_image():
"""Test creation of LabelStudioImageClassificationDataSource from images."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/data_nofile.zip")
data = {
"data_folder": "data/upload/",
"export_json": "data/project_nofile.json",
"split": 0.2,
"multi_label": True,
}
ds = LabelStudioImageClassificationDataSource()
train, val, test, predict = ds.to_datasets(train_data=data, val_data=data, test_data=data, predict_data=data)
train_sample = train[0]
val_sample = val[0]
test_sample = test[0]
predict_sample = predict[0]
assert train_sample
assert val_sample
assert test_sample
assert predict_sample
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_datamodule_labelstudio_image():
"""Test creation of LabelStudioImageClassificationDataSource and Datamodule from images."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/data.zip")
datamodule = ImageClassificationData.from_labelstudio(
train_export_json="data/project.json",
train_data_folder="data/upload/",
test_export_json="data/project.json",
test_data_folder="data/upload/",
val_split=0.5,
)
assert datamodule
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_label_studio_predictions_visualization():
"""Test creation of LabelStudioImageClassificationDataSource and Datamodule from images."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/data.zip")
datamodule = ImageClassificationData.from_labelstudio(
train_export_json="data/project.json",
train_data_folder="data/upload/",
test_export_json="data/project.json",
test_data_folder="data/upload/",
val_split=0.5,
)
assert datamodule
app = launch_app(datamodule)
predictions = [0, 1, 1, 0]
vis_predictions = app.show_predictions(predictions)
assert len(vis_predictions) == 4
assert vis_predictions[0]["result"][0]["id"] != vis_predictions[3]["result"][0]["id"]
assert vis_predictions[1]["result"][0]["id"] != vis_predictions[2]["result"][0]["id"]
tasks_predictions = app.show_tasks(predictions)
assert len(tasks_predictions) == 4
tasks_predictions_json = app.show_tasks(predictions, export_json="data/project.json")
assert tasks_predictions_json
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_datasource_labelstudio_text():
"""Test creation of LabelStudioTextClassificationDataSource and Datamodule from text."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/text_data.zip", "./data/")
backbone = "prajjwal1/bert-medium"
data = {
"data_folder": "data/upload/",
"export_json": "data/project.json",
"split": 0.2,
"multi_label": False,
}
ds = LabelStudioTextClassificationDataSource(backbone=backbone)
train, val, test, predict = ds.to_datasets(train_data=data, test_data=data)
train_sample = train[0]
test_sample = test[0]
val_sample = val[0]
assert train_sample
assert test_sample
assert val_sample
assert not predict
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_datamodule_labelstudio_text():
"""Test creation of LabelStudioTextClassificationDataSource and Datamodule from text."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/text_data.zip", "./data/")
backbone = "prajjwal1/bert-medium"
datamodule = TextClassificationData.from_labelstudio(
train_export_json="data/project.json",
val_export_json="data/project.json",
test_export_json="data/project.json",
predict_export_json="data/project.json",
data_folder="data/upload/",
val_split=0.8,
backbone=backbone,
)
assert datamodule
@pytest.mark.skipif(not _VIDEO_TESTING, reason="PyTorchVideo isn't installed.")
def test_datasource_labelstudio_video():
"""Test creation of LabelStudioVideoClassificationDataSource from video."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/video_data.zip")
data = {"data_folder": "data/upload/", "export_json": "data/project.json", "multi_label": True}
preprocess = VideoClassificationPreprocess()
ds = preprocess.data_source_of_name(DefaultDataSources.LABELSTUDIO)
train, val, test, predict = ds.to_datasets(train_data=data, test_data=data)
sample_iter = iter(train)
sample = next(sample_iter)
assert train
assert not val
assert test
assert not predict
assert sample
@pytest.mark.skipif(not _VIDEO_TESTING, reason="PyTorchVideo isn't installed.")
def test_datamodule_labelstudio_video():
"""Test creation of Datamodule from video."""
download_data("https://label-studio-testdata.s3.us-east-2.amazonaws.com/lightning-flash/video_data.zip")
datamodule = VideoClassificationData.from_labelstudio(
export_json="data/project.json",
data_folder="data/upload/",
val_split=0.2,
clip_sampler="uniform",
clip_duration=1,
decode_audio=False,
)
assert datamodule
| 41.31 | 118 | 0.542403 |
4f5990b903123c0da0986852c442e25357403271 | 2,997 | py | Python | huaweisms/api/dialup.py | growield/huawei-modem-py-api-client-through-proxy | 8cae130f43871938392292e527b79bb15f314fd3 | [
"MIT"
] | 1 | 2021-03-05T12:53:16.000Z | 2021-03-05T12:53:16.000Z | huaweisms/api/dialup.py | growield/huawei-modem-py-api-client-through-proxy | 8cae130f43871938392292e527b79bb15f314fd3 | [
"MIT"
] | null | null | null | huaweisms/api/dialup.py | growield/huawei-modem-py-api-client-through-proxy | 8cae130f43871938392292e527b79bb15f314fd3 | [
"MIT"
] | null | null | null | import huaweisms.api.common
XML_TEMPLATE = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<request>'
'<dataswitch>{enable}</dataswitch>'
'</request>'
)
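# For illustration, switch_mobile_on() below renders the template with enable=1 and posts:
#   <?xml version="1.0" encoding="UTF-8"?><request><dataswitch>1</dataswitch></request>
# switch_mobile_off() posts the same document with <dataswitch>0</dataswitch>.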
def connect_mobile(ctx: huaweisms.api.common.ApiCtx, proxy=None):
return switch_mobile_on(ctx, proxy=proxy)
def disconnect_mobile(ctx: huaweisms.api.common.ApiCtx, proxy=None):
return switch_mobile_off(ctx, proxy=proxy)
def get_mobile_status(ctx: huaweisms.api.common.ApiCtx, proxy=None):
url = "{}/dialup/mobile-dataswitch".format(ctx.api_base_url)
result = huaweisms.api.common.get_from_url(url, ctx, proxy=proxy)
if result and result.get('type') == 'response':
response = result['response']
if response and response.get('dataswitch') == '1':
return 'CONNECTED'
if response and response.get('dataswitch') == '0':
return 'DISCONNECTED'
return 'UNKNOWN'
def switch_mobile_off(ctx: huaweisms.api.common.ApiCtx, proxy=None):
data = XML_TEMPLATE.format(enable=0)
headers = {
'__RequestVerificationToken': ctx.token,
}
url = "{}/dialup/mobile-dataswitch".format(ctx.api_base_url)
return huaweisms.api.common.post_to_url(url, data, ctx, additional_headers=headers, proxy=proxy)
def switch_mobile_on(ctx: huaweisms.api.common.ApiCtx, proxy=None):
data = XML_TEMPLATE.format(enable=1)
headers = {
'__RequestVerificationToken': ctx.token,
}
url = "{}/dialup/mobile-dataswitch".format(ctx.api_base_url)
return huaweisms.api.common.post_to_url(url, data, ctx, additional_headers=headers, proxy=proxy)
def switch_net_mode(ctx: huaweisms.api.common.ApiCtx, net_mode='lte_umts', proxy=None):
xml_template = (
'<?xml version="1.0" encoding="UTF-8"?>'
'<request>'
'<NetworkMode>{mode}</NetworkMode>'
'<NetworkBand>3FFFFFFF</NetworkBand>'
'<LTEBand>7FFFFFFFFFFFFFFF</LTEBand>'
'</request>'
)
if net_mode == 'lte':
data = xml_template.format(mode='03')
elif net_mode == 'umts':
data = xml_template.format(mode='02')
elif net_mode == 'lte_umts':
data = xml_template.format(mode='0302')
else:
data = xml_template.format(mode='0302')
headers = {
'__RequestVerificationToken': ctx.token,
}
url = "{}/net/net-mode".format(ctx.api_base_url)
return huaweisms.api.common.post_to_url(url, data, ctx, additional_headers=headers, proxy=proxy)
def get_net_mode(ctx: huaweisms.api.common.ApiCtx, proxy=None):
url = "{}/net/net-mode".format(ctx.api_base_url)
result = huaweisms.api.common.get_from_url(url, ctx, proxy=proxy)
if result and result.get('type') == 'response':
response = result['response']
if response and response.get('NetworkMode') == '0302':
return 'lte_umts'
if response and response.get('NetworkMode') == '03':
return 'lte'
if response and response.get('NetworkMode') == '02':
return 'umts'
return 'UNKNOWN_MODE'
| 34.848837 | 100 | 0.664665 |
4f55aabe2371cfcd08abb5fb94b31f8f1d100454 | 5,185 | py | Python | facedancer/backends/raspdancer.py | benquike/Facedancer | b4656fe3ecf7c4ebc0bcf2d511b3ad5f8aca3a83 | [
"BSD-3-Clause"
] | 1 | 2021-09-20T08:38:08.000Z | 2021-09-20T08:38:08.000Z | facedancer/backends/raspdancer.py | Acidburn0zzz/Facedancer | 28d3a900179e9dd280e007026a68fbdf97e4e35a | [
"BSD-3-Clause"
] | null | null | null | facedancer/backends/raspdancer.py | Acidburn0zzz/Facedancer | 28d3a900179e9dd280e007026a68fbdf97e4e35a | [
"BSD-3-Clause"
] | null | null | null | #
# Raspdancer
#
# Implementation of the Facedancer API that supports direct access to the MAX324x
# chip via a RasPi's SoC SPI bus. Emulates talking to a Facedancer, but ignores
# the details of the GreatFET protocol.
#
import os
import sys
import time
import logging
from ..core import FacedancerApp
from ..backends.MAXUSBApp import MAXUSBApp
from ..USB import *
from ..USBDevice import USBDeviceRequest
class RaspdancerMaxUSBApp(MAXUSBApp):
app_name = "MAXUSB"
app_num = 0x00 # Not meaningful for us. TODO: Remove!
@classmethod
def appropriate_for_environment(cls, backend_name):
"""
Determines if the current environment seems appropriate
        for using the Raspdancer::MaxUSB backend.
"""
# Only ever try Raspdancer backends if the backend is set to raspdancer;
# we don't want to start randomly spamming a system's SPI bus.
if backend_name != "raspdancer":
return False
# If we're not explicitly trying to use something else,
# see if there's a connected GreatFET.
try:
rd = Raspdancer()
return True
except ImportError as e:
logging.info("Skipping Raspdancer devices, as prequisites aren't installed ({}).".format(e))
return False
except:
return False
def __init__(self, device=None, verbose=0, quirks=None):
if device is None:
device = Raspdancer(verbose=verbose)
FacedancerApp.__init__(self, device, verbose)
self.connected_device = None
self.enable()
if verbose > 0:
rev = self.read_register(self.reg_revision)
print(self.app_name, "revision", rev)
# set duplex and negative INT level (from GoodFEDMAXUSB.py)
self.write_register(self.reg_pin_control,
self.full_duplex | self.interrupt_level)
def init_commands(self):
pass
def enable(self):
for i in range(3):
self.device.set_up_comms()
if self.verbose > 0:
print(self.app_name, "enabled")
def ack_status_stage(self, blocking=False):
if self.verbose > 5:
print(self.app_name, "sending ack!")
self.device.transfer(b'\x01')
def read_register(self, reg_num, ack=False):
if self.verbose > 1:
print(self.app_name, "reading register 0x%02x" % reg_num)
data = bytearray([ reg_num << 3, 0 ])
if ack:
data[0] |= 1
resp = self.device.transfer(data)
if self.verbose > 2:
print(self.app_name, "read register 0x%02x has value 0x%02x" %
(reg_num, resp[1]))
return resp[1]
def write_register(self, reg_num, value, ack=False):
if self.verbose > 2:
print(self.app_name, "writing register 0x%02x with value 0x%02x" %
(reg_num, value))
data = bytearray([ (reg_num << 3) | 2, value ])
if ack:
data[0] |= 1
self.device.transfer(data)
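    # Command-byte layout as implied by the helpers above/below (derived from this code, not
    # quoted from the MAX3420 datasheet): bits 7..3 carry the register number, bit 1 marks a
    # write and bit 0 requests an acknowledge; read_bytes()/write_bytes() use the same scheme.
    # Example: write_register(27, 0x40) transfers [(27 << 3) | 2, 0x40] == [0xDA, 0x40].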
def read_bytes(self, reg, n):
if self.verbose > 2:
print(self.app_name, "reading", n, "bytes from register", reg)
data = bytes([ (reg << 3) ] + ([0] * n))
resp = self.device.transfer(data)
if self.verbose > 3:
print(self.app_name, "read", len(resp) - 1, "bytes from register", reg)
return resp[1:]
def write_bytes(self, reg, data):
data = bytes([ (reg << 3) | 3 ]) + data
self.device.transfer(data)
if self.verbose > 3:
print(self.app_name, "wrote", len(data) - 1, "bytes to register", reg)
class Raspdancer(object):
"""
Extended version of the Facedancer class that accepts a direct
SPI connection to the MAX324x chip, as used by the Raspdancer.
"""
def __init__(self, verbose=0):
"""
Initializes our connection to the MAXUSB device.
"""
import spi
import RPi.GPIO as GPIO
self.verbose = verbose
self.buffered_result = b''
self.last_verb = -1
self.spi = spi
self.gpio = GPIO
self.gpio.setwarnings(False)
self.gpio.setmode(self.gpio.BOARD)
self.reset()
def reset(self):
"""
Resets the connected MAXUSB chip.
"""
self.gpio.setup(15, self.gpio.OUT)
self.gpio.output(15, self.gpio.LOW)
self.gpio.output(15, self.gpio.HIGH)
def set_up_comms(self):
"""
Sets up the Raspdancer to communicate with the MAX324x.
"""
# pin15=GPIO22 is linked to MAX3420 -RST
self.gpio.setup(15, self.gpio.OUT)
self.gpio.output(15,self.gpio.LOW)
self.gpio.output(15,self.gpio.HIGH)
self.spi.openSPI(speed=26000000)
def transfer(self, data):
"""
Emulate the facedancer's write command, which blasts data
directly over to the SPI bus.
"""
if isinstance(data,str):
data = [ord(x) for x in data]
data = tuple(data)
data = self.spi.transfer(data)
return bytearray(data)
| 27.289474 | 104 | 0.588428 |
4f559878ce325bc467bfb825333fd787cb09eb7b | 3,089 | py | Python | osmocom-python/osmocom/network.py | vbohinc/CommunityCellularManager | ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c | [
"BSD-3-Clause"
] | null | null | null | osmocom-python/osmocom/network.py | vbohinc/CommunityCellularManager | ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c | [
"BSD-3-Clause"
] | 3 | 2021-03-20T00:02:37.000Z | 2022-02-11T03:46:59.000Z | osmocom-python/osmocom/network.py | vbohinc/CommunityCellularManager | ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import re
from vty import BaseVTY
class Network(BaseVTY):
def __init__(self, host='127.0.0.1', port=4242, timeout=None):
super(Network, self).__init__('OpenBSC', host, port, timeout)
self.PARSE_SHOW = [
re.compile('BSC is on Country Code (?P<mcc>\d+), Network Code (?P<mnc>\d+) and has (?P<bts_count>\d+) BTS'),
re.compile('Long network name: \'(?P<long_name>[^\s]+)\''),
re.compile('Short network name: \'(?P<short_name>[^\s]+)\''),
re.compile('Authentication policy: (?P<auth_policy>[^\s]+)'),
re.compile('Location updating reject cause: (?P<lur_reject_cause>\d+)'),
re.compile('Encryption: (?P<encryption>[^\s]+)'),
re.compile('NECI \(TCH/H\): (?P<neci>\d+)'),
re.compile('Use TCH for Paging any: (?P<tch_paging>\d+)'),
re.compile('RRLP Mode: (?P<rrlp_mode>[^\s]+)'),
re.compile('MM Info: (?P<mm_info>[^\s]+)'),
re.compile('Handover: (?P<handover>[^\s]+)'),
re.compile('Current Channel Load: (?P<channel_load>[^\s]+)')]
def running_config(self):
"""Return network running configuration"""
conf = super(Network, self).running_config()
return conf['network']
def show(self):
"""Retreives data returned when issuing the show command
on the VTTY as a dictionary with data entries corresponding
to the named regex matching groups in `self.PARSE_SHOW`
"""
with self.enable_mode():
resp = self.sendrecv('show network')
return self._parse_show(resp)
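    # Sketch of the dict shape _parse_show() is expected to produce from PARSE_SHOW above
    # (values are invented for illustration):
    #   {'mcc': '901', 'mnc': '55', 'bts_count': '1', 'short_name': 'Test',
    #    'auth_policy': 'closed', 'handover': '0', ...}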
def set_mcc(self, mcc):
"""Set the MCC."""
return self.__set('network country code', mcc)
def set_mnc(self, mnc):
"""Set the MNC."""
return self.__set('mobile network code', mnc)
def set_short_name(self, name):
"""Set the short name"""
return self.__set('short name', name)
def set_long_name(self, name):
"""Set the long name"""
return self.__set('long name', name)
def set_handover(self, value):
"""Enable or disable handover"""
return self.__set('handover', value)
def set_timer(self, timer, value):
"""Set the value of a timer"""
return self.__set('timer t%d' % timer, value)
def __set(self, field, value):
"""Generic method for issuing set commands.
Handles entering the correct configure mode for
writing network settings.
"""
with self.configure_mode():
with self.configure('network'):
ret = self.sendrecv('%s %s' % (field, value))
if '%' in ret:
raise ValueError(ret)
self.sendrecv('write') #persist
return ret
| 37.670732 | 120 | 0.586598 |
4f54539cae4ee02ab2c8d6802ca27b35c75deed8 | 38,632 | py | Python | src/ezdxf/entities/polyline.py | mherzog01/ezdxf | 1325714c8455d571d5119e0d0edd54d1a3c23e41 | [
"MIT"
] | null | null | null | src/ezdxf/entities/polyline.py | mherzog01/ezdxf | 1325714c8455d571d5119e0d0edd54d1a3c23e41 | [
"MIT"
] | null | null | null | src/ezdxf/entities/polyline.py | mherzog01/ezdxf | 1325714c8455d571d5119e0d0edd54d1a3c23e41 | [
"MIT"
] | null | null | null | # Copyright (c) 2019-2020 Manfred Moitzi
# License: MIT License
# Created 2019-02-16
from typing import TYPE_CHECKING, Iterable, Union, List, cast, Tuple, Sequence, Dict
from itertools import chain
from ezdxf.math import Vector, Matrix44, NULLVEC
from ezdxf.math.transformtools import OCSTransform, NonUniformScalingError
from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, XType
from ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER, VERTEXNAMES
from ezdxf.lldxf import const
from .dxfentity import base_class, SubclassProcessor
from .dxfgfx import DXFGraphic, acdb_entity, SeqEnd
from .factory import register_entity
from .lwpolyline import FORMAT_CODES
from ezdxf.explode import virtual_polyline_entities, explode_entity
from ezdxf.query import EntityQuery
from ezdxf.entities import factory
if TYPE_CHECKING:
from ezdxf.eztypes import (
TagWriter, Vertex, FaceType, DXFNamespace, DXFEntity, Drawing, UCS, Line, Arc, Face3d, BaseLayout,
)
__all__ = ['Polyline', 'Polyface', 'Polymesh']
acdb_polyline = DefSubclass('AcDbPolylineDummy', { # AcDbPolylineDummy is a temp solution while importing
# 66: obsolete - not read and not written, because POLYLINE without vertices makes no sense
# a “dummy” point; the X and Y values are always 0, and the Z value is the polyline's elevation
# (in OCS when 2D, WCS when 3D) x, y ALWAYS 0
'elevation': DXFAttr(10, xtype=XType.point3d, default=NULLVEC),
# Polyline flag (bit-coded):
'flags': DXFAttr(70, default=0),
# 1 = This is a closed polyline (or a polygon mesh closed in the M direction)
# 2 = Curve-fit vertices have been added
# 4 = Spline-fit vertices have been added
# 8 = This is a 3D polyline
# 16 = This is a 3D polygon mesh
# 32 = The polygon mesh is closed in the N direction
# 64 = The polyline is a polyface mesh
# 128 = The linetype pattern is generated continuously around the vertices of this polyline
'default_start_width': DXFAttr(40, default=0, optional=True),
'default_end_width': DXFAttr(41, default=0, optional=True),
'm_count': DXFAttr(71, default=0, optional=True),
'n_count': DXFAttr(72, default=0, optional=True),
'm_smooth_density': DXFAttr(73, default=0, optional=True),
'n_smooth_density': DXFAttr(74, default=0, optional=True),
# Curves and smooth surface type; integer codes, not bit-coded:
'smooth_type': DXFAttr(75, default=0, optional=True),
# 0 = No smooth surface fitted
# 5 = Quadratic B-spline surface
# 6 = Cubic B-spline surface
# 8 = Bezier surface
'thickness': DXFAttr(39, default=0, optional=True),
'extrusion': DXFAttr(210, xtype=XType.point3d, default=Vector(0, 0, 1), optional=True),
})
@register_entity
class Polyline(DXFGraphic):
""" DXF POLYLINE entity """
DXFTYPE = 'POLYLINE'
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_polyline)
# polyline flags (70)
CLOSED = 1
MESH_CLOSED_M_DIRECTION = CLOSED
CURVE_FIT_VERTICES_ADDED = 2
SPLINE_FIT_VERTICES_ADDED = 4
POLYLINE_3D = 8
POLYMESH = 16
MESH_CLOSED_N_DIRECTION = 32
POLYFACE = 64
GENERATE_LINETYPE_PATTERN = 128
# polymesh smooth type (75)
NO_SMOOTH = 0
QUADRATIC_BSPLINE = 5
CUBIC_BSPLINE = 6
BEZIER_SURFACE = 8
ANY3D = POLYLINE_3D | POLYMESH | POLYFACE
def __init__(self, doc: 'Drawing' = None):
super().__init__(doc)
self.vertices = [] # type: List[DXFVertex]
self.seqend = None # type: SeqEnd
def linked_entities(self) -> Iterable['DXFVertex']:
# don't yield SEQEND here, because it is not a DXFGraphic entity
return self.vertices
def link_entity(self, entity: 'DXFEntity') -> None:
assert isinstance(entity, DXFVertex)
entity.set_owner(self.dxf.owner, self.dxf.paperspace)
self.vertices.append(entity)
def link_seqend(self, seqend: 'DXFEntity') -> None:
seqend.dxf.owner = self.dxf.owner
self.seqend = seqend
def _copy_data(self, entity: 'Polyline') -> None:
""" Copy vertices, does not store the copies into the entity database. """
entity.vertices = [vertex.copy() for vertex in self.vertices]
entity.seqend = self.seqend.copy()
def add_sub_entities_to_entitydb(self):
""" Called by Entitydb.add(). (internal API) """
for vertex in self.vertices:
vertex.doc = self.doc # grant same document
self.entitydb.add(vertex)
if self.seqend:
self.seqend.doc = self.doc # grant same document
self.entitydb.add(self.seqend)
else:
self.new_seqend()
def new_seqend(self):
""" Create new ENDSEQ. (internal API)"""
seqend = self.doc.dxffactory.create_db_entry('SEQEND', dxfattribs={'layer': self.dxf.layer})
self.link_seqend(seqend)
def set_owner(self, owner: str, paperspace: int = 0):
# At loading from file:
# POLYLINE will be added to layout before vertices are linked, so set_owner() of POLYLINE
# does not set owner of vertices
super().set_owner(owner, paperspace)
# assigning new owner to vertices is done by super class set_owner() method
if self.seqend: # has no paperspace flag
self.seqend.dxf.owner = owner
def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':
"""
        Adds subclass processing for the POLYLINE subclass, requires previous base class and 'AcDbEntity' processing by parent
class.
"""
dxf = super().load_dxf_attribs(processor)
if processor is None:
return dxf
if processor.r12:
processor.load_dxfattribs_into_namespace(dxf, acdb_polyline, index=0)
else:
tags = processor.load_dxfattribs_into_namespace(dxf, acdb_polyline, index=2)
name = processor.subclasses[2][0].value
if len(tags):
# do not log:
# 66: attribs follow, not required
processor.log_unprocessed_tags(tags.filter((66,)), subclass=name)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
# base class export is done by parent class
super().export_entity(tagwriter)
# AcDbEntity export is done by parent class
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, self.get_mode())
tagwriter.write_tag2(66, 1) # entities follow, required for R12? (sure not for R2000+)
# for all DXF versions
self.dxf.export_dxf_attribs(tagwriter, [
'elevation',
'flags',
'default_start_width',
'default_end_width',
'm_count',
'n_count',
'm_smooth_density',
'n_smooth_density',
'smooth_type',
'thickness',
'extrusion',
])
# xdata and embedded objects export will be done by parent class
# following VERTEX entities and SEQEND is exported by EntitySpace()
def export_seqend(self, tagwriter: 'TagWriter'):
self.seqend.dxf.owner = self.dxf.owner
self.seqend.dxf.layer = self.dxf.layer
self.seqend.export_dxf(tagwriter)
def destroy(self) -> None:
"""
Delete all data and references.
"""
for v in self.vertices:
self.entitydb.delete_entity(v)
del self.vertices
self.entitydb.delete_entity(self.seqend)
super().destroy()
def on_layer_change(self, layer: str):
"""
Event handler for layer change. Changes also the layer of all vertices.
Args:
layer: new layer as string
"""
for v in self.vertices:
v.dxf.layer = layer
def on_linetype_change(self, linetype: str):
"""
Event handler for linetype change. Changes also the linetype of all vertices.
Args:
linetype: new linetype as string
"""
for v in self.vertices:
v.dxf.linetype = linetype
def get_vertex_flags(self) -> int:
return const.VERTEX_FLAGS[self.get_mode()]
def get_mode(self) -> str:
""" Returns a string: ``'AcDb2dPolyline'``, ``'AcDb3dPolyline'``, ``'AcDbPolygonMesh'`` or
``'AcDbPolyFaceMesh'``
"""
if self.is_3d_polyline:
return 'AcDb3dPolyline'
elif self.is_polygon_mesh:
return 'AcDbPolygonMesh'
elif self.is_poly_face_mesh:
return 'AcDbPolyFaceMesh'
else:
return 'AcDb2dPolyline'
@property
def is_2d_polyline(self) -> bool:
""" ``True`` if POLYLINE is a 2D polyline. """
return self.dxf.flags & self.ANY3D == 0
@property
def is_3d_polyline(self) -> bool:
""" ``True`` if POLYLINE is a 3D polyline. """
return bool(self.dxf.flags & self.POLYLINE_3D)
@property
def is_polygon_mesh(self) -> bool:
""" ``True`` if POLYLINE is a polygon mesh, see :class:`Polymesh` """
return bool(self.dxf.flags & self.POLYMESH)
@property
def is_poly_face_mesh(self) -> bool:
""" ``True`` if POLYLINE is a poly face mesh, see :class:`Polyface` """
return bool(self.dxf.flags & self.POLYFACE)
@property
def is_closed(self) -> bool:
""" ``True`` if POLYLINE is closed. """
return bool(self.dxf.flags & self.CLOSED)
@property
def is_m_closed(self) -> bool:
""" ``True`` if POLYLINE (as :class:`Polymesh`) is closed in m direction. """
return bool(self.dxf.flags & self.MESH_CLOSED_M_DIRECTION)
@property
def is_n_closed(self) -> bool:
""" ``True`` if POLYLINE (as :class:`Polymesh`) is closed in n direction. """
return bool(self.dxf.flags & self.MESH_CLOSED_N_DIRECTION)
@property
def has_arc(self) -> bool:
""" Returns ``True`` if 2D POLYLINE has an arc segment. """
if self.is_2d_polyline:
return any(bool(v.dxf.bulge) for v in self.vertices)
else:
return False
@property
def has_width(self) -> bool:
""" Returns ``True`` if 2D POLYLINE has default width values or any segment with width attributes.
.. versionadded:: 0.14
"""
if self.is_2d_polyline:
if self.dxf.hasattr('default_start_width') and bool(self.dxf.default_start_width):
return True
if self.dxf.hasattr('default_end_width') and bool(self.dxf.default_end_width):
return True
for v in self.vertices:
if v.dxf.hasattr('start_width') and bool(v.dxf.start_width):
return True
if v.dxf.hasattr('end_width') and bool(v.dxf.end_width):
return True
return False
def m_close(self, status=True) -> None:
"""
Close POLYMESH in m direction if `status` is ``True`` (also closes POLYLINE),
clears closed state if `status` is ``False``.
"""
self.set_flag_state(self.MESH_CLOSED_M_DIRECTION, status, name='flags')
def n_close(self, status=True) -> None:
"""
Close POLYMESH in n direction if `status` is ``True``, clears closed state if `status` is ``False``.
"""
self.set_flag_state(self.MESH_CLOSED_N_DIRECTION, status, name='flags')
def close(self, m_close=True, n_close=False) -> None:
""" Set closed state of POLYMESH and POLYLINE in m direction and n direction. ``True`` set closed flag,
``False`` clears closed flag.
"""
self.m_close(m_close)
self.n_close(n_close)
def __len__(self) -> int:
""" Returns count of :class:`Vertex` entities. """
return len(self.vertices)
def __getitem__(self, pos) -> 'DXFVertex':
""" Get :class:`Vertex` entity at position `pos`, supports ``list`` slicing. """
return self.vertices[pos]
def points(self) -> Iterable[Vector]:
""" Returns iterable of all polyline vertices as ``(x, y, z)`` tuples, not as :class:`Vertex` objects."""
return (vertex.dxf.location for vertex in self.vertices)
def append_vertices(self, points: Iterable['Vertex'], dxfattribs: dict = None) -> None:
""" Append multiple :class:`Vertex` entities at location `points`.
Args:
points: iterable of ``(x, y[, z])`` tuples
dxfattribs: dict of DXF attributes for :class:`Vertex` class
"""
dxfattribs = dxfattribs or {}
self.vertices.extend(self._build_dxf_vertices(points, dxfattribs))
def append_formatted_vertices(self, points: Iterable['Vertex'], format: str = 'xy',
dxfattribs: dict = None) -> None:
""" Append multiple :class:`Vertex` entities at location `points`.
Args:
points: iterable of (x, y, [start_width, [end_width, [bulge]]]) tuple
format: format: format string, default is ``'xy'``, see: :ref:`format codes`
dxfattribs: dict of DXF attributes for :class:`Vertex` class
"""
dxfattribs = dxfattribs or {}
dxfattribs['flags'] = dxfattribs.get('flags', 0) | self.get_vertex_flags()
# same DXF attributes for VERTEX entities as for POLYLINE
dxfattribs['owner'] = self.dxf.owner
dxfattribs['layer'] = self.dxf.layer
if self.dxf.hasattr('linetype'):
dxfattribs['linetype'] = self.dxf.linetype
if self.doc:
create_vertex = self.doc.dxffactory.create_db_entry
else:
create_vertex = factory.new
for point in points:
attribs = vertex_attribs(point, format)
attribs.update(dxfattribs)
self.vertices.append(create_vertex('VERTEX', attribs))
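    # For example (assuming the same format codes as LWPOLYLINE points, since vertex_attribs()
    # is defined elsewhere in this module): format='xyb' would read each point as
    # (x, y, bulge), so append_formatted_vertices([(0, 0, 1), (3, 0, 0)], format='xyb')
    # creates a bulged (arc) segment between the two vertices.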
def append_vertex(self, point: 'Vertex', dxfattribs: dict = None) -> None:
"""
Append single :class:`Vertex` entity at location `point`.
Args:
point: as ``(x, y[, z])`` tuple
dxfattribs: dict of DXF attributes for :class:`Vertex` class
"""
dxfattribs = dxfattribs or {}
self.vertices.extend(self._build_dxf_vertices([point], dxfattribs))
def insert_vertices(self, pos: int, points: Iterable['Vertex'], dxfattribs: dict = None) -> None:
"""
Insert :class:`Vertex` entities at location `points` at insertion position `pos``
of list :attr:`Polyline.vertices`.
Args:
pos: insertion position of list :attr:`Polyline.vertices`
points: list of ``(x, y[, z])`` tuples
dxfattribs: dict of DXF attributes for :class:`Vertex` class
"""
dxfattribs = dxfattribs or {}
self.vertices[pos:pos] = list(self._build_dxf_vertices(points, dxfattribs))
def _build_dxf_vertices(self, points: Iterable['Vertex'], dxfattribs: dict) -> List['DXFVertex']:
""" Converts point (x, y, z)-tuples into DXFVertex objects.
Args:
points: list of (x, y, z)-tuples
dxfattribs: dict of DXF attributes
"""
dxfattribs['flags'] = dxfattribs.get('flags', 0) | self.get_vertex_flags()
# same DXF attributes for VERTEX entities as for POLYLINE
dxfattribs['owner'] = self.dxf.owner
dxfattribs['layer'] = self.dxf.layer
if self.dxf.hasattr('linetype'):
dxfattribs['linetype'] = self.dxf.linetype
if self.doc:
create_vertex = self.doc.dxffactory.create_db_entry
else:
create_vertex = factory.new
for point in points:
dxfattribs['location'] = Vector(point)
yield create_vertex('VERTEX', dxfattribs)
def cast(self) -> Union['Polyline', 'Polymesh', 'Polyface']:
mode = self.get_mode()
if mode == 'AcDbPolyFaceMesh':
return Polyface.from_polyline(self)
elif mode == 'AcDbPolygonMesh':
return Polymesh.from_polyline(self)
else:
return self
def transform(self, m: Matrix44) -> 'Polyline':
""" Transform POLYLINE entity by transformation matrix `m` inplace.
.. versionadded:: 0.13
"""
def _ocs_locations(elevation):
for vertex in self.vertices:
location = vertex.dxf.location
if elevation is not None:
                    # Older DXF versions may not have written the z-axis, which is now 0 by default in ezdxf,
# so replace existing z-axis by elevation value
location = location.replace(z=elevation)
yield location
if self.is_2d_polyline:
dxf = self.dxf
ocs = OCSTransform(self.dxf.extrusion, m)
# Newer DXF versions write 2d polylines always as LWPOLYLINE entities.
# No need for optimizations.
if not ocs.scale_uniform:
raise NonUniformScalingError('2D POLYLINE with arcs does not support non uniform scaling')
# Parent function has to catch this Exception and explode this 2D POLYLINE into LINE and ELLIPSE entities.
if dxf.hasattr('elevation'):
z_axis = dxf.elevation.z
else:
z_axis = None
# transform old OCS locations into new OCS locations by transformation matrix m
vertices = [ocs.transform_vertex(vertex) for vertex in _ocs_locations(z_axis)]
# set new elevation, all vertices of a 2D polyline must have the same z-axis
if vertices:
dxf.elevation = vertices[0].replace(x=0, y=0)
# set new vertex locations
for vertex, location in zip(self.vertices, vertices):
vertex.dxf.location = location
if dxf.hasattr('thickness'):
dxf.thickness = ocs.transform_length((0, 0, dxf.thickness))
dxf.extrusion = ocs.new_extrusion
else:
for vertex in self.vertices:
vertex.transform(m)
return self
def explode(self, target_layout: 'BaseLayout' = None) -> 'EntityQuery':
"""
Explode parts of POLYLINE as LINE, ARC or 3DFACE entities into target layout, if target layout is ``None``,
the target layout is the layout of the POLYLINE.
Returns an :class:`~ezdxf.query.EntityQuery` container with all DXF parts.
Args:
target_layout: target layout for DXF parts, ``None`` for same layout as source entity.
.. versionadded:: 0.12
"""
return explode_entity(self, target_layout)
def virtual_entities(self) -> Iterable[Union['Line', 'Arc', 'Face3d']]:
"""
Yields 'virtual' parts of POLYLINE as LINE, ARC or 3DFACE entities.
        These entities are located at the original positions, but are not stored in the entity database, have no handle
and are not assigned to any layout.
.. versionadded:: 0.12
"""
return virtual_polyline_entities(self)
class Polyface(Polyline):
pass
"""
PolyFace structure:
POLYLINE
AcDbEntity
AcDbPolyFaceMesh
VERTEX - Vertex
AcDbEntity
AcDbVertex
AcDbPolyFaceMeshVertex
VERTEX - Face
AcDbEntity
AcDbFaceRecord
SEQEND
Order of mesh_vertices and face_records is important (DXF R2010):
1. mesh_vertices: the polyface mesh vertex locations
2. face_records: indices of the face forming vertices
"""
@classmethod
def from_polyline(cls, polyline: Polyline) -> 'Polyface':
polyface = cls.shallow_copy(polyline)
polyface.vertices = polyline.vertices
polyface.seqend = polyline.seqend
# do not destroy polyline - all data would be lost
return polyface
def append_face(self, face: 'FaceType', dxfattribs: dict = None) -> None:
"""
Append a single face. A `face` is a list of ``(x, y, z)`` tuples.
Args:
face: List[``(x, y, z)`` tuples]
dxfattribs: dict of DXF attributes for :class:`Vertex` entity
"""
self.append_faces([face], dxfattribs)
def _points_to_dxf_vertices(self, points: Iterable['Vertex'], dxfattribs: dict) -> List['DXFVertex']:
""" Converts point (x,y, z)-tuples into DXFVertex objects.
Args:
points: List[``(x, y, z)`` tuples]
dxfattribs: dict of DXF attributes for :class:`Vertex` entity
"""
dxfattribs['flags'] = dxfattribs.get('flags', 0) | self.get_vertex_flags()
dxfattribs['layer'] = self.get_dxf_attrib('layer', '0') # all vertices on the same layer as the POLYLINE entity
vertices = [] # type: List[DXFVertex]
for point in points:
dxfattribs['location'] = point
vertices.append(cast('DXFVertex', self._new_compound_entity('VERTEX', dxfattribs)))
return vertices
def append_faces(self, faces: Iterable['FaceType'], dxfattribs: dict = None) -> None:
"""
Append multiple `faces`. `faces` is a list of single faces and a single face is a list of ``(x, y, z)`` tuples.
Args:
faces: list of List[``(x, y, z)`` tuples]
dxfattribs: dict of DXF attributes for :class:`Vertex` entity
"""
def new_face_record() -> 'DXFVertex':
dxfattribs['flags'] = const.VTX_3D_POLYFACE_MESH_VERTEX
# location of face record vertex is always (0, 0, 0)
dxfattribs['location'] = Vector()
return self._new_compound_entity('VERTEX', dxfattribs)
dxfattribs = dxfattribs or {}
existing_vertices, existing_faces = self.indexed_faces()
# existing_faces is a generator, can't append new data
new_faces = [] # type: List[FaceProxy]
for face in faces:
# convert face point coordinates to DXF Vertex() objects.
face_mesh_vertices = self._points_to_dxf_vertices(face, {}) # type: List[DXFVertex]
# index of first new vertex
index = len(existing_vertices)
existing_vertices.extend(face_mesh_vertices)
# create a new face_record with all indices set to 0
face_record = FaceProxy(new_face_record(), existing_vertices)
# set correct indices
face_record.indices = tuple(range(index, index + len(face_mesh_vertices)))
new_faces.append(face_record)
self._rebuild(chain(existing_faces, new_faces))
def _rebuild(self, faces: Iterable['FaceProxy'], precision: int = 6) -> None:
"""
Build a valid Polyface structure out of *faces*.
Args:
faces: iterable of FaceProxy objects.
"""
polyface_builder = PolyfaceBuilder(faces, precision=precision)
self.vertices = []
# polyline._unlink_all_vertices() # but don't remove it from database
self.vertices = polyface_builder.get_vertices()
self.update_count(polyface_builder.nvertices, polyface_builder.nfaces)
def update_count(self, nvertices: int, nfaces: int) -> None:
self.dxf.m_count = nvertices
self.dxf.n_count = nfaces
def optimize(self, precision: int = 6) -> None:
"""
Rebuilds :class:`Polyface` with vertex optimization. Merges vertices with nearly same vertex locations.
Polyfaces created by `ezdxf` are optimized automatically.
Args:
precision: decimal precision for determining identical vertex locations
"""
vertices, faces = self.indexed_faces()
self._rebuild(faces, precision)
def faces(self) -> Iterable[List['DXFVertex']]:
"""
Iterable of all faces, a face is a tuple of vertices.
Returns:
list: [vertex, vertex, vertex, [vertex,] face_record]
"""
_, faces = self.indexed_faces() # just need the faces generator
for face in faces:
face_vertices = list(face)
face_vertices.append(face.face_record)
yield face_vertices
def indexed_faces(self) -> Tuple[List['DXFVertex'], Iterable['FaceProxy']]:
"""
Returns a list of all vertices and a generator of FaceProxy() objects.
(internal API)
"""
vertices = []
face_records = []
for vertex in self.vertices: # type: DXFVertex
(vertices if vertex.is_poly_face_mesh_vertex else face_records).append(vertex)
faces = (FaceProxy(face_record, vertices) for face_record in face_records)
return vertices, faces
class FaceProxy:
__slots__ = ('vertices', 'face_record', 'indices')
"""
Represents a single face of a polyface structure. (internal class)
vertices:
List of all polyface vertices.
face_record:
        The face forming vertex of type ``AcDbFaceRecord``, which contains the indices of the face building vertices.
        Indices in the DXF structure are 1-based and a negative index indicates the beginning of an invisible edge.
        Face.face_record.dxf.color determines the color of the face.
    indices:
        Indices of the face building vertices as a tuple. These indices are 0-based and are used to get vertices from
        the list *Face.vertices*.
"""
def __init__(self, face_record: 'DXFVertex', vertices: Sequence['DXFVertex']):
""" Returns iterable of all face vertices as :class:`Vertex` entities. """
self.vertices = vertices # type: Sequence[DXFVertex]
self.face_record = face_record # type: DXFVertex
self.indices = self._indices() # type: Sequence[int]
def __len__(self) -> int:
""" Returns count of face vertices (without face_record). """
return len(self.indices)
def __getitem__(self, pos: int) -> 'DXFVertex':
"""
Returns :class:`Vertex` at position `pos`.
Args:
pos: vertex position 0-based
"""
return self.vertices[self.indices[pos]]
    def __iter__(self) -> Iterable['DXFVertex']:
        """ Returns iterable of all face vertices as :class:`Vertex` entities. """
        return (self.vertices[index] for index in self.indices)
def points(self) -> Iterable['Vertex']:
""" Returns iterable of all face vertex locations as ``(x, y, z)`` tuples. """
return (vertex.dxf.location for vertex in self)
def _raw_indices(self) -> Iterable[int]:
return (self.face_record.get_dxf_attrib(name, 0) for name in const.VERTEXNAMES)
def _indices(self) -> Sequence[int]:
return tuple(abs(index) - 1 for index in self._raw_indices() if index != 0)
def is_edge_visible(self, pos: int) -> bool:
"""
Returns ``True`` if edge starting at vertex `pos` is visible.
Args:
pos: vertex position 0-based
"""
name = const.VERTEXNAMES[pos]
return self.face_record.get_dxf_attrib(name) > 0
class PolyfaceBuilder:
""" Optimized polyface builder. (internal class) """
def __init__(self, faces: Iterable['FaceProxy'], precision: int = 6):
self.precision = precision
self.faces = []
self.vertices = []
self.index_mapping = {}
self.build(faces)
@property
def nvertices(self) -> int:
return len(self.vertices)
@property
def nfaces(self) -> int:
return len(self.faces)
def get_vertices(self) -> List['DXFVertex']:
vertices = self.vertices[:]
vertices.extend(self.faces)
return vertices
def build(self, faces: Iterable['FaceProxy']) -> None:
for face in faces:
face_record = face.face_record
for vertex, name in zip(face, VERTEXNAMES):
index = self.add(vertex)
# preserve sign of old index value
sign = -1 if face_record.dxf.get(name, 0) < 0 else +1
face_record.dxf.set(name, (index + 1) * sign)
self.faces.append(face_record)
def add(self, vertex: 'DXFVertex') -> int:
def key(point):
return tuple((round(coord, self.precision) for coord in point))
location = key(vertex.dxf.location)
try:
return self.index_mapping[location]
except KeyError: # internal exception
index = len(self.vertices)
self.index_mapping[location] = index
self.vertices.append(vertex)
return index
class Polymesh(Polyline):
"""
PolyMesh structure:
POLYLINE
AcDbEntity
AcDbPolygonMesh
VERTEX
AcDbEntity
AcDbVertex
AcDbPolygonMeshVertex
"""
@classmethod
def from_polyline(cls, polyline: Polyline) -> 'Polymesh':
polymesh = cls.shallow_copy(polyline)
polymesh.vertices = polyline.vertices
polymesh.seqend = polyline.seqend
# do not destroy polyline - all data would be lost
return polymesh
def set_mesh_vertex(self, pos: Tuple[int, int], point: 'Vertex', dxfattribs: dict = None):
"""
Set location and DXF attributes of a single mesh vertex.
Args:
pos: 0-based (row, col)-tuple, position of mesh vertex
point: (x, y, z)-tuple, new 3D coordinates of the mesh vertex
dxfattribs: dict of DXF attributes
"""
dxfattribs = dxfattribs or {}
dxfattribs['location'] = point
vertex = self.get_mesh_vertex(pos)
vertex.update_dxf_attribs(dxfattribs)
def get_mesh_vertex(self, pos: Tuple[int, int]) -> 'DXFVertex':
"""
Get location of a single mesh vertex.
Args:
pos: 0-based ``(row, col)`` tuple, position of mesh vertex
"""
m_count = self.dxf.m_count
n_count = self.dxf.n_count
m, n = pos
if 0 <= m < m_count and 0 <= n < n_count:
pos = m * n_count + n
return self.vertices[pos]
else:
raise const.DXFIndexError(repr(pos))
def get_mesh_vertex_cache(self) -> 'MeshVertexCache':
"""
Get a :class:`MeshVertexCache` object for this polymesh. The caching object provides fast access
to the :attr:`location` attribute of mesh vertices.
"""
return MeshVertexCache(self)
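    # Hypothetical usage sketch (variable names assumed, not part of this module):
    #     cache = polymesh.get_mesh_vertex_cache()
    #     cache[(0, 0)] = (1.0, 2.0, 3.0)   # set location of the mesh vertex at row 0, col 0
    #     location = cache[(0, 0)]          # read it back as an (x, y, z) tuple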
class MeshVertexCache:
__slots__ = ('vertices',)
"""
Cache mesh vertices in a dict, keys are 0-based (row, col)-tuples.
vertices:
Dict of mesh vertices, keys are 0-based (row, col)-tuples. Writing to this dict doesn't change the DXF entity.
"""
def __init__(self, mesh: 'Polyline'):
self.vertices = self._setup(mesh, mesh.dxf.m_count, mesh.dxf.n_count) # type: Dict[Tuple[int, int], DXFVertex]
def _setup(self, mesh: 'Polyline', m_count: int, n_count: int) -> dict:
cache = {} # type: Dict[Tuple[int, int], DXFVertex]
vertices = iter(mesh.vertices)
for m in range(m_count):
for n in range(n_count):
cache[(m, n)] = next(vertices)
return cache
def __getitem__(self, pos: Tuple[int, int]) -> 'Vertex':
"""
Get mesh vertex location as ``(x, y, z)`` tuple.
Args:
pos: 0-based ``(row, col)`` tuple.
"""
try:
return self.vertices[pos].dxf.location
except KeyError:
raise const.DXFIndexError(repr(pos))
def __setitem__(self, pos: Tuple[int, int], location: 'Vertex') -> None:
"""
        Set mesh vertex location as ``(x, y, z)`` tuple.
Args:
pos: 0-based ``(row, col)`` tuple.
location: ``(x, y, z)`` tuple
"""
try:
self.vertices[pos].dxf.location = location
except KeyError:
raise const.DXFIndexError(repr(pos))
acdb_vertex = DefSubclass('AcDbVertex', { # last subclass index -1
'location': DXFAttr(10, xtype=XType.point3d), # Location point (in OCS when 2D, and WCS when 3D)
'start_width': DXFAttr(40, default=0, optional=True), # Starting width
'end_width': DXFAttr(41, default=0, optional=True), # Ending width
# Bulge (optional; default is 0). The bulge is the tangent of one fourth the included angle for an arc segment, made
# negative if the arc goes clockwise from the start point to the endpoint. A bulge of 0 indicates a straight
# segment, and a bulge of 1 is a semicircle.
'bulge': DXFAttr(42, default=0, optional=True),
'flags': DXFAttr(70, default=0),
'tangent': DXFAttr(50, optional=True), # Curve fit tangent direction (in degrees?)
'vtx0': DXFAttr(71, optional=True),
'vtx1': DXFAttr(72, optional=True),
'vtx2': DXFAttr(73, optional=True),
'vtx3': DXFAttr(74, optional=True),
'vertex_identifier': DXFAttr(91, optional=True),
})
@register_entity
class DXFVertex(DXFGraphic):
""" DXF VERTEXE entity """
DXFTYPE = 'VERTEX'
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_vertex)
EXTRA_VERTEX_CREATED = 1 # Extra vertex created by curve-fitting
CURVE_FIT_TANGENT = 2 # Curve-fit tangent defined for this vertex.
# A curve-fit tangent direction of 0 may be omitted from the DXF output, but is
# significant if this bit is set.
# 4 = unused, never set in dxf files
SPLINE_VERTEX_CREATED = 8 # Spline vertex created by spline-fitting
SPLINE_FRAME_CONTROL_POINT = 16
POLYLINE_3D_VERTEX = 32
POLYGON_MESH_VERTEX = 64
POLYFACE_MESH_VERTEX = 128
FACE_FLAGS = POLYGON_MESH_VERTEX + POLYFACE_MESH_VERTEX
VTX3D = POLYLINE_3D_VERTEX + POLYGON_MESH_VERTEX + POLYFACE_MESH_VERTEX
def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor is None:
return dxf
# VERTEX can have 3 subclasses if face record or 4 subclasses if vertex
# just last one has data
tags = processor.load_dxfattribs_into_namespace(dxf, acdb_vertex, index=-1)
if len(tags) and not processor.r12:
processor.log_unprocessed_tags(tags, subclass=acdb_polyline.name)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
# base class export is done by parent class
super().export_entity(tagwriter)
# AcDbEntity export is done by parent class
if tagwriter.dxfversion > DXF12:
if self.is_face_record: # (flags & Vertex.FACE_FLAGS) == const.VTX_3D_POLYFACE_MESH_VERTEX:
tagwriter.write_tag2(SUBCLASS_MARKER, 'AcDbFaceRecord')
else:
tagwriter.write_tag2(SUBCLASS_MARKER, 'AcDbVertex')
if self.is_3d_polyline_vertex: # flags & const.VTX_3D_POLYLINE_VERTEX
tagwriter.write_tag2(SUBCLASS_MARKER, 'AcDb3dPolylineVertex')
elif self.is_poly_face_mesh_vertex: # flags & Vertex.FACE_FLAGS == Vertex.FACE_FLAGS:
tagwriter.write_tag2(SUBCLASS_MARKER, 'AcDbPolyFaceMeshVertex')
elif self.is_polygon_mesh_vertex: # flags & const.VTX_3D_POLYGON_MESH_VERTEX
tagwriter.write_tag2(SUBCLASS_MARKER, 'AcDbPolygonMeshVertex')
else:
tagwriter.write_tag2(SUBCLASS_MARKER, 'AcDb2dVertex')
# for all DXF versions
self.dxf.export_dxf_attribs(tagwriter, [
'location', 'start_width', 'end_width', 'bulge', 'flags', 'tangent', 'vtx0', 'vtx1', 'vtx2', 'vtx3',
'vertex_identifier'
])
# xdata and embedded objects export will be done by parent class
# following VERTEX entities and SEQEND is exported by EntitySpace()
@property
def is_2d_polyline_vertex(self) -> bool:
return self.dxf.flags & self.VTX3D == 0
@property
def is_3d_polyline_vertex(self) -> bool:
        return bool(self.dxf.flags & self.POLYLINE_3D_VERTEX)
@property
def is_polygon_mesh_vertex(self) -> bool:
        return bool(self.dxf.flags & self.POLYGON_MESH_VERTEX)
@property
def is_poly_face_mesh_vertex(self) -> bool:
return self.dxf.flags & self.FACE_FLAGS == self.FACE_FLAGS
@property
def is_face_record(self) -> bool:
return (self.dxf.flags & self.FACE_FLAGS) == self.POLYFACE_MESH_VERTEX
def transform(self, m: 'Matrix44') -> 'DXFVertex':
""" Transform VERTEX entity by transformation matrix `m` inplace.
.. versionadded:: 0.13
"""
if self.is_face_record:
return self
self.dxf.location = m.transform(self.dxf.location)
return self
def format(self, format='xyz') -> Sequence:
""" Return formatted vertex components as tuple.
Format codes:
- ``x`` = x-coordinate
- ``y`` = y-coordinate
- ``z`` = z-coordinate
- ``s`` = start width
- ``e`` = end width
- ``b`` = bulge value
- ``v`` = (x, y, z) as tuple
Args:
format: format string, default is "xyz"
.. versionadded:: 0.14
"""
dxf = self.dxf
v = Vector(dxf.location)
x, y, z = v.xyz
b = dxf.bulge
s = dxf.start_width
e = dxf.end_width
vars = locals()
return tuple(vars[code] for code in format.lower())
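    # Illustrative example (a DXFVertex instance and its values are assumed):
    #     vertex.format('xyb')  # -> (x-coordinate, y-coordinate, bulge)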
def vertex_attribs(data: Sequence[float], format='xyseb') -> dict:
"""
Create VERTEX attributes from input data.
Format codes:
- ``x`` = x-coordinate
- ``y`` = y-coordinate
- ``s`` = start width
- ``e`` = end width
- ``b`` = bulge value
- ``v`` = (x, y [,z]) tuple (z-axis is ignored)
Args:
data: list or tuple of point components
format: format string, default is 'xyseb'
Returns:
dict with keys: 'location', 'bulge', 'start_width', 'end_width'
"""
attribs = dict()
format = [code for code in format.lower() if code in FORMAT_CODES]
location = Vector()
for code, value in zip(format, data):
if code not in FORMAT_CODES:
continue
if code == 'v':
location = Vector(value)
elif code == 'b':
attribs['bulge'] = value
elif code == 's':
attribs['start_width'] = value
elif code == 'e':
attribs['end_width'] = value
elif code == 'x':
location = location.replace(x=value)
elif code == 'y':
location = location.replace(y=value)
attribs['location'] = location
return attribs
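# Illustrative sketch (input values assumed): vertex_attribs((1.0, 2.0, 0.5), format='xyb')
# returns {'bulge': 0.5, 'location': Vector(1.0, 2.0, 0.0)}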
| 36.827455 | 122 | 0.617467 |
4f547b8aafac897880877dfac478ba4fb8a0bcb2 | 13,217 | py | Python | eland/common.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | null | null | null | eland/common.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | 1 | 2020-05-06T01:34:25.000Z | 2020-05-06T01:34:25.000Z | eland/common.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | 1 | 2020-05-06T01:31:18.000Z | 2020-05-06T01:31:18.000Z | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
import re
import warnings
from enum import Enum
from typing import Union, List, Tuple, cast, Callable, Any, Optional, Dict
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch # type: ignore
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
with warnings.catch_warnings():
warnings.simplefilter("ignore")
EMPTY_SERIES_DTYPE = pd.Series().dtype
def build_pd_series(
data: Dict[str, Any], dtype: Optional[np.dtype] = None, **kwargs: Any
) -> pd.Series:
"""Builds a pd.Series while squelching the warning
for unspecified dtype on empty series
"""
dtype = dtype or (EMPTY_SERIES_DTYPE if not data else dtype)
if dtype is not None:
kwargs["dtype"] = dtype
return pd.Series(data, **kwargs)
def docstring_parameter(*sub: Any) -> Callable[[Any], Any]:
def dec(obj: Any) -> Any:
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
class SortOrder(Enum):
ASC = 0
DESC = 1
@staticmethod
def reverse(order: "SortOrder") -> "SortOrder":
if order == SortOrder.ASC:
return SortOrder.DESC
return SortOrder.ASC
@staticmethod
def to_string(order: "SortOrder") -> str:
if order == SortOrder.ASC:
return "asc"
return "desc"
@staticmethod
def from_string(order: str) -> "SortOrder":
if order == "asc":
return SortOrder.ASC
return SortOrder.DESC
def elasticsearch_date_to_pandas_date(
value: Union[int, str], date_format: str
) -> pd.Timestamp:
"""
    Given a specific Elasticsearch date format and a value, parses the value with the
    matching `pandas.to_datetime` call and returns the resulting timestamp.
    Date formats: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats
Parameters
----------
value: Union[int, str]
The date value.
date_format: str
The Elasticsearch date format (ex. 'epoch_millis', 'epoch_second', etc.)
Returns
-------
datetime: pd.Timestamp
From https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
Date formats can be customised, but if no format is specified then it uses the default:
"strict_date_optional_time||epoch_millis"
Therefore if no format is specified we assume either strict_date_optional_time
or epoch_millis.
"""
if date_format is None:
try:
value = int(value)
return pd.to_datetime(value, unit="ms")
except ValueError:
return pd.to_datetime(value)
elif date_format == "epoch_millis":
return pd.to_datetime(value, unit="ms")
elif date_format == "epoch_second":
return pd.to_datetime(value, unit="s")
elif date_format == "strict_date_optional_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "basic_date":
return pd.to_datetime(value, format="%Y%m%d")
elif date_format == "basic_date_time":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S.%f", exact=False)
elif date_format == "basic_date_time_no_millis":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S%z")
elif date_format == "basic_ordinal_date":
return pd.to_datetime(value, format="%Y%j")
elif date_format == "basic_ordinal_date_time":
return pd.to_datetime(value, format="%Y%jT%H%M%S.%f%z", exact=False)
elif date_format == "basic_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y%jT%H%M%S%z")
elif date_format == "basic_time":
return pd.to_datetime(value, format="%H%M%S.%f%z", exact=False)
elif date_format == "basic_time_no_millis":
return pd.to_datetime(value, format="%H%M%S%z")
elif date_format == "basic_t_time":
return pd.to_datetime(value, format="T%H%M%S.%f%z", exact=False)
elif date_format == "basic_t_time_no_millis":
return pd.to_datetime(value, format="T%H%M%S%z")
elif date_format == "basic_week_date":
return pd.to_datetime(value, format="%GW%V%u")
elif date_format == "basic_week_date_time":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S.%f%z", exact=False)
elif date_format == "basic_week_date_time_no_millis":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S%z")
elif date_format == "strict_date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "strict_date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "strict_date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "strict_date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "strict_date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "strict_hour":
return pd.to_datetime(value, format="%H")
elif date_format == "hour":
return pd.to_datetime(value, format="%H")
elif date_format == "strict_hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "strict_hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "strict_hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "strict_ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "strict_time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "strict_t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "strict_week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "strict_week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "strict_weekyear" or date_format == "weekyear":
# TODO investigate if there is a way of converting this
raise NotImplementedError(
"strict_weekyear is not implemented due to support in pandas"
)
return pd.to_datetime(value, format="%G")
# Not supported in pandas
# ValueError: ISO year directive '%G' must be used with the ISO week directive '%V'
# and a weekday directive '%A', '%a', '%w', or '%u'.
elif date_format == "strict_weekyear_week" or date_format == "weekyear_week":
# TODO investigate if there is a way of converting this
raise NotImplementedError(
"strict_weekyear_week is not implemented due to support in pandas"
)
return pd.to_datetime(value, format="%G-W%V")
# Not supported in pandas
# ValueError: ISO year directive '%G' must be used with the ISO week directive '%V'
# and a weekday directive '%A', '%a', '%w', or '%u'.
elif date_format == "strict_weekyear_week_day":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "weekyear_week_day":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "strict_year":
return pd.to_datetime(value, format="%Y")
elif date_format == "year":
return pd.to_datetime(value, format="%Y")
elif date_format == "strict_year_month":
return pd.to_datetime(value, format="%Y-%m")
elif date_format == "year_month":
return pd.to_datetime(value, format="%Y-%m")
elif date_format == "strict_year_month_day":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "year_month_day":
return pd.to_datetime(value, format="%Y-%m-%d")
else:
warnings.warn(
f"The '{date_format}' format is not explicitly supported."
f"Using pandas.to_datetime(value) to parse value",
Warning,
)
# TODO investigate how we could generate this just once for a bulk read.
return pd.to_datetime(value)
def ensure_es_client(
es_client: Union[str, List[str], Tuple[str, ...], Elasticsearch]
) -> Elasticsearch:
if not isinstance(es_client, Elasticsearch):
es_client = Elasticsearch(es_client)
return es_client
def es_version(es_client: Elasticsearch) -> Tuple[int, int, int]:
"""Tags the current ES client with a cached '_eland_es_version'
property if one doesn't exist yet for the current Elasticsearch version.
"""
if not hasattr(es_client, "_eland_es_version"):
version_info = es_client.info()["version"]["number"]
match = re.match(r"^(\d+)\.(\d+)\.(\d+)", version_info)
if match is None:
raise ValueError(
f"Unable to determine Elasticsearch version. "
f"Received: {version_info}"
)
major, minor, patch = [int(x) for x in match.groups()]
es_client._eland_es_version = (major, minor, patch)
return cast(Tuple[int, int, int], es_client._eland_es_version)
| 44.501684 | 130 | 0.654914 |
4f5444f15f2bc168250805bcf469f1addde2ab22 | 3,906 | py | Python | web3/tools/pytest_ethereum/linker.py | realjohnward/web3.py | f54c66d9de687aacdb448c9e12a89fbbfdee18e1 | [
"MIT"
] | 1 | 2021-05-15T12:00:27.000Z | 2021-05-15T12:00:27.000Z | web3/tools/pytest_ethereum/linker.py | realjohnward/web3.py | f54c66d9de687aacdb448c9e12a89fbbfdee18e1 | [
"MIT"
] | null | null | null | web3/tools/pytest_ethereum/linker.py | realjohnward/web3.py | f54c66d9de687aacdb448c9e12a89fbbfdee18e1 | [
"MIT"
] | null | null | null | import logging
from typing import (
Any,
Callable,
Dict,
)
from eth_typing import (
ContractName,
)
from eth_utils import (
to_checksum_address,
to_hex,
)
from eth_utils.toolz import (
assoc_in,
curry,
pipe,
)
from ethpm import (
Package,
)
from ethpm.uri import (
create_latest_block_uri,
)
from web3.tools.pytest_ethereum._utils import (
create_deployment_data,
get_deployment_address,
insert_deployment,
)
from web3.tools.pytest_ethereum.exceptions import (
LinkerError,
)
logger = logging.getLogger("pytest_ethereum.linker")
def linker(*args: Callable[..., Any]) -> Callable[..., Any]:
return _linker(args)
@curry
def _linker(operations: Callable[..., Any], package: Package) -> Callable[..., Package]:
return pipe(package, *operations)
def deploy(
contract_name: str, *args: Any, transaction: Dict[str, Any] = None
) -> Callable[..., Package]:
"""
Return a newly created package and contract address.
Will deploy the given contract_name, if data exists in package. If
a deployment is found on the current w3 instance, it will return that deployment
rather than creating a new instance.
"""
return _deploy(contract_name, args, transaction)
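# Hypothetical usage sketch (contract names and constructor argument are assumed, not part of this module):
#     pipeline = linker(deploy("SafeSendLib"), link("Escrow", "SafeSendLib"), deploy("Escrow", escrow_arg))
#     new_package = pipeline(owner_package)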
@curry
def _deploy(
contract_name: ContractName, args: Any, transaction: Dict[str, Any], package: Package
) -> Package:
# Deploy new instance
factory = package.get_contract_factory(contract_name)
if not factory.linked_references and factory.unlinked_references:
raise LinkerError(
f"Contract factory: {contract_name} is missing runtime link references, which are "
"necessary to populate manifest deployments that have a link reference. If using the "
"builder tool, use `contract_type(..., runtime_bytecode=True)`."
)
tx_hash = factory.constructor(*args).transact(transaction)
tx_receipt = package.w3.eth.waitForTransactionReceipt(tx_hash)
# Create manifest copy with new deployment instance
latest_block_uri = create_latest_block_uri(package.w3, 0)
deployment_data = create_deployment_data(
contract_name,
to_checksum_address(tx_receipt["contractAddress"]),
tx_receipt,
factory.linked_references,
)
manifest = insert_deployment(
package, contract_name, deployment_data, latest_block_uri
)
logger.info("%s deployed." % contract_name)
return Package(manifest, package.w3)
@curry
def link(contract: ContractName, linked_type: str, package: Package) -> Package:
"""
Return a new package, created with a new manifest after applying the linked type
reference to the contract factory.
"""
deployment_address = get_deployment_address(linked_type, package)
unlinked_factory = package.get_contract_factory(contract)
if not unlinked_factory.needs_bytecode_linking:
raise LinkerError(
f"Contract factory: {unlinked_factory.__repr__()} does not need bytecode linking, "
"so it is not a valid contract type for link()"
)
linked_factory = unlinked_factory.link_bytecode({linked_type: deployment_address})
# todo replace runtime_bytecode in manifest
manifest = assoc_in(
package.manifest,
("contract_types", contract, "deployment_bytecode", "bytecode"),
to_hex(linked_factory.bytecode),
)
logger.info(
"%s linked to %s at address %s."
% (contract, linked_type, to_checksum_address(deployment_address))
)
return Package(manifest, package.w3)
@curry
def run_python(callback_fn: Callable[..., None], package: Package) -> Package:
"""
Return the unmodified package, after performing any user-defined callback function on
the contracts in the package.
"""
callback_fn(package)
logger.info("%s python function ran." % callback_fn.__name__)
return package
| 31.248 | 98 | 0.702765 |
4f57af08969a4fe12e30c646955944795674e95b | 7,511 | py | Python | web-component/python/admin_api/models/card_address.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | web-component/python/admin_api/models/card_address.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | web-component/python/admin_api/models/card_address.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Admin API
The Hydrogen Admin API # noqa: E501
OpenAPI spec version: 1.0.2
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from admin_api.configuration import Configuration
class CardAddress(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address_line1': 'str',
'address_line2': 'str',
'city': 'str',
'country': 'str',
'postalcode': 'str',
'state': 'str',
'type': 'str'
}
attribute_map = {
'address_line1': 'address_line1',
'address_line2': 'address_line2',
'city': 'city',
'country': 'country',
'postalcode': 'postalcode',
'state': 'state',
'type': 'type'
}
def __init__(self, address_line1=None, address_line2=None, city=None, country=None, postalcode=None, state=None, type=None, _configuration=None): # noqa: E501
"""CardAddress - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._address_line1 = None
self._address_line2 = None
self._city = None
self._country = None
self._postalcode = None
self._state = None
self._type = None
self.discriminator = None
self.address_line1 = address_line1
if address_line2 is not None:
self.address_line2 = address_line2
self.city = city
self.country = country
if postalcode is not None:
self.postalcode = postalcode
self.state = state
self.type = type
@property
def address_line1(self):
"""Gets the address_line1 of this CardAddress. # noqa: E501
addressLine1 # noqa: E501
:return: The address_line1 of this CardAddress. # noqa: E501
:rtype: str
"""
return self._address_line1
@address_line1.setter
def address_line1(self, address_line1):
"""Sets the address_line1 of this CardAddress.
addressLine1 # noqa: E501
:param address_line1: The address_line1 of this CardAddress. # noqa: E501
:type: str
"""
self._address_line1 = address_line1
@property
def address_line2(self):
"""Gets the address_line2 of this CardAddress. # noqa: E501
addressLine2 # noqa: E501
:return: The address_line2 of this CardAddress. # noqa: E501
:rtype: str
"""
return self._address_line2
@address_line2.setter
def address_line2(self, address_line2):
"""Sets the address_line2 of this CardAddress.
addressLine2 # noqa: E501
:param address_line2: The address_line2 of this CardAddress. # noqa: E501
:type: str
"""
self._address_line2 = address_line2
@property
def city(self):
"""Gets the city of this CardAddress. # noqa: E501
city # noqa: E501
:return: The city of this CardAddress. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this CardAddress.
city # noqa: E501
:param city: The city of this CardAddress. # noqa: E501
:type: str
"""
self._city = city
@property
def country(self):
"""Gets the country of this CardAddress. # noqa: E501
country # noqa: E501
:return: The country of this CardAddress. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this CardAddress.
country # noqa: E501
:param country: The country of this CardAddress. # noqa: E501
:type: str
"""
self._country = country
@property
def postalcode(self):
"""Gets the postalcode of this CardAddress. # noqa: E501
postalcode # noqa: E501
:return: The postalcode of this CardAddress. # noqa: E501
:rtype: str
"""
return self._postalcode
@postalcode.setter
def postalcode(self, postalcode):
"""Sets the postalcode of this CardAddress.
postalcode # noqa: E501
:param postalcode: The postalcode of this CardAddress. # noqa: E501
:type: str
"""
self._postalcode = postalcode
@property
def state(self):
"""Gets the state of this CardAddress. # noqa: E501
state # noqa: E501
:return: The state of this CardAddress. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this CardAddress.
state # noqa: E501
:param state: The state of this CardAddress. # noqa: E501
:type: str
"""
self._state = state
@property
def type(self):
"""Gets the type of this CardAddress. # noqa: E501
type # noqa: E501
:return: The type of this CardAddress. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this CardAddress.
type # noqa: E501
:param type: The type of this CardAddress. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CardAddress, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CardAddress):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CardAddress):
return True
return self.to_dict() != other.to_dict()
| 26.079861 | 163 | 0.566236 |
4f58bcb5ee763442e62db904b9f24925b689b885 | 2,237 | py | Python | pandas/tests/groupby/test_pipe.py | georgehaan/pandas | d5ba8c090475cc933b94139c3039d22f03194709 | [
"BSD-3-Clause"
] | 1 | 2019-11-01T08:44:40.000Z | 2019-11-01T08:44:40.000Z | pandas/tests/groupby/test_pipe.py | georgehaan/pandas | d5ba8c090475cc933b94139c3039d22f03194709 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/groupby/test_pipe.py | georgehaan/pandas | d5ba8c090475cc933b94139c3039d22f03194709 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
from pandas.core.api import Int64Index
def test_pipe():
# Test the pipe method of DataFrameGroupBy.
# Issue #17871
random_state = np.random.RandomState(1234567890)
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": random_state.randn(8),
"C": random_state.randn(8),
}
)
def f(dfgb):
return dfgb.B.max() - dfgb.C.min().min()
def square(srs):
return srs**2
# Note that the transformations are
# GroupBy -> Series
# Series -> Series
# This then chains the GroupBy.pipe and the
# NDFrame.pipe methods
result = df.groupby("A").pipe(f).pipe(square)
index = Index(["bar", "foo"], dtype="object", name="A")
expected = pd.Series([8.99110003361, 8.17516964785], name="B", index=index)
tm.assert_series_equal(expected, result)
def test_pipe_args():
# Test passing args to the pipe method of DataFrameGroupBy.
# Issue #17871
df = DataFrame(
{
"group": ["A", "A", "B", "B", "C"],
"x": [1.0, 2.0, 3.0, 2.0, 5.0],
"y": [10.0, 100.0, 1000.0, -100.0, -1000.0],
}
)
def f(dfgb, arg1):
return dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False).groupby(
dfgb.grouper
)
def g(dfgb, arg2):
msg = "The default value of numeric_only"
with tm.assert_produces_warning(FutureWarning, match=msg):
return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
result = df.groupby("group").pipe(f, 0).pipe(g, 10).pipe(h, 100)
# Assert the results here
index = Index(["A", "B", "C"], name="group")
expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index)
tm.assert_series_equal(expected, result)
# test SeriesGroupby.pipe
ser = pd.Series([1, 1, 2, 2, 3, 3])
result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
expected = pd.Series([4, 8, 12], index=Int64Index([1, 2, 3]))
tm.assert_series_equal(result, expected)
| 26.317647 | 82 | 0.57443 |
4f5845d2e16d48a159397b8b7953659435ee1d51 | 5,247 | py | Python | discovery-provider/integration_tests/tasks/test_generate_trending.py | RahulBansal123/audius-protocol | 963ecfe898efd52713602cff5fd1a012aaf3c9d3 | [
"Apache-2.0"
] | 1 | 2022-03-27T21:40:36.000Z | 2022-03-27T21:40:36.000Z | discovery-provider/integration_tests/tasks/test_generate_trending.py | RahulBansal123/audius-protocol | 963ecfe898efd52713602cff5fd1a012aaf3c9d3 | [
"Apache-2.0"
] | null | null | null | discovery-provider/integration_tests/tasks/test_generate_trending.py | RahulBansal123/audius-protocol | 963ecfe898efd52713602cff5fd1a012aaf3c9d3 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
from src.models import AggregatePlays, Block, Play, Track
from src.tasks.generate_trending import get_listen_counts
# Setup trending from simplified metadata
def setup_trending(db, date):
# Test data
# test tracks
# when creating tracks, track_id == index
test_tracks = [
{"genre": "Electronic"},
{"genre": "Pop"},
{"genre": "Electronic"},
# Tracks we don't want to count
{"genre": "Electronic", "is_unlisted": True},
{"genre": "Electronic", "is_delete": True},
]
test_plays = [
# Current Plays
{"item_id": 0},
{"item_id": 0},
{"item_id": 1},
{"item_id": 1},
{"item_id": 2},
{"item_id": 3},
# > 1 wk plays
{"item_id": 2, "created_at": date - timedelta(weeks=2)},
{"item_id": 2, "created_at": date - timedelta(weeks=2)},
{"item_id": 3, "created_at": date - timedelta(weeks=2)},
# We don't want to count these guys (tracks deleted/unlisted)
{"item_id": 3},
{"item_id": 3},
{"item_id": 4},
{"item_id": 4},
]
# pylint: disable=W0621
with db.scoped_session() as session:
# seed tracks + blocks
for i, track_meta in enumerate(test_tracks):
blockhash = hex(i)
block = Block(
blockhash=blockhash,
number=i,
parenthash="0x01",
is_current=True,
)
track = Track(
blockhash=blockhash,
blocknumber=i,
track_id=i,
is_current=track_meta.get("is_current", True),
is_delete=track_meta.get("is_delete", False),
owner_id=300,
route_id="",
track_segments=[],
genre=track_meta.get("genre", ""),
updated_at=track_meta.get("updated_at", date),
created_at=track_meta.get("created_at", date),
is_unlisted=track_meta.get("is_unlisted", False),
)
# add block and then flush before
# adding track, bc track.blocknumber foreign key
# references block
session.add(block)
session.flush()
session.add(track)
# seed plays
aggregate_plays = {}
for i, play_meta in enumerate(test_plays):
item_id = play_meta.get("item_id")
if item_id in aggregate_plays:
aggregate_plays[item_id] += 1
else:
aggregate_plays[item_id] = 1
play = Play(
id=i, play_item_id=item_id, created_at=play_meta.get("created_at", date)
)
session.add(play)
for i, count in aggregate_plays.items():
session.add(AggregatePlays(play_item_id=i, count=count))
# Helper to sort results before validating
def validate_results(actual, expected):
assert sorted(actual, key=lambda x: x["track_id"]) == sorted(
expected, key=lambda x: x["track_id"]
)
# Tests
def test_get_listen_counts_year(postgres_mock_db):
"""Happy path test: test that we get all valid listens from prior year"""
# setup
date = datetime.now()
setup_trending(postgres_mock_db, date)
# run
with postgres_mock_db.scoped_session() as session:
res = get_listen_counts(session, "year", None, 10, 0)
# validate
expected = [
{"track_id": 0, "listens": 2, "created_at": date},
{"track_id": 1, "listens": 2, "created_at": date},
{"track_id": 2, "listens": 3, "created_at": date},
]
validate_results(res, expected)
def test_get_listen_counts_week(postgres_mock_db):
"""Test slicing by time range"""
# setup
date = datetime.now()
setup_trending(postgres_mock_db, date)
# run
with postgres_mock_db.scoped_session() as session:
res = get_listen_counts(session, "week", None, 10, 0)
# validate
expected = [
{"track_id": 0, "listens": 2, "created_at": date},
{"track_id": 1, "listens": 2, "created_at": date},
{"track_id": 2, "listens": 1, "created_at": date},
]
validate_results(res, expected)
def test_get_listen_counts_genre_filtered(postgres_mock_db):
"""Test slicing by genre"""
# setup
date = datetime.now()
setup_trending(postgres_mock_db, date)
# run
with postgres_mock_db.scoped_session() as session:
res = get_listen_counts(session, "year", "Pop", 10, 0)
# validate
expected = [{"track_id": 1, "listens": 2, "created_at": date}]
validate_results(res, expected)
def test_get_listen_counts_all_time(postgres_mock_db):
"""Test slicing by genre"""
# setup
date = datetime.now()
setup_trending(postgres_mock_db, date)
# run
with postgres_mock_db.scoped_session() as session:
res = get_listen_counts(session, None, None, 10, 0)
# validate
expected = [
{"track_id": 0, "listens": 2, "created_at": date},
{"track_id": 1, "listens": 2, "created_at": date},
{"track_id": 2, "listens": 3, "created_at": date},
]
validate_results(res, expected)
| 30.505814 | 88 | 0.577854 |
4f59b93011c8a1c3f2a800f646f85f33f6c6eb6f | 4,039 | py | Python | tests/test_integration/test_index.py | vchrombie/opensearch-dsl-py | dce9d45ca2502c1ce7a0e60cb84c698a5aee7265 | [
"Apache-2.0"
] | 13 | 2021-10-16T13:11:57.000Z | 2022-02-11T19:13:05.000Z | tests/test_integration/test_index.py | vchrombie/opensearch-dsl-py | dce9d45ca2502c1ce7a0e60cb84c698a5aee7265 | [
"Apache-2.0"
] | 9 | 2021-10-15T18:40:15.000Z | 2022-03-23T21:56:29.000Z | tests/test_integration/test_index.py | vchrombie/opensearch-dsl-py | dce9d45ca2502c1ce7a0e60cb84c698a5aee7265 | [
"Apache-2.0"
] | 8 | 2021-10-30T13:21:29.000Z | 2022-03-29T20:14:40.000Z | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from opensearch_dsl import Date, Document, Index, IndexTemplate, Text, analysis
class Post(Document):
title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword"))
published_from = Date()
def test_index_template_works(write_client):
it = IndexTemplate("test-template", "test-*")
it.document(Post)
it.settings(number_of_replicas=0, number_of_shards=1)
it.save()
i = Index("test-blog")
i.create()
assert {
"test-blog": {
"mappings": {
"properties": {
"title": {"type": "text", "analyzer": "my_analyzer"},
"published_from": {"type": "date"},
}
}
}
} == write_client.indices.get_mapping(index="test-blog")
def test_index_can_be_saved_even_with_settings(write_client):
i = Index("test-blog", using=write_client)
i.settings(number_of_shards=3, number_of_replicas=0)
i.save()
i.settings(number_of_replicas=1)
i.save()
assert (
"1" == i.get_settings()["test-blog"]["settings"]["index"]["number_of_replicas"]
)
def test_index_exists(data_client):
assert Index("git").exists()
assert not Index("not-there").exists()
def test_index_can_be_created_with_settings_and_mappings(write_client):
i = Index("test-blog", using=write_client)
i.document(Post)
i.settings(number_of_replicas=0, number_of_shards=1)
i.create()
assert {
"test-blog": {
"mappings": {
"properties": {
"title": {"type": "text", "analyzer": "my_analyzer"},
"published_from": {"type": "date"},
}
}
}
} == write_client.indices.get_mapping(index="test-blog")
settings = write_client.indices.get_settings(index="test-blog")
assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0"
assert settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1"
assert settings["test-blog"]["settings"]["index"]["analysis"] == {
"analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}
}
def test_delete(write_client):
write_client.indices.create(
index="test-index",
body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}},
)
i = Index("test-index", using=write_client)
i.delete()
assert not write_client.indices.exists(index="test-index")
def test_multiple_indices_with_same_doc_type_work(write_client):
i1 = Index("test-index-1", using=write_client)
i2 = Index("test-index-2", using=write_client)
for i in (i1, i2):
i.document(Post)
i.create()
for i in ("test-index-1", "test-index-2"):
settings = write_client.indices.get_settings(index=i)
assert settings[i]["settings"]["index"]["analysis"] == {
"analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}
}
| 33.106557 | 87 | 0.650409 |
4f59f508c54172a2e1fb4462ec111d6d60388168 | 2,958 | py | Python | src/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py | mgrdv/openvino | 86495ceb0f1811da6220dc7a8922bc8f44b26e16 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | src/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py | mgrdv/openvino | 86495ceb0f1811da6220dc7a8922bc8f44b26e16 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | src/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py | mgrdv/openvino | 86495ceb0f1811da6220dc7a8922bc8f44b26e16 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import ngraph as ng
from ngraph.exceptions import UserInputError
from ngraph.utils.node_factory import NodeFactory
from _pyngraph import NodeFactory as _NodeFactory
def test_node_factory_add():
shape = [2, 2]
dtype = np.int8
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
factory = _NodeFactory("opset1")
arguments = NodeFactory._arguments_as_outputs([parameter_a, parameter_b])
node = factory.create("Add", arguments, {})
assert node.get_type_name() == "Add"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [2, 2]
def test_node_factory_wrapper_add():
shape = [2, 2]
dtype = np.int8
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
node = ng.add(parameter_a, parameter_b, name="TestNode")
assert node.get_type_name() == "Add"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [2, 2]
assert node.friendly_name == "TestNode"
def test_node_factory_topk():
dtype = np.int32
data = ng.parameter([2, 10], dtype=dtype, name="A")
k = ng.constant(3, dtype=dtype, name="B")
factory = _NodeFactory("opset1")
arguments = NodeFactory._arguments_as_outputs([data, k])
node = factory.create(
"TopK", arguments, {"axis": 1, "mode": "max", "sort": "value"}
)
attributes = node.get_attributes()
assert node.get_type_name() == "TopK"
assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [2, 3]
assert attributes["axis"] == 1
assert attributes["mode"] == "max"
assert attributes["sort"] == "value"
def test_node_factory_empty_topk():
factory = NodeFactory("opset1")
node = factory.create("TopK")
assert node.get_type_name() == "TopK"
def test_node_factory_empty_topk_with_args_and_attrs():
dtype = np.int32
data = ng.parameter([2, 10], dtype=dtype, name="A")
k = ng.constant(3, dtype=dtype, name="B")
factory = NodeFactory("opset1")
arguments = NodeFactory._arguments_as_outputs([data, k])
node = factory.create("TopK", None, None)
node.set_arguments(arguments)
node.set_attribute("axis", 1)
node.set_attribute("mode", "max")
node.set_attribute("sort", "value")
node.validate()
assert node.get_type_name() == "TopK"
assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [2, 3]
def test_node_factory_validate_missing_arguments():
factory = NodeFactory("opset1")
try:
factory.create(
"TopK", None, {"axis": 1, "mode": "max", "sort": "value"}
)
except UserInputError:
pass
else:
raise AssertionError("Validation of missing arguments has unexpectedly passed.")
| 31.136842 | 88 | 0.668695 |
4f5ab66e04f140a4b6edfc3c948dadabc0d55761 | 5,410 | py | Python | external/py3dpi/src/pydpi/drug/getmol.py | pgniewko/deep-toxin | fa61b06405749e5de7d74eedadb5de7c67981471 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T07:49:10.000Z | 2020-08-20T07:49:10.000Z | external/py3dpi/src/pydpi/drug/getmol.py | pgniewko/deep-toxin | fa61b06405749e5de7d74eedadb5de7c67981471 | [
"BSD-3-Clause"
] | null | null | null | external/py3dpi/src/pydpi/drug/getmol.py | pgniewko/deep-toxin | fa61b06405749e5de7d74eedadb5de7c67981471 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 20:13:08 2012
@author: orient
"""
import urllib.request, urllib.parse, urllib.error
import re
import string
import os
from rdkit import Chem
Version = 1.0
def ReadMolFromSDF(filename=""):
"""
Read a set of molecules by SDF file format.
Note: the output of this function is a set of molecular objects.
You need to use for statement to call each object.
Usage:
res=ReadMolFromSDF(filename)
Input: filename is a file name with path.
Output: res is a set of molecular object.
"""
molset = Chem.SDMolSupplier(filename)
return molset
def ReadMolFromMOL(filename=""):
"""
Read a molecule by mol file format.
Usage:
res=ReadMolFromMOL(filename)
Input: filename is a file name with path.
Output: res is a molecular object.
"""
mol = Chem.MolFromMolFile(filename)
return mol
def ReadMolFromSmile(smi=""):
"""
#################################################################
Read a molecule by SMILES string.
Usage:
res=ReadMolFromSmile(smi)
Input: smi is a SMILES string.
Output: res is a molecule object.
#################################################################
"""
mol = Chem.MolFromSmiles(smi.strip())
return mol
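# Example (SMILES string assumed): ReadMolFromSmile("CCO") returns an RDKit Mol object for ethanol.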
def ReadMolFromInchi(inchi=""):
"""
#################################################################
Read a molecule by Inchi string.
Usage:
res=ReadMolFromInchi(inchi)
Input: inchi is a InChi string.
Output: res is a molecule object.
#################################################################
"""
import pybel
temp = pybel.readstring("inchi", inchi)
smi = temp.write("smi")
mol = Chem.MolFromSmiles(smi.strip())
return mol
def ReadMolFromMol(filename=""):
"""
#################################################################
Read a molecule with mol file format.
Usage:
res=ReadMolFromMol(filename)
Input: filename is a file name.
Output: res is a molecule object.
#################################################################
"""
mol = Chem.MolFromMolFile(filename)
return mol
#############################################################################
def GetMolFromCAS(casid=""):
"""
Downloading the molecules from http://www.chemnet.com/cas/ by CAS ID (casid).
if you want to use this function, you must be install pybel.
"""
import pybel
casid = casid.strip()
localfile = urllib.request.urlopen(
"http://www.chemnet.com/cas/supplier.cgi?terms=" + casid + "&l=&exact=dict"
)
    # Decode response lines (urlopen returns bytes in Python 3) and provide a default result.
    temp = [line.decode("utf-8", errors="ignore") for line in localfile.readlines()]
    res = "None"
for i in temp:
if re.findall("InChI=", i) == ["InChI="]:
k = i.split(' <td align="left">')
kk = k[1].split("</td>\r\n")
if kk[0][0:5] == "InChI":
res = kk[0]
else:
res = "None"
localfile.close()
mol = pybel.readstring("inchi", res.strip())
smile = mol.write("smi")
return smile.strip()
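# Hypothetical usage (CAS number assumed): GetMolFromCAS("50-78-2") would return a SMILES string for
# aspirin; requires pybel and network access to chemnet.com.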
def GetMolFromEBI():
"""
"""
pass
def GetMolFromNCBI(cid=""):
"""
Downloading the molecules from http://pubchem.ncbi.nlm.nih.gov/ by cid (cid).
"""
cid = cid.strip()
localfile = urllib.request.urlopen(
"http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi?cid="
+ cid
+ "&disopt=SaveSDF"
)
temp = localfile.readlines()
f = file("temp.sdf", "w")
f.writelines(temp)
f.close()
localfile.close()
m = Chem.MolFromMolFile("temp.sdf")
os.remove("temp.sdf")
temp = Chem.MolToSmiles(m, isomericSmiles=True)
return temp
def GetMolFromDrugbank(dbid=""):
"""
Downloading the molecules from http://www.drugbank.ca/ by dbid (dbid).
"""
dbid = dbid.strip()
localfile = urllib.request.urlopen("http://www.drugbank.ca/drugs/" + dbid + ".sdf")
temp = localfile.readlines()
f = file("temp.sdf", "w")
f.writelines(temp)
f.close()
localfile.close()
m = Chem.MolFromMolFile("temp.sdf")
os.remove("temp.sdf")
temp = Chem.MolToSmiles(m, isomericSmiles=True)
return temp
def GetMolFromKegg(kid=""):
"""
Downloading the molecules from http://www.genome.jp/ by kegg id (kid).
"""
ID = str(kid)
localfile = urllib.request.urlopen(
"http://www.genome.jp/dbget-bin/www_bget?-f+m+drug+" + ID
)
temp = localfile.readlines()
f = file("temp.mol", "w")
f.writelines(temp)
f.close()
localfile.close()
m = Chem.MolFromMolFile("temp.mol")
os.remove("temp.mol")
temp = Chem.MolToSmiles(m, isomericSmiles=True)
return temp
#############################################################################
if __name__ == "__main__":
print("Downloading......")
temp = GetMolFromCAS(casid="50-12-4")
print(temp)
temp = GetMolFromNCBI(cid="2244")
print(temp)
temp = GetMolFromDrugbank(dbid="DB00133")
print(temp)
temp = GetMolFromKegg(kid="D02176")
print(temp)
temp = ReadMolFromSDF("drug.sdf")
print(temp)
| 24.151786 | 87 | 0.514603 |
4f5a072c4cd7e520eb9c2cf57361cbf744ba61d2 | 214 | py | Python | tests/app/conftest.py | ajmaddaford/eq-questionnaire-runner | 474d16c9a85cdda58419dec68e38537e244210ec | [
"MIT"
] | null | null | null | tests/app/conftest.py | ajmaddaford/eq-questionnaire-runner | 474d16c9a85cdda58419dec68e38537e244210ec | [
"MIT"
] | null | null | null | tests/app/conftest.py | ajmaddaford/eq-questionnaire-runner | 474d16c9a85cdda58419dec68e38537e244210ec | [
"MIT"
] | null | null | null | from pytest import fixture
from app.setup import create_app
@fixture
def app():
setting_overrides = {"LOGIN_DISABLED": True}
the_app = create_app(setting_overrides=setting_overrides)
return the_app
| 17.833333 | 61 | 0.761682 |
4f58c6741731049b304903e1523f670f3c6e6d1e | 8,903 | py | Python | ndn_hydra/client/main.py | UCLA-IRL/ndn-hydra | e01829253876ff6dbfd6dfaa92142eca08cd6705 | [
"Apache-2.0"
] | 2 | 2021-04-14T04:12:14.000Z | 2021-04-23T02:28:49.000Z | ndn_hydra/client/main.py | ZixuanZhong/hydra | e01829253876ff6dbfd6dfaa92142eca08cd6705 | [
"Apache-2.0"
] | 10 | 2021-07-03T18:29:26.000Z | 2022-02-01T04:11:20.000Z | ndn_hydra/client/main.py | ZixuanZhong/ndn-distributed-repo | e01829253876ff6dbfd6dfaa92142eca08cd6705 | [
"Apache-2.0"
] | null | null | null | # ----------------------------------------------------------
# NDN Hydra Client
# ----------------------------------------------------------
# @Project: NDN Hydra
# @Date: 2021-01-25
# @Authors: Please check AUTHORS.rst
# @Source-Code: https://github.com/UCLA-IRL/ndn-hydra
# @Documentation: https://ndn-hydra.readthedocs.io/
# @Pip-Library: https://pypi.org/project/ndn-hydra/
# ----------------------------------------------------------
import asyncio
from argparse import ArgumentParser, Namespace
import logging
from ndn.app import NDNApp
from ndn.encoding import Name, FormalName
import sys
import os
import pkg_resources
from ndn_hydra.client.functions import *
def parse_hydra_cmd_opts() -> Namespace:
def interpret_version() -> None:
set = True if "-v" in sys.argv else False
if set and (len(sys.argv)-1 < 2):
try: print("ndn-hydra " + pkg_resources.require("ndn-hydra")[0].version)
except pkg_resources.DistributionNotFound: print("ndn-hydra source,undetermined")
sys.exit(0)
def interpret_help() -> None:
set = True if "-h" in sys.argv else False
if set:
if (len(sys.argv)-1 < 2):
print("usage: ndn-hydra-client [-h] [-v] {insert,delete,fetch,query} ...")
print(" ndn-hydra-client: a client made specifically for hydra, the NDN distributed repo.")
print(" ('python3 ./examples/client.py' instead of 'ndn-hydra-client' if from source.)")
print("")
print("* informational args:")
print(" -h, --help | shows this help message and exits.")
print(" -v, --version | shows the current version and exits.")
print("")
print("* function 'insert':")
print(" usage: ndn-hydra-client insert -r REPO -f FILENAME -p PATH [-c COPIES]")
print(" required args:")
print(" -r, --repoprefix REPO | a proper name of the repo prefix.")
print(" -f, --filename FILENAME | a proper name for the input file.")
print(" -p, --path PATH | path of the file desired to be the input i.e. input path.")
print(" optional args:")
print(" -c, --copies COPIES | number of copies for files, default 2.")
print("")
print("* function 'delete':")
print(" usage: ndn-hydra-client delete -r REPO -f FILENAME")
print(" required args:")
print(" -r, --repoprefix REPO | a proper name of the repo prefix.")
print(" -f, --filename FILENAME | a proper name for selected file.")
print("")
print("* function 'fetch':")
print(" usage: ndn-hydra-client fetch -r REPO -f FILENAME [-p PATH]")
print(" required args:")
print(" -r, --repoprefix REPO | a proper name of the repo prefix.")
print(" -f, --filename FILENAME | a proper name for desired file.")
print(" optional args:")
print(" -p, --path PATH | path for the file to be placed i.e. output path.")
print("")
print("* function 'query':")
print(" usage: ndn-hydra-client query -r REPO -q QUERY [-s SESSIONID]")
print(" required args:")
print(" -r, --repoprefix REPO | a proper name of the repo prefix.")
print(" -q, --query QUERY | the type of query desired.")
print(" optional args:")
print(" -s, --sessionid SESSIONID | certain sessionid-node targeted for query, default closest node.")
print("")
print("Thank you for using hydra.")
sys.exit(0)
# Command Line Parser
parser = ArgumentParser(prog="ndn-hydra-client",add_help=False,allow_abbrev=False)
parser.add_argument("-h","--help",action="store_true",dest="help",default=False,required=False)
parser.add_argument("-v","--version",action="store_true",dest="version",default=False,required=False)
subparsers = parser.add_subparsers(dest="function",required=True)
# Define All Subparsers
insertsp = subparsers.add_parser('insert',add_help=False)
insertsp.add_argument("-r","--repoprefix",action="store",dest="repo",required=True)
insertsp.add_argument("-f","--filename",action="store",dest="filename",required=True)
insertsp.add_argument("-p","--path",action="store",dest="path",required=True)
insertsp.add_argument("-c","--copies",action="store",dest="copies",required=False,default=2,type=int,nargs=None)
deletesp = subparsers.add_parser('delete',add_help=False)
deletesp.add_argument("-r","--repoprefix",action="store",dest="repo",required=True)
deletesp.add_argument("-f","--filename",action="store",dest="filename",required=True)
fetchsp = subparsers.add_parser('fetch',add_help=False)
fetchsp.add_argument("-r","--repoprefix",action="store",dest="repo",required=True)
fetchsp.add_argument("-f","--filename",action="store",dest="filename",required=True)
fetchsp.add_argument("-p","--path",action="store",dest="path",default="./fetchedHydraFile", required=False)
querysp = subparsers.add_parser('query',add_help=False)
querysp.add_argument("-r","--repoprefix",action="store",dest="repo",required=True)
querysp.add_argument("-q","--query",action="store",dest="query",required=True)
querysp.add_argument("-s","--sessionid",action="store",dest="sessionid",default=None, required=False)
# Interpret Informational Arguments
interpret_version()
interpret_help()
# Getting all Arguments
vars = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
# Configure Arguments
if vars.function == "insert":
if not os.path.isfile(vars.path):
print('Error: path specified is not an actual file. Unable to insert.')
sys.exit()
if vars.copies < 2:
print('Error: insufficient number of copies, must be 2 or above.')
sys.exit()
return vars
class HydraClient():
def __init__(self, app: NDNApp, client_prefix: FormalName, repo_prefix: FormalName) -> None:
self.cinsert = HydraInsertClient(app, client_prefix, repo_prefix)
self.cdelete = HydraDeleteClient(app, client_prefix, repo_prefix)
self.cfetch = HydraFetchClient(app, client_prefix, repo_prefix)
self.cquery = HydraQueryClient(app, client_prefix, repo_prefix)
async def insert(self, file_name: FormalName, desired_copies: int, path: str) -> bool:
return await self.cinsert.insert_file(file_name, desired_copies, path);
async def delete(self, file_name: FormalName) -> bool:
return await self.cdelete.delete_file(file_name);
async def fetch(self, file_name: FormalName, local_filename: str = None, overwrite: bool = False) -> None:
return await self.cfetch.fetch_file(file_name, local_filename, overwrite)
async def query(self, query: Name, sid: str=None) -> None:
return await self.cquery.send_query(query, sid)
async def run_hydra_client(app: NDNApp, args: Namespace) -> None:
repo_prefix = Name.from_str(args.repo)
client_prefix = Name.from_str("/client")
filename = None
desired_copies = 2
if hasattr(args, 'copies'):
desired_copies = args.copies
client = HydraClient(app, client_prefix, repo_prefix)
if args.function != "query":
filename = Name.from_str(args.filename)
if args.function == "insert":
await client.insert(filename, desired_copies, args.path)
print("Client finished Insert Command!")
await asyncio.sleep(60)
elif args.function == "delete":
await client.delete(filename)
print("Client finished Delete Command!")
elif args.function == "fetch":
await client.fetch(filename, args.path, True)
print("Client finished Fetch Command!")
elif args.function == "query":
await client.query(Name.from_str(str(args.query)), args.sessionid)
print("Client finished Query Command!")
else:
print("Not Implemented Yet / Unknown Command.")
app.shutdown()
def main() -> None:
args = parse_hydra_cmd_opts()
app = NDNApp()
try:
app.run_forever(after_start=run_hydra_client(app, args))
except FileNotFoundError:
print('Error: could not connect to NFD.')
sys.exit()
if __name__ == "__main__":
sys.exit(main())
| 50.016854 | 128 | 0.592497 |
4f5a5b8d0db98a1d16dd0432f3a3a935311f976b | 49 | py | Python | libs/post_processor.py | ShinYoung-hwan/Word_Reader | 903bade2dd22a728a2a86b0dad5ad1b61f0d9823 | [
"MIT"
] | null | null | null | libs/post_processor.py | ShinYoung-hwan/Word_Reader | 903bade2dd22a728a2a86b0dad5ad1b61f0d9823 | [
"MIT"
] | null | null | null | libs/post_processor.py | ShinYoung-hwan/Word_Reader | 903bade2dd22a728a2a86b0dad5ad1b61f0d9823 | [
"MIT"
] | null | null | null |
def postprocessing(txt):
return txt.strip()
| 12.25 | 24 | 0.693878 |
4f56dc211a286604a421b8ca574e6ecfa9d634e5 | 205 | py | Python | maps/kartsndarts/kartsndarts.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | maps/kartsndarts/kartsndarts.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | maps/kartsndarts/kartsndarts.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | from map import Map
class KartsNDarts(Map):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = 'kartsndarts'
self.difficulty = 'intermediate'
| 20.5 | 41 | 0.62439 |
4f5a4794cb87b341b5f441962583592b4dd20882 | 10,987 | py | Python | theseus/meanteacher/trainer/semisup_trainer.py | kaylode/shrec22-pothole | 700d1632de686214e42a2f56aeaceab30c8b9a3f | [
"MIT"
] | 1 | 2022-03-19T11:52:53.000Z | 2022-03-19T11:52:53.000Z | theseus/meanteacher/trainer/semisup_trainer.py | kaylode/shrec22-pothole | 700d1632de686214e42a2f56aeaceab30c8b9a3f | [
"MIT"
] | null | null | null | theseus/meanteacher/trainer/semisup_trainer.py | kaylode/shrec22-pothole | 700d1632de686214e42a2f56aeaceab30c8b9a3f | [
"MIT"
] | 1 | 2022-03-19T11:53:10.000Z | 2022-03-19T11:53:10.000Z | from typing import List, Optional, Tuple
import torch
from torch.cuda import amp
import os
import time
import numpy as np
from tqdm import tqdm
from theseus.utilities.loggers.cp_logger import Checkpoint
from theseus.base.optimizers.scalers import NativeScaler
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger("main")
class SemiSupervisedTrainer(object):
"""Trainer for SemiSupervised tasks
"""
def __init__(self,
model,
suptrainloader,
unsuptrainloader,
valloader,
metrics,
optimizer,
scheduler,
save_dir: str = 'runs',
use_fp16: bool = False,
num_epochs: int = 100,
num_iter_per_epoch: int = 1000,
total_accumulate_steps: Optional[int] = None,
clip_grad: float = 10.0,
print_per_iter: int = 100,
save_per_iter: int = 100,
evaluate_per_epoch: int = 1,
visualize_when_val: bool = True,
best_value: float = 0.0,
                 resume: Optional[str] = None,
):
self.model = model
self.metrics = metrics
self.optimizer = optimizer
self.scheduler = scheduler
self.suptrainloader = suptrainloader
self.unsuptrainloader = unsuptrainloader
self.valloader = valloader
self.save_dir = save_dir
self.checkpoint = Checkpoint(os.path.join(self.save_dir, 'checkpoints'))
self.num_epochs = num_epochs
self.num_iter_per_epoch = num_iter_per_epoch
self.step_per_epoch = self.scheduler.step_per_epoch
self.use_amp = True if use_fp16 else False
self.scaler = NativeScaler() if use_fp16 else False
if total_accumulate_steps is None:
self.accumulate_steps = 1
else:
self.accumulate_steps = max(round(total_accumulate_steps / suptrainloader.batch_size), 1)
self.clip_grad = clip_grad
self.evaluate_per_epoch = evaluate_per_epoch
self.print_per_iter = print_per_iter
self.save_per_iter = save_per_iter
self.visualize_when_val = visualize_when_val
self.best_value = best_value
self.resume = resume
self.epoch = 0
self.iters = 0
self.start_iter = 0
def fit(self):
# Total number of training iterations
self.num_iters = (self.num_epochs+1) * self.num_iter_per_epoch
# On start callbacks
self.on_start()
# Init scheduler params
if self.step_per_epoch:
self.scheduler.last_epoch = self.epoch - 1
LOGGER.text(f'===========================START TRAINING=================================', level=LoggerObserver.INFO)
for epoch in range(self.epoch, self.num_epochs):
try:
# Save current epoch
self.epoch = epoch
# Start training
self.training_epoch()
self.on_training_end()
# Start evaluation
if self.evaluate_per_epoch != 0:
if epoch % self.evaluate_per_epoch == 0 and epoch+1 >= self.evaluate_per_epoch:
self.evaluate_epoch()
self.on_evaluate_end()
# On epoch end callbacks
self.on_epoch_end()
except KeyboardInterrupt:
break
# On training finish callbacks
self.on_finish()
LOGGER.text("Training Completed!", level=LoggerObserver.INFO)
def sanity_check(self):
raise NotImplementedError
def save_checkpoint(self):
raise NotImplementedError
def visualize_batch(self):
raise NotImplementedError
def on_start(self):
return
def on_training_end(self):
return
def on_evaluate_end(self):
return
def on_epoch_end(self):
if self.step_per_epoch:
self.scheduler.step()
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
LOGGER.log([{
'tag': 'Training/Learning rate',
'value': lr,
'type': LoggerObserver.SCALAR,
'kwargs': {
'step': self.epoch
}
}])
def on_finish(self):
self.save_checkpoint()
def training_epoch(self):
"""
Perform training one epoch
"""
self.model.train()
suptrainloader = iter(self.suptrainloader)
unsuptrainloader = iter(self.unsuptrainloader)
running_loss = {}
running_time = 0
self.optimizer.zero_grad()
for i in range(self.num_iter_per_epoch):
try:
sup_batch = suptrainloader.next()
except StopIteration as e:
suptrainloader = iter(self.suptrainloader)
sup_batch = suptrainloader.next()
try:
unsup_batch = unsuptrainloader.next()
except StopIteration as e:
unsuptrainloader = iter(self.unsuptrainloader)
unsup_batch = unsuptrainloader.next()
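            # The two try/except blocks above simply restart a dataloader once it is
            # exhausted, so the labelled and unlabelled streams may have different
            # lengths within a single epoch.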
start_time = time.time()
loss = 0
# Gradient scaler
with amp.autocast(enabled=self.use_amp):
outputs = self.model.training_step(sup_batch, unsup_batch, global_step=self.iters)
loss = outputs['loss']
loss_dict = outputs['loss_dict']
loss /= self.accumulate_steps
# Backward loss
self.scaler(loss, self.optimizer)
            if i % self.accumulate_steps == 0 or i == self.num_iter_per_epoch - 1:
self.scaler.step(self.optimizer, clip_grad=self.clip_grad, parameters=self.model.model_s.parameters())
if not self.step_per_epoch:
self.scheduler.step()
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
LOGGER.log([{
'tag': 'Training/Learning rate',
'value': lr,
'type': LoggerObserver.SCALAR,
'kwargs': {
'step': self.iters
}
}])
self.optimizer.zero_grad()
torch.cuda.synchronize()
end_time = time.time()
for (key,value) in loss_dict.items():
if key in running_loss.keys():
running_loss[key] += value
else:
running_loss[key] = value
running_time += end_time-start_time
# Calculate current iteration
self.iters = self.start_iter + self.num_iter_per_epoch*self.epoch + i + 1
# Logging
if self.iters % self.print_per_iter == 0:
for key in running_loss.keys():
running_loss[key] /= self.print_per_iter
running_loss[key] = np.round(running_loss[key], 5)
loss_string = '{}'.format(running_loss)[1:-1].replace("'",'').replace(",",' ||')
LOGGER.text(
"[{}|{}] [{}|{}] || {} || Time: {:10.4f}s".format(
self.epoch, self.num_epochs, self.iters,
self.num_iters,loss_string, running_time),
LoggerObserver.INFO)
log_dict = [{
'tag': f"Training/{k} Loss",
'value': v/self.print_per_iter,
'type': LoggerObserver.SCALAR,
'kwargs': {
'step': self.iters
}
} for k,v in running_loss.items()]
LOGGER.log(log_dict)
running_loss = {}
running_time = 0
# Saving checkpoint
if (self.iters % self.save_per_iter == 0 or self.iters == self.num_iters - 1):
LOGGER.text(f'Save model at [{self.iters}|{self.num_iters}] to last.pth', LoggerObserver.INFO)
self.save_checkpoint()
@torch.no_grad()
def evaluate_epoch(self):
"""
Perform validation one epoch
"""
self.model.eval()
epoch_loss = {}
metric_dict = {}
LOGGER.text('=============================EVALUATION===================================', LoggerObserver.INFO)
start_time = time.time()
# Gradient scaler
with amp.autocast(enabled=self.use_amp):
for batch in tqdm(self.valloader):
outputs = self.model.evaluate_step(batch, self.metrics)
loss_dict = outputs['loss_dict']
for (key,value) in loss_dict.items():
if key in epoch_loss.keys():
epoch_loss[key] += value
else:
epoch_loss[key] = value
end_time = time.time()
running_time = end_time - start_time
metric_dict = {}
for metric in self.metrics:
metric_dict.update(metric.value())
metric.reset()
# Logging
for key in epoch_loss.keys():
epoch_loss[key] /= len(self.valloader)
epoch_loss[key] = np.round(epoch_loss[key], 5)
loss_string = '{}'.format(epoch_loss)[1:-1].replace("'",'').replace(",",' ||')
LOGGER.text(
"[{}|{}] || {} || Time: {:10.4f} s".format(
self.epoch, self.num_epochs, loss_string, running_time),
level=LoggerObserver.INFO)
metric_string = ""
for metric, score in metric_dict.items():
if isinstance(score, (int, float)):
metric_string += metric +': ' + f"{score:.5f}" +' | '
metric_string +='\n'
LOGGER.text(metric_string, level=LoggerObserver.INFO)
LOGGER.text('==========================================================================', level=LoggerObserver.INFO)
log_dict = [{
'tag': f"Validation/{k} Loss",
'value': v/len(self.valloader),
'type': LoggerObserver.SCALAR,
'kwargs': {
'step': self.epoch
}
} for k,v in epoch_loss.items()]
log_dict += [{
'tag': f"Validation/{k}",
'value': v,
'kwargs': {
'step': self.epoch
}
} for k,v in metric_dict.items()]
LOGGER.log(log_dict)
# Hook function
self.check_best(metric_dict)
def check_best(self, metric_dict):
return | 33.910494 | 125 | 0.513789 |
4f52957f16a7480978d51ed986b9430591ee8117 | 20,708 | py | Python | koku/masu/database/aws_report_db_accessor.py | pavanyadavalli/koku | 88e2d679148d0e4735c5018faada638f73d4dc5c | [
"Apache-2.0"
] | 2 | 2022-01-12T03:42:39.000Z | 2022-01-12T03:42:40.000Z | koku/masu/database/aws_report_db_accessor.py | pavanyadavalli/koku | 88e2d679148d0e4735c5018faada638f73d4dc5c | [
"Apache-2.0"
] | null | null | null | koku/masu/database/aws_report_db_accessor.py | pavanyadavalli/koku | 88e2d679148d0e4735c5018faada638f73d4dc5c | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Database accessor for report data."""
import json
import logging
import pkgutil
import uuid
from dateutil.parser import parse
from django.db import connection
from django.db.models import F
from jinjasql import JinjaSql
from tenant_schemas.utils import schema_context
from masu.config import Config
from masu.database import AWS_CUR_TABLE_MAP
from masu.database.report_db_accessor_base import ReportDBAccessorBase
from masu.external.date_accessor import DateAccessor
from reporting.provider.aws.models import AWSCostEntry
from reporting.provider.aws.models import AWSCostEntryBill
from reporting.provider.aws.models import AWSCostEntryLineItem
from reporting.provider.aws.models import AWSCostEntryLineItemDaily
from reporting.provider.aws.models import AWSCostEntryLineItemDailySummary
from reporting.provider.aws.models import AWSCostEntryPricing
from reporting.provider.aws.models import AWSCostEntryProduct
from reporting.provider.aws.models import AWSCostEntryReservation
from reporting.provider.aws.models import PRESTO_LINE_ITEM_DAILY_TABLE
LOG = logging.getLogger(__name__)
class AWSReportDBAccessor(ReportDBAccessorBase):
"""Class to interact with customer reporting tables."""
def __init__(self, schema):
"""Establish the database connection.
Args:
schema (str): The customer schema to associate with
"""
super().__init__(schema)
self._datetime_format = Config.AWS_DATETIME_STR_FORMAT
self.date_accessor = DateAccessor()
self.jinja_sql = JinjaSql()
self._table_map = AWS_CUR_TABLE_MAP
@property
def line_item_daily_summary_table(self):
return AWSCostEntryLineItemDailySummary
@property
def line_item_table(self):
return AWSCostEntryLineItem
@property
def cost_entry_table(self):
return AWSCostEntry
@property
def line_item_daily_table(self):
return AWSCostEntryLineItemDaily
def get_cost_entry_bills(self):
"""Get all cost entry bill objects."""
table_name = AWSCostEntryBill
with schema_context(self.schema):
columns = ["id", "bill_type", "payer_account_id", "billing_period_start", "provider_id"]
bills = self._get_db_obj_query(table_name).values(*columns)
return {
(bill["bill_type"], bill["payer_account_id"], bill["billing_period_start"], bill["provider_id"]): bill[
"id"
]
for bill in bills
}
def get_cost_entry_bills_by_date(self, start_date):
"""Return a cost entry bill for the specified start date."""
table_name = AWSCostEntryBill
with schema_context(self.schema):
return self._get_db_obj_query(table_name).filter(billing_period_start=start_date)
def get_cost_entry_bills_query_by_provider(self, provider_uuid):
"""Return all cost entry bills for the specified provider."""
table_name = AWSCostEntryBill
with schema_context(self.schema):
return self._get_db_obj_query(table_name).filter(provider_id=provider_uuid)
def bills_for_provider_uuid(self, provider_uuid, start_date=None):
"""Return all cost entry bills for provider_uuid on date."""
bills = self.get_cost_entry_bills_query_by_provider(provider_uuid)
if start_date:
if isinstance(start_date, str):
start_date = parse(start_date)
bill_date = start_date.replace(day=1)
bills = bills.filter(billing_period_start=bill_date)
return bills
def get_bill_query_before_date(self, date, provider_uuid=None):
"""Get the cost entry bill objects with billing period before provided date."""
table_name = AWSCostEntryBill
with schema_context(self.schema):
base_query = self._get_db_obj_query(table_name)
if provider_uuid:
cost_entry_bill_query = base_query.filter(billing_period_start__lte=date, provider_id=provider_uuid)
else:
cost_entry_bill_query = base_query.filter(billing_period_start__lte=date)
return cost_entry_bill_query
def get_lineitem_query_for_billid(self, bill_id):
"""Get the AWS cost entry line item for a given bill query."""
table_name = AWSCostEntryLineItem
with schema_context(self.schema):
base_query = self._get_db_obj_query(table_name)
line_item_query = base_query.filter(cost_entry_bill_id=bill_id)
return line_item_query
def get_daily_query_for_billid(self, bill_id):
"""Get the AWS cost daily item for a given bill query."""
table_name = AWSCostEntryLineItemDaily
with schema_context(self.schema):
base_query = self._get_db_obj_query(table_name)
daily_item_query = base_query.filter(cost_entry_bill_id=bill_id)
return daily_item_query
def get_summary_query_for_billid(self, bill_id):
"""Get the AWS cost summary item for a given bill query."""
table_name = AWSCostEntryLineItemDailySummary
with schema_context(self.schema):
base_query = self._get_db_obj_query(table_name)
summary_item_query = base_query.filter(cost_entry_bill_id=bill_id)
return summary_item_query
def get_ocp_aws_summary_query_for_billid(self, bill_id):
"""Get the OCP-on-AWS report summary item for a given bill query."""
table_name = self._table_map["ocp_on_aws_daily_summary"]
base_query = self._get_db_obj_query(table_name)
summary_item_query = base_query.filter(cost_entry_bill_id=bill_id)
return summary_item_query
def get_ocp_aws_project_summary_query_for_billid(self, bill_id):
"""Get the OCP-on-AWS report project summary item for a given bill query."""
table_name = self._table_map["ocp_on_aws_project_daily_summary"]
base_query = self._get_db_obj_query(table_name)
summary_item_query = base_query.filter(cost_entry_bill_id=bill_id)
return summary_item_query
def get_cost_entry_query_for_billid(self, bill_id):
"""Get the AWS cost entry data for a given bill query."""
table_name = AWSCostEntry
with schema_context(self.schema):
base_query = self._get_db_obj_query(table_name)
line_item_query = base_query.filter(bill_id=bill_id)
return line_item_query
def get_cost_entries(self):
"""Make a mapping of cost entries by start time."""
table_name = AWSCostEntry
with schema_context(self.schema):
cost_entries = self._get_db_obj_query(table_name).all()
return {(ce.bill_id, ce.interval_start.strftime(self._datetime_format)): ce.id for ce in cost_entries}
def get_products(self):
"""Make a mapping of product sku to product objects."""
table_name = AWSCostEntryProduct
with schema_context(self.schema):
columns = ["id", "sku", "product_name", "region"]
products = self._get_db_obj_query(table_name, columns=columns).all()
return {
(product["sku"], product["product_name"], product["region"]): product["id"] for product in products
}
def get_pricing(self):
"""Make a mapping of pricing values string to pricing objects."""
table_name = AWSCostEntryPricing
with schema_context(self.schema):
pricing = self._get_db_obj_query(table_name).all()
return {f"{p.term}-{p.unit}": p.id for p in pricing}
def get_reservations(self):
"""Make a mapping of reservation ARN to reservation objects."""
table_name = AWSCostEntryReservation
with schema_context(self.schema):
columns = ["id", "reservation_arn"]
reservs = self._get_db_obj_query(table_name, columns=columns).all()
return {res["reservation_arn"]: res["id"] for res in reservs}
def populate_line_item_daily_table(self, start_date, end_date, bill_ids):
"""Populate the daily aggregate of line items table.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
bill_ids (list)
Returns
(None)
"""
table_name = self._table_map["line_item_daily"]
daily_sql = pkgutil.get_data("masu.database", "sql/reporting_awscostentrylineitem_daily.sql")
daily_sql = daily_sql.decode("utf-8")
daily_sql_params = {
"uuid": str(uuid.uuid4()).replace("-", "_"),
"start_date": start_date,
"end_date": end_date,
"bill_ids": bill_ids,
"schema": self.schema,
}
daily_sql, daily_sql_params = self.jinja_sql.prepare_query(daily_sql, daily_sql_params)
self._execute_raw_sql_query(table_name, daily_sql, start_date, end_date, bind_params=list(daily_sql_params))
def populate_line_item_daily_summary_table(self, start_date, end_date, bill_ids):
"""Populate the daily aggregated summary of line items table.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
Returns
(None)
"""
table_name = self._table_map["line_item_daily_summary"]
summary_sql = pkgutil.get_data("masu.database", "sql/reporting_awscostentrylineitem_daily_summary.sql")
summary_sql = summary_sql.decode("utf-8")
summary_sql_params = {
"uuid": str(uuid.uuid4()).replace("-", "_"),
"start_date": start_date,
"end_date": end_date,
"bill_ids": bill_ids,
"schema": self.schema,
}
summary_sql, summary_sql_params = self.jinja_sql.prepare_query(summary_sql, summary_sql_params)
self._execute_raw_sql_query(
table_name, summary_sql, start_date, end_date, bind_params=list(summary_sql_params)
)
def populate_line_item_daily_summary_table_presto(self, start_date, end_date, source_uuid, bill_id, markup_value):
"""Populate the daily aggregated summary of line items table.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
Returns
(None)
"""
summary_sql = pkgutil.get_data("masu.database", "presto_sql/reporting_awscostentrylineitem_daily_summary.sql")
summary_sql = summary_sql.decode("utf-8")
uuid_str = str(uuid.uuid4()).replace("-", "_")
summary_sql_params = {
"uuid": uuid_str,
"start_date": start_date,
"end_date": end_date,
"schema": self.schema,
"table": PRESTO_LINE_ITEM_DAILY_TABLE,
"source_uuid": source_uuid,
"year": start_date.strftime("%Y"),
"month": start_date.strftime("%m"),
"markup": markup_value if markup_value else 0,
"bill_id": bill_id,
}
summary_sql, summary_sql_params = self.jinja_sql.prepare_query(summary_sql, summary_sql_params)
LOG.info(f"Summary SQL: {str(summary_sql)}")
self._execute_presto_raw_sql_query(self.schema, summary_sql)
def mark_bill_as_finalized(self, bill_id):
"""Mark a bill in the database as finalized."""
table_name = AWSCostEntryBill
with schema_context(self.schema):
bill = self._get_db_obj_query(table_name).get(id=bill_id)
if bill.finalized_datetime is None:
bill.finalized_datetime = self.date_accessor.today_with_timezone("UTC")
bill.save()
def populate_tags_summary_table(self, bill_ids, start_date, end_date):
"""Populate the line item aggregated totals data table."""
table_name = self._table_map["tags_summary"]
agg_sql = pkgutil.get_data("masu.database", "sql/reporting_awstags_summary.sql")
agg_sql = agg_sql.decode("utf-8")
agg_sql_params = {"schema": self.schema, "bill_ids": bill_ids, "start_date": start_date, "end_date": end_date}
agg_sql, agg_sql_params = self.jinja_sql.prepare_query(agg_sql, agg_sql_params)
self._execute_raw_sql_query(table_name, agg_sql, bind_params=list(agg_sql_params))
def populate_ocp_on_aws_cost_daily_summary(self, start_date, end_date, cluster_id, bill_ids, markup_value):
"""Populate the daily cost aggregated summary for OCP on AWS.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
Returns
(None)
"""
table_name = self._table_map["ocp_on_aws_daily_summary"]
summary_sql = pkgutil.get_data("masu.database", "sql/reporting_ocpawscostlineitem_daily_summary.sql")
summary_sql = summary_sql.decode("utf-8")
summary_sql_params = {
"uuid": str(uuid.uuid4()).replace("-", "_"),
"start_date": start_date,
"end_date": end_date,
"bill_ids": bill_ids,
"cluster_id": cluster_id,
"schema": self.schema,
"markup": markup_value,
}
summary_sql, summary_sql_params = self.jinja_sql.prepare_query(summary_sql, summary_sql_params)
self._execute_raw_sql_query(
table_name, summary_sql, start_date, end_date, bind_params=list(summary_sql_params)
)
def populate_ocp_on_aws_cost_daily_summary_presto(
self, start_date, end_date, openshift_provider_uuid, aws_provider_uuid, report_period_id, bill_id, markup_value
):
"""Populate the daily cost aggregated summary for OCP on AWS.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
Returns
(None)
"""
summary_sql = pkgutil.get_data("masu.database", "presto_sql/reporting_ocpawscostlineitem_daily_summary.sql")
summary_sql = summary_sql.decode("utf-8")
summary_sql_params = {
"schema": self.schema,
"start_date": start_date,
"year": start_date.strftime("%Y"),
"month": start_date.strftime("%m"),
"end_date": end_date,
"aws_source_uuid": aws_provider_uuid,
"ocp_source_uuid": openshift_provider_uuid,
"bill_id": bill_id,
"report_period_id": report_period_id,
"markup": markup_value,
}
self._execute_presto_multipart_sql_query(self.schema, summary_sql, bind_params=summary_sql_params)
def back_populate_ocp_on_aws_daily_summary(self, start_date, end_date, report_period_id):
"""Populate the OCP on AWS and OCP daily summary tables. after populating the project table via trino."""
table_name = AWS_CUR_TABLE_MAP["ocp_on_aws_daily_summary"]
sql = pkgutil.get_data(
"masu.database", "sql/reporting_ocpawscostentrylineitem_daily_summary_back_populate.sql"
)
sql = sql.decode("utf-8")
sql_params = {
"schema": self.schema,
"start_date": start_date,
"end_date": end_date,
"report_period_id": report_period_id,
}
sql, sql_params = self.jinja_sql.prepare_query(sql, sql_params)
self._execute_raw_sql_query(table_name, sql, bind_params=list(sql_params))
def populate_ocp_on_aws_tags_summary_table(self, bill_ids, start_date, end_date):
"""Populate the line item aggregated totals data table."""
table_name = self._table_map["ocp_on_aws_tags_summary"]
agg_sql = pkgutil.get_data("masu.database", "sql/reporting_ocpawstags_summary.sql")
agg_sql = agg_sql.decode("utf-8")
agg_sql_params = {"schema": self.schema, "bill_ids": bill_ids, "start_date": start_date, "end_date": end_date}
agg_sql, agg_sql_params = self.jinja_sql.prepare_query(agg_sql, agg_sql_params)
self._execute_raw_sql_query(table_name, agg_sql, bind_params=list(agg_sql_params))
def populate_markup_cost(self, markup, start_date, end_date, bill_ids=None):
"""Set markup costs in the database."""
with schema_context(self.schema):
if bill_ids and start_date and end_date:
for bill_id in bill_ids:
AWSCostEntryLineItemDailySummary.objects.filter(
cost_entry_bill_id=bill_id, usage_start__gte=start_date, usage_start__lte=end_date
).update(markup_cost=(F("unblended_cost") * markup))
elif bill_ids:
for bill_id in bill_ids:
AWSCostEntryLineItemDailySummary.objects.filter(cost_entry_bill_id=bill_id).update(
markup_cost=(F("unblended_cost") * markup)
)
def populate_enabled_tag_keys(self, start_date, end_date, bill_ids):
"""Populate the enabled tag key table.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
bill_ids (list) A list of bill IDs.
Returns
(None)
"""
table_name = self._table_map["enabled_tag_keys"]
summary_sql = pkgutil.get_data("masu.database", "sql/reporting_awsenabledtagkeys.sql")
summary_sql = summary_sql.decode("utf-8")
summary_sql_params = {
"start_date": start_date,
"end_date": end_date,
"bill_ids": bill_ids,
"schema": self.schema,
}
summary_sql, summary_sql_params = self.jinja_sql.prepare_query(summary_sql, summary_sql_params)
self._execute_raw_sql_query(
table_name, summary_sql, start_date, end_date, bind_params=list(summary_sql_params)
)
def update_line_item_daily_summary_with_enabled_tags(self, start_date, end_date, bill_ids):
"""Populate the enabled tag key table.
Args:
start_date (datetime.date) The date to start populating the table.
end_date (datetime.date) The date to end on.
bill_ids (list) A list of bill IDs.
Returns
(None)
"""
table_name = self._table_map["line_item_daily_summary"]
summary_sql = pkgutil.get_data(
"masu.database", "sql/reporting_awscostentryline_item_daily_summary_update_enabled_tags.sql"
)
summary_sql = summary_sql.decode("utf-8")
summary_sql_params = {
"start_date": start_date,
"end_date": end_date,
"bill_ids": bill_ids,
"schema": self.schema,
}
summary_sql, summary_sql_params = self.jinja_sql.prepare_query(summary_sql, summary_sql_params)
self._execute_raw_sql_query(
table_name, summary_sql, start_date, end_date, bind_params=list(summary_sql_params)
)
def get_openshift_on_cloud_matched_tags(self, aws_bill_id, ocp_report_period_id):
"""Return a list of matched tags."""
sql = pkgutil.get_data("masu.database", "sql/reporting_ocpaws_matched_tags.sql")
sql = sql.decode("utf-8")
sql_params = {"bill_id": aws_bill_id, "report_period_id": ocp_report_period_id, "schema": self.schema}
sql, bind_params = self.jinja_sql.prepare_query(sql, sql_params)
with connection.cursor() as cursor:
cursor.db.set_schema(self.schema)
cursor.execute(sql, params=bind_params)
results = cursor.fetchall()
return [json.loads(result[0]) for result in results]
def get_openshift_on_cloud_matched_tags_trino(self, aws_source_uuid, ocp_source_uuid, start_date, end_date):
"""Return a list of matched tags."""
sql = pkgutil.get_data("masu.database", "presto_sql/reporting_ocpaws_matched_tags.sql")
sql = sql.decode("utf-8")
sql_params = {
"start_date": start_date,
"end_date": end_date,
"schema": self.schema,
"aws_source_uuid": aws_source_uuid,
"ocp_source_uuid": ocp_source_uuid,
"year": start_date.strftime("%Y"),
"month": start_date.strftime("%m"),
}
sql, sql_params = self.jinja_sql.prepare_query(sql, sql_params)
results = self._execute_presto_raw_sql_query(self.schema, sql, bind_params=sql_params)
return [json.loads(result[0]) for result in results]
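# Illustrative usage sketch (not part of the original module): the accessor is
# constructed with a tenant schema name, after which the query helpers above can
# be called directly, e.g.
#
#     accessor = AWSReportDBAccessor("acct10001")  # schema name is only an example
#     bills = accessor.bills_for_provider_uuid(provider_uuid, start_date="2021-06-01")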
| 43.322176 | 119 | 0.666844 |
4f54a89f01740f5fe20c38f240167b7a5de40bd6 | 5,440 | py | Python | mephisto/abstractions/test/utils.py | kushalchawla/Mephisto | 8a79a17d21dbfde029c1febe96847c8a668962d8 | [
"MIT"
] | 167 | 2020-06-15T21:25:19.000Z | 2022-03-31T20:58:19.000Z | mephisto/abstractions/test/utils.py | kushalchawla/Mephisto | 8a79a17d21dbfde029c1febe96847c8a668962d8 | [
"MIT"
] | 364 | 2020-06-22T17:02:18.000Z | 2022-03-31T23:40:35.000Z | mephisto/abstractions/test/utils.py | kushalchawla/Mephisto | 8a79a17d21dbfde029c1febe96847c8a668962d8 | [
"MIT"
] | 47 | 2020-07-26T18:03:21.000Z | 2022-03-31T07:46:45.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
from mephisto.abstractions.database import (
MephistoDB,
MephistoDBException,
EntryAlreadyExistsException,
EntryDoesNotExistException,
)
from mephisto.data_model.agent import Agent
from mephisto.data_model.unit import Unit
from mephisto.data_model.assignment import Assignment
from mephisto.data_model.task_config import TaskConfig
from mephisto.data_model.requester import Requester
from mephisto.data_model.task import Task
from mephisto.data_model.task_run import TaskRun
from omegaconf import OmegaConf
import json
from mephisto.abstractions.providers.mock.mock_provider import MockProviderArgs
from mephisto.abstractions.blueprints.mock.mock_blueprint import MockBlueprintArgs
from mephisto.abstractions.architects.mock_architect import MockArchitectArgs
from mephisto.data_model.task_config import TaskConfigArgs
from mephisto.operations.hydra_config import MephistoConfig
MOCK_TASK_ARGS = TaskConfigArgs(
task_title="title",
task_description="This is a description",
task_reward="0.3",
task_tags="1,2,3",
)
MOCK_PROVIDER_ARGS = MockProviderArgs()
MOCK_ARCHITECT_ARGS = MockArchitectArgs()
MOCK_BLUEPRINT_ARGS = MockBlueprintArgs()
MOCK_CONFIG = MephistoConfig(
provider=MOCK_PROVIDER_ARGS,
blueprint=MOCK_BLUEPRINT_ARGS,
architect=MOCK_ARCHITECT_ARGS,
task=MOCK_TASK_ARGS,
)
def get_test_project(db: MephistoDB) -> Tuple[str, str]:
"""Helper to create a project for tests"""
project_name = "test_project"
project_id = db.new_project(project_name)
return project_name, project_id
def get_test_worker(db: MephistoDB) -> Tuple[str, str]:
"""Helper to create a worker for tests"""
worker_name = "test_worker"
provider_type = "mock"
worker_id = db.new_worker(worker_name, provider_type)
return worker_name, worker_id
def get_test_requester(db: MephistoDB) -> Tuple[str, str]:
"""Helper to create a requester for tests"""
requester_name = "test_requester"
provider_type = "mock"
requester_id = db.new_requester(requester_name, provider_type)
return requester_name, requester_id
def get_test_task(db: MephistoDB) -> Tuple[str, str]:
"""Helper to create a task for tests"""
task_name = "test_task"
task_type = "mock"
task_id = db.new_task(task_name, task_type)
return task_name, task_id
def get_test_task_run(db: MephistoDB) -> str:
"""Helper to create a task run for tests"""
task_name, task_id = get_test_task(db)
requester_name, requester_id = get_test_requester(db)
init_params = OmegaConf.to_yaml(OmegaConf.structured(MOCK_CONFIG))
return db.new_task_run(
task_id, requester_id, json.dumps(init_params), "mock", "mock"
)
def get_test_assignment(db: MephistoDB) -> str:
"""Helper to create an assignment for tests"""
task_run_id = get_test_task_run(db)
task_run = TaskRun.get(db, task_run_id)
return db.new_assignment(
task_run.task_id,
task_run_id,
task_run.requester_id,
task_run.task_type,
task_run.provider_type,
)
def get_test_unit(db: MephistoDB, unit_index=0) -> str:
# Check creation and retrieval of a unit
assignment_id = get_test_assignment(db)
pay_amount = 15.0
assignment = Assignment.get(db, assignment_id)
return db.new_unit(
assignment.task_id,
assignment.task_run_id,
assignment.requester_id,
assignment.db_id,
0,
pay_amount,
assignment.provider_type,
assignment.task_type,
)
def get_test_agent(db: MephistoDB, unit_id=None) -> str:
# Check creation and retrieval of a agent
worker_name, worker_id = get_test_worker(db)
if unit_id is None:
unit_id = get_test_unit(db)
provider_type = "mock"
task_type = "mock"
unit = Unit.get(db, unit_id)
return db.new_agent(
worker_id,
unit.db_id,
unit.task_id,
unit.task_run_id,
unit.assignment_id,
unit.task_type,
unit.provider_type,
)
def make_completed_unit(db: MephistoDB) -> str:
"""
Creates a completed unit for the most recently created task run
    using some worker. Assumes that at least one worker and at least one
    incomplete task run already exist in the database.
"""
workers = db.find_workers()
assert len(workers) > 0, "Must have at least one worker in database"
worker = workers[-1]
task_runs = db.find_task_runs(is_completed=False)
assert len(task_runs) > 0, "Must be at least one incomplete task run"
task_run = task_runs[-1]
assign_id = db.new_assignment(
task_run.task_id,
task_run.db_id,
task_run.requester_id,
task_run.task_type,
task_run.provider_type,
)
unit_id = db.new_unit(
task_run.task_id,
task_run.db_id,
task_run.requester_id,
assign_id,
0,
0.2,
task_run.provider_type,
task_run.task_type,
)
agent_id = db.new_agent(
worker.db_id,
unit_id,
task_run.task_id,
task_run.db_id,
assign_id,
task_run.task_type,
task_run.provider_type,
)
agent = Agent.get(db, agent_id)
agent.mark_done()
unit = Unit.get(db, unit_id)
unit.sync_status()
return unit.db_id
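# Illustrative composition of the helpers above (a sketch, not part of the
# original test utilities): given a MephistoDB instance `db`,
#
#     unit_id = get_test_unit(db)             # builds task, run, assignment, unit
#     agent_id = get_test_agent(db, unit_id)  # adds a worker and an agent for it
#     completed_id = make_completed_unit(db)  # completes a unit on the latest run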
| 29.726776 | 82 | 0.706066 |
4f56f5c94eb97937d47c35b9e1b49de081ce321c | 2,603 | py | Python | .leetcode/98.validate-binary-search-tree.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/98.validate-binary-search-tree.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/98.validate-binary-search-tree.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | # @lc app=leetcode id=98 lang=python3
#
# [98] Validate Binary Search Tree
#
# https://leetcode.com/problems/validate-binary-search-tree/description/
#
# algorithms
# Medium (29.00%)
# Likes: 6068
# Dislikes: 691
# Total Accepted: 982.4K
# Total Submissions: 3.4M
# Testcase Example: '[2,1,3]'
#
# Given the root of a binary tree, determine if it is a valid binary search
# tree (BST).
#
# A valid BST is defined as follows:
#
#
# The left subtree of a node contains only nodes with keys less than the node's
# key.
# The right subtree of a node contains only nodes with keys greater than the
# node's key.
# Both the left and right subtrees must also be binary search trees.
#
#
#
# Example 1:
#
#
# Input: root = [2,1,3]
# Output: true
#
#
# Example 2:
#
#
# Input: root = [5,1,4,null,null,3,6]
# Output: false
# Explanation: The root node's value is 5 but its right child's value is 4.
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 10^4].
# -2^31 <= Node.val <= 2^31 - 1
#
#
#
# @lc tags=tree;depth-first-search
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Given a tree, determine whether it is a valid binary search tree.
# Solve it with a direct depth-first traversal, narrowing the allowed value range.
# In essence this is the same idea as a segment tree: each subtree is confined to an interval.
#
#
# @lc idea=end
# @lc group=depth-first-search
# @lc rank=10
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isValidBST(self, root: TreeNode) -> bool:
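        # recur(node, l, r) checks that every value in the subtree rooted at `node`
        # lies strictly inside the open interval (l, r); a bound of None means that
        # side is unbounded. The interval is narrowed as the recursion descends.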
def recur(root, l, r):
return not root \
or ((not root.left \
or ((root.left.val<root.val and (not l or l <root.left.val )) \
and recur(root.left ,l,root.val)))\
and (not root.right \
or ((root.right.val>root.val and (not r or r >root.right.val )) \
and recur(root.right,root.val,r))))
return recur(root, None, None)
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('root = [2,1,3]')
print('Output :')
print(str(Solution().isValidBST(listToTreeNode([2, 1, 3]))))
    print('Expected :')
print('true')
print()
print('Example 2:')
print('Input : ')
print('root = [5,1,4,null,null,3,6]')
print('Output :')
print(
str(Solution().isValidBST(listToTreeNode([5, 1, 4, None, None, 3,
6]))))
    print('Expected :')
print('false')
print()
pass
# @lc main=end | 21.336066 | 81 | 0.586247 |
4f5ad655007e9e7ca894b141eb6c5ba9c03def5c | 5,822 | py | Python | hutch_python/cli.py | ZryletTC/hutch-python | f0975c17ce3d773688af0dcc68fe57dc94b42518 | [
"BSD-3-Clause-LBNL"
] | null | null | null | hutch_python/cli.py | ZryletTC/hutch-python | f0975c17ce3d773688af0dcc68fe57dc94b42518 | [
"BSD-3-Clause-LBNL"
] | null | null | null | hutch_python/cli.py | ZryletTC/hutch-python | f0975c17ce3d773688af0dcc68fe57dc94b42518 | [
"BSD-3-Clause-LBNL"
] | null | null | null | """
This module defines the command-line interface arguments for the
``hutch-python`` script. It also provides utilities that are only used at
startup.
"""
import argparse
import logging
import os
from pathlib import Path
import IPython
from cookiecutter.main import cookiecutter
from IPython import start_ipython
from pcdsdaq.sim import set_sim_mode as set_daq_sim
from pcdsdevices.interface import set_engineering_mode
from traitlets.config import Config
from .constants import CONDA_BASE, DIR_MODULE
from .load_conf import load
from .log_setup import (debug_context, debug_mode, debug_wrapper,
set_console_level, setup_logging)
logger = logging.getLogger(__name__)
opts_cache = {}
# Define the parser
parser = argparse.ArgumentParser(prog='hutch-python',
description='Launch LCLS Hutch Python')
parser.add_argument('--cfg', required=False, default=None,
help='Configuration yaml file')
parser.add_argument('--exp', required=False, default=None,
help='Experiment number override')
parser.add_argument('--debug', action='store_true', default=False,
help='Start in debug mode')
parser.add_argument('--sim', action='store_true', default=False,
help='Run with simulated DAQ')
parser.add_argument('--create', action='store', default=False,
help='Create a new hutch deployment')
parser.add_argument('script', nargs='?',
help='Run a script instead of running interactively')
# Append to module docs
__doc__ += '\n::\n\n ' + parser.format_help().replace('\n', '\n ')
def configure_tab_completion(ipy_config):
"""
Disable Jedi and tweak IPython tab completion.
Parameters
----------
ipy_config : traitlets.config.Config
IPython configuration.
"""
# Old API for disabling Jedi. Keep in just in case API changes back.
ipy_config.InteractiveShellApp.Completer.use_jedi = False
# New API for disabling Jedi (two access points documented, use both)
ipy_config.Completer.use_jedi = False
ipy_config.IPCompleter.use_jedi = False
try:
# Monkeypatch IPython completion - we need it to respect __dir__
# when Jedi is disabled.
# Details: https://github.com/pcdshub/pcdsdevices/issues/709
# First, access it to see that the internals have not changed:
IPython.core.completer.dir2
except AttributeError:
logger.debug('Looks like the IPython API changed!')
else:
# Then monkeypatch it in:
IPython.core.completer.dir2 = dir
def configure_ipython_session():
"""
Configure a new IPython session.
Returns
-------
ipy_config : traitlets.config.Config
IPython configuration.
"""
ipy_config = Config()
# Important Utilities
ipy_config.InteractiveShellApp.extensions = [
'hutch_python.ipython_log',
'hutch_python.bug'
]
# Matplotlib setup if we have a screen
if os.getenv('DISPLAY'):
ipy_config.InteractiveShellApp.matplotlib = 'qt5'
else:
logger.warning('No DISPLAY environment variable detected. '
'Methods that create graphics will not '
'function properly.')
configure_tab_completion(ipy_config)
return ipy_config
def main():
"""
Do the full hutch-python launch sequence.
Parses the user's cli arguments and distributes them as needed to the
setup functions.
"""
# Parse the user's arguments
args = parser.parse_args()
# Set up logging first
if args.cfg is None:
log_dir = None
else:
log_dir = os.path.join(os.path.dirname(args.cfg), 'logs')
setup_logging(dir_logs=log_dir)
# Debug mode next
if args.debug:
debug_mode(True)
# Do the first log message, now that logging is ready
logger.debug('cli starting with args %s', args)
# Options that mean skipping the python environment
if args.create:
hutch = args.create
envs_dir = CONDA_BASE / 'envs'
if envs_dir.exists():
# Pick most recent pcds release in our common env
base = str(CONDA_BASE)
path_obj = sorted(envs_dir.glob('pcds-*'))[-1]
env = path_obj.name
else:
# Fallback: pick current env
base = str(Path(os.environ['CONDA_EXE']).parent.parent)
env = os.environ['CONDA_DEFAULT_ENV']
logger.info(('Creating hutch-python dir for hutch %s using'
' base=%s env=%s'), hutch, base, env)
cookiecutter(str(DIR_MODULE / 'cookiecutter'), no_input=True,
extra_context=dict(base=base, env=env, hutch=hutch))
return
# Now other flags
if args.sim:
set_daq_sim(True)
# Save whether we are an interactive session or a script session
opts_cache['script'] = args.script
# Load objects based on the configuration file
objs = load(cfg=args.cfg, args=args)
# Add cli debug tools
objs['debug_console_level'] = set_console_level
objs['debug_mode'] = debug_mode
objs['debug_context'] = debug_context
objs['debug_wrapper'] = debug_wrapper
# Turn engineering mode off by default and add to namespace
set_engineering_mode(False)
objs['set_engineering_mode'] = set_engineering_mode
script = opts_cache.get('script')
if script is None:
# Finally start the interactive session
start_ipython(argv=['--quick'], user_ns=objs,
config=configure_ipython_session())
else:
# Instead of setting up ipython, run the script with objs
with open(script) as fn:
code = compile(fn.read(), script, 'exec')
exec(code, objs, objs)
| 33.653179 | 73 | 0.654243 |
4f5a614b5f15590a8e93da047c5c9d41fcafca0a | 324 | py | Python | terrascript/template/d.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/template/d.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/template/d.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/template/d.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class template_cloudinit_config(terrascript.Data):
pass
class template_file(terrascript.Data):
pass
| 18 | 79 | 0.774691 |
4f584ba5ee3d8d5daf62ae2980a30b237c550f1d | 605 | py | Python | construct_vocab.py | soskek/captioning_chainer | 1c3c950d393598a8aa0bc3c6f65a391c548142e7 | [
"MIT"
] | 19 | 2017-10-12T07:16:35.000Z | 2020-10-20T23:40:45.000Z | construct_vocab.py | soskek/captioning_chainer | 1c3c950d393598a8aa0bc3c6f65a391c548142e7 | [
"MIT"
] | 2 | 2017-10-14T10:24:24.000Z | 2018-06-12T07:08:04.000Z | construct_vocab.py | soskek/captioning_chainer | 1c3c950d393598a8aa0bc3c6f65a391c548142e7 | [
"MIT"
] | 5 | 2017-10-14T15:50:36.000Z | 2018-06-12T06:58:38.000Z | import argparse
import json
import sys
import datasets
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', '-d', default='mscoco')
parser.add_argument('--threshold', '-t', type=int, default=5)
parser.add_argument('--out', '-o', default='vocab.txt')
args = parser.parse_args()
directory = datasets.get_default_dataset_path(args.dataset)
vocab, count = datasets.construct_vocab(
    directory, max_vocab_size=1e8, min_freq=args.threshold, with_count=True)
json.dump(vocab, open(args.out, 'w'))
json.dump(count, open(args.out + '.count', 'w'))
sys.stderr.write('# of words: {}\n'.format(len(vocab)))
| 28.809524 | 63 | 0.72562 |
4f52210888906f097319bee21405107f7f42424b | 1,515 | py | Python | examples/tutorial_classic/trafficlight.py | MISTCARRYYOU/PythonPDEVS | 53cad29832b3c489ab037bdc487affcbf1e3f408 | [
"Apache-2.0"
] | 1 | 2018-09-19T14:42:28.000Z | 2018-09-19T14:42:28.000Z | examples/tutorial_classic/trafficlight.py | capocchi/PythonPDEVS | 53cad29832b3c489ab037bdc487affcbf1e3f408 | [
"Apache-2.0"
] | null | null | null | examples/tutorial_classic/trafficlight.py | capocchi/PythonPDEVS | 53cad29832b3c489ab037bdc487affcbf1e3f408 | [
"Apache-2.0"
] | 1 | 2020-05-29T10:33:36.000Z | 2020-05-29T10:33:36.000Z | from pypdevs.DEVS import *
from pypdevs.infinity import INFINITY
class TrafficLight(AtomicDEVS):
def __init__(self):
AtomicDEVS.__init__(self, "Light")
self.state = "green"
self.elapsed = 0.0
self.observe = self.addOutPort("observer")
self.interrupt = self.addInPort("interrupt")
def intTransition(self):
state = self.state
return {"red": "green",
"yellow": "red",
"green": "yellow",
"going_manual": "manual",
"going_auto": "red"}[state]
def timeAdvance(self):
state = self.state
return {"red": 60,
"yellow": 3,
"green": 57,
"manual": INFINITY,
"going_manual": 0,
"going_auto": 0}[state]
def outputFnc(self):
state = self.state
if state == "red":
return {self.observe: "show_green"}
elif state == "yellow":
return {self.observe: "show_red"}
elif state == "green":
return {self.observe: "show_yellow"}
elif state == "going_manual":
return {self.observe: "turn_off"}
elif state == "going_auto":
return {self.observe: "show_red"}
def extTransition(self, inputs):
inp = inputs[self.interrupt]
if inp == "toManual":
return "going_manual"
elif inp == "toAuto":
if self.state == "manual":
return "going_auto"
| 30.918367 | 52 | 0.518812 |
4f5c4e6ea3d5c046f47312471ac46efd23afb3a9 | 316 | py | Python | scripts/merge_profiles.py | ajmaddaford/eq-questionnaire-runner | 474d16c9a85cdda58419dec68e38537e244210ec | [
"MIT"
] | 3 | 2020-09-28T13:21:21.000Z | 2021-05-05T14:14:51.000Z | scripts/merge_profiles.py | ajmaddaford/eq-questionnaire-runner | 474d16c9a85cdda58419dec68e38537e244210ec | [
"MIT"
] | 402 | 2019-11-06T17:23:03.000Z | 2022-03-31T16:03:35.000Z | scripts/merge_profiles.py | ajmaddaford/eq-questionnaire-runner | 474d16c9a85cdda58419dec68e38537e244210ec | [
"MIT"
] | 10 | 2020-03-03T14:23:27.000Z | 2022-01-31T12:21:21.000Z | #!/usr/bin/env python3
import pstats
from os import listdir
profiling_dir = "profiling/"
profiles = listdir(profiling_dir)
stats = None
for p in profiles:
if not stats:
stats = pstats.Stats(profiling_dir + p)
else:
stats.add(profiling_dir + p)
stats.dump_stats("combined_profile.prof")
| 16.631579 | 47 | 0.699367 |
4f595557acdaf8633b74f878601b6f688c7952b5 | 2,307 | py | Python | test/test_rayleigh.py | NuTufts/chroma_lartpc | ea6d1a62d22eeeaac069efdef1068a56be683fcc | [
"BSD-3-Clause"
] | null | null | null | test/test_rayleigh.py | NuTufts/chroma_lartpc | ea6d1a62d22eeeaac069efdef1068a56be683fcc | [
"BSD-3-Clause"
] | null | null | null | test/test_rayleigh.py | NuTufts/chroma_lartpc | ea6d1a62d22eeeaac069efdef1068a56be683fcc | [
"BSD-3-Clause"
] | null | null | null | from unittest_find import unittest
import numpy as np
from chroma.geometry import Solid, Geometry
from chroma.loader import create_geometry_from_obj
from chroma.make import box
from chroma.sim import Simulation
from chroma.demo.optics import water
from chroma.event import Photons
from chroma.rootimport import ROOT
ROOT.gROOT.SetBatch(1)
from chroma.histogram import Histogram, rootify
class TestRayleigh(unittest.TestCase):
def setUp(self):
self.cube = Geometry(water)
self.cube.add_solid(Solid(box(100,100,100), water, water))
self.geo = create_geometry_from_obj(self.cube, update_bvh_cache=False)
self.sim = Simulation(self.geo, geant4_processes=0)
nphotons = 100000
pos = np.tile([0,0,0], (nphotons,1)).astype(np.float32)
dir = np.tile([0,0,1], (nphotons,1)).astype(np.float32)
pol = np.zeros_like(pos)
phi = np.random.uniform(0, 2*np.pi, nphotons).astype(np.float32)
pol[:,0] = np.cos(phi)
pol[:,1] = np.sin(phi)
t = np.zeros(nphotons, dtype=np.float32)
wavelengths = np.empty(nphotons, np.float32)
wavelengths.fill(400.0)
self.photons = Photons(pos=pos, dir=dir, pol=pol, t=t, wavelengths=wavelengths)
def testAngularDistributionPolarized(self):
# Fully polarized photons
self.photons.pol[:] = [1.0, 0.0, 0.0]
photons_end = self.sim.simulate([self.photons], keep_photons_end=True, max_steps=1).next().photons_end
aborted = (photons_end.flags & (1 << 31)) > 0
self.assertFalse(aborted.any())
# Compute the dot product between initial and final dir
rayleigh_scatters = (photons_end.flags & (1 << 4)) > 0
cos_scatter = (self.photons.dir[rayleigh_scatters] * photons_end.dir[rayleigh_scatters]).sum(axis=1)
theta_scatter = np.arccos(cos_scatter)
h = Histogram(bins=100, range=(0, np.pi))
h.fill(theta_scatter)
h = rootify(h)
# The functional form for polarized light should be
# (1 + \cos^2 \theta)\sin \theta according to GEANT4 physics
# reference manual.
f = ROOT.TF1("pol_func", "[0]*(1+cos(x)**2)*sin(x)", 0, np.pi)
h.Fit(f, 'NQ')
self.assertGreater(f.GetProb(), 1e-3)
if __name__ == "__main__":
unittest.main()
| 37.819672 | 110 | 0.657564 |
4f5a3d77fbce6ea96076b14a4a8878b85e8841bb | 3,960 | py | Python | salt/utils/winservice.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | salt/utils/winservice.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | salt/utils/winservice.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# winservice.py
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
from os.path import splitext, abspath
from sys import modules
# Import third party libs
try:
import win32serviceutil
import win32service
import win32event
import win32api
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
'''
Only load if Win32 Libraries are installed
'''
if not HAS_WIN32:
return False, 'This utility requires pywin32'
return 'winservice'
def service(instantiated=True):
'''
Helper function to return an instance of the ServiceFramework class
Args:
instantiated (bool): True to return an instantiated object, False to
return the object definition. Use False if inherited by another
class. Default is True.
Returns:
class: An instance of the ServiceFramework class
'''
if not HAS_WIN32:
return
class Service(win32serviceutil.ServiceFramework):
_svc_name_ = '_unNamed'
_svc_display_name_ = '_Service Template'
def __init__(self, *args):
win32serviceutil.ServiceFramework.__init__(self, *args)
self.log('init')
self.stop_event = win32event.CreateEvent(None, 0, 0, None)
def log(self, msg):
import servicemanager
servicemanager.LogInfoMsg(str(msg))
def sleep(self, sec):
win32api.Sleep(sec * 1000, True)
def SvcDoRun(self): # pylint: disable=C0103
self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
try:
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.log('start')
self.start()
self.log('wait')
win32event.WaitForSingleObject(self.stop_event,
win32event.INFINITE)
self.log('done')
except Exception as err:
self.log('Exception: {0}'.format(err))
self.SvcStop()
def SvcStop(self): # pylint: disable=C0103
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.log('stopping')
self.stop()
self.log('stopped')
win32event.SetEvent(self.stop_event)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
# to be overridden
def start(self):
pass
# to be overridden
def stop(self):
pass
return Service() if instantiated else Service
def instart(cls, name, display_name=None, stay_alive=True):
'''Install and Start (auto) a Service
cls : the class (derived from Service) that implement the Service
name : Service name
display_name : the name displayed in the service manager
stay_alive : Service will stop on logout if False
'''
cls._svc_name_ = name
cls._svc_display_name_ = display_name or name
try:
module_path = modules[cls.__module__].__file__
except AttributeError:
# maybe py2exe went by
from sys import executable
module_path = executable
module_file = splitext(abspath(module_path))[0]
cls._svc_reg_class_ = '{0}.{1}'.format(module_file, cls.__name__)
if stay_alive:
win32api.SetConsoleCtrlHandler(lambda x: True, True)
try:
win32serviceutil.InstallService(
cls._svc_reg_class_,
cls._svc_name_,
cls._svc_display_name_,
startType=win32service.SERVICE_AUTO_START
)
print('Install ok')
win32serviceutil.StartService(
cls._svc_name_
)
print('Start ok')
except Exception as err:
print(str(err))
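# Illustrative usage sketch (an assumption, not part of this module): subclass the
# framework returned by service(instantiated=False), override start/stop, then
# install and launch it with instart(), e.g.
#
#     class MyService(service(instantiated=False)):
#         def start(self):
#             self.log('doing work')
#         def stop(self):
#             self.log('cleaning up')
#
#     instart(MyService, 'my-svc', 'My Example Service')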
| 30 | 76 | 0.62096 |
4f5ce4c402000a348f7784cccdd2a21805b1448b | 291 | py | Python | scrapy/contrib/pipeline/images.py | nfunato/scrapy | 9d3ded5f2202e1b933e3f38671b114bb0ea238ce | [
"BSD-3-Clause"
] | 22 | 2018-03-13T13:51:41.000Z | 2022-02-19T07:27:48.000Z | scrapy/contrib/pipeline/images.py | nfunato/scrapy | 9d3ded5f2202e1b933e3f38671b114bb0ea238ce | [
"BSD-3-Clause"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scrapy/contrib/pipeline/images.py | nfunato/scrapy | 9d3ded5f2202e1b933e3f38671b114bb0ea238ce | [
"BSD-3-Clause"
] | 6 | 2017-12-28T03:59:54.000Z | 2020-02-26T16:01:45.000Z | import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.pipeline.images` is deprecated, "
"use `scrapy.pipelines.images` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.pipelines.images import *
| 36.375 | 71 | 0.749141 |
4f57b5d29d3a00c1ce89cb19bd61e6433daa1175 | 1,294 | py | Python | e2e/scripts/st_text_area.py | datablets/streamlit_unbranded | 2ae5095d5f0b6bda9d32c92adb21d3fa645f285c | [
"Apache-2.0"
] | 3 | 2020-03-05T07:32:42.000Z | 2021-02-11T17:56:10.000Z | e2e/scripts/st_text_area.py | datablets/streamlit_unbranded | 2ae5095d5f0b6bda9d32c92adb21d3fa645f285c | [
"Apache-2.0"
] | 148 | 2020-10-19T20:16:32.000Z | 2022-03-31T03:34:25.000Z | e2e/scripts/st_text_area.py | datablets/streamlit_unbranded | 2ae5095d5f0b6bda9d32c92adb21d3fa645f285c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
i1 = st.text_area("text area 1")
st.write('value 1: "', i1, '"')
i2 = st.text_area("text area 2", "default text")
st.write('value 2: "', i2, '"')
i3 = st.text_area("text area 3", 1234)
st.write('value 3: "', i3, '"')
i4 = st.text_area("text area 4", None)
st.write('value 4: "', i4, '"')
i5 = st.text_area("text area 5", max_chars=10)
st.write('value 5: "', i5, '"')
if st._is_running_with_streamlit:
def on_change():
st.session_state.text_area_changed = True
st.text_area("text area 6", key="text_area6", on_change=on_change)
st.write('value 6: "', st.session_state.text_area6, '"')
st.write("text area changed:", "text_area_changed" in st.session_state)
| 32.35 | 75 | 0.697063 |
4f5cdae55642a7acfbdbcbebafa702f476aeff70 | 2,048 | py | Python | delfin/drivers/dell_emc/vnx/vnx_block/navicli_client.py | noelmcloughlin/delfin | 6dfa9bdb86d850410c82201f6fa621b4e5ea2917 | [
"Apache-2.0"
] | null | null | null | delfin/drivers/dell_emc/vnx/vnx_block/navicli_client.py | noelmcloughlin/delfin | 6dfa9bdb86d850410c82201f6fa621b4e5ea2917 | [
"Apache-2.0"
] | 1 | 2020-11-25T08:51:18.000Z | 2020-11-25T08:51:18.000Z | delfin/drivers/dell_emc/vnx/vnx_block/navicli_client.py | noelmcloughlin/delfin | 6dfa9bdb86d850410c82201f6fa621b4e5ea2917 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen, PIPE
from oslo_log import log as logging
from delfin.drivers.dell_emc.vnx.vnx_block import consts
LOG = logging.getLogger(__name__)
class NaviClient(object):
@staticmethod
def exec(command_str, stdin_value=None):
"""execute command_str using Popen
:param command_str: should be list type
:param stdin_value: same as stdin of Popen
        :return: the command's stdout, decoded and stripped
"""
result = None
p = Popen(command_str, stdin=PIPE, stdout=PIPE, stderr=PIPE,
shell=False)
"""Call method when input is needed"""
if stdin_value:
out, err = p.communicate(
input=bytes(stdin_value, encoding='utf-8'))
else:
"""Call method when no input is required"""
out = p.stdout.read()
if isinstance(out, bytes):
out = out.decode("utf-8")
result = out.strip()
if result:
"""
Determine whether an exception occurs according
to the returned information
"""
for exception_key in consts.EXCEPTION_MAP.keys():
if stdin_value is None or stdin_value == consts.CER_STORE:
if exception_key == consts.CER_ERR:
continue
if exception_key in result:
raise consts.EXCEPTION_MAP.get(exception_key)(result)
return result
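# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how the wrapper above is
# typically invoked. The binary path, array address and credentials are
# hypothetical; the command must be a list, never a single shell string.
def _example_getagent():  # pragma: no cover
    cmd = ['/opt/Navisphere/bin/naviseccli', '-h', '192.0.2.10',
           '-user', 'admin', '-password', 'secret', '-scope', '0', 'getagent']
    return NaviClient.exec(cmd)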
| 35.929825 | 74 | 0.63623 |
4f5d569a88b256602cb8f12613bb11fd9f555bd6 | 537 | py | Python | xlwings/mistune/plugins/__init__.py | Fervide/xlwings | aa9e187f18d95c0737fc48464aca3e8b4058e985 | [
"BSD-3-Clause"
] | 1,060 | 2019-04-03T08:01:38.000Z | 2022-03-31T02:17:03.000Z | mistune/plugins/__init__.py | drmingdrmer/mistune | 5f305d703aa871ab636007deee1e8cda0c9a562b | [
"BSD-3-Clause"
] | 832 | 2019-04-02T21:16:51.000Z | 2022-03-30T12:24:25.000Z | mistune/plugins/__init__.py | drmingdrmer/mistune | 5f305d703aa871ab636007deee1e8cda0c9a562b | [
"BSD-3-Clause"
] | 207 | 2019-04-03T03:44:02.000Z | 2022-03-29T17:41:16.000Z | from .def_list import plugin_def_list
from .extra import plugin_strikethrough, plugin_url
from .footnotes import plugin_footnotes
from .table import plugin_table
from .task_lists import plugin_task_lists
PLUGINS = {
"url": plugin_url,
"strikethrough": plugin_strikethrough,
"footnotes": plugin_footnotes,
"table": plugin_table,
"task_lists": plugin_task_lists,
"def_list": plugin_def_list,
}
__all__ = [
"PLUGINS",
"plugin_url",
"plugin_strikethrough",
"plugin_footnotes",
"plugin_table",
]
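# ---------------------------------------------------------------------------
# Editor's sketch (assumption: the top-level package of this vendored copy
# exposes mistune 2.x's ``create_markdown``). Plugins are enabled by the names
# registered in ``PLUGINS`` above, e.g.:
#     import mistune
#     md = mistune.create_markdown(plugins=["strikethrough", "table", "url"])
#     html = md("~~strike~~ and a bare link: https://example.com")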
| 23.347826 | 51 | 0.733706 |
4f5d049800791922b9d88b250f0c59b8045f53cf | 79 | py | Python | prompt.py | Flux9665/TTS_Corpus_Creator | 501b11b7a349762632d3d7246a461f345d42156b | [
"Apache-2.0"
] | 1 | 2021-08-09T15:55:29.000Z | 2021-08-09T15:55:29.000Z | prompt.py | Flux9665/TTSCorpusCreator | 20e7957220eac78f292d784658dc3d92117608af | [
"Apache-2.0"
] | null | null | null | prompt.py | Flux9665/TTSCorpusCreator | 20e7957220eac78f292d784658dc3d92117608af | [
"Apache-2.0"
] | null | null | null | from Prompter import Prompter
if __name__ == '__main__':
Prompter().run()
| 15.8 | 29 | 0.696203 |
4f589f8ec0c1f7cbfb3af98a95f7fdc9a68caca1 | 4,156 | py | Python | ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py | alexryndin/ambari | 478eeb02ebecef1f7f0506885a041d2070d2bccb | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py | alexryndin/ambari | 478eeb02ebecef1f7f0506885a041d2070d2bccb | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py | alexryndin/ambari | 478eeb02ebecef1f7f0506885a041d2070d2bccb | [
"Apache-2.0"
] | null | null | null | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_stack_version
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
dfs_type = default("/commandParams/dfs_type", "")
is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
sudo = AMBARI_SUDO_BINARY
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
# current host stack version
current_version = default("/hostLevelParams/current_version", None)
# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
# IOP 4.0+ params
if Script.is_stack_greater_or_equal("4.0"):
mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
# not supported in IOP 4.0+
hadoop_conf_empty_dir = None
versioned_stack_root = '/usr/iop/current'
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#java params
java_home = config['hostLevelParams']['java_home']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#users and groups
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
user_group = config['configurations']['cluster-env']['user_group']
namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_host) == 0
if has_namenode or dfs_type == 'HCFS':
hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
upgrade_suspended = default("/roleParams/upgrade_suspended", False)
| 40.745098 | 111 | 0.806545 |
4f5957752ed75989a78592cf50b9ecdf3d36ce91 | 4,369 | py | Python | src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py | PurityFan/nni | 899c71c4f755d62c4bc87370ec240a100398fa7c | [
"MIT"
] | 3 | 2019-01-27T02:00:46.000Z | 2019-01-27T02:07:04.000Z | src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py | PurityFan/nni | 899c71c4f755d62c4bc87370ec240a100398fa7c | [
"MIT"
] | 4 | 2022-02-10T06:23:52.000Z | 2022-03-08T23:37:29.000Z | src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py | PurityFan/nni | 899c71c4f755d62c4bc87370ec240a100398fa7c | [
"MIT"
] | 1 | 2019-04-25T08:46:01.000Z | 2019-04-25T08:46:01.000Z | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
from nni.assessor import Assessor, AssessResult
logger = logging.getLogger('medianstop_Assessor')
class MedianstopAssessor(Assessor):
'''
    MedianstopAssessor implements the median stopping rule: it stops a pending trial X at step S
    if the trial's best objective value by step S is strictly worse than the median value
    of the running averages of all completed trials' objectives reported up to step S.
'''
def __init__(self, optimize_mode='maximize', start_step=0):
self.start_step = start_step
self.running_history = dict()
self.completed_avg_history = dict()
if optimize_mode == 'maximize':
self.high_better = True
elif optimize_mode == 'minimize':
self.high_better = False
else:
self.high_better = True
            logger.warning('unrecognized optimize_mode: %s', optimize_mode)
def _update_data(self, trial_job_id, trial_history):
if trial_job_id not in self.running_history:
self.running_history[trial_job_id] = []
self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):])
def trial_end(self, trial_job_id, success):
'''
        Record the running averages of a successfully completed trial and drop it from the running history.
'''
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
            logger.warning('trial_end: trial_job_id is not in running_history')
def assess_trial(self, trial_job_id, trial_history):
'''
        Decide whether the trial should continue (Good) or be stopped early (Bad), given its intermediate results so far.
'''
curr_step = len(trial_history)
if curr_step < self.start_step:
return AssessResult.Good
try:
num_trial_history = [float(ele) for ele in trial_history]
except (TypeError, ValueError) as error:
logger.warning('incorrect data type or value:')
logger.exception(error)
except Exception as error:
logger.warning('unrecognized exception:')
            logger.exception(error)
self._update_data(trial_job_id, num_trial_history)
if self.high_better:
best_history = max(trial_history)
else:
best_history = min(trial_history)
avg_array = []
for id in self.completed_avg_history:
if len(self.completed_avg_history[id]) >= curr_step:
avg_array.append(self.completed_avg_history[id][curr_step - 1])
if len(avg_array) > 0:
avg_array.sort()
if self.high_better:
median = avg_array[(len(avg_array)-1) // 2]
return AssessResult.Bad if best_history < median else AssessResult.Good
else:
median = avg_array[len(avg_array) // 2]
return AssessResult.Bad if best_history > median else AssessResult.Good
else:
return AssessResult.Good
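def _example_usage():  # pragma: no cover
    """Editor's sketch (not part of the original module): exercise the assessor
    outside of NNI with two completed trials and one clearly weaker trial.
    Trial ids and metric values are made up."""
    assessor = MedianstopAssessor(optimize_mode='maximize', start_step=2)
    assessor.assess_trial('t0', [0.50, 0.60, 0.70])
    assessor.trial_end('t0', success=True)
    assessor.assess_trial('t1', [0.55, 0.65, 0.75])
    assessor.trial_end('t1', success=True)
    # Best value so far (0.15) is below the median of running averages (0.6),
    # so the rule recommends stopping the trial early.
    return assessor.assess_trial('t2', [0.10, 0.12, 0.15])  # -> AssessResult.Bad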
| 43.69 | 128 | 0.66331 |
4f5a98ffe8666a75cba399402ef4463f96669f9f | 6,596 | py | Python | lib/galaxy/web/base/interactive_environments.py | bioinfo-center-pasteur-fr/galaxy-pasteur | 0be2c0194ce9615d9efa08affc0e6735b656855a | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/web/base/interactive_environments.py | bioinfo-center-pasteur-fr/galaxy-pasteur | 0be2c0194ce9615d9efa08affc0e6735b656855a | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | lib/galaxy/web/base/interactive_environments.py | bioinfo-center-pasteur-fr/galaxy-pasteur | 0be2c0194ce9615d9efa08affc0e6735b656855a | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | import ConfigParser
import hashlib
import os
import random
from galaxy.util.bunch import Bunch
from galaxy import web
from galaxy import eggs
eggs.require("PyYAML")
import yaml
from galaxy.managers import api_keys
class InteractiveEnviornmentRequest(object):
def __init__(self, trans, plugin):
plugin_config = plugin.config
self.trans = trans
self.attr = Bunch()
self.attr.viz_id = plugin_config["name"].lower()
self.attr.history_id = trans.security.encode_id( trans.history.id )
self.attr.proxy_request = trans.app.proxy_manager.setup_proxy( trans )
self.attr.proxy_url = self.attr.proxy_request[ 'proxy_url' ]
self.attr.galaxy_config = trans.app.config
self.attr.galaxy_root_dir = os.path.abspath(self.attr.galaxy_config.root)
self.attr.root = web.url_for("/")
self.attr.app_root = self.attr.root + "plugins/visualizations/" + self.attr.viz_id + "/static/"
plugin_path = os.path.abspath( plugin.path )
# Store our template and configuration path
self.attr.our_config_dir = os.path.join(plugin_path, "config")
self.attr.our_template_dir = os.path.join(plugin_path, "templates")
self.attr.HOST = trans.request.host.rsplit(':', 1)[0]
self.attr.PORT = self.attr.proxy_request[ 'proxied_port' ]
def load_deploy_config(self, default_dict={}):
viz_config = ConfigParser.SafeConfigParser(default_dict)
conf_path = os.path.join( self.attr.our_config_dir, self.attr.viz_id + ".ini" )
if not os.path.exists( conf_path ):
conf_path = "%s.sample" % conf_path
viz_config.read( conf_path )
self.attr.viz_config = viz_config
def _boolean_option(option, default=False):
if self.attr.viz_config.has_option("main", option):
return self.attr.viz_config.getboolean("main", option)
else:
return default
# Older style port range proxying - not sure we want to keep these around or should
# we always assume use of Galaxy dynamic proxy? None of these need to be specified
# if using the Galaxy dynamic proxy.
self.attr.PASSWORD_AUTH = _boolean_option("password_auth")
self.attr.APACHE_URLS = _boolean_option("apache_urls")
self.attr.SSL_URLS = _boolean_option("ssl")
def write_conf_file(self, output_directory, extra={}):
"""
Build up a configuration file that is standard for ALL IEs.
TODO: replace hashed password with plaintext.
"""
trans = self.trans
request = trans.request
api_key = api_keys.ApiKeyManager( trans.app ).get_or_create_api_key( trans.user )
conf_file = {
'history_id': self.attr.history_id,
'api_key': api_key,
'remote_host': request.remote_addr,
'docker_port': self.attr.PORT,
'cors_origin': request.host_url,
}
if self.attr.viz_config.has_option("docker", "galaxy_url"):
conf_file['galaxy_url'] = self.attr.viz_config.get("docker", "galaxy_url")
elif self.attr.galaxy_config.galaxy_infrastructure_url_set:
conf_file['galaxy_url'] = self.attr.galaxy_config.galaxy_infrastructure_url.rstrip('/') + '/'
else:
conf_file['galaxy_url'] = request.application_url.rstrip('/') + '/'
conf_file['galaxy_paster_port'] = self.attr.galaxy_config.guess_galaxy_port()
if self.attr.PASSWORD_AUTH:
# Generate a random password + salt
notebook_pw_salt = self.generate_password(length=12)
notebook_pw = self.generate_password(length=24)
m = hashlib.sha1()
m.update( notebook_pw + notebook_pw_salt )
conf_file['notebook_password'] = 'sha1:%s:%s' % (notebook_pw_salt, m.hexdigest())
# Should we use password based connection or "default" connection style in galaxy
else:
notebook_pw = "None"
# Some will need to pass extra data
for extra_key in extra:
conf_file[extra_key] = extra[extra_key]
self.attr.notebook_pw = notebook_pw
# Write conf
with open( os.path.join( output_directory, 'conf.yaml' ), 'wb' ) as handle:
handle.write( yaml.dump(conf_file, default_flow_style=False) )
def generate_hex(self, length):
return ''.join(random.choice('0123456789abcdef') for _ in range(length))
def generate_password(self, length):
"""
Generate a random alphanumeric password
"""
return ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for _ in range(length))
def javascript_boolean(self, python_boolean):
"""
Convenience function to convert boolean for use in JS
"""
if python_boolean:
return "true"
else:
return "false"
def url_template(self, url_template):
"""
Process a URL template
There are several variables accessible to the user:
- ${PROXY_URL} will be replaced with dynamically create proxy
- ${PORT} will be replaced with the port the docker image is attached to
"""
# Figure out our substitutions
# Next several lines for older style replacements (not used with Galaxy dynamic
# proxy)
if self.attr.SSL_URLS:
protocol = 'https'
else:
protocol = 'http'
if not self.attr.APACHE_URLS:
# If they are not using apache URLs, that implies there's a port attached to the host
# string, thus we replace just the first instance of host that we see.
url_template = url_template.replace('${HOST}', '${HOST}:${PORT}', 1)
url_template = url_template.replace('${PROTO}', protocol) \
.replace('${HOST}', self.attr.HOST)
# Only the following replacements are used with Galaxy dynamic proxy
# URLs
url = url_template.replace('${PROXY_URL}', str(self.attr.proxy_url)) \
.replace('${PORT}', str(self.attr.PORT))
return url
def docker_cmd(self, temp_dir):
"""
Generate and return the docker command to execute
"""
return '%s run -d -u %s --sig-proxy=true -p %s:%s -v "%s:/import/" %s' % \
(self.attr.viz_config.get("docker", "command"), os.geteuid(),
self.attr.PORT, self.attr.docker_port,
temp_dir, self.attr.viz_config.get("docker", "image"))
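# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original class): what ``url_template`` does
# with the Galaxy dynamic proxy variables; the proxy URL and port below are
# hypothetical values.
#     self.attr.proxy_url = 'http://galaxy.example.org/gie_proxy'
#     self.attr.PORT = 32771
#     self.url_template('${PROXY_URL}/ipython/${PORT}/')
#     # -> 'http://galaxy.example.org/gie_proxy/ipython/32771/'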
| 39.73494 | 105 | 0.629321 |
4f57a2c5338d709a9494c4466f07793d006c1d44 | 8,649 | py | Python | test/vanilla/legacy/AcceptanceTests/asynctests/test_xml.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/vanilla/legacy/AcceptanceTests/asynctests/test_xml.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/vanilla/legacy/AcceptanceTests/asynctests/test_xml.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import unittest
import subprocess
import sys
import isodate
import tempfile
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
from async_generator import yield_, async_generator
from xmlservice.aio import AutoRestSwaggerBATXMLService
from xmlservice.models import BlobType, ModelWithByteProperty, ModelWithUrlProperty
import pytest
_LOGGER = logging.getLogger(__name__)
@pytest.fixture
@async_generator
async def client():
async with AutoRestSwaggerBATXMLService(base_url="http://localhost:3000") as client:
await yield_(client)
async def _assert_with_log(func, *args, **kwargs):
def raise_for_status(response, deserialized, headers):
response.http_response.internal_response.raise_for_status()
try:
http_response = await func(*args, cls=raise_for_status, **kwargs)
except Exception as err:
print(err.response.text())
pytest.fail()
class TestXml(object):
@pytest.mark.asyncio
async def test_json_xml(self, client):
await client.xml.json_input(id=42)
result = await client.xml.json_output()
assert result.id == 42
@pytest.mark.asyncio
async def test_simple(self, client):
# Slideshow
slideshow = await client.xml.get_simple()
assert slideshow.title == "Sample Slide Show"
assert slideshow.date == "Date of publication"
assert slideshow.author == "Yours Truly"
assert len(slideshow.slides) == 2
slides = slideshow.slides
slide1 = slides[0]
assert slide1.type == "all"
assert slide1.title == "Wake up to WonderWidgets!"
assert len(slide1.items) == 0
slide2 = slides[1]
assert slide2.type == "all"
assert slide2.title == "Overview"
assert len(slide2.items) == 3
assert slide2.items[0] == "Why WonderWidgets are great"
assert slide2.items[1] == ''
assert slide2.items[2] == "Who buys WonderWidgets"
await _assert_with_log(client.xml.put_simple, slideshow)
@pytest.mark.asyncio
async def test_empty_child_element(self, client):
banana = await client.xml.get_empty_child_element()
assert banana.flavor == '' # That's the point of this test, it was an empty node.
await _assert_with_log(client.xml.put_empty_child_element, banana)
@pytest.mark.asyncio
async def test_empty_root_list(self, client):
bananas = await client.xml.get_empty_root_list()
assert bananas == []
await _assert_with_log(client.xml.put_empty_root_list, bananas)
@pytest.mark.asyncio
async def test_root_list_single_item(self, client):
bananas = await client.xml.get_root_list_single_item()
assert len(bananas) == 1
assert bananas[0].name == "Cavendish"
await _assert_with_log(client.xml.put_root_list_single_item, bananas)
@pytest.mark.asyncio
async def test_root_list(self, client):
bananas = await client.xml.get_root_list()
assert len(bananas) == 2
await _assert_with_log(client.xml.put_root_list, bananas)
@pytest.mark.asyncio
async def test_empty_wrapped_lists(self, client):
bananas = await client.xml.get_empty_wrapped_lists()
assert bananas.good_apples == []
assert bananas.bad_apples == []
await _assert_with_log(client.xml.put_empty_wrapped_lists, bananas)
@pytest.mark.asyncio
async def test_get_empty(self, client):
slideshow = await client.xml.get_empty_list()
await _assert_with_log(client.xml.put_empty_list, slideshow)
@pytest.mark.asyncio
async def test_wrapped_lists(self, client):
bananas = await client.xml.get_wrapped_lists()
assert bananas.good_apples == ['Fuji', 'Gala']
assert bananas.bad_apples == ['Red Delicious']
await _assert_with_log(client.xml.put_wrapped_lists, bananas)
@pytest.mark.asyncio
async def test_complex_types(self, client):
root = await client.xml.get_complex_type_ref_no_meta()
assert root.ref_to_model.id == "myid"
await client.xml.put_complex_type_ref_no_meta(root)
root = await client.xml.get_complex_type_ref_with_meta()
assert root.ref_to_model.id == "myid"
await client.xml.put_complex_type_ref_with_meta(root)
@pytest.mark.asyncio
async def test_list_containers(self, client):
containers = await client.xml.list_containers()
assert len(containers.containers) == 3
@pytest.mark.asyncio
async def test_list_blobs(self, client):
blobs = await client.xml.list_blobs()
assert len(blobs.blobs.blob) == 5
assert not blobs.blobs.blob_prefix
assert len(blobs.blobs.blob) == 5
blob = blobs.blobs.blob[0]
assert blob.name == "blob1.txt"
assert blob.properties.last_modified.date() == date(2009, 9, 9)
assert blob.properties.etag == "0x8CBFF45D8A29A19"
assert blob.properties.content_length == 100
assert blob.properties.content_type == "text/html"
# Check that an empty field in the XML is empty string
assert blob.properties.content_encoding == ''
assert blob.properties.content_language == "en-US"
assert blob.properties.content_md5 == ''
assert blob.properties.cache_control == "no-cache"
assert blob.properties.blob_type == BlobType.block_blob
# Check that a field NOT in the XML is None
assert blob.properties.destination_snapshot is None
assert len(blob.metadata) == 3
assert blob.metadata["Color"] == "blue"
assert blob.metadata["BlobNumber"] == "01"
assert blob.metadata["SomeMetadataName"] == "SomeMetadataValue"
@pytest.mark.asyncio
async def test_service_properties(self, client):
properties = await client.xml.get_service_properties()
assert properties.hour_metrics is not None
assert properties.minute_metrics is not None
await _assert_with_log(client.xml.put_service_properties, properties)
@pytest.mark.asyncio
async def test_acls(self, client):
acls = await client.xml.get_acls()
assert len(acls) == 1
assert acls[0].id == 'MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI='
await _assert_with_log(client.xml.put_acls, acls)
@pytest.mark.asyncio
async def test_xms_text(self, client):
xml_object = await client.xml.get_xms_text()
assert xml_object.language == "english"
assert xml_object.content == "I am text"
@pytest.mark.asyncio
async def test_get_bytes(self, client):
bytes_object = await client.xml.get_bytes()
assert isinstance(bytes_object, ModelWithByteProperty)
assert bytes_object.bytes == b"Hello world"
@pytest.mark.asyncio
async def test_put_bytes(self, client):
await client.xml.put_binary(b"Hello world")
@pytest.mark.asyncio
async def test_get_url(self, client):
url_object = await client.xml.get_uri()
assert isinstance(url_object, ModelWithUrlProperty)
assert url_object.url == 'https://myaccount.blob.core.windows.net/'
@pytest.mark.asyncio
async def test_put_url(self, client):
await client.xml.put_uri('https://myaccount.blob.core.windows.net/') | 39.857143 | 89 | 0.689559 |
4f59840ff158df58bc7f721f94afccda21bc9f8d | 1,003 | py | Python | custom_components/hacs/helpers/functions/template.py | sob/hass-config | 5b2793b3934487668da92b05ff9ae48a92506228 | [
"MIT"
] | 27 | 2018-10-13T10:00:53.000Z | 2022-02-07T23:33:12.000Z | custom_components/hacs/helpers/functions/template.py | sob/hass-config | 5b2793b3934487668da92b05ff9ae48a92506228 | [
"MIT"
] | 33 | 2021-11-22T16:30:43.000Z | 2022-03-29T18:00:13.000Z | custom_components/hacs/helpers/functions/template.py | sob/hass-config | 5b2793b3934487668da92b05ff9ae48a92506228 | [
"MIT"
] | 5 | 2019-06-01T10:27:37.000Z | 2020-09-18T14:14:56.000Z | """Custom template support."""
# pylint: disable=broad-except
from jinja2 import Template
from custom_components.hacs.helpers.functions.logger import getLogger
_LOGGER = getLogger()
def render_template(content, context):
"""Render templates in content."""
# Fix None issues
if context.releases.last_release_object is not None:
prerelease = context.releases.last_release_object.prerelease
else:
prerelease = False
# Render the template
try:
render = Template(content)
render = render.render(
installed=context.data.installed,
pending_update=context.pending_upgrade,
prerelease=prerelease,
selected_tag=context.data.selected_tag,
version_available=context.releases.last_release,
version_installed=context.display_installed_version,
)
return render
except (Exception, BaseException) as exception:
_LOGGER.debug(exception)
return content
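def _example_render():  # pragma: no cover
    """Editor's sketch (not part of the original module): render a template with
    a stand-in repository object carrying only the attributes read above.
    Real HACS passes a repository instance as ``context``."""
    from types import SimpleNamespace
    repo = SimpleNamespace(
        data=SimpleNamespace(installed=True, selected_tag=None),
        releases=SimpleNamespace(last_release="1.2.0", last_release_object=None),
        pending_upgrade=False,
        display_installed_version="1.1.0",
    )
    # -> "Installed 1.1.0, latest release 1.2.0"
    return render_template(
        "Installed {{ version_installed }}, latest release {{ version_available }}",
        repo,
    )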
| 30.393939 | 69 | 0.688933 |
4f5bcddd16d692cd303ce1ef3b07f0e253edde02 | 4,300 | py | Python | habitat/datasets/object_nav/create/synset_unify_py.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | [
"MIT"
] | null | null | null | habitat/datasets/object_nav/create/synset_unify_py.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | [
"MIT"
] | null | null | null | habitat/datasets/object_nav/create/synset_unify_py.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | [
"MIT"
] | null | null | null | import csv
import itertools
import nltk
from nltk.corpus import wordnet as wn
def ycb_google_16k_meta_filename_to_word(filename: str):
words = filename.replace(".glb", "")[4:].replace("_", " ").split()
if len(words) == 1:
return words[0], " ".join(words)
if {"box", "lego", "toy", "pitcher"}.intersection(words):
return words[-2], " ".join(words)
return words[-1], " ".join(words)
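# Editor's note (illustrative; the filenames only show the expected pattern):
#   ycb_google_16k_meta_filename_to_word("003_cracker_box.glb") -> ("cracker", "cracker box")
#   ycb_google_16k_meta_filename_to_word("011_banana.glb")      -> ("banana", "banana")
# i.e. the numeric prefix and extension are stripped and the noun used for the
# WordNet lookup is picked from the remaining words.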
ycb_synsets = {
"container.n.01",
"bread.n.01",
"food",
"fruit.n.01",
"cooking_utensil.n.01",
"tableware.n.01",
"cutlery.n.02",
"tool.n.01",
"lever.n.01'",
"lock.n.01",
"hand_tool.n.01",
"device.n.01",
"equipment.n.01",
"cube.n.05",
"game.n.01",
"artifact.n.01",
}
def ycb_google_16k_meta_get_synset(word: str, object_name: str):
gen_synsets = {synset for synset in wn.synsets(word)}
gen_synsets = gen_synsets.union(
{hyper for synset in gen_synsets for hyper in synset.hypernyms()}
)
gen_synsets = gen_synsets.union(
{hyper for synset in gen_synsets for hyper in synset.hypernyms()}
)
gen_synsets = {synset.name() for synset in gen_synsets}
# two_level_synsets = {synset.name() for synset in wn.synsets(word)}.union(
# {hyper.name() for synset in wn.synsets(word) for hyper in synset.hypernyms()})
key_synsets = ycb_synsets.intersection(gen_synsets)
if key_synsets:
return key_synsets.pop()
return None
with open(
"habitat/datasets/object_nav/create/ycb_google_16k_meta.csv", "r"
) as f:
csv_r = csv.reader(f)
headers = next(csv_r)
print(headers)
key_ind = headers.index("filename")
rows = [
(line[key_ind], ycb_google_16k_meta_filename_to_word(line[key_ind]))
for line in csv_r
if line[key_ind] != ""
]
rows_synset = [
(filename, word[1], word[0], wn.synsets(word[0]))
for filename, word in rows
]
synsets_source = [
(
filename,
word[1],
word[0],
ycb_google_16k_meta_get_synset(word[0], word[1]),
# wn.synsets(word[0])[0].definition(), wn.synsets(word[0])[0].hypernyms(),
# wn.synsets(word[0])[0].hypernyms()[0].hypernyms()
)
for filename, word in rows
]
# [(line[0], wn.synset(line[key_ind])) for line in csv_r if line[key_ind] != '']
for row in synsets_source:
print(f"{row}")
with open(
"habitat/datasets/object_nav/create/ycb_google_16k_meta_output.csv",
"w",
) as of:
writer = csv.writer(of)
writer.writerow(["filename"])
for row in synsets_source:
writer.writerow(row) # [row[0], row[1], row[0]])
# container.n.01
# bread.n.01
# food
# fruit.n.01
# cooking_utensil.n.01
# tableware.n.01
# cutlery.n.02
# tool.n.01
# lever.n.01'
# lock.n.01
# hand_tool.n.01
# device.n.01
# equipment.n.01
# cube.n.05
# game.n.01
# artifact.n.01
with open("category_mapping.tsv", "r") as f:
csv_r = csv.reader(f, delimiter="\t")
headers = next(csv_r)
key_ind = headers.index("wnsynsetkey")
print(headers)
synsets_source = [
(line[1], wn.synset(line[key_ind]))
for line in csv_r
if line[key_ind] != ""
]
with open("dictionaries.csv", "r") as f:
csv_r = csv.reader(f)
next(csv_r)
next(csv_r)
synsets_target = [
[(line[1], sn) for sn in wn.synsets(line[1])] for line in csv_r
]
synsets_target = list(itertools.chain(*synsets_target))
best_matches = []
for word1, sn1 in synsets_source:
best = max(
[
[
sn1.path_similarity(sn2, simulate_root=False),
word1,
word2,
sn1,
sn2,
]
for word2, sn2 in synsets_target
if sn1.path_similarity(sn2, simulate_root=False) is not None
]
)
best = best + [best[-2].lowest_common_hypernyms(best[-1])]
best_matches.append(best)
print("Similarity,word1,word2,synset1,synset2,lcasynset")
for best_match in sorted(best_matches, key=lambda x: x[1]):
# Only difference between the results posted is this if statement
if best_match[-3] in best_match[-1] or best_match[-2] in best_match[-1]:
print(best_match)
| 27.388535 | 88 | 0.6 |
4f595954c7bde8b775cc2b6ffeded9770802aafb | 1,844 | py | Python | pyhanko/sign/beid.py | ooguz/pyHanko | f1490d9ecaa74ce600e464a27807290d486acc6f | [
"MIT"
] | null | null | null | pyhanko/sign/beid.py | ooguz/pyHanko | f1490d9ecaa74ce600e464a27807290d486acc6f | [
"MIT"
] | null | null | null | pyhanko/sign/beid.py | ooguz/pyHanko | f1490d9ecaa74ce600e464a27807290d486acc6f | [
"MIT"
] | 1 | 2021-04-08T05:11:03.000Z | 2021-04-08T05:11:03.000Z | """
Sign PDF files using a Belgian eID card.
This module defines a very thin convenience wrapper around
:mod:`.pyhanko.sign.pkcs11` to set up a PKCS#11 session with an eID card and
read the appropriate certificates on the device.
"""
from . import pkcs11 as sign_pkcs11
from pkcs11 import Session
__all__ = ['open_beid_session', 'BEIDSigner']
def open_beid_session(lib_location, slot_no=None) -> Session:
"""
Open a PKCS#11 session
:param lib_location:
Path to the shared library file containing the eID PKCS#11 module.
Usually, the file is named ``libbeidpkcs11.so``,
``libbeidpkcs11.dylib`` or ``beidpkcs11.dll``, depending on your
operating system.
:param slot_no:
Slot number to use. If not specified, the first slot containing a token
labelled ``BELPIC`` will be used.
:return:
An open PKCS#11 session object.
"""
# the middleware will prompt for the user's PIN when we attempt
# to sign later, so there's no need to specify it here
return sign_pkcs11.open_pkcs11_session(
lib_location, slot_no=slot_no, token_label='BELPIC'
)
class BEIDSigner(sign_pkcs11.PKCS11Signer):
"""
Belgian eID-specific signer implementation that automatically populates
the (trustless) certificate list with the relevant certificates stored
on the card.
This includes the government's (self-signed) root certificate and the
certificate of the appropriate intermediate CA.
"""
def __init__(self, pkcs11_session: Session, use_auth_cert: bool = False,
bulk_fetch: bool = False):
super().__init__(
pkcs11_session=pkcs11_session,
cert_label='Authentication' if use_auth_cert else 'Signature',
other_certs_to_pull=('Root', 'CA'), bulk_fetch=bulk_fetch
)
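def _example_signer():  # pragma: no cover
    """Editor's sketch (not part of the original module): put the two helpers
    together. The middleware path is an assumption for a typical Linux install;
    adjust it for your platform. The middleware prompts for the PIN at signing time."""
    session = open_beid_session('/usr/lib/libbeidpkcs11.so.0')
    return BEIDSigner(session, use_auth_cert=False)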
| 34.792453 | 79 | 0.694685 |
4f5ace93c9539de937214c5235da0824e7ea648b | 11,802 | py | Python | pytorch_widedeep/models/tab_resnet.py | cmcmaster1/pytorch-widedeep | d0feb29e56aa5612acdd3c60ce2c82ec04093110 | [
"MIT"
] | null | null | null | pytorch_widedeep/models/tab_resnet.py | cmcmaster1/pytorch-widedeep | d0feb29e56aa5612acdd3c60ce2c82ec04093110 | [
"MIT"
] | null | null | null | pytorch_widedeep/models/tab_resnet.py | cmcmaster1/pytorch-widedeep | d0feb29e56aa5612acdd3c60ce2c82ec04093110 | [
"MIT"
] | null | null | null | from collections import OrderedDict
import numpy as np
import torch
from torch import nn
from torch.nn import Module
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tab_mlp import MLP
class BasicBlock(nn.Module):
    def __init__(self, inp: int, out: int, dropout: float = 0.0, resize: Optional[Module] = None):
super(BasicBlock, self).__init__()
self.lin1 = nn.Linear(inp, out)
self.bn1 = nn.BatchNorm1d(out)
self.leaky_relu = nn.LeakyReLU(inplace=True)
if dropout > 0.0:
self.dropout = True
self.dp = nn.Dropout(dropout)
else:
self.dropout = False
self.lin2 = nn.Linear(out, out)
self.bn2 = nn.BatchNorm1d(out)
self.resize = resize
def forward(self, x):
identity = x
out = self.lin1(x)
out = self.bn1(out)
out = self.leaky_relu(out)
if self.dropout:
out = self.dp(out)
out = self.lin2(out)
out = self.bn2(out)
if self.resize is not None:
identity = self.resize(x)
out += identity
out = self.leaky_relu(out)
return out
class DenseResnet(nn.Module):
def __init__(self, input_dim: int, blocks_dims: List[int], dropout: float):
super(DenseResnet, self).__init__()
self.input_dim = input_dim
self.blocks_dims = blocks_dims
self.dropout = dropout
if input_dim != blocks_dims[0]:
self.dense_resnet = nn.Sequential(
OrderedDict(
[
("lin1", nn.Linear(input_dim, blocks_dims[0])),
("bn1", nn.BatchNorm1d(blocks_dims[0])),
]
)
)
else:
self.dense_resnet = nn.Sequential()
for i in range(1, len(blocks_dims)):
resize = None
if blocks_dims[i - 1] != blocks_dims[i]:
resize = nn.Sequential(
nn.Linear(blocks_dims[i - 1], blocks_dims[i]),
nn.BatchNorm1d(blocks_dims[i]),
)
self.dense_resnet.add_module(
"block_{}".format(i - 1),
BasicBlock(blocks_dims[i - 1], blocks_dims[i], dropout, resize),
)
def forward(self, X: Tensor) -> Tensor:
return self.dense_resnet(X)
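def _example_dense_resnet():  # pragma: no cover
    """Editor's sketch (not part of the original module): push a random batch
    through the residual MLP defined above; the dimensions are arbitrary."""
    X = torch.rand(8, 16)
    model = DenseResnet(input_dim=16, blocks_dims=[32, 32, 16], dropout=0.1)
    return model(X)  # shape: (8, 16)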
class TabResnet(nn.Module):
def __init__(
self,
embed_input: List[Tuple[str, int, int]],
column_idx: Dict[str, int],
blocks_dims: List[int] = [200, 100, 100],
blocks_dropout: float = 0.1,
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = False,
embed_dropout: float = 0.1,
continuous_cols: Optional[List[str]] = None,
batchnorm_cont: bool = False,
concat_cont_first: bool = True,
):
r"""Defines a so-called ``TabResnet`` model that can be used as the
``deeptabular`` component of a Wide & Deep model.
This class combines embedding representations of the categorical
features with numerical (aka continuous) features. These are then
passed through a series of Resnet blocks. See
``pytorch_widedeep.models.deep_dense_resnet.BasicBlock`` for details
on the structure of each block.
.. note:: Unlike ``TabMlp``, ``TabResnet`` assumes that there are always
categorical columns
Parameters
----------
embed_input: List
List of Tuples with the column name, number of unique values and
embedding dimension. e.g. [(education, 11, 32), ...].
column_idx: Dict
Dict containing the index of the columns that will be passed through
the TabMlp model. Required to slice the tensors. e.g. {'education':
0, 'relationship': 1, 'workclass': 2, ...}
blocks_dims: List, default = [200, 100, 100]
List of integers that define the input and output units of each block.
For example: [200, 100, 100] will generate 2 blocks_dims. The first will
receive a tensor of size 200 and output a tensor of size 100, and the
second will receive a tensor of size 100 and output a tensor of size
100. See ``pytorch_widedeep.models.deep_dense_resnet.BasicBlock`` for
details on the structure of each block.
blocks_dropout: float, default = 0.1
Block's `"internal"` dropout. This dropout is applied to the first
            of the two dense layers that comprise each ``BasicBlock``.
mlp_hidden_dims: List, Optional, default = None
List with the number of neurons per dense layer in the mlp. e.g:
[64, 32]. If ``None`` the output of the Resnet Blocks will be
connected directly to the output neuron(s), i.e. using a MLP is
optional.
mlp_activation: str, default = "relu"
Activation function for the dense layers of the MLP
mlp_dropout: float, default = 0.1
float with the dropout between the dense layers of the MLP.
mlp_batchnorm: bool, default = False
Boolean indicating whether or not batch normalization will be applied
to the dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not batch normalization will be applied
to the last of the dense layers
mlp_linear_first: bool, default = False
Boolean indicating the order of the operations in the dense
layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->
LIN -> ACT]``
embed_dropout: float, default = 0.1
embeddings dropout
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
batchnorm_cont: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
continuous input
concat_cont_first: bool, default = True
Boolean indicating whether the continuum columns will be
concatenated with the Embeddings and then passed through the
Resnet blocks (``True``) or, alternatively, will be concatenated
with the result of passing the embeddings through the Resnet
Blocks (``False``)
Attributes
----------
dense_resnet: ``nn.Sequential``
deep dense Resnet model that will receive the concatenation of the
embeddings and the continuous columns
embed_layers: ``nn.ModuleDict``
``ModuleDict`` with the embedding layers
tab_resnet_mlp: ``nn.Sequential``
if ``mlp_hidden_dims`` is ``True``, this attribute will be an mlp
model that will receive:
- the results of the concatenation of the embeddings and the
continuous columns -- if present -- and then passed it through
the ``dense_resnet`` (``concat_cont_first = True``), or
- the result of passing the embeddings through the ``dense_resnet``
and then concatenating the results with the continuous columns --
if present -- (``concat_cont_first = False``)
output_dim: `int`
The output dimension of the model. This is a required attribute
neccesary to build the WideDeep class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import TabResnet
>>> X_deep = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = TabResnet(blocks_dims=[16,4], column_idx=column_idx, embed_input=embed_input,
... continuous_cols = ['e'])
>>> out = model(X_deep)
"""
super(TabResnet, self).__init__()
self.embed_input = embed_input
self.column_idx = column_idx
self.blocks_dims = blocks_dims
self.blocks_dropout = blocks_dropout
self.mlp_activation = mlp_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
self.embed_dropout = embed_dropout
self.continuous_cols = continuous_cols
self.batchnorm_cont = batchnorm_cont
self.concat_cont_first = concat_cont_first
if len(self.blocks_dims) < 2:
raise ValueError(
"'blocks' must contain at least two elements, e.g. [256, 128]"
)
        # Embeddings: val + 1 because 0 is reserved for padding/unseen categories.
self.embed_layers = nn.ModuleDict(
{
"emb_layer_" + col: nn.Embedding(val + 1, dim, padding_idx=0)
for col, val, dim in self.embed_input
}
)
self.embedding_dropout = nn.Dropout(embed_dropout)
emb_inp_dim = np.sum([embed[2] for embed in self.embed_input])
# Continuous
if self.continuous_cols is not None:
cont_inp_dim = len(self.continuous_cols)
if self.batchnorm_cont:
self.norm = nn.BatchNorm1d(cont_inp_dim)
else:
cont_inp_dim = 0
# DenseResnet
if self.concat_cont_first:
dense_resnet_input_dim = emb_inp_dim + cont_inp_dim
self.output_dim = blocks_dims[-1]
else:
dense_resnet_input_dim = emb_inp_dim
self.output_dim = cont_inp_dim + blocks_dims[-1]
self.tab_resnet = DenseResnet(
dense_resnet_input_dim, blocks_dims, blocks_dropout # type: ignore[arg-type]
)
# MLP
if self.mlp_hidden_dims is not None:
if self.concat_cont_first:
mlp_input_dim = blocks_dims[-1]
else:
mlp_input_dim = cont_inp_dim + blocks_dims[-1]
mlp_hidden_dims = [mlp_input_dim] + mlp_hidden_dims
self.tab_resnet_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
self.output_dim = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor: # type: ignore
r"""Forward pass that concatenates the continuous features with the
embeddings. The result is then passed through a series of dense Resnet
blocks"""
embed = [
self.embed_layers["emb_layer_" + col](X[:, self.column_idx[col]].long())
for col, _, _ in self.embed_input
]
x = torch.cat(embed, 1)
x = self.embedding_dropout(x)
if self.continuous_cols is not None:
cont_idx = [self.column_idx[col] for col in self.continuous_cols]
x_cont = X[:, cont_idx].float()
if self.batchnorm_cont:
x_cont = self.norm(x_cont)
if self.concat_cont_first:
x = torch.cat([x, x_cont], 1)
out = self.tab_resnet(x)
else:
out = torch.cat([self.tab_resnet(x), x_cont], 1)
else:
out = self.tab_resnet(x)
if self.mlp_hidden_dims is not None:
out = self.tab_resnet_mlp(out)
return out
| 39.737374 | 97 | 0.590239 |
4f5834211096407130f96de95d5971ad47b626ce | 10,342 | py | Python | Allura/ldap-setup.py | shalithasuranga/allura | 4f7fba13415954d07f602a051ec697329dd3706b | [
"Apache-2.0"
] | 1 | 2017-07-31T23:13:58.000Z | 2017-07-31T23:13:58.000Z | Allura/ldap-setup.py | lym/allura-git | b2b53d0c1ba8b1e48f176ad75cf64675b3545d69 | [
"Apache-2.0"
] | null | null | null | Allura/ldap-setup.py | lym/allura-git | b2b53d0c1ba8b1e48f176ad75cf64675b3545d69 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import string
import logging
from contextlib import contextmanager
from tempfile import mkstemp
from ConfigParser import ConfigParser, NoOptionError
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('ldap-setup')
config = ConfigParser()
def main():
config.read('.setup-scm-cache')
if not config.has_section('scm'):
config.add_section('scm')
suffix = get_value('suffix', 'dc=localdomain')
secret = get_value('admin password', 'secret')
firstdc = suffix.split(',')[0].split('=')[1]
if get_value('clear ldap config', 'y') == 'y':
run('apt-get -f install')
run('apt-get remove --purge slapd ldap-utils')
run('apt-get install slapd ldap-utils')
if get_value('start slapd', 'y') == 'y':
run('service slapd start')
if get_value('add base ldap schemas', 'y') == 'y':
run('ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/ldap/schema/cosine.ldif')
run('ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/ldap/schema/nis.ldif')
run('ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/ldap/schema/inetorgperson.ldif')
if get_value('add backend ldif', 'y') == 'y':
with tempfile(backend_ldif, locals()) as name:
run('ldapadd -Y EXTERNAL -H ldapi:/// -f %s' % name)
with open('/etc/ldap.secret', 'w') as fp:
fp.write(secret)
os.chmod('/etc/ldap.secret', 0400)
if get_value('add frontend ldif', 'y') == 'y':
with tempfile(frontend_ldif, locals()) as name:
run('ldapadd -c -x -D cn=admin,%s -W -f %s -y /etc/ldap.secret' %
(suffix, name))
if get_value('add initial user/group', 'y') == 'y':
with tempfile(initial_user_ldif, locals()) as name:
run('ldapadd -c -x -D cn=admin,%s -W -f %s -y /etc/ldap.secret' %
(suffix, name))
if get_value('setup ldap auth', 'y') == 'y':
run('apt-get install libnss-ldap')
run('dpkg-reconfigure ldap-auth-config')
run('auth-client-config -t nss -p lac_ldap')
run('pam-auth-update')
if get_value('setup ldapscripts', 'y') == 'y':
run('apt-get install ldapscripts')
with tempfile(ldapscripts_conf, locals()) as name:
shutil.copy(name, '/etc/ldapscripts/ldapscripts.conf')
log.info('writing passwd')
with open('/etc/ldapscripts/ldapscripts.passwd', 'w') as fp:
fp.write(secret)
os.chmod('/etc/ldapscripts/ldapscripts.passwd', 0400)
log.info('writing runtime')
with open('/usr/share/ldapscripts/runtime.debian', 'w') as fp:
fp.write(ldapscripts_debian)
def get_value(key, default):
try:
default = config.get('scm', key)
except NoOptionError:
pass
value = raw_input('%s? [%s]' % (key, default))
if not value:
value = default
config.set('scm', key, value)
with open('.setup-scm-cache', 'w') as fp:
config.write(fp)
return value
def run(command):
rc = os.system(command)
if rc != 0:
log.error('Error running %s', command)
assert rc == 0
return rc
@contextmanager
def tempfile(template, values):
fd, name = mkstemp()
os.write(fd, template.safe_substitute(values))
os.close(fd)
yield name
os.remove(name)
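# Editor's note (illustrative only): the Template objects defined below are
# rendered through this context manager and fed to ldapadd, e.g.
#     with tempfile(frontend_ldif, dict(suffix=suffix, firstdc=firstdc, secret=secret)) as name:
#         run('ldapadd -c -x -D cn=admin,%s -W -f %s -y /etc/ldap.secret' % (suffix, name))
# the rendered file is removed as soon as the block exits.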
backend_ldif = string.Template('''
# Load dynamic backend modules
dn: cn=module,cn=config
objectClass: olcModuleList
cn: module
olcModulepath: /usr/lib/ldap
olcModuleload: back_hdb
# Database settings
dn: olcDatabase=hdb,cn=config
objectClass: olcDatabaseConfig
objectClass: olcHdbConfig
olcDatabase: {1}hdb
olcSuffix: $suffix
olcDbDirectory: /var/lib/ldap
olcRootDN: cn=admin,$suffix
olcRootPW: $secret
olcDbConfig: set_cachesize 0 2097152 0
olcDbConfig: set_lk_max_objects 1500
olcDbConfig: set_lk_max_locks 1500
olcDbConfig: set_lk_max_lockers 1500
olcDbIndex: objectClass eq
olcLastMod: TRUE
olcDbCheckpoint: 512 30
olcAccess: to attrs=userPassword by dn="cn=admin,$suffix" write by anonymous auth by self write by * none
olcAccess: to attrs=shadowLastChange by self write by * read
olcAccess: to dn.base="" by * read
olcAccess: to * by dn="cn=admin,$suffix" write by * read
''')
frontend_ldif = string.Template('''
# Create top-level object in domain
dn: $suffix
objectClass: top
objectClass: dcObject
objectclass: organization
o: Example Organization
dc: $firstdc
description: LDAP Example
# Create max uid generator
dn: cn=maxUid,$suffix
objectClass: extensibleObject
objectClass: top
uidNumber: 10000
# Admin user.
dn: cn=admin,$suffix
objectClass: simpleSecurityObject
objectClass: organizationalRole
cn: admin
description: LDAP administrator
userPassword: $secret
dn: ou=people,$suffix
objectClass: organizationalUnit
ou: people
dn: ou=groups,$suffix
objectClass: organizationalUnit
ou: groups
''')
initial_user_ldif = string.Template('''
dn: uid=john,ou=people,$suffix
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: john
sn: Doe
givenName: John
cn: John Doe
displayName: John Doe
uidNumber: 1000
gidNumber: 10000
userPassword: password
gecos: John Doe
loginShell: /bin/bash
homeDirectory: /home/john
shadowExpire: -1
shadowFlag: 0
shadowWarning: 7
shadowMin: 8
shadowMax: 999999
shadowLastChange: 10877
mail: john.doe@example.com
postalCode: 31000
l: Toulouse
o: Example
mobile: +33 (0)6 xx xx xx xx
homePhone: +33 (0)5 xx xx xx xx
title: System Administrator
postalAddress:
initials: JD
dn: cn=example,ou=groups,$suffix
objectClass: posixGroup
cn: example
gidNumber: 10000
''')
open_ldap_config = string.Template('''
[open_ldap]
nss_passwd=passwd: files ldap
nss_group=group: files ldap
nss_shadow=shadow: files ldap
nss_netgroup=netgroup: files ldap
pam_auth=auth required pam_env.so
auth sufficient pam_unix.so likeauth nullok
#the following line (containing pam_group.so) must be placed before pam_ldap.so
#for ldap users to be placed in local groups such as fuse, plugdev, scanner, etc ...
auth required pam_group.so use_first_pass
auth sufficient pam_ldap.so use_first_pass
auth required pam_deny.so
pam_account=account sufficient pam_unix.so
account sufficient pam_ldap.so
account required pam_deny.so
pam_password=password sufficient pam_unix.so nullok md5 shadow
password sufficient pam_ldap.so use_first_pass
password required pam_deny.so
pam_session=session required pam_limits.so
session required pam_mkhomedir.so skel=/etc/skel/
session required pam_unix.so
session optional pam_ldap.so
''')
ldapscripts_conf = string.Template('''
SERVER=127.0.0.1
BINDDN='cn=admin,$suffix'
BINDPWDFILE="/etc/ldapscripts/ldapscripts.passwd"
SUFFIX='$suffix'
GSUFFIX='ou=Groups'
USUFFIX='ou=People'
MSUFFIX='ou=Computers'
GIDSTART=10000
UIDSTART=10000
MIDSTART=10000
''')
ldapscripts_debian = '''
### Allura-customized
### This file predefine some ldapscripts variables for Debian boxes.
#
# Copyright (c) 2005 Ganal LAPLANCHE - Linagora
# Copyright (c) 2005-2007 Pierre Habouzit
# Copyright (c) 2009 Alexander GQ Gerasiov
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
##### Beginning of ldapscripts configuration #####
getfield() {
local field="$1"
local nssconffile='/etc/libnss-ldap.conf'
if [ -f "$nssconffile" ];then
local value=$(awk "/^\s*$field/ {print \$2}" /etc/libnss-ldap.conf)
else
local value="$2"
fi
echo ${value:-$2}
}
getsuffix() {
field="$1"
value="$(getfield "$1" | sed -e "s/,.*$//")"
echo ${value:-$2}
}
# LDAP Configuration
SERVER=$(getfield uri "$(getfield host '')")
BINDDN=$(getfield rootbinddn '')
if [ -f /etc/libnss-ldap.secret ];then
BINDPWDFILE=/etc/libnss-ldap.secret
elif [ -f /etc/ldap.secret ];then
BINDPWDFILE=/etc/ldap.secret
fi
SUFFIX=`getfield base`
GSUFFIX=`getsuffix nss_base_group 'ou=Group'`
USUFFIX=`getsuffix nss_base_passwd 'ou=People'`
MSUFFIX=`getsuffix nss_base_hosts 'ou=Hosts'`
# User properties
[ -f /etc/adduser.conf ] && . /etc/adduser.conf
USHELL=${DSHELL:-"/bin/bash"}
UHOMES=${DHOME:-"/home"}"/%u"
HOMESKEL=${SKEL:-"/etc/skel"}
HOMEPERMS=${DIR_MODE:-"0755"}
# Where to log
LOGFILE="/var/log/ldapscripts.log"
# Various binaries used within scripts
LDAPSEARCHBIN=`which ldapsearch`
LDAPADDBIN=`which ldapadd`
LDAPDELETEBIN=`which ldapdelete`
LDAPMODIFYBIN=`which ldapmodify`
LDAPMODRDNBIN=`which ldapmodrdn`
LDAPPASSWDBIN=`which ldappasswd`
# Getent command to use - choose the ones used on your system. Leave blank or comment for auto-guess.
# GNU/Linux
GETENTPWCMD="getent passwd"
GETENTGRCMD="getent group"
TMPDIR="/tmp"
##### End of configuration #####
'''
if __name__ == '__main__':
main()
| 30.417647 | 105 | 0.691356 |
4f5e86fcacf3b45cff56b901d942435a404483df | 10,327 | py | Python | test/vanilla/low-level/Expected/AcceptanceTests/BodyComplexLowLevel/bodycomplexlowlevel/rest/basic/_request_builders.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | null | null | null | test/vanilla/low-level/Expected/AcceptanceTests/BodyComplexLowLevel/bodycomplexlowlevel/rest/basic/_request_builders.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | null | null | null | test/vanilla/low-level/Expected/AcceptanceTests/BodyComplexLowLevel/bodycomplexlowlevel/rest/basic/_request_builders.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 1 | 2022-03-28T08:58:03.000Z | 2022-03-28T08:58:03.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.rest import HttpRequest
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional, TypeVar
T = TypeVar("T")
JSONType = Any
_SERIALIZER = Serializer()
# fmt: off
def build_get_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get complex type {id: 2, name: 'abc', color: 'YELLOW'}.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"color": "str", # Optional. Possible values include: "cyan", "Magenta", "YELLOW", "blacK".
"id": 0, # Optional. Basic Id.
"name": "str" # Optional. Name property with a very long description that does not fit on a single line and a line break.
}
"""
accept = "application/json"
# Construct URL
url = '/complex/basic/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
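# Minimal usage sketch (the `client` variable below is assumed, not part of this module;
# it is whatever pipeline client exposes `send_request`, as described in the docstrings):
#
#     request = build_get_valid_request()
#     response = client.send_request(request)
#     response.raise_for_status()
#     body = response.json()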
def build_put_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Please put {id: 2, name: 'abc', color: 'Magenta'}.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword api_version: Api Version. The default value is "2016-02-29". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Please put {id: 2, name: 'abc', color: 'Magenta'}.
:paramtype json: JSONType
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Please put {id: 2, name: 'abc', color: 'Magenta'}.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = {
"color": "str", # Optional. Possible values include: "cyan", "Magenta", "YELLOW", "blacK".
"id": 0, # Optional. Basic Id.
"name": "str" # Optional. Name property with a very long description that does not fit on a single line and a line break.
}
"""
api_version = kwargs.pop('api_version', "2016-02-29") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/complex/basic/valid'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_invalid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get a basic complex type that is invalid for the local strong type.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"color": "str", # Optional. Possible values include: "cyan", "Magenta", "YELLOW", "blacK".
"id": 0, # Optional. Basic Id.
"name": "str" # Optional. Name property with a very long description that does not fit on a single line and a line break.
}
"""
accept = "application/json"
# Construct URL
url = '/complex/basic/invalid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get a basic complex type that is empty.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"color": "str", # Optional. Possible values include: "cyan", "Magenta", "YELLOW", "blacK".
"id": 0, # Optional. Basic Id.
"name": "str" # Optional. Name property with a very long description that does not fit on a single line and a line break.
}
"""
accept = "application/json"
# Construct URL
url = '/complex/basic/empty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get a basic complex type whose properties are null.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"color": "str", # Optional. Possible values include: "cyan", "Magenta", "YELLOW", "blacK".
"id": 0, # Optional. Basic Id.
"name": "str" # Optional. Name property with a very long description that does not fit on a single line and a line break.
}
"""
accept = "application/json"
# Construct URL
url = '/complex/basic/null'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_not_provided_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get a basic complex type while the server doesn't provide a response payload.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"color": "str", # Optional. Possible values include: "cyan", "Magenta", "YELLOW", "blacK".
"id": 0, # Optional. Basic Id.
"name": "str" # Optional. Name property with a very long description that does not fit on a single line and a line break.
}
"""
accept = "application/json"
# Construct URL
url = '/complex/basic/notprovided'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
| 35.982578 | 138 | 0.626804 |
4f5e376ceba0c4ce651821d051c79c306ea163cb | 1,746 | py | Python | code/actors/off_policy/discrete.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 17 | 2019-03-29T18:30:36.000Z | 2021-10-17T15:38:22.000Z | code/actors/off_policy/discrete.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 1 | 2019-04-22T22:40:30.000Z | 2019-04-24T21:45:07.000Z | code/actors/off_policy/discrete.py | ctallec/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 5 | 2019-04-29T16:26:18.000Z | 2020-01-23T07:17:49.000Z | from torch import Tensor
from abstract import ParametricFunction, Arrayable
from stateful import StateDict
from noises import Noise
from actors.actor import Actor
class DiscreteActor(Actor):
""" Exact argmax actor.
Actor using an argmax on the critic as policy.
:args critic: parametric neural net representing the current critic
:args target_critic: parameter delayed critic network
:args noise: perturbative noise on the action
"""
def __init__(self, critic: ParametricFunction,
target_critic: ParametricFunction, noise: Noise) -> None:
self._critic = critic
self._target_critic = target_critic
self._noise = noise
def act_noisy(self, obs: Arrayable) -> Arrayable:
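        """Return the argmax of the noise-perturbed critic output and step the noise process."""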
pre_action = self._noise.perturb_output(
obs, function=self._critic)
self._noise.step()
return pre_action.argmax(axis=-1)
def act(self, obs: Arrayable, target=False) -> Tensor:
"""If target is True, use target critic."""
critic = self._critic if not target else self._target_critic
pre_action = critic(obs)
return pre_action.argmax(dim=-1)
def optimize(self, loss: Tensor):
pass
def state_dict(self) -> StateDict:
return {}
def load_state_dict(self, state_dict):
pass
def to(self, device):
self._noise = self._noise.to(device)
return self
def log(self):
pass
@staticmethod
def configure(
critic_function: ParametricFunction,
target_critic_function: ParametricFunction,
noise: Noise, **kwargs
):
"""Configure the actor."""
return DiscreteActor(critic_function, target_critic_function, noise)
| 30.103448 | 76 | 0.661512 |
4f550e83012ae5940d198bb0dd967e8637f815e8 | 48,147 | py | Python | assemblyline/datastore/stores/es_store.py | malvidin/assemblyline-base | 6e7de0c15fbca2cf586e33f04df7d3717bd0c0b5 | [
"MIT"
] | null | null | null | assemblyline/datastore/stores/es_store.py | malvidin/assemblyline-base | 6e7de0c15fbca2cf586e33f04df7d3717bd0c0b5 | [
"MIT"
] | null | null | null | assemblyline/datastore/stores/es_store.py | malvidin/assemblyline-base | 6e7de0c15fbca2cf586e33f04df7d3717bd0c0b5 | [
"MIT"
] | null | null | null | from typing import Dict
import elasticsearch
import elasticsearch.helpers
import json
import logging
import time
from copy import deepcopy
from assemblyline.common import forge
from assemblyline import odm
from assemblyline.common.dict_utils import recursive_update
from assemblyline.common.uid import get_random_id
from assemblyline.datastore import Collection, BaseStore, log, BulkPlan
from assemblyline.datastore.exceptions import SearchException, SearchRetryException, MultiKeyError, ILMException
from assemblyline.datastore.support.elasticsearch.schemas import default_index, default_mapping, \
default_dynamic_templates
from assemblyline.datastore.support.elasticsearch.build import build_mapping, back_mapping
def _strip_lists(model, data):
"""Elasticsearch returns everything as lists, regardless of whether
we want the field to be multi-valued or not. This method uses the model's
    knowledge of what should or should not have multiple values to fix the data.
"""
fields = model.fields()
out = {}
for key, value in odm.flat_to_nested(data).items():
doc_type = fields.get(key, fields.get('', model))
# TODO: While we strip lists we don't want to know that the field is optional but we want to know what
# type of optional field that is. The following two lines of code change the doc_type to the
# child_type of the field. (Should model.fields() actually do that for us instead?)
if isinstance(doc_type, odm.Optional):
doc_type = doc_type.child_type
if isinstance(doc_type, odm.List):
out[key] = value
elif isinstance(doc_type, odm.Compound) or isinstance(doc_type, odm.Mapping):
out[key] = _strip_lists(doc_type.child_type, value)
elif isinstance(value, list):
out[key] = value[0]
else:
out[key] = value
return out
def parse_sort(sort):
"""
This function tries to do two things at once:
- convert AL sort syntax to elastic,
    - convert any sorts on the key _id to id
"""
if isinstance(sort, list):
return [parse_sort(row) for row in sort]
elif isinstance(sort, dict):
return {('id' if key == '_id' else key): value for key, value in sort.items()}
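    # Informal examples of the string form handled below:
    #   "score desc" -> [{"score": "desc"}]
    #   "_id asc"    -> [{"id": "asc"}]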
parts = sort.split(' ')
if len(parts) == 1:
        if parts[0] == '_id':
            return ['id']
        return [parts[0]]
elif len(parts) == 2:
if parts[1] not in ['asc', 'desc']:
raise SearchException('Unknown sort parameter ' + sort)
if parts[0] == '_id':
return [{'id': parts[1]}]
return [{parts[0]: parts[1]}]
raise SearchException('Unknown sort parameter ' + sort)
class RetryableIterator(object):
def __init__(self, collection, iterable):
self._iter = iter(iterable)
self.collection = collection
def __iter__(self):
return self
def __next__(self):
return self.collection.with_retries(self._iter.__next__)
class ElasticBulkPlan(BulkPlan):
def __init__(self, index, model=None):
super().__init__(index, model)
def add_delete_operation(self, doc_id):
self.operations.append(json.dumps({"delete": {"_index": self.index, "_id": doc_id}}))
def add_insert_operation(self, doc_id, doc):
if isinstance(doc, self.model):
saved_doc = doc.as_primitives(hidden_fields=True)
elif self.model:
saved_doc = self.model(doc).as_primitives(hidden_fields=True)
else:
if not isinstance(doc, dict):
saved_doc = {'__non_doc_raw__': doc}
else:
saved_doc = deepcopy(doc)
saved_doc['id'] = doc_id
self.operations.append(json.dumps({"create": {"_index": self.index, "_id": doc_id}}))
self.operations.append(json.dumps(saved_doc))
def add_upsert_operation(self, doc_id, doc):
if isinstance(doc, self.model):
saved_doc = doc.as_primitives(hidden_fields=True)
elif self.model:
saved_doc = self.model(doc).as_primitives(hidden_fields=True)
else:
if not isinstance(doc, dict):
saved_doc = {'__non_doc_raw__': doc}
else:
saved_doc = deepcopy(doc)
saved_doc['id'] = doc_id
self.operations.append(json.dumps({"update": {"_index": self.index, "_id": doc_id}}))
self.operations.append(json.dumps({"doc": saved_doc, "doc_as_upsert": True}))
def add_update_operation(self, doc_id, doc):
if isinstance(doc, self.model):
saved_doc = doc.as_primitives(hidden_fields=True)
elif self.model:
saved_doc = self.model(doc, mask=list(doc.keys())).as_primitives(hidden_fields=True)
else:
if not isinstance(doc, dict):
saved_doc = {'__non_doc_raw__': doc}
else:
saved_doc = deepcopy(doc)
self.operations.append(json.dumps({"update": {"_index": self.index, "_id": doc_id}}))
self.operations.append(json.dumps({"doc": saved_doc}))
def get_plan_data(self):
return "\n".join(self.operations)
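    # The resulting plan is the newline-delimited JSON expected by the Elasticsearch
    # _bulk endpoint: insert/upsert/update operations emit an action line followed by a
    # document line, deletes emit only the action line. ESCollection._bulk() simply
    # posts the joined string as the request body.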
class ESCollection(Collection):
DEFAULT_SORT = [{'_id': 'asc'}]
MAX_SEARCH_ROWS = 500
MAX_GROUP_LIMIT = 10
MAX_FACET_LIMIT = 100
SCROLL_TIMEOUT = "5m"
DEFAULT_SEARCH_VALUES = {
'timeout': None,
'field_list': None,
'facet_active': False,
'facet_mincount': 1,
'facet_fields': [],
'stats_active': False,
'stats_fields': [],
'filters': [],
'group_active': False,
'group_field': None,
'group_sort': None,
'group_limit': 1,
'histogram_active': False,
'histogram_field': None,
'histogram_type': None,
'histogram_gap': None,
'histogram_mincount': 1,
'start': 0,
'rows': Collection.DEFAULT_ROW_SIZE,
'query': "*",
'sort': DEFAULT_SORT,
'df': None
}
def __init__(self, datastore, name, model_class=None, replicas=0, shards=1):
self.replicas = replicas
self.shards = shards
self._index_list = []
if name in datastore.ilm_config:
self.ilm_config = datastore.ilm_config[name]
else:
self.ilm_config = None
super().__init__(datastore, name, model_class=model_class)
self.bulk_plan_class = ElasticBulkPlan
self.stored_fields = {}
if model_class:
for name, field in model_class.flat_fields().items():
if field.store:
self.stored_fields[name] = field
@property
def archive_access(self):
if self.ilm_config and self.datastore.archive_access:
return True
return False
@property
def index_list_full(self):
if not self._index_list:
self._index_list = list(self.with_retries(self.datastore.client.indices.get, f"{self.name}-*").keys())
return [self.name] + sorted(self._index_list, reverse=True)
@property
def index_list(self):
if self.archive_access:
if not self._index_list:
self._index_list = list(self.with_retries(self.datastore.client.indices.get, f"{self.name}-*").keys())
return [self.name] + sorted(self._index_list, reverse=True)
else:
return [self.name]
def with_retries(self, func, *args, **kwargs):
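        # Run an elasticsearch client call and retry on transient failures: missing
        # index (recreated on the fly), version conflicts, dropped connections and
        # 429/503 responses, backing off up to MAX_RETRY_BACKOFF seconds between
        # attempts. Anything that is not recoverable is re-raised to the caller.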
retries = 0
updated = 0
deleted = 0
while True:
try:
ret_val = func(*args, **kwargs)
if retries:
log.info('Reconnected to elasticsearch!')
if updated:
ret_val['updated'] += updated
if deleted:
ret_val['deleted'] += deleted
return ret_val
except elasticsearch.exceptions.NotFoundError as e:
if "index_not_found_exception" in str(e):
time.sleep(min(retries, self.MAX_RETRY_BACKOFF))
log.debug("The index does not exist. Trying to recreate it...")
self._ensure_collection()
self.datastore.connection_reset()
retries += 1
else:
raise
except elasticsearch.exceptions.ConflictError as ce:
updated += ce.info.get('updated', 0)
deleted += ce.info.get('deleted', 0)
time.sleep(min(retries, self.MAX_RETRY_BACKOFF))
self.datastore.connection_reset()
retries += 1
except (SearchRetryException,
elasticsearch.exceptions.ConnectionError,
elasticsearch.exceptions.ConnectionTimeout,
elasticsearch.exceptions.AuthenticationException) as e:
if not isinstance(e, SearchRetryException):
log.warning(f"No connection to Elasticsearch server(s): "
f"{' | '.join(self.datastore.get_hosts(safe=True))}"
f", retrying...")
time.sleep(min(retries, self.MAX_RETRY_BACKOFF))
self.datastore.connection_reset()
retries += 1
except elasticsearch.exceptions.TransportError as e:
err_code, msg, cause = e.args
if err_code == 503 or err_code == '503':
log.warning("Looks like index is not ready yet, retrying...")
time.sleep(min(retries, self.MAX_RETRY_BACKOFF))
self.datastore.connection_reset()
retries += 1
elif err_code == 429 or err_code == '429':
log.warning("Elasticsearch is too busy to perform the requested task, "
"we will wait a bit and retry...")
time.sleep(min(retries, self.MAX_RETRY_BACKOFF))
self.datastore.connection_reset()
retries += 1
else:
raise
def archive(self, query):
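        # Move every document matching `query` out of the hot index: reindex the
        # matches into the `<name>-archive` write alias, then delete the originals
        # with delete_by_query. Returns True only when both steps fully succeed
        # (or there was nothing to archive), False otherwise.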
if not self.archive_access:
return False
reindex_body = {
"source": {
"index": self.name,
"query": {
"bool": {
"must": {
"query_string": {
"query": query
}
}
}
}
},
"dest": {
"index": f"{self.name}-archive"
}
}
res_reindex = self.with_retries(self.datastore.client.reindex, reindex_body)
total_archived = res_reindex['updated'] + res_reindex['created']
if res_reindex['total'] == total_archived:
if total_archived != 0:
delete_body = {"query": {"bool": {"must": {"query_string": {"query": query}}}}}
info = self.with_retries(self.datastore.client.delete_by_query, index=self.name, body=delete_body)
return info.get('deleted', 0) == total_archived
else:
return True
else:
return False
def _bulk(self, operations):
return self.with_retries(self.datastore.client.bulk, body=operations)
def commit(self):
self.with_retries(self.datastore.client.indices.refresh, self.name)
self.with_retries(self.datastore.client.indices.clear_cache, self.name)
if self.archive_access:
self.with_retries(self.datastore.client.indices.refresh, f"{self.name}-archive")
self.with_retries(self.datastore.client.indices.clear_cache, f"{self.name}-archive")
return True
def reindex(self):
for index in self.index_list:
if self.with_retries(self.datastore.client.indices.exists, index):
new_name = f'{index}_{get_random_id().lower()}'
body = {
"source": {
"index": index
},
"dest": {
"index": new_name
}
}
self.with_retries(self.datastore.client.reindex, body)
if self.with_retries(self.datastore.client.indices.exists, new_name):
self.with_retries(self.datastore.client.indices.delete, index)
body = {
"source": {
"index": new_name
},
"dest": {
"index": index
}
}
self.with_retries(self.datastore.client.reindex, body)
self.with_retries(self.datastore.client.indices.delete, new_name)
return True
def multiget(self, key_list, as_dictionary=True, as_obj=True, error_on_missing=True):
def add_to_output(data_output, data_id):
if "__non_doc_raw__" in data_output:
if as_dictionary:
out[data_id] = data_output['__non_doc_raw__']
else:
out.append(data_output['__non_doc_raw__'])
else:
data_output.pop('id', None)
if as_dictionary:
out[data_id] = self.normalize(data_output, as_obj=as_obj)
else:
out.append(self.normalize(data_output, as_obj=as_obj))
if as_dictionary:
out = {}
else:
out = []
if key_list:
data = self.with_retries(self.datastore.client.mget, {'ids': key_list}, index=self.name)
for row in data.get('docs', []):
if 'found' in row and not row['found']:
continue
try:
key_list.remove(row['_id'])
add_to_output(row['_source'], row['_id'])
except ValueError:
log.error(f'MGet returned multiple documents for id: {row["_id"]}')
if key_list and self.archive_access:
query_body = {"query": {"ids": {"values": key_list}}}
iterator = RetryableIterator(
self,
elasticsearch.helpers.scan(
self.datastore.client,
query=query_body,
index=f"{self.name}-*",
preserve_order=True
)
)
for row in iterator:
try:
key_list.remove(row['_id'])
add_to_output(row['_source'], row['_id'])
except ValueError:
log.error(f'MGet returned multiple documents for id: {row["_id"]}')
if key_list and error_on_missing:
raise MultiKeyError(key_list, out)
return out
def _get(self, key, retries):
def normalize_output(data_output):
if "__non_doc_raw__" in data_output:
return data_output['__non_doc_raw__']
data_output.pop('id', None)
return data_output
if retries is None:
retries = self.RETRY_NONE
done = False
while not done:
try:
data = self.with_retries(self.datastore.client.get, index=self.name, id=key)['_source']
return normalize_output(data)
except elasticsearch.exceptions.NotFoundError:
pass
if self.archive_access:
query_body = {"query": {"ids": {"values": [key]}}}
hits = self.with_retries(self.datastore.client.search, index=f"{self.name}-*",
body=query_body)['hits']['hits']
if len(hits) > 0:
return normalize_output(max(hits, key=lambda row: row['_index'])['_source'])
if retries > 0:
time.sleep(0.05)
retries -= 1
elif retries < 0:
time.sleep(0.05)
else:
done = True
return None
def _save(self, key, data):
if self.model_class:
saved_data = data.as_primitives(hidden_fields=True)
else:
if not isinstance(data, dict):
saved_data = {'__non_doc_raw__': data}
else:
saved_data = deepcopy(data)
saved_data['id'] = key
self.with_retries(
self.datastore.client.index,
index=self.name,
id=key,
body=json.dumps(saved_data)
)
return True
def delete(self, key):
deleted = False
try:
info = self.with_retries(self.datastore.client.delete, id=key, index=self.name)
deleted = info['result'] == 'deleted'
except elasticsearch.NotFoundError:
pass
if self.archive_access:
query_body = {"query": {"ids": {"values": [key]}}}
info = self.with_retries(self.datastore.client.delete_by_query, index=f"{self.name}-*", body=query_body)
if not deleted:
deleted = info.get('deleted', 0) == info.get('total', 0)
else:
deleted = True
return deleted
def delete_matching(self, query, workers=20):
index = self.name
if self.archive_access:
index = f"{index},{self.name}-*"
query_body = {"query": {"bool": {"must": {"query_string": {"query": query}}}}}
info = self.with_retries(self.datastore.client.delete_by_query, index=index, body=query_body)
return info.get('deleted', 0) != 0
def _create_scripts_from_operations(self, operations):
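        # Convert a list of (operation, field, value) tuples into a single painless
        # script: each operation becomes one statement (set/append/remove/inc/dec...)
        # and its value is passed through the script params, so the same script body
        # can be handed to update() or update_by_query().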
op_sources = []
op_params = {}
val_id = 0
for op, doc_key, value in operations:
if op == self.UPDATE_SET:
op_sources.append(f"ctx._source.{doc_key} = params.value{val_id}")
op_params[f'value{val_id}'] = value
elif op == self.UPDATE_DELETE:
op_sources.append(f"ctx._source.{doc_key}.remove(params.value{val_id})")
op_params[f'value{val_id}'] = value
elif op == self.UPDATE_APPEND:
op_sources.append(f"ctx._source.{doc_key}.add(params.value{val_id})")
op_params[f'value{val_id}'] = value
elif op == self.UPDATE_REMOVE:
script = f"if (ctx._source.{doc_key}.indexOf(params.value{val_id}) != -1) " \
f"{{ctx._source.{doc_key}.remove(ctx._source.{doc_key}.indexOf(params.value{val_id}))}}"
op_sources.append(script)
op_params[f'value{val_id}'] = value
elif op == self.UPDATE_INC:
op_sources.append(f"ctx._source.{doc_key} += params.value{val_id}")
op_params[f'value{val_id}'] = value
elif op == self.UPDATE_DEC:
op_sources.append(f"ctx._source.{doc_key} -= params.value{val_id}")
op_params[f'value{val_id}'] = value
val_id += 1
joined_sources = """;\n""".join(op_sources)
script = {
"lang": "painless",
"source": joined_sources.replace("};\n", "}\n"),
"params": op_params
}
return script
def _update(self, key, operations):
script = self._create_scripts_from_operations(operations)
update_body = {
"script": script
}
# noinspection PyBroadException
try:
res = self.with_retries(self.datastore.client.update, index=self.name, id=key, body=update_body)
return res['result'] == "updated"
except elasticsearch.NotFoundError:
pass
except Exception:
return False
if self.archive_access:
query_body = {"query": {"ids": {"values": [key]}}}
update_body.update(query_body)
info = self.with_retries(self.datastore.client.update_by_query, index=f"{self.name}-*", body=update_body)
return info.get('updated', 0) != 0
return False
def _update_by_query(self, query, operations, filters):
if filters is None:
filters = []
index = self.name
if self.archive_access:
index = f"{index},{self.name}-*"
script = self._create_scripts_from_operations(operations)
query_body = {
"script": script,
"query": {
"bool": {
"must": {
"query_string": {
"query": query
}
},
'filter': [{'query_string': {'query': ff}} for ff in filters]
}
}
}
# noinspection PyBroadException
try:
res = self.with_retries(self.datastore.client.update_by_query, index=index, body=query_body)
except Exception:
return False
return res['updated']
def _format_output(self, result, fields=None, as_obj=True):
# Getting search document data
source = result.get('fields', {})
source_data = result.pop('_source', None)
item_id = result['_id']
# Remove extra fields that should not show up in the search results
source.pop('_version', None)
source.pop(self.DEFAULT_SEARCH_FIELD, None)
source.pop('id', None)
if self.model_class:
if not fields or '*' in fields:
fields = list(self.stored_fields.keys())
fields.append('id')
elif isinstance(fields, str):
fields = fields.split(',')
if source_data:
source_data.pop('id', None)
return self.model_class(source_data, docid=item_id)
source = _strip_lists(self.model_class, source)
if as_obj:
return self.model_class(source, mask=fields, docid=item_id)
else:
if 'id' in fields:
source['id'] = item_id
return source
if isinstance(fields, str):
            fields = fields.split(',')
if fields is None or '*' in fields or 'id' in fields:
source['id'] = [item_id]
if fields is None or '*' in fields:
return source
return {key: val for key, val in source.items() if key in fields}
def _search(self, args=None, deep_paging_id=None, use_archive=True, track_total_hits=None):
index = self.name
if self.archive_access and use_archive:
index = f"{index},{self.name}-*"
params = {}
if deep_paging_id is not None:
params = {'scroll': self.SCROLL_TIMEOUT}
elif track_total_hits:
params['track_total_hits'] = track_total_hits
parsed_values = deepcopy(self.DEFAULT_SEARCH_VALUES)
# TODO: we should validate values for max rows, group length, history length...
for key, value in args:
if key not in parsed_values:
all_args = '; '.join('%s=%s' % (field_name, field_value) for field_name, field_value in args)
raise ValueError("Unknown query argument: %s %s of [%s]" % (key, value, all_args))
parsed_values[key] = value
# This is our minimal query, the following sections will fill it out
# with whatever extra options the search has been given.
query_body = {
"query": {
"bool": {
"must": {
"query_string": {
"query": parsed_values['query']
}
},
'filter': [{'query_string': {'query': ff}} for ff in parsed_values['filters']]
}
},
'from': parsed_values['start'],
'size': parsed_values['rows'],
'sort': parse_sort(parsed_values['sort']),
"stored_fields": parsed_values['field_list'] or ['*']
}
if parsed_values['df']:
query_body["query"]["bool"]["must"]["query_string"]["default_field"] = parsed_values['df']
# Time limit for the query
if parsed_values['timeout']:
query_body['timeout'] = parsed_values['timeout']
# Add an histogram aggregation
if parsed_values['histogram_active']:
query_body["aggregations"] = query_body.get("aggregations", {})
query_body["aggregations"]["histogram"] = {
parsed_values['histogram_type']: {
"field": parsed_values['histogram_field'],
"interval": parsed_values['histogram_gap'],
"min_doc_count": parsed_values['histogram_mincount']
}
}
# Add a facet aggregation
if parsed_values['facet_active']:
query_body["aggregations"] = query_body.get("aggregations", {})
for field in parsed_values['facet_fields']:
query_body["aggregations"][field] = {
"terms": {
"field": field,
"min_doc_count": parsed_values['facet_mincount']
}
}
# Add a facet aggregation
if parsed_values['stats_active']:
query_body["aggregations"] = query_body.get("aggregations", {})
for field in parsed_values['stats_fields']:
query_body["aggregations"][f"{field}_stats"] = {
"stats": {
"field": field
}
}
# Add a group aggregation
if parsed_values['group_active']:
query_body["collapse"] = {
"field": parsed_values['group_field'],
"inner_hits": {
"name": "group",
"stored_fields": parsed_values['field_list'] or ['*'],
"size": parsed_values['group_limit'],
"sort": parse_sort(parsed_values['group_sort']) or [{parsed_values['group_field']: 'asc'}]
}
}
try:
if deep_paging_id is not None and not deep_paging_id == "*":
# Get the next page
result = self.with_retries(self.datastore.client.scroll, scroll_id=deep_paging_id, params=params)
else:
# Run the query
result = self.with_retries(self.datastore.client.search, index=index,
body=json.dumps(query_body), params=params)
return result
except (elasticsearch.TransportError, elasticsearch.RequestError) as e:
try:
err_msg = e.info['error']['root_cause'][0]['reason']
except (ValueError, KeyError, IndexError):
err_msg = str(e)
raise SearchException(err_msg)
except (elasticsearch.ConnectionError, elasticsearch.ConnectionTimeout) as error:
raise SearchRetryException("collection: %s, query: %s, error: %s" % (self.name, query_body, str(error)))
except Exception as error:
raise SearchException("collection: %s, query: %s, error: %s" % (self.name, query_body, str(error)))
def search(self, query, offset=0, rows=None, sort=None,
fl=None, timeout=None, filters=None, access_control=None,
deep_paging_id=None, as_obj=True, use_archive=True, track_total_hits=None):
if rows is None:
rows = self.DEFAULT_ROW_SIZE
if sort is None:
sort = self.DEFAULT_SORT
if filters is None:
filters = []
elif isinstance(filters, str):
filters = [filters]
if access_control:
filters.append(access_control)
args = [
('query', query),
('start', offset),
('rows', rows),
('sort', sort),
('df', self.DEFAULT_SEARCH_FIELD)
]
if fl:
field_list = fl.split(',')
args.append(('field_list', field_list))
else:
field_list = None
if timeout:
args.append(('timeout', "%sms" % timeout))
if filters:
args.append(('filters', filters))
result = self._search(args, deep_paging_id=deep_paging_id, use_archive=use_archive,
track_total_hits=track_total_hits)
ret_data = {
"offset": int(offset),
"rows": int(rows),
"total": int(result['hits']['total']['value']),
"items": [self._format_output(doc, field_list, as_obj=as_obj) for doc in result['hits']['hits']]
}
new_deep_paging_id = result.get("_scroll_id", None)
# Check if the scroll is finished and close it
if deep_paging_id is not None and new_deep_paging_id is None:
self.with_retries(self.datastore.client.clear_scroll, body={"scroll_id": [deep_paging_id]}, ignore=(404,))
# Check if we can tell from inspection that we have finished the scroll
if new_deep_paging_id is not None and len(ret_data["items"]) < ret_data["rows"]:
self.with_retries(self.datastore.client.clear_scroll,
body={"scroll_id": [new_deep_paging_id]}, ignore=(404,))
new_deep_paging_id = None
if new_deep_paging_id is not None:
ret_data['next_deep_paging_id'] = new_deep_paging_id
return ret_data
def stream_search(self, query, fl=None, filters=None, access_control=None,
item_buffer_size=200, as_obj=True, use_archive=True):
if item_buffer_size > 500 or item_buffer_size < 50:
raise SearchException("Variable item_buffer_size must be between 50 and 500.")
if query in ["*", "*:*"] and fl != 'id':
            raise SearchException("You did not specify a query, you just asked for everything... Play nice.")
index = self.name
if self.archive_access and use_archive:
index = f"{index},{self.name}-*"
if filters is None:
filters = []
elif isinstance(filters, str):
filters = [filters]
if access_control:
filters.append(access_control)
if fl:
fl = fl.split(',')
query_body = {
"query": {
"bool": {
"must": {
"query_string": {
"default_field": 'id',
"query": query
}
},
'filter': [{'query_string': {'query': ff}} for ff in filters]
}
},
"sort": parse_sort(self.datastore.DEFAULT_SORT),
"stored_fields": fl or ['*']
}
iterator = RetryableIterator(
self,
elasticsearch.helpers.scan(
self.datastore.client,
query=query_body,
index=index,
preserve_order=True
)
)
for value in iterator:
# Unpack the results, ensure the id is always set
yield self._format_output(value, fl, as_obj=as_obj)
def histogram(self, field, start, end, gap, query="id:*", mincount=1,
filters=None, access_control=None, use_archive=True):
type_modifier = self._validate_steps_count(start, end, gap)
start = type_modifier(start)
end = type_modifier(end)
gap = type_modifier(gap)
if filters is None:
filters = []
elif isinstance(filters, str):
filters = [filters]
filters.append('{field}:[{min} TO {max}]'.format(field=field, min=start, max=end))
args = [
('query', query),
('histogram_active', True),
('histogram_field', field),
('histogram_type', "date_histogram" if isinstance(gap, str) else 'histogram'),
('histogram_gap', gap.strip('+') if isinstance(gap, str) else gap),
('histogram_mincount', mincount)
]
if access_control:
filters.append(access_control)
if filters:
args.append(('filters', filters))
result = self._search(args, use_archive=use_archive)
# Convert the histogram into a dictionary
return {type_modifier(row.get('key_as_string', row['key'])): row['doc_count']
for row in result['aggregations']['histogram']['buckets']}
def facet(self, field, query="id:*", prefix=None, contains=None, ignore_case=False, sort=None, limit=10,
mincount=1, filters=None, access_control=None, use_archive=True):
if filters is None:
filters = []
elif isinstance(filters, str):
filters = [filters]
args = [
('query', query),
('facet_active', True),
('facet_fields', [field]),
('facet_mincount', mincount),
('rows', 0)
]
# TODO: prefix, contains, ignore_case, sort
if access_control:
filters.append(access_control)
if filters:
args.append(('filters', filters))
result = self._search(args, use_archive=use_archive)
# Convert the histogram into a dictionary
return {row.get('key_as_string', row['key']): row['doc_count']
for row in result['aggregations'][field]['buckets']}
def stats(self, field, query="id:*", filters=None, access_control=None, use_archive=True):
if filters is None:
filters = []
elif isinstance(filters, str):
filters = [filters]
args = [
('query', query),
('stats_active', True),
('stats_fields', [field]),
('rows', 0)
]
if access_control:
filters.append(access_control)
if filters:
args.append(('filters', filters))
result = self._search(args, use_archive=use_archive)
return result['aggregations'][f"{field}_stats"]
def grouped_search(self, group_field, query="id:*", offset=0, sort=None, group_sort=None, fl=None, limit=1,
rows=None, filters=None, access_control=None, as_obj=True, use_archive=True):
if rows is None:
rows = self.DEFAULT_ROW_SIZE
if sort is None:
sort = self.DEFAULT_SORT
if group_sort is None:
group_sort = self.DEFAULT_SORT
if filters is None:
filters = []
elif isinstance(filters, str):
filters = [filters]
args = [
('query', query),
('group_active', True),
('group_field', group_field),
('group_limit', limit),
('group_sort', group_sort),
('start', offset),
('rows', rows),
('sort', sort)
]
filters.append("%s:*" % group_field)
if fl:
field_list = fl.split(',')
args.append(('field_list', field_list))
else:
field_list = None
if access_control:
filters.append(access_control)
if filters:
args.append(('filters', filters))
result = self._search(args, use_archive=use_archive)
return {
'offset': offset,
'rows': rows,
'total': int(result['hits']['total']['value']),
'items': [{
'value': collapsed['fields'][group_field][0],
'total': int(collapsed['inner_hits']['group']['hits']['total']['value']),
'items': [self._format_output(row, field_list, as_obj=as_obj)
for row in collapsed['inner_hits']['group']['hits']['hits']]
} for collapsed in result['hits']['hits']]
}
@staticmethod
def _get_odm_type(ds_type):
try:
return back_mapping[ds_type].__name__.lower()
except KeyError:
return ds_type.lower()
def fields(self):
def flatten_fields(props):
out = {}
for name, value in props.items():
if 'properties' in value:
for child, cprops in flatten_fields(value['properties']).items():
out[name + '.' + child] = cprops
elif 'type' in value:
out[name] = value
else:
raise ValueError("Unknown field data " + str(props))
return out
data = self.with_retries(self.datastore.client.indices.get, self.name)
properties = flatten_fields(data[self.name]['mappings'].get('properties', {}))
if self.model_class:
model_fields = self.model_class.flat_fields()
else:
model_fields = {}
collection_data = {}
for p_name, p_val in properties.items():
if p_name.startswith("_") or "//" in p_name:
continue
if not Collection.FIELD_SANITIZER.match(p_name):
continue
field_model = model_fields.get(p_name, None)
f_type = self._get_odm_type(p_val.get('analyzer', None) or p_val['type'])
collection_data[p_name] = {
"default": self.DEFAULT_SEARCH_FIELD in p_val.get('copy_to', []),
"indexed": p_val.get('index', p_val.get('enabled', True)),
"list": field_model.multivalued if field_model else False,
"stored": p_val.get('store', False),
"type": f_type
}
return collection_data
def _ilm_policy_exists(self):
conn = self.datastore.client.transport.get_connection()
pol_req = conn.session.get(f"{conn.base_url}/_ilm/policy/{self.name}_policy")
return pol_req.ok
def _create_ilm_policy(self):
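        # Build the ILM policy body from this collection's ilm_config: roll the hot
        # index over after `warm` units, lower the priority in the warm and cold
        # phases, and add a delete phase only when a delete age is configured, then
        # PUT it to the _ilm/policy endpoint through the low-level connection.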
data_base = {
"policy": {
"phases": {
"hot": {
"min_age": "0ms",
"actions": {
"set_priority": {
"priority": 100
},
"rollover": {
"max_age": f"{self.ilm_config['warm']}{self.ilm_config['unit']}"
}
}
},
"warm": {
"actions": {
"set_priority": {
"priority": 50
}
}
},
"cold": {
"min_age": f"{self.ilm_config['cold']}{self.ilm_config['unit']}",
"actions": {
"set_priority": {
"priority": 20
}
}
}
}
}
}
if self.ilm_config['delete']:
data_base['policy']['phases']['delete'] = {
"min_age": f"{self.ilm_config['delete']}{self.ilm_config['unit']}",
"actions": {
"delete": {}
}
}
conn = self.datastore.client.transport.get_connection()
pol_req = conn.session.put(f"{conn.base_url}/_ilm/policy/{self.name}_policy",
headers={"Content-Type": "application/json"},
data=json.dumps(data_base))
if not pol_req.ok:
raise ILMException(f"ERROR: Failed to create ILM policy: {self.name}_policy")
def _ensure_collection(self):
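        # Make sure everything backing this collection exists: the hot index itself
        # and, when ILM is configured, the ILM policy, the warm index template and
        # the archive write alias pointing at the first rollover index.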
def get_index_definition():
index_def = deepcopy(default_index)
if 'settings' not in index_def:
index_def['settings'] = {}
if 'index' not in index_def['settings']:
index_def['settings']['index'] = {}
index_def['settings']['index']['number_of_shards'] = self.shards
index_def['settings']['index']['number_of_replicas'] = self.replicas
mappings = deepcopy(default_mapping)
if self.model_class:
mappings['properties'], mappings['dynamic_templates'] = \
build_mapping(self.model_class.fields().values())
else:
mappings['dynamic_templates'] = deepcopy(default_dynamic_templates)
if not mappings['dynamic_templates']:
# Setting dynamic to strict prevents any documents with fields not in the properties to be added
mappings['dynamic'] = "strict"
mappings['properties']['id'] = {
"store": True,
"doc_values": True,
"type": 'keyword'
}
mappings['properties']['__text__'] = {
"store": False,
"type": 'text',
}
index_def['mappings'] = mappings
return index_def
# Create HOT index
if not self.with_retries(self.datastore.client.indices.exists, self.name):
log.debug(f"Index {self.name.upper()} does not exists. Creating it now...")
try:
self.with_retries(self.datastore.client.indices.create, self.name, get_index_definition())
except elasticsearch.exceptions.RequestError as e:
if "resource_already_exists_exception" not in str(e):
raise
                log.warning(f"Tried to create an index that already exists: {self.name.upper()}")
if self.ilm_config:
# Create ILM policy
while not self._ilm_policy_exists():
try:
self.with_retries(self._create_ilm_policy)
except ILMException:
time.sleep(0.1)
pass
# Create WARM index template
if not self.with_retries(self.datastore.client.indices.exists_template, self.name):
log.debug(f"Index template {self.name.upper()} does not exists. Creating it now...")
index = get_index_definition()
index["index_patterns"] = [f"{self.name}-*"]
index["order"] = 1
index["settings"]["index.lifecycle.name"] = f"{self.name}_policy"
index["settings"]["index.lifecycle.rollover_alias"] = f"{self.name}-archive"
try:
self.with_retries(self.datastore.client.indices.put_template, self.name, index)
except elasticsearch.exceptions.RequestError as e:
if "resource_already_exists_exception" not in str(e):
raise
log.warning(f"Tried to create an index template that already exists: {self.name.upper()}")
if not self.with_retries(self.datastore.client.indices.exists_alias, f"{self.name}-archive"):
log.debug(f"Index alias {self.name.upper()}-archive does not exists. Creating it now...")
index = {"aliases": {f"{self.name}-archive": {"is_write_index": True}}}
try:
self.with_retries(self.datastore.client.indices.create, f"{self.name}-000001", index)
except elasticsearch.exceptions.RequestError as e:
if "resource_already_exists_exception" not in str(e):
raise
                    log.warning(f"Tried to create an index that already exists: {self.name.upper()}-000001")
self._check_fields()
def _add_fields(self, missing_fields: Dict):
no_fix = []
properties = {}
for name, field in missing_fields.items():
# Figure out the path of the field in the document, if the name is set in the field, it
# is going to be duplicated in the path from missing_fields, so drop it
prefix = name.split('.')
if field.name:
prefix = prefix[:-1]
# Build the fields and templates for this new mapping
sub_properties, sub_templates = build_mapping([field], prefix=prefix, allow_refuse_implicit=False)
properties.update(sub_properties)
if sub_templates:
no_fix.append(name)
# If we have collected any fields that we can't just blindly add, as they might conflict
# with existing things, (we might have the refuse_all_implicit_mappings rule in place)
# simply raise an exception
if no_fix:
raise ValueError(f"Can't update database mapping for {self.name}, "
f"couldn't safely amend mapping for {no_fix}")
# If we got this far, the missing fields have been described in properties, upload them to the
# server, and we should be able to move on.
mappings = {"properties": properties}
for index in self.index_list_full:
self.with_retries(self.datastore.client.indices.put_mapping, index=index, body=mappings)
current_template = self.with_retries(self.datastore.client.indices.get_template, self.name)[self.name]
recursive_update(current_template, {'mappings': mappings})
self.with_retries(self.datastore.client.indices.put_template, self.name, body=current_template)
def wipe(self):
log.debug("Wipe operation started for collection: %s" % self.name.upper())
for index in self.index_list:
if self.with_retries(self.datastore.client.indices.exists, index):
self.with_retries(self.datastore.client.indices.delete, index)
if self.with_retries(self.datastore.client.indices.exists_template, self.name):
self.with_retries(self.datastore.client.indices.delete_template, self.name)
self._ensure_collection()
class ESStore(BaseStore):
""" Elasticsearch multi-index implementation of the ResultStore interface."""
DEFAULT_SORT = "id asc"
DATE_FORMAT = {
'NOW': 'now',
'YEAR': 'y',
'MONTH': 'M',
'WEEK': 'w',
'DAY': 'd',
'HOUR': 'h',
'MINUTE': 'm',
'SECOND': 's',
'MILLISECOND': 'ms',
'MICROSECOND': 'micros',
'NANOSECOND': 'nanos',
'SEPARATOR': '||',
'DATE_END': 'Z'
}
def __init__(self, hosts, collection_class=ESCollection, archive_access=True):
config = forge.get_config()
if config.datastore.ilm.enabled:
ilm_config = config.datastore.ilm.indexes.as_primitives()
else:
ilm_config = {}
super(ESStore, self).__init__(hosts, collection_class, ilm_config=ilm_config)
tracer = logging.getLogger('elasticsearch')
tracer.setLevel(logging.CRITICAL)
self.client = elasticsearch.Elasticsearch(hosts=hosts,
connection_class=elasticsearch.RequestsHttpConnection,
max_retries=0)
self.archive_access = archive_access
self.url_path = 'elastic'
def __str__(self):
return '{0} - {1}'.format(self.__class__.__name__, self._hosts)
def ping(self):
return self.client.ping()
def close(self):
super().close()
self.client = None
def connection_reset(self):
self.client = elasticsearch.Elasticsearch(hosts=self._hosts,
connection_class=elasticsearch.RequestsHttpConnection,
max_retries=0)
| 37.497664 | 118 | 0.541425 |
4f599817601f138a82a54b088cde8e08d10b630b | 5,222 | py | Python | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/aio/operations/_refresh_tokens_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 1 | 2021-04-05T17:38:42.000Z | 2021-04-05T17:38:42.000Z | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/aio/operations/_refresh_tokens_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 1 | 2021-06-23T14:50:11.000Z | 2021-06-24T12:26:05.000Z | sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/aio/operations/_refresh_tokens_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RefreshTokensOperations:
"""RefreshTokensOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.containerregistry.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_from_exchange(
self,
grant_type: Union[str, "_models.PostContentSchemaGrantType"],
service: str,
tenant: Optional[str] = None,
refresh_token: Optional[str] = None,
access_token: Optional[str] = None,
**kwargs
) -> "_models.RefreshToken":
"""Exchange AAD tokens for an ACR refresh Token.
:param grant_type: Can take a value of access_token_refresh_token, or access_token, or
refresh_token.
:type grant_type: str or ~azure.containerregistry.models.PostContentSchemaGrantType
:param service: Indicates the name of your Azure container registry.
:type service: str
:param tenant: AAD tenant associated to the AAD credentials.
:type tenant: str
:param refresh_token: AAD refresh token, mandatory when grant_type is
access_token_refresh_token or refresh_token.
:type refresh_token: str
:param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token
or access_token.
:type access_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RefreshToken, or the result of cls(response)
:rtype: ~azure.containerregistry.models.RefreshToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RefreshToken"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/x-www-form-urlencoded")
accept = "application/json"
# Construct URL
url = self.get_from_exchange.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(grant_type, 'str')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AcrErrors, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RefreshToken', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_from_exchange.metadata = {'url': '/oauth2/exchange'} # type: ignore
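    # Hypothetical usage sketch (client construction is assumed, and it is assumed the
    # containing client exposes this operation group as `refresh_tokens`):
    #
    #     refresh = await client.refresh_tokens.get_from_exchange(
    #         grant_type="access_token", service="myregistry.azurecr.io",
    #         access_token=aad_access_token)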
| 46.212389 | 133 | 0.680582 |
4f58c3d30daee620141a4f53b8fde63ab2eb67f4 | 5,508 | py | Python | models/models.py | shiram/br_connector | ac24c9c1bb9ac20f1d846d108dbd4bf425dedd48 | [
"Apache-2.0"
] | null | null | null | models/models.py | shiram/br_connector | ac24c9c1bb9ac20f1d846d108dbd4bf425dedd48 | [
"Apache-2.0"
] | null | null | null | models/models.py | shiram/br_connector | ac24c9c1bb9ac20f1d846d108dbd4bf425dedd48 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo import tools, _
from odoo.exceptions import ValidationError, AccessError
import pyodbc
class BRDatabase(models.Model):
_name = 'br_connector.database'
_description = 'BR Database'
name = fields.Char(string='Name of Database')
database_host = fields.Char(string='Host')
database_port = fields.Char(string='Port')
database_uid = fields.Char(string='UID')
database_passwd = fields.Char(string='Password')
_sql_constraints = [
        ('name_uniq', 'UNIQUE (name)', _('You cannot have multiple databases with the same name'))
]
@api.one
def action_test_connection(self):
connection_res = self.connect_database()
connection_res = connection_res[0]
if connection_res:
if 'status' in connection_res and connection_res['status'] == 200:
self.env.user.notify_success(message=_('Connection to database {0} is successful.'.
format(self.name)),
title=_('Connection Success'), sticky=True)
connection_res['cursor'].close()
connection_res['connection'].close()
else:
raise ValidationError(_('An Error was encountered: {0}'.format(connection_res['error'])))
else:
            self.env.user.notify_danger(message=_('Connection to database {0} did not return any response'.format(self.name)),
                                        title=_('Connection Failed'), sticky=True)
@api.one
def connect_database(self):
"""
        This method takes the object's attributes, gets config values and creates a connection to the database.
:return: a dict containing the cursor, connection, status and error.
status 200 - successful, 500 - failure.
"""
connection_res = {}
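        # Shape of the returned dict (informal): on success {'cursor': <pyodbc cursor>,
        # 'connection': <pyodbc connection>, 'status': 200, 'error': ''}; on a pyodbc
        # failure status is 500, 'error' holds the driver message and cursor/connection
        # are None.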
connect_br_enabled = self.env['ir.config_parameter'].sudo().get_param('br_connector.connect_to_br')
if connect_br_enabled:
if self.name and self.database_host and self.database_port and self.database_uid and self.database_passwd:
free_tds_path = self.env['ir.config_parameter'].sudo().get_param('br_connector.free_tds_driver_path')
tds_version = self.env['ir.config_parameter'].sudo().get_param('br_connector.tds_version')
if free_tds_path and tds_version:
quoted_connection = ';'.join(
[
'DRIVER={}'.format(free_tds_path),
'SERVER={}'.format(self.database_host),
'DATABASE={}'.format(self.name),
'UID={}'.format(self.database_uid),
'PWD={}'.format(self.database_passwd),
'PORT={}'.format(self.database_port),
'TDS_VERSION={}'.format(str(tds_version))
]
)
try:
connection = pyodbc.connect(quoted_connection)
if connection and connection is not None:
cursor = connection.cursor()
connection_res.update(
{'cursor': cursor, 'connection': connection, 'status': 200, 'error': ''})
except pyodbc.Error as ex:
connection_res.update(
{'cursor': None, 'connection': None, 'status': 500, 'error': str(ex)})
else:
raise ValidationError(_('No Free TDS driver path set, please set path to driver in general '
'settings.'))
else:
raise ValidationError(_("Please make sure all database credentials are provided."))
else:
raise ValidationError(_("SQL connection is not enabled in general settings."))
return connection_res
class ResConfig(models.TransientModel):
_inherit = ['res.config.settings']
connect_to_br = fields.Boolean(default=False, string='Connect to BR', help='Set true if you want to connect to BR.')
free_tds_driver_path = fields.Char(string='Free TDS Driver Path')
tds_version = fields.Float(string='TDS Version', digits=(2, 1))
@api.model
def get_values(self):
res = super(ResConfig, self).get_values()
res.update(
connect_to_br=self.env['ir.config_parameter'].sudo().get_param(
'br_connector.connect_to_br'),
free_tds_driver_path=self.env['ir.config_parameter'].sudo().get_param(
'br_connector.free_tds_driver_path'),
tds_version=float(self.env['ir.config_parameter'].sudo().get_param('br_connector.tds_version')),
)
return res
@api.multi
def set_values(self):
super(ResConfig, self).set_values()
self.env['ir.config_parameter'].sudo().set_param('br_connector.connect_to_br',
self.connect_to_br)
self.env['ir.config_parameter'].sudo().set_param('br_connector.free_tds_driver_path',
self.free_tds_driver_path)
self.env['ir.config_parameter'].sudo().set_param('br_connector.tds_version',
self.tds_version)
| 49.178571 | 125 | 0.567175 |
4f5cb09a5f01856f5684bb6bfe19668ba82bf34d | 2,909 | py | Python | peek_plugin_base/worker/CeleryDbConn.py | Synerty/peek-plugin-base | aa8a09a52bd5e1f95e4b61907f5de3b661fc9780 | ["MIT"] | null | null | null | peek_plugin_base/worker/CeleryDbConn.py | Synerty/peek-plugin-base | aa8a09a52bd5e1f95e4b61907f5de3b661fc9780 | ["MIT"] | null | null | null | peek_plugin_base/worker/CeleryDbConn.py | Synerty/peek-plugin-base | aa8a09a52bd5e1f95e4b61907f5de3b661fc9780 | ["MIT"] | 1 | 2016-12-12T15:05:18.000Z | 2016-12-12T15:05:18.000Z |
import logging
import platform
from threading import Lock
from typing import Iterable, Optional
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.orm.session import sessionmaker
from peek_plugin_base.PeekVortexUtil import peekWorkerName
from peek_plugin_base.storage.DbConnection import _commonPrefetchDeclarativeIds
logger = logging.getLogger(__name__)
_dbConnectString = None
_dbEngineArgs = {}
__dbEngine = None
__ScopedSession = None
_isWindows = platform.system() == "Windows"
def setConnStringForWindows():
""" Set Conn String for Windiws
Windows has a different way of forking processes, which causes the
@worker_process_init.connect signal not to work in "CeleryDbConnInit"
"""
global _dbConnectString
global _dbEngineArgs
from peek_platform.file_config.PeekFileConfigABC import PeekFileConfigABC
from peek_platform.file_config.PeekFileConfigSqlAlchemyMixin import \
PeekFileConfigSqlAlchemyMixin
from peek_platform import PeekPlatformConfig
class _WorkerTaskConfigMixin(PeekFileConfigABC,
PeekFileConfigSqlAlchemyMixin):
pass
PeekPlatformConfig.componentName = peekWorkerName
_dbConnectString = _WorkerTaskConfigMixin().dbConnectString
_dbEngineArgs = _WorkerTaskConfigMixin().dbEngineArgs
# For celery, an engine is created per worker
def getDbEngine():
global __dbEngine
if _dbConnectString is None:
if _isWindows:
from peek_platform.ConfigCeleryApp import configureCeleryLogging
configureCeleryLogging()
setConnStringForWindows()
else:
msg = "CeleryDbConn initialisation error"
logger.error(msg)
raise Exception(msg)
if not __dbEngine:
__dbEngine = create_engine(
_dbConnectString,
**_dbEngineArgs
)
return __dbEngine
def getDbSession():
global __ScopedSession
if not __ScopedSession:
__ScopedSession = scoped_session(sessionmaker(bind=getDbEngine()))
return __ScopedSession()
_sequenceMutex = Lock()
def prefetchDeclarativeIds(Declarative, count) -> Optional[Iterable[int]]:
""" Prefetch Declarative IDs
This function prefetches a chunk of IDs from a database sequence.
Doing this allows us to preallocate the IDs before an insert, which significantly
speeds up :
* Orm inserts, especially those using inheritance
* When we need the ID to assign it to a related object that we're also inserting.
:param Declarative: The SQLAlchemy declarative class.
(The class that inherits from DeclarativeBase)
:param count: The number of IDs to prefetch
:return: An iterable that dispenses the new IDs
"""
return _commonPrefetchDeclarativeIds(
getDbEngine(), _sequenceMutex, Declarative, count
)
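# Added illustration (not part of the original Peek module): a minimal sketch of how the
# prefetched ids are typically consumed before a bulk insert. "rows" and "MyDeclarative"
# are hypothetical inputs; only getDbSession() and prefetchDeclarativeIds() above are real.
def _examplePrefetchUsage(rows, MyDeclarative):
    session = getDbSession()
    newIds = prefetchDeclarativeIds(MyDeclarative, len(rows))
    for row, newId in zip(rows, newIds):
        row.id = newId  # assign the preallocated primary key before flushing
    session.add_all(rows)
    session.commit()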
| 28.519608 | 85 | 0.738054 |
4f5f190159742a265e809d33b3b55641e553cc48 | 28,328 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_route_filter_rules_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | ["MIT"] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_route_filter_rules_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | ["MIT"] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_route_filter_rules_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | ["MIT"] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> "_models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2019_06_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2019_06_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
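# Added illustration (not part of the generated SDK file): these operations are normally
# reached through the versioned async management client; the import path, credential and
# resource names below are assumptions for illustration only.
async def _example_delete_rule(credential, subscription_id):
    from azure.mgmt.network.v2019_06_01.aio import NetworkManagementClient
    client = NetworkManagementClient(credential, subscription_id)
    # begin_delete returns an AsyncLROPoller; result() waits for the long-running delete.
    poller = await client.route_filter_rules.begin_delete(
        "example-rg", "example-route-filter", "example-rule"
    )
    await poller.result()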
| 50.405694 | 221 | 0.673256 |
4f5e350727ec22d9c7f0a36e81e7d76e8ed97b34 | 850 | py | Python | myenv/lib/python3.7/site-packages/google/cloud/texttospeech.py | theCydonian/AudioEyes | 3dece4529b31e6c63771c4358457962999bda3b4 | ["MIT"] | null | null | null | myenv/lib/python3.7/site-packages/google/cloud/texttospeech.py | theCydonian/AudioEyes | 3dece4529b31e6c63771c4358457962999bda3b4 | ["MIT"] | 40 | 2019-07-16T10:04:48.000Z | 2020-01-20T09:04:59.000Z | myenv/lib/python3.7/site-packages/google/cloud/texttospeech.py | theCydonian/AudioEyes | 3dece4529b31e6c63771c4358457962999bda3b4 | ["MIT"] | 2 | 2019-07-18T00:05:31.000Z | 2019-11-27T14:17:22.000Z |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.texttospeech_v1 import TextToSpeechClient
from google.cloud.texttospeech_v1 import enums
from google.cloud.texttospeech_v1 import types
__all__ = ("enums", "types", "TextToSpeechClient")
| 32.692308 | 74 | 0.770588 |
4f5b494726dcec7b77b4f34e53a82434dc17c758 | 17,184 | py | Python | saleor/graphql/order/mutations/draft_orders.py | rainerioagbayani/golftee-core | 228744d42391bae32288cfcac943b6a38f3b47e5 | ["CC-BY-4.0"] | 4 | 2021-01-19T09:53:41.000Z | 2021-01-19T20:39:27.000Z | saleor/graphql/order/mutations/draft_orders.py | rainerioagbayani/golftee-core | 228744d42391bae32288cfcac943b6a38f3b47e5 | ["CC-BY-4.0"] | 9 | 2021-03-31T19:53:28.000Z | 2021-12-13T20:42:32.000Z | saleor/graphql/order/mutations/draft_orders.py | rainerioagbayani/golftee-core | 228744d42391bae32288cfcac943b6a38f3b47e5 | ["CC-BY-4.0"] | 1 | 2021-08-03T14:45:50.000Z | 2021-08-03T14:45:50.000Z |
import graphene
from django.core.exceptions import ValidationError
from django.db import transaction
from graphene.types import InputObjectType
from ....account.models import User
from ....core.exceptions import InsufficientStock
from ....core.permissions import OrderPermissions
from ....core.taxes import TaxError, zero_taxed_money
from ....order import OrderStatus, events, models
from ....order.actions import order_created
from ....order.error_codes import OrderErrorCode
from ....order.utils import (
add_variant_to_draft_order,
change_order_line_quantity,
delete_order_line,
get_order_country,
recalculate_order,
update_order_prices,
)
from ....warehouse.management import allocate_stock
from ...account.i18n import I18nMixin
from ...account.types import AddressInput
from ...core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ...core.scalars import PositiveDecimal
from ...core.types.common import OrderError
from ...product.types import ProductVariant
from ..types import Order, OrderLine
from ..utils import validate_draft_order
class OrderLineInput(graphene.InputObjectType):
quantity = graphene.Int(
description="Number of variant items ordered.", required=True
)
class OrderLineCreateInput(OrderLineInput):
variant_id = graphene.ID(
description="Product variant ID.", name="variantId", required=True
)
class DraftOrderInput(InputObjectType):
billing_address = AddressInput(description="Billing address of the customer.")
user = graphene.ID(
descripton="Customer associated with the draft order.", name="user"
)
user_email = graphene.String(description="Email address of the customer.")
discount = PositiveDecimal(description="Discount amount for the order.")
shipping_address = AddressInput(description="Shipping address of the customer.")
shipping_method = graphene.ID(
description="ID of a selected shipping method.", name="shippingMethod"
)
voucher = graphene.ID(
description="ID of the voucher associated with the order.", name="voucher"
)
customer_note = graphene.String(
description="A note from a customer. Visible by customers in the order summary."
)
class DraftOrderCreateInput(DraftOrderInput):
lines = graphene.List(
OrderLineCreateInput,
description=(
"Variant line input consisting of variant ID and quantity of products."
),
)
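# Added illustration (not part of the original module): a draft order is typically created
# through the corresponding GraphQL mutation; the field casing follows graphene's camelCase
# conventions and the ids/values shown are placeholders.
#
#   mutation {
#     draftOrderCreate(input: {
#       userEmail: "buyer@example.com",
#       lines: [{variantId: "UHJvZHVjdFZhcmlhbnQ6MQ==", quantity: 2}]
#     }) {
#       order { id }
#       orderErrors { field code }
#     }
#   }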
class DraftOrderCreate(ModelMutation, I18nMixin):
class Arguments:
input = DraftOrderCreateInput(
required=True, description="Fields required to create an order."
)
class Meta:
description = "Creates a new draft order."
model = models.Order
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def clean_input(cls, info, instance, data):
shipping_address = data.pop("shipping_address", None)
billing_address = data.pop("billing_address", None)
cleaned_input = super().clean_input(info, instance, data)
lines = data.pop("lines", None)
if lines:
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(variant_ids, "variants", ProductVariant)
quantities = [line.get("quantity") for line in lines]
cleaned_input["variants"] = variants
cleaned_input["quantities"] = quantities
cleaned_input["status"] = OrderStatus.DRAFT
display_gross_prices = info.context.site.settings.display_gross_prices
cleaned_input["display_gross_prices"] = display_gross_prices
# Set up default addresses if possible
user = cleaned_input.get("user")
if user and not shipping_address:
cleaned_input["shipping_address"] = user.default_shipping_address
if user and not billing_address:
cleaned_input["billing_address"] = user.default_billing_address
if shipping_address:
shipping_address = cls.validate_address(
shipping_address, instance=instance.shipping_address, info=info
)
shipping_address = info.context.plugins.change_user_address(
shipping_address, "shipping", user=instance
)
cleaned_input["shipping_address"] = shipping_address
if billing_address:
billing_address = cls.validate_address(
billing_address, instance=instance.billing_address, info=info
)
billing_address = info.context.plugins.change_user_address(
billing_address, "billing", user=instance
)
cleaned_input["billing_address"] = billing_address
return cleaned_input
@staticmethod
def _save_addresses(info, instance: models.Order, cleaned_input):
shipping_address = cleaned_input.get("shipping_address")
if shipping_address:
shipping_address.save()
instance.shipping_address = shipping_address.get_copy()
billing_address = cleaned_input.get("billing_address")
if billing_address:
billing_address.save()
instance.billing_address = billing_address.get_copy()
@staticmethod
def _save_lines(info, instance, quantities, variants):
if variants and quantities:
lines = []
for variant, quantity in zip(variants, quantities):
lines.append((quantity, variant))
add_variant_to_draft_order(instance, variant, quantity)
# New event
events.draft_order_added_products_event(
order=instance, user=info.context.user, order_lines=lines
)
@classmethod
def _commit_changes(cls, info, instance, cleaned_input):
created = instance.pk
super().save(info, instance, cleaned_input)
# Create draft created event if the instance is from scratch
if not created:
events.draft_order_created_event(order=instance, user=info.context.user)
instance.save(update_fields=["billing_address", "shipping_address"])
@classmethod
def _refresh_lines_unit_price(cls, info, instance, cleaned_input, new_instance):
if new_instance:
# It is a new instance, all new lines have already updated prices.
return
shipping_address = cleaned_input.get("shipping_address")
if shipping_address and instance.is_shipping_required():
update_order_prices(instance, info.context.discounts)
billing_address = cleaned_input.get("billing_address")
if billing_address and not instance.is_shipping_required():
update_order_prices(instance, info.context.discounts)
@classmethod
@transaction.atomic
def save(cls, info, instance, cleaned_input):
new_instance = not bool(instance.pk)
# Process addresses
cls._save_addresses(info, instance, cleaned_input)
# Save any changes create/update the draft
cls._commit_changes(info, instance, cleaned_input)
try:
# Process any lines to add
cls._save_lines(
info,
instance,
cleaned_input.get("quantities"),
cleaned_input.get("variants"),
)
cls._refresh_lines_unit_price(info, instance, cleaned_input, new_instance)
except TaxError as tax_error:
raise ValidationError(
"Unable to calculate taxes - %s" % str(tax_error),
code=OrderErrorCode.TAX_ERROR.value,
)
# Post-process the results
recalculate_order(instance)
class DraftOrderUpdate(DraftOrderCreate):
class Arguments:
id = graphene.ID(required=True, description="ID of a draft order to update.")
input = DraftOrderInput(
required=True, description="Fields required to update an order."
)
class Meta:
description = "Updates a draft order."
model = models.Order
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def get_instance(cls, info, **data):
instance = super().get_instance(info, **data)
if instance.status != OrderStatus.DRAFT:
raise ValidationError(
{
"id": ValidationError(
"Provided order id belongs to non-draft order. "
"Use `orderUpdate` mutation instead.",
code=OrderErrorCode.INVALID,
)
}
)
return instance
class DraftOrderDelete(ModelDeleteMutation):
class Arguments:
id = graphene.ID(required=True, description="ID of a draft order to delete.")
class Meta:
description = "Deletes a draft order."
model = models.Order
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
class DraftOrderComplete(BaseMutation):
order = graphene.Field(Order, description="Completed order.")
class Arguments:
id = graphene.ID(
required=True, description="ID of the order that will be completed."
)
class Meta:
description = "Completes creating an order."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def update_user_fields(cls, order):
if order.user:
order.user_email = order.user.email
elif order.user_email:
try:
order.user = User.objects.get(email=order.user_email)
except User.DoesNotExist:
order.user = None
@classmethod
def perform_mutation(cls, _root, info, id):
order = cls.get_node_or_error(info, id, only_type=Order)
country = get_order_country(order)
validate_draft_order(order, country)
cls.update_user_fields(order)
order.status = OrderStatus.UNFULFILLED
if not order.is_shipping_required():
order.shipping_method_name = None
order.shipping_price = zero_taxed_money()
if order.shipping_address:
order.shipping_address.delete()
order.shipping_address = None
order.save()
for line in order:
if line.variant.track_inventory:
try:
allocate_stock(line, country, line.quantity)
except InsufficientStock as exc:
raise ValidationError(
{
"lines": ValidationError(
f"Insufficient product stock: {exc.item}",
code=OrderErrorCode.INSUFFICIENT_STOCK,
)
}
)
order_created(order, user=info.context.user, from_draft=True)
return DraftOrderComplete(order=order)
class DraftOrderLinesCreate(BaseMutation):
order = graphene.Field(Order, description="A related draft order.")
order_lines = graphene.List(
graphene.NonNull(OrderLine), description="List of newly added order lines."
)
class Arguments:
id = graphene.ID(
required=True, description="ID of the draft order to add the lines to."
)
input = graphene.List(
OrderLineCreateInput,
required=True,
description="Fields required to add order lines.",
)
class Meta:
description = "Create order lines for a draft order."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
order = cls.get_node_or_error(info, data.get("id"), only_type=Order)
if order.status != OrderStatus.DRAFT:
raise ValidationError(
{
"id": ValidationError(
"Only draft orders can be edited.",
code=OrderErrorCode.NOT_EDITABLE,
)
}
)
lines_to_add = []
for input_line in data.get("input"):
variant_id = input_line["variant_id"]
variant = cls.get_node_or_error(
info, variant_id, "variant_id", only_type=ProductVariant
)
quantity = input_line["quantity"]
if quantity > 0:
if variant:
lines_to_add.append((quantity, variant))
else:
raise ValidationError(
{
"quantity": ValidationError(
"Ensure this value is greater than 0.",
code=OrderErrorCode.ZERO_QUANTITY,
)
}
)
# Add the lines
try:
lines = [
add_variant_to_draft_order(order, variant, quantity)
for quantity, variant in lines_to_add
]
except TaxError as tax_error:
raise ValidationError(
"Unable to calculate taxes - %s" % str(tax_error),
code=OrderErrorCode.TAX_ERROR.value,
)
# Create the event
events.draft_order_added_products_event(
order=order, user=info.context.user, order_lines=lines_to_add
)
recalculate_order(order)
return DraftOrderLinesCreate(order=order, order_lines=lines)
class DraftOrderLineDelete(BaseMutation):
order = graphene.Field(Order, description="A related draft order.")
order_line = graphene.Field(
OrderLine, description="An order line that was deleted."
)
class Arguments:
id = graphene.ID(description="ID of the order line to delete.", required=True)
class Meta:
description = "Deletes an order line from a draft order."
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def perform_mutation(cls, _root, info, id):
line = cls.get_node_or_error(info, id, only_type=OrderLine)
order = line.order
if order.status != OrderStatus.DRAFT:
raise ValidationError(
{
"id": ValidationError(
"Only draft orders can be edited.",
code=OrderErrorCode.NOT_EDITABLE,
)
}
)
db_id = line.id
delete_order_line(line)
line.id = db_id
# Create the removal event
events.draft_order_removed_products_event(
order=order, user=info.context.user, order_lines=[(line.quantity, line)]
)
recalculate_order(order)
return DraftOrderLineDelete(order=order, order_line=line)
class DraftOrderLineUpdate(ModelMutation):
order = graphene.Field(Order, description="A related draft order.")
class Arguments:
id = graphene.ID(description="ID of the order line to update.", required=True)
input = OrderLineInput(
required=True, description="Fields required to update an order line."
)
class Meta:
description = "Updates an order line of a draft order."
model = models.OrderLine
permissions = (OrderPermissions.MANAGE_ORDERS,)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def clean_input(cls, info, instance, data):
instance.old_quantity = instance.quantity
cleaned_input = super().clean_input(info, instance, data)
if instance.order.status != OrderStatus.DRAFT:
raise ValidationError(
{
"id": ValidationError(
"Only draft orders can be edited.",
code=OrderErrorCode.NOT_EDITABLE,
)
}
)
quantity = data["quantity"]
if quantity <= 0:
raise ValidationError(
{
"quantity": ValidationError(
"Ensure this value is greater than 0.",
code=OrderErrorCode.ZERO_QUANTITY,
)
}
)
return cleaned_input
@classmethod
def save(cls, info, instance, cleaned_input):
change_order_line_quantity(
info.context.user, instance, instance.old_quantity, instance.quantity
)
recalculate_order(instance.order)
@classmethod
def success_response(cls, instance):
response = super().success_response(instance)
response.order = instance.order
return response
| 35.874739 | 88 | 0.620461 |
4f5f2a3a038c8b6b9697febadc6d69293f1e4655 | 3,440 | py | Python | configs/recognition/tsn/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb.py | EquipoVandV/mmactionVandV | a807d9e258d67b7b2d0fabe98da97a801d63ae7d | ["Apache-2.0"] | 1,870 | 2020-07-11T09:33:46.000Z | 2022-03-31T13:21:36.000Z | configs/recognition/tsn/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb.py | EquipoVandV/mmactionVandV | a807d9e258d67b7b2d0fabe98da97a801d63ae7d | ["Apache-2.0"] | 1,285 | 2020-07-11T11:18:57.000Z | 2022-03-31T08:41:17.000Z | configs/recognition/tsn/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb.py | EquipoVandV/mmactionVandV | a807d9e258d67b7b2d0fabe98da97a801d63ae7d | ["Apache-2.0"] | 557 | 2020-07-11T09:51:57.000Z | 2022-03-31T13:21:35.000Z |
_base_ = [
'../../_base_/schedules/sgd_100e.py', '../../_base_/default_runtime.py'
]
# model settings
model = dict(
type='Recognizer2D',
backbone=dict(
type='ResNet',
pretrained='torchvision://resnet50',
depth=50,
norm_eval=False),
cls_head=dict(
type='TSNHead',
num_classes=400,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.4,
init_std=0.01),
# model training and testing settings
train_cfg=dict(
blending=dict(type='MixupBlending', num_classes=400, alpha=.2)),
test_cfg=dict(average_clips=None))
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
dict(type='DecordDecode'),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
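# Note added for clarity (not part of the original config): SampleFrames with clip_len=1,
# frame_interval=1 and num_clips=8 is TSN-style segment sampling: the video is split into
# 8 segments and a single RGB frame is drawn from each, giving the "1x1x8" in the file name.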
val_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
test_mode=True),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=25,
test_mode=True),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=32,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# runtime settings
work_dir = './work_dirs/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb/'
| 31.851852 | 78 | 0.64593 |
4f597c9660a7b2681aae2ceebd5f4d754657f977 | 634 | py | Python | api/tests/__init__.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | ["MIT"] | null | null | null | api/tests/__init__.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | ["MIT"] | null | null | null | api/tests/__init__.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | ["MIT"] | null | null | null |
from django.test import TestCase
from unittest.mock import patch
from core.slack_bot import SlackIntegration
class APIBaseTestCase(TestCase):
def setUp(self):
self.patch_slack_id = patch.object(
SlackIntegration, 'get_user_slack_id')
self.patch_send_message = patch.object(
SlackIntegration, 'send_message')
self.patch_slack_id.return_value = 'test_id'
self.patch_send_message.return_value = ''
self.patch_slack_id.start()
self.patch_send_message.start()
def tearDown(self):
self.patch_slack_id.stop()
self.patch_send_message.stop()
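# Added illustration (not part of the original module): concrete test cases subclass
# APIBaseTestCase so Slack calls stay mocked; a subclass that defines its own setUp should
# call super().setUp() first. "AssetTestCase" below is a hypothetical example.
#
# class AssetTestCase(APIBaseTestCase):
#     def setUp(self):
#         super().setUp()
#         ...  # create fixtures that may trigger Slack notifications safely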
| 28.818182 | 52 | 0.695584 |
4f557155105e513ed783869d9ba80ee5a8802b54 | 704 | py | Python | planetmint/backend/__init__.py | liviu-lesan/planetmint | 54cf8e45108947aa8282ddaaf127d08e06ce842e | ["Apache-2.0"] | null | null | null | planetmint/backend/__init__.py | liviu-lesan/planetmint | 54cf8e45108947aa8282ddaaf127d08e06ce842e | ["Apache-2.0"] | null | null | null | planetmint/backend/__init__.py | liviu-lesan/planetmint | 54cf8e45108947aa8282ddaaf127d08e06ce842e | ["Apache-2.0"] | null | null | null |
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Generic backend database interfaces expected by Planetmint.
The interfaces in this module allow Planetmint to be agnostic about its
database backend. One can configure Planetmint to use different databases as
its data store by setting the ``database.backend`` property in the
configuration or the ``PLANETMINT_DATABASE_BACKEND`` environment variable.
"""
# Include the backend interfaces
from planetmint.backend import schema, query # noqa
from planetmint.backend.connection import Connection
| 41.411765 | 76 | 0.798295 |
4f5c6a4aad95d89d11ba01c1e530b30a61b37511 | 10,321 | py | Python | modules/run/run.py | praeclarumjj3/OLIE | c0a27e7409f7db51b190bfac114677cb7b5dd669 | ["BSD-2-Clause"] | 1 | 2021-04-10T19:43:47.000Z | 2021-04-10T19:43:47.000Z | modules/run/run.py | praeclarumjj3/OLIE | c0a27e7409f7db51b190bfac114677cb7b5dd669 | ["BSD-2-Clause"] | null | null | null | modules/run/run.py | praeclarumjj3/OLIE | c0a27e7409f7db51b190bfac114677cb7b5dd669 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
from torch._C import device
from datasets.coco_loader import get_loader
import torch
from torch import nn
import sys
from adet.config import get_cfg
from modules.networks.solov2 import SOLOv2
from modules.networks.reconstructor import Reconstructor
import matplotlib.pyplot as plt
import argparse
import os
from tqdm import tqdm
import numpy as np
import warnings
from detectron2.utils.logger import setup_logger
from etaprogress.progress import ProgressBar
from detectron2.checkpoint import DetectionCheckpointer
from loss import ReconLoss, VGGLoss
warnings.filterwarnings("ignore")
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="SOLOv2 Editor")
parser.add_argument(
"--config-file",
default="configs/R50_3x.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
parser.add_argument(
"--coco",
help="Path of coco dataset",
default='datasets/coco/'
)
parser.add_argument(
"--batch_size",
help="Batch Size for dataloaders",
default=4,
type=int
)
parser.add_argument(
"--num_epochs",
help="Epochs",
default=30,
type=int
)
parser.add_argument(
"--lr",
help="Learning Rate",
default=1e-3,
type=float
)
parser.add_argument(
"--eval",
help="To eval or not",
default=False,
type=bool
)
parser.add_argument(
"--PATH",
help="Path of the saved editor",
default='checkpoints/editor.pth',
type=str
)
parser.add_argument(
"--load",
help="To load pretrained weights for further training",
default=False,
type=bool
)
return parser
def visualize(x,y,z,i):
x = x[0].cpu()
x = x.permute(1, 2, 0).numpy()
y = y[0].cpu()
y = y.permute(1, 2, 0).numpy()
z = z[0].cpu()
z = z.permute(1, 2, 0).numpy()
f, (ax1,ax2,ax3) = plt.subplots(1,3)
x = x[:,:,::-1]
y = y[:,:,::-1]
z = z[:,:,::-1]
ax1.imshow(x)
ax1.set_title("Composite Image")
ax1.axis('off')
ax2.imshow(y)
ax2.set_title("Background Image")
ax2.axis('off')
ax3.imshow(z)
ax3.set_title("Reconstruction")
ax3.axis('off')
f.savefig('visualizations/runs/run_{}.jpg'.format(i))
def normalize(inputs):
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(device).view(3, 1, 1).cuda()
pixel_std = torch.Tensor([57.375, 57.120, 58.395]).view(3, 1, 1).cuda()
un_normalizer = lambda x: (x - pixel_mean) / pixel_std
return un_normalizer(inputs)
def un_normalize(inputs):
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(device).view(3, 1, 1).cuda()
pixel_std = torch.Tensor([57.375, 57.120, 58.395]).view(3, 1, 1).cuda()
un_normalizer = lambda x: x * pixel_std + pixel_mean
return un_normalizer(inputs)
def vgg_normalize(inputs):
pixel_mean = torch.tensor([0.485, 0.456, 0.406]).view(3,1,1).cuda()
pixel_std = torch.tensor([0.229, 0.224, 0.225]).view(3,1,1).cuda()
normalizer = lambda x: (x - pixel_mean) / pixel_std
return normalizer(inputs)
def vgg_preprocess(image):
image = image * torch.tensor(1./255)
image = torch.stack([image[:,2,:,:],image[:,1,:,:],image[:,0,:,:]],1)
image = vgg_normalize(image)
return image
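# Note added for clarity (not part of the original script): vgg_preprocess expects BGR
# tensors in [0, 255] (the detectron2/SOLOv2 convention); it rescales to [0, 1], reorders
# the channels to RGB, and applies the ImageNet mean/std above so the inputs match what the
# VGG features used by VGGLoss were trained on.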
def s_loss(targets, recons, masks=None):
targets = vgg_preprocess(targets)
recons = vgg_preprocess(recons)
style_loss = vgg_loss(recons, targets, masks)
return style_loss
def edit_loss(outputs, images):
inputs = torch.stack(images,0).cuda()
outputs = un_normalize(outputs)
# masks = torch.stack(masks,0).cuda()
bg_loss = recon_loss(outputs, inputs)
# style_loss = s_loss(outputs, inputs)
# alpha = torch.tensor(50., dtype=float)
# t_loss = bg_loss + alpha*style_loss
# print('Style Loss: {}'.format(hole_loss))
# print('Simple Loss: {}'.format(bg_loss))
# print('Total Loss: {}'.format(t_loss))
# print('----------------------')
return bg_loss
def tensor_to_list(maps):
masks = []
maps = maps.squeeze(0)
for i in range(maps.shape[0]):
masks.append(maps[i])
return masks
def train(model, num_epochs, dataloader):
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=1e-2)
best_loss = 1e10
best_epoch = 0
epoch_loss = []
logger.info("Starting Training")
for j in range(num_epochs):
running_loss = []
total = len(dataloader)
bar = ProgressBar(total, max_width=80)
for i, data in tqdm(enumerate(dataloader, 0)):
bar.numerator = i+1
print(bar, end='\r')
comp_inputs, bg_inputs = data
comp_inputs, bg_inputs = tensor_to_list(comp_inputs), tensor_to_list(bg_inputs)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
maps, _ = model.solo(bg_inputs)
outputs = model.reconstructor(maps, torch.stack(comp_inputs,dim=0))
loss = edit_loss(outputs, bg_inputs)
loss.backward()
optimizer.step()
running_loss.append(loss.item())
if i%80 == 0:
print('Loss: {}'.format(loss.item()))
comp_inputs = torch.stack(comp_inputs,0).cuda()
bg_inputs = torch.stack(bg_inputs,0).cuda()
outputs = un_normalize(outputs)
# masks = torch.stack(masks,0).cuda()
visualize(comp_inputs*torch.tensor(1./255),bg_inputs*torch.tensor(1./255),torch.clamp(min=0., max=255.,input=torch.round(outputs.detach()))*torch.tensor(1./255),i//80)
sys.stdout.flush()
avg_loss = np.mean(running_loss)
print("Epoch {}: Loss: {}".format(j+1,avg_loss))
epoch_loss.append(avg_loss)
if avg_loss < best_loss:
best_loss = avg_loss
best_epoch = j+1
print('Model saved at Epoch: {}'.format(j+1))
torch.save(model.state_dict(),args.PATH)
logger.info("Finished Training with best loss: {} at Epoch: {}".format(best_loss, best_epoch))
plt.plot(np.linspace(1, num_epochs, num_epochs).astype(int), epoch_loss)
if not os.path.exists('losses/'):
os.makedirs('losses/')
plt.savefig('losses/train_loss_{}.png'.format(args.lr))
def eval(model, dataloader):
model.eval()
running_loss = []
total = len(dataloader)
bar = ProgressBar(total, max_width=80)
logger.info("Starting Evaluation")
with torch.no_grad():
for i, data in tqdm(enumerate(dataloader, 0)):
bar.numerator = i+1
print(bar, end='\r')
inputs = data
outputs = model(inputs)
loss = edit_loss(outputs, inputs)
running_loss.append(loss.item())
sys.stdout.flush()
avg_loss = np.mean(running_loss)
print("Eval Loss: {}".format(avg_loss))
plt.plot(np.linspace(1, total, total).astype(int), running_loss)
if not os.path.exists('losses/'):
os.makedirs('losses/')
plt.savefig('losses/eval_loss_{}.png'.format(args.lr))
if __name__ == "__main__":
logger = setup_logger()
args = get_parser().parse_args()
logger.info("Arguments: " + str(args))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cfg = setup_cfg(args)
solo = SOLOv2(cfg=cfg).to(device)
checkpointer = DetectionCheckpointer(solo)
checkpointer.load(cfg.MODEL.WEIGHTS)
for param in solo.parameters():
param.requires_grad = False
image = torch.rand(3,64,64)
batched_input = []
batched_input.append(image)
r,_ = solo(batched_input)
reconstructor = Reconstructor(in_channels=r.shape[1])
edit_recons_params = sum(p.numel() for p in reconstructor.parameters())
solo_params = sum(p.numel() for p in solo.parameters())
logger.info("Total Params: {}".format(edit_recons_params+solo_params))
logger.info("Trainable Params: {}".format(edit_recons_params))
logger.info("Non-Trainable Params: {}".format(solo_params))
if args.eval:
coco_test_loader, _ = get_loader(device=device, \
root=args.coco+'val2017', \
json=args.coco+'annotations/instances_val2017.json', \
batch_size=args.batch_size, \
shuffle=False, \
num_workers=0)
editor_eval =Editor(solo,reconstructor)
editor_eval.load_state_dict(torch.load(args.PATH))
eval(editor_eval.to(device),coco_test_loader)
else:
if not os.path.exists('checkpoints/'):
os.makedirs('checkpoints/')
logger.info("Instantiating Editor")
editor = Editor(solo,reconstructor)
coco_train_loader, _ = get_loader(device=device, \
# root=args.coco+'train2017', \
root = 'datasets/bg_composite/', \
json=args.coco+'annotations/instances_train2017.json', \
batch_size=args.batch_size, \
shuffle=True, \
num_workers=0)
if args.load:
editor.load_state_dict(torch.load(args.PATH))
editor.to(device)
vgg_loss = VGGLoss()
recon_loss = ReconLoss()
train(model=editor,num_epochs=args.num_epochs, dataloader=coco_train_loader) | 31.181269 | 183 | 0.589672 |
4f5debe793a57d574fd9ee736c986d15e8129790 | 4,504 | py | Python | nemo_text_processing/inverse_text_normalization/de/taggers/tokenize_and_classify.py | JMichaelStringer/NeMo | b5b29a69ccb0ec3d8c9ace2f33872ee99858a559 | [
"Apache-2.0"
] | 1 | 2021-09-10T10:40:51.000Z | 2021-09-10T10:40:51.000Z | nemo_text_processing/inverse_text_normalization/de/taggers/tokenize_and_classify.py | JMichaelStringer/NeMo | b5b29a69ccb0ec3d8c9ace2f33872ee99858a559 | [
"Apache-2.0"
] | null | null | null | nemo_text_processing/inverse_text_normalization/de/taggers/tokenize_and_classify.py | JMichaelStringer/NeMo | b5b29a69ccb0ec3d8c9ace2f33872ee99858a559 | [
"Apache-2.0"
] | 1 | 2022-01-07T03:39:11.000Z | 2022-01-07T03:39:11.000Z | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.de.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.inverse_text_normalization.de.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.de.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.de.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.de.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.de.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.de.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.de.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.de.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.de.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.de.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.de.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.en.taggers.word import WordFst
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class ClassifyFst(GraphFst):
"""
    Final class that composes all other classification grammars. This class can process an entire sentence, provided it is lower cased.
    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details on deployment can be found at NeMo/tools/text_processing_deployment.
"""
def __init__(self):
super().__init__(name="tokenize_and_classify", kind="classify")
cardinal = CardinalFst()
cardinal_graph = cardinal.fst
ordinal = OrdinalFst(cardinal)
ordinal_graph = ordinal.fst
decimal = DecimalFst(cardinal)
decimal_graph = decimal.fst
fraction = FractionFst(cardinal)
fraction_graph = fraction.fst
measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction).fst
date_graph = DateFst(ordinal=ordinal, cardinal=cardinal).fst
word_graph = WordFst().fst
time_graph = TimeFst().fst
money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst
whitelist_graph = WhiteListFst().fst
punct_graph = PunctuationFst().fst
electronic_graph = ElectronicFst().fst
telephone_graph = TelephoneFst().fst
classify = (
pynutil.add_weight(whitelist_graph, 1.01)
| pynutil.add_weight(time_graph, 1.1)
| pynutil.add_weight(date_graph, 1.09)
| pynutil.add_weight(decimal_graph, 1.1)
| pynutil.add_weight(measure_graph, 1.1)
| pynutil.add_weight(cardinal_graph, 1.1)
| pynutil.add_weight(ordinal_graph, 1.1)
| pynutil.add_weight(fraction_graph, 1.1)
| pynutil.add_weight(money_graph, 1.1)
| pynutil.add_weight(telephone_graph, 1.1)
| pynutil.add_weight(electronic_graph, 1.1)
| pynutil.add_weight(word_graph, 100)
)
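        # Added note: the weights above act as costs in the tropical semiring,
        # so lower values are preferred during shortest-path decoding; the
        # plain word graph's weight of 100 makes it the fallback when no
        # semiotic class matches.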
punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
)
graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
graph = delete_space + graph + delete_space
self.fst = graph.optimize()
| 46.916667 | 128 | 0.742673 |
4f5f376e6a8bc1b4a5e738245b150f04ca4bb8b3 | 2,178 | py | Python | eng/tox/allowed_pylint_failures.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | eng/tox/allowed_pylint_failures.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 1 | 2021-06-23T14:50:11.000Z | 2021-06-24T12:26:05.000Z | eng/tox/allowed_pylint_failures.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
PYLINT_ACCEPTABLE_FAILURES = [
"azure-applicationinsights",
"azure-batch",
"azure-cognitiveservices-anomalydetector",
"azure-cognitiveservices-formrecognizer",
"azure-cognitiveservices-knowledge-nspkg",
"azure-cognitiveservices-knowledge-qnamaker",
"azure-cognitiveservices-language-luis",
"azure-cognitiveservices-language-nspkg",
"azure-cognitiveservices-language-spellcheck",
"azure-cognitiveservices-language-textanalytics",
"azure-cognitiveservices-nspkg",
"azure-cognitiveservices-personalizer",
"azure-cognitiveservices-search-autosuggest",
"azure-cognitiveservices-search-customimagesearch",
"azure-cognitiveservices-search-customsearch",
"azure-cognitiveservices-search-entitysearch",
"azure-cognitiveservices-search-imagesearch",
"azure-cognitiveservices-search-newssearch",
"azure-cognitiveservices-search-nspkg",
"azure-cognitiveservices-search-videosearch",
"azure-cognitiveservices-search-visualsearch",
"azure-cognitiveservices-search-websearch",
"azure-cognitiveservices-vision-computervision",
"azure-cognitiveservices-vision-contentmoderator",
"azure-cognitiveservices-vision-customvision",
"azure-cognitiveservices-vision-face",
"azure-cognitiveservices-vision-nspkg",
"azure-common",
"azure-nspkg",
"azure-servicemanagement-legacy",
"azure-graphrbac",
"azure-loganalytics",
"azure-servicefabric",
"azure-template",
"azure-keyvault",
"azure-synapse",
"azure-synapse-artifacts",
"azure-synapse-spark",
"azure-synapse-accesscontrol",
"azure-synapse-monitoring",
"azure-synapse-managedprivateendpoints",
"azure-synapse-nspkg",
"azure-ai-anomalydetector",
"azure-security-attestation",
"azure-iot-deviceupdate",
]
| 39.6 | 94 | 0.679063 |
4f5f6091847a82486609001d8df722c235eec2e0 | 319 | py | Python | jobs.py | Amsterdam/fixxx-cspp-mini-crm-api | 73af7cff8ca577bc13ce5f47713645b9edd6848a | [
"MIT"
] | 1 | 2021-09-29T12:41:14.000Z | 2021-09-29T12:41:14.000Z | jobs.py | Amsterdam/fixxx-cspp-mini-crm-api | 73af7cff8ca577bc13ce5f47713645b9edd6848a | [
"MIT"
] | 1 | 2020-09-23T14:03:28.000Z | 2020-09-23T14:03:28.000Z | jobs.py | Amsterdam/fixxx-cspp-mini-crm-api | 73af7cff8ca577bc13ce5f47713645b9edd6848a | [
"MIT"
] | 1 | 2020-10-22T13:50:45.000Z | 2020-10-22T13:50:45.000Z | from adapters.amsterdam import schools, tags
def run_jobs():
"""
Run the adapters to get data from remote sources.
"""
schools.run()
tags.run()
if __name__ == "__main__":
"""
Placeholder so the jobs script can also be ran from a command line, for cronjobs
"""
run_jobs()
| 18.764706 | 84 | 0.617555 |
4f5d28891e1cc98a1a050bb2eea9450fb7d8cf3d | 1,642 | py | Python | oort/util/templating.py | niklasl/oort.python-oortpub | fbd23a5693f3ce889a9113b4e7dd10fc8ae61281 | [
"BSD-3-Clause"
] | null | null | null | oort/util/templating.py | niklasl/oort.python-oortpub | fbd23a5693f3ce889a9113b4e7dd10fc8ae61281 | [
"BSD-3-Clause"
] | null | null | null | oort/util/templating.py | niklasl/oort.python-oortpub | fbd23a5693f3ce889a9113b4e7dd10fc8ae61281 | [
"BSD-3-Clause"
] | null | null | null | # Based on design in:
# <http://www.turbogears.org/docs/plugins/template.html>
# <http://projects.dowski.com/view/buffet>
import pkg_resources
available_engines = dict([ (entry_point.name, entry_point.load())
for entry_point in pkg_resources.
iter_entry_points('python.templating.engines') ])
class TemplateEngineManager(object):
def __init__(self, extra_vars_func=None, engine_opts=None):
self.extra_vars_func = extra_vars_func
self.engine_opts = engine_opts
self.engines = {}
def get_engine_and_path(self, path, default_engine=None):
if not ':' in path:
dotted_path = path
engine_name = default_engine
else:
engine_name, dotted_path = path.split(':', 1)
engine = self._get_engine(engine_name)
return engine, dotted_path
def render_template(self, path, data, format, fragment=False):
engine, dotted_path = self.get_engine_and_path(path)
result = engine.render(
data, format=format, fragment=fragment, template=dotted_path)
return result
def _get_engine(self, engine_name):
engine = self.engines.get(engine_name)
if not engine:
Engine = available_engines.get(engine_name, None)
if not Engine:
msg = 'No engine for "%s" found. Please install it.'
raise TemplateEngineMissing(msg % (engine_name,))
self.engines[engine_name] = \
engine = Engine(self.extra_vars_func, self.engine_opts)
return engine
class TemplateEngineMissing(Exception):
pass
| 34.93617 | 77 | 0.646163 |
4f5f9d9fe5460d478115cedb232c1268b5ccca14 | 4,722 | py | Python | figure_3/model_II/sobolev_alignment/taylor_feature_attribution.py | saroudant/sobolev_alignment_manuscript | 2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193 | [
"MIT"
] | null | null | null | figure_3/model_II/sobolev_alignment/taylor_feature_attribution.py | saroudant/sobolev_alignment_manuscript | 2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193 | [
"MIT"
] | null | null | null | figure_3/model_II/sobolev_alignment/taylor_feature_attribution.py | saroudant/sobolev_alignment_manuscript | 2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193 | [
"MIT"
] | null | null | null | """
This script performs the feature attribution by Taylor expansion, specifically:
- Loading Sobolev Alignment with VAE trained.
- Sample artificial data form the VAE distribution.
- Train KRR on these artificial data and align the latent factors.
- Using the trained KRR, compute the values of the projection on Hilbert polynomials.
- Save the results.
"""
import os, sys, getopt
import pandas as pd
import numpy as np
from anndata import AnnData
import torch
from pickle import dump, load
from copy import deepcopy
import gc
from sobolev_alignment import SobolevAlignment
# Import params
from model_II_synthetic_params import *
from read_data import read_data
# Import parameters
n_artificial_samples = None
tmp_file = None
n_iter = 1
opts, args = getopt.getopt(sys.argv[1:],'o:d:n:t:j:i:',['output=', 'data=', 'artifsamples=', 'temp=', 'job=', 'iter='])
for opt, arg in opts:
if opt in ("-o", "--output"):
output_folder = str(arg)
elif opt in ("-d", "--data"):
data_subfolder = str(arg)
elif opt in ('-n', '--artifsamples'):
n_artificial_samples = int(arg)
elif opt in ('-t', '--temp'):
tmp_file = str(arg)
elif opt in ('-j', '--job'):
n_jobs = int(arg)
elif opt in ('-i', '--iter'):
n_iter = int(arg)
n_artificial_samples = n_artificial_samples if n_artificial_samples is not None else 10**6
n_artificial_samples = int(n_artificial_samples)
tmp_file = tmp_file if tmp_file is not None else '/tmp/SM/'
###
# IMPORT DATA
###
X_source, X_target = read_data(data_folder, data_subfolder)
gc.collect()
###
# LOAD SOBOLEV ALIGNMENT
###
sobolev_alignment_clf = SobolevAlignment.load('%s/sobolev_alignment_model/'%(output_folder), with_model=True, with_krr=False)
###
# Feature analysis by weight estimation
###
print('START FEATURE ANALYSIS \n\t %s ITERATIONS'%(n_iter), flush=True)
source_krr_params = read_optimal_KRR_param(output_folder, 'source')
target_krr_params = read_optimal_KRR_param(output_folder, 'target')
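# Added note: each repeat fits the KRR approximation twice, once with the
# Matern smoothness nu selected during model optimisation and once with
# nu = inf; within the Matern family nu = 0.5 is the Laplacian kernel and
# nu -> inf the Gaussian (RBF) kernel, hence the 'laplacian'/'gaussian'
# folder names below.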
for iter_idx in range(n_iter):
for nu in [source_krr_params['kernel_params']['nu'], np.inf]:
source_krr_params['kernel_params']['nu'] = nu
target_krr_params['kernel_params']['nu'] = nu
sobolev_alignment_clf.krr_params = {
'source': source_krr_params,
'target': target_krr_params
}
# Make saving folder or pass if already computed
iter_output_folder = '%s/iter_%s_nu_%s/'%(
output_folder,
iter_idx,
'laplacian' if nu == 0.5 else ('gaussian' if nu == np.inf else nu)
)
print('\t\t START %s'%(iter_output_folder))
if iter_output_folder.replace('%s/'%(output_folder), '').replace('/', '') not in os.listdir(output_folder):
os.mkdir(iter_output_folder)
else:
continue
sobolev_alignment_clf.n_jobs = 1
print('\t START ITER %s'%(iter_idx), flush=True)
sobolev_alignment_clf.fit(
X_source=X_source,
X_target=X_target,
source_batch_name=batch_name,
target_batch_name=batch_name,
continuous_covariate_names=continuous_covariate_names,
n_artificial_samples=n_artificial_samples,
fit_vae=False,
sample_artificial=True,
krr_approx=True,
n_samples_per_sample_batch=10**6,
frac_save_artificial=10**4/n_artificial_samples,
save_mmap=tmp_file,
log_input=log_input,
no_posterior_collapse=no_posterior_collapse,
mean_center=mean_center,
unit_std=unit_std,
frob_norm_source=frob_norm_source
)
sobolev_alignment_clf.frob_norm_source = None
sobolev_alignment_clf.save('%s/sobolev_alignment_model/'%(iter_output_folder), with_krr=True, with_model=False)
print('\t START ERROR COMPUTING', flush=True)
krr_approx_error = sobolev_alignment_clf.compute_error(size=10**4)
processed_error_df = {x: process_error_df(df) for x, df in krr_approx_error.items()}
processed_latent_error_df = {x: df[0] for x, df in processed_error_df.items()}
processed_latent_error_df = pd.concat(processed_latent_error_df)
processed_factor_error_df = {x: df[1] for x, df in processed_error_df.items()}
processed_factor_error_df = pd.concat(processed_factor_error_df)
#Save error logs
processed_latent_error_df.to_csv('%s/latent_error.csv'%(iter_output_folder))
processed_factor_error_df.to_csv('%s/factor_error.csv'%(iter_output_folder))
torch.cuda.empty_cache()
gc.collect()
gc.collect()
sys.exit("FINISH ORTHONORMAL BASIS ATTRIBUTION") | 35.238806 | 125 | 0.677679 |
4f5fffa685177412b420aa8d4bbee04051326b00 | 2,474 | py | Python | data/stanza/get_probe_matrix.py | muziyongshixin/pytorch_SSRP | e54b3098927ba2ff16bdc8f64f3a2bf46d1f72c5 | [
"MIT"
] | 1 | 2022-01-11T06:29:01.000Z | 2022-01-11T06:29:01.000Z | data/stanza/get_probe_matrix.py | muziyongshixin/pytorch_SSRP | e54b3098927ba2ff16bdc8f64f3a2bf46d1f72c5 | [
"MIT"
] | null | null | null | data/stanza/get_probe_matrix.py | muziyongshixin/pytorch_SSRP | e54b3098927ba2ff16bdc8f64f3a2bf46d1f72c5 | [
"MIT"
] | null | null | null | import numpy as np
from tqdm import tqdm
import json
import networkx as nx
import pickle
import time
def make_uid(img_id, dset, sent_idx):
return "{}_{}_{}".format(img_id, dset, sent_idx)
def get_one_sent_probe(sent):
G = nx.Graph()
all_edge = []
for edge in sent:
u = edge['id']
v = edge['head_id']
all_edge.append((u, v))
G.add_edges_from(all_edge)
# print(G.number_of_edges())
gen = nx.all_pairs_shortest_path(G)
shortest_path = dict(gen)
probe_size = len(sent)
probe = np.ones((probe_size, probe_size)) * -1
for i in range(probe_size):
for j in range(probe_size):
            probe[i][j] = len(shortest_path[i + 1][j + 1]) - 1  # stanza word ids start from 1
return probe
def generate_probe_matrix(json_path, save_path):
start_time = time.time()
data = json.load(open(json_path))
all_result = {}
for img_id, img_sample in tqdm(data.items()):
raw_sent_num = len(img_sample) - 10
img_result = {}
for i, sent in enumerate(img_sample):
if i < raw_sent_num:
sent_cat = 'mscoco'
sent_idx = i
else:
sent_cat = 'mscoco_rephrase'
raw_idx = (i - raw_sent_num) // 2
j = (i - raw_sent_num) & 1
sent_idx = '{}_{}'.format(raw_idx, j)
key = make_uid(img_id, sent_cat, sent_idx)
probe_matrix = get_one_sent_probe(sent)
img_result[key] = probe_matrix
all_result[img_id] = img_result
pickle.dump(all_result, open(save_path, 'wb'))
print('save probe matrix data to {}, total data number is {}, using time is {}'.format(save_path, len(all_result),
time.time() - start_time))
json_path='/m/liyz/lxmert/data/probe/mscoco_minival_prob.json'
save_path='/m/liyz/lxmert/data/probe/mscoco_minival_prob_matrix.pickle'
nominival_json_path='/m/liyz/lxmert/data/probe/mscoco_nominival_prob.json'
nominival_save_path='/m/liyz/lxmert/data/probe/mscoco_nominival_prob_matrix.pickle'
generate_probe_matrix(nominival_json_path,nominival_save_path)
trian_json_path='/m/liyz/lxmert/data/probe/mscoco_train_prob.json'
train_save_path='/m/liyz/lxmert/data/probe/mscoco_train_prob_matrix.pickle'
generate_probe_matrix(trian_json_path,train_save_path) | 34.84507 | 119 | 0.616815 |
4f5da3a921f9df3e4fc752b077f2bd0fe40ef529 | 4,553 | py | Python | cellphonedb/src/tests/methods/test_terminal_method_statistical_analysis.py | jshilts/cellphonedb | 866e23b7785f04a609654de5c3e7621ffeb46c0f | [
"MIT"
] | null | null | null | cellphonedb/src/tests/methods/test_terminal_method_statistical_analysis.py | jshilts/cellphonedb | 866e23b7785f04a609654de5c3e7621ffeb46c0f | [
"MIT"
] | null | null | null | cellphonedb/src/tests/methods/test_terminal_method_statistical_analysis.py | jshilts/cellphonedb | 866e23b7785f04a609654de5c3e7621ffeb46c0f | [
"MIT"
] | null | null | null | import os
import pandas as pd
from cellphonedb.src.app.flask.flask_app import create_app
from cellphonedb.src.app.cellphonedb_app import output_test_dir, data_test_dir, cellphonedb_app
from cellphonedb.src.local_launchers.local_method_launcher import LocalMethodLauncher
from cellphonedb.src.tests.cellphone_flask_test_case import CellphoneFlaskTestCase
from cellphonedb.utils import dataframe_functions
class TestTerminalMethodStatisticalAnalysis(CellphoneFlaskTestCase):
def create_app(self):
return create_app(raise_non_defined_vars=False, verbose=False)
def test_test_data(self):
iterations = '10'
data = 'test'
debug_seed = '0'
project_name = 'test_data'
threshold = 0.1
self._method_call(data, iterations, project_name, threshold, debug_seed)
def _method_call(self, data: str, iterations: str, project_name: str, threshold: float, debug_seed: str):
result_means_filename = self._get_result_filename('means', data, iterations)
result_pvalues_filename = self._get_result_filename('pvalues', data, iterations)
result_significant_means_filename = self._get_result_filename('significant_means', data, iterations)
result_pvalues_means_filename = self._get_result_filename('pvalues_means', data, iterations)
result_deconvoluted_filename = self._get_result_filename('deconvoluted', data, iterations)
meta_filename = os.path.realpath('{}/hi_{}_meta.txt'.format(data_test_dir, data))
counts_filename = os.path.realpath('{}/hi_{}_counts.txt'.format(data_test_dir, data))
LocalMethodLauncher(cellphonedb_app.cellphonedb).cpdb_statistical_analysis_local_method_launcher(meta_filename,
counts_filename,
project_name,
iterations,
threshold,
output_test_dir,
result_means_filename,
result_pvalues_filename,
result_significant_means_filename,
result_pvalues_means_filename,
result_deconvoluted_filename,
debug_seed)
self._assert_result('means', data, iterations, project_name, result_means_filename)
self._assert_result('pvalues', data, iterations, project_name, result_pvalues_filename)
self._assert_result('significant_means', data, iterations, project_name, result_significant_means_filename)
self._assert_result('pvalues_means', data, iterations, project_name, result_pvalues_means_filename)
self._assert_result('deconvoluted', data, iterations, project_name, result_deconvoluted_filename)
def _assert_result(self, namefile: str, data: str, iterations: str, project_name: str,
result_means_filename: str) -> None:
means_test_filename = 'hi_{}_result__data-{}_it-{}.txt'.format(namefile, data, iterations)
original_means = pd.read_table(os.path.realpath('{}/{}'.format(data_test_dir, means_test_filename)))
result_means = pd.read_table('{}/{}/{}'.format(output_test_dir, project_name, result_means_filename))
self.assertTrue(dataframe_functions.dataframes_has_same_data(result_means, original_means))
self.remove_file('{}/{}/{}'.format(output_test_dir, project_name, result_means_filename))
def _get_result_filename(self, base_name, data: str, iterations: str) -> str:
base_filename = '{}__data-{}_it-{}'.format(base_name, data, iterations)
random_filename = self.get_test_filename(base_filename, 'txt')
return random_filename
| 68.984848 | 139 | 0.570832 |
4f5f5861c923beda0117e9ddb69989061e0ac988 | 11,546 | py | Python | pmaf/pipe/agents/miners/_miner.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-07-02T06:24:17.000Z | 2021-07-02T06:24:17.000Z | pmaf/pipe/agents/miners/_miner.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T12:02:46.000Z | 2021-06-28T12:02:46.000Z | pmaf/pipe/agents/miners/_miner.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | null | null | null | from ._base import MinerBase
from pmaf.pipe.agents.mediators._metakit import (
MediatorAccessionMetabase,
MediatorTaxonomyMetabase,
MediatorSequenceMetabase,
MediatorPhylogenyMetabase,
)
from pmaf.pipe.agents.dockers._metakit import (
DockerAccessionMetabase,
DockerIdentifierMetabase,
DockerPhylogenyMetabase,
DockerTaxonomyMetabase,
DockerSequenceMetabase,
)
from pmaf.pipe.agents.dockers._mediums._id_medium import DockerIdentifierMedium
from pmaf.pipe.agents.dockers._mediums._acs_medium import DockerAccessionMedium
from pmaf.pipe.agents.dockers._mediums._phy_medium import DockerPhylogenyMedium
from pmaf.pipe.agents.dockers._mediums._seq_medium import DockerSequenceMedium
from pmaf.pipe.agents.dockers._mediums._tax_medium import DockerTaxonomyMedium
from collections import defaultdict
from pmaf.internal._shared import chunk_generator
from typing import Any, Generator
class Miner(MinerBase):
"""Main class that perform data mining operations in :mod:`~pmaf.pipe`
module."""
def yield_taxonomy_by_identifier(
self, docker: DockerIdentifierMedium, **kwargs: Any
) -> Generator[DockerTaxonomyMedium, None, None]:
"""Yields the next taxonomy :term:`docker` from the given identifier
parameter `docker`
Parameters
----------
docker
The input identifier :term:`docker`
kwargs
Compatibility
Yields
------
Taxonomy :term:`docker`
"""
if isinstance(docker, DockerIdentifierMetabase):
if isinstance(self.mediator, MediatorTaxonomyMetabase):
yield from self.__yield_taxonomy_by_identifier(docker, **kwargs)
else:
raise RuntimeError("`mediator` does not support such request.")
else:
raise TypeError("`docker` must be instance of DockerIdentifierMetabase.")
def yield_phylogeny_by_identifier(
self, docker: DockerIdentifierMedium, **kwargs: Any
    ) -> Generator[DockerPhylogenyMedium, None, None]:
        """Yield the next phylogeny :term:`docker` from the given identifier
parameter `docker`
Parameters
----------
docker
The input identifier :term:`docker`
kwargs
Compatibility
Yields
------
Phylogeny :term:`docker`
"""
if isinstance(docker, DockerIdentifierMetabase):
if isinstance(self.mediator, MediatorPhylogenyMetabase):
yield from self.__yield_phylogeny_by_identifier(docker, **kwargs)
else:
raise RuntimeError("`mediator` does not support such request.")
else:
raise TypeError("`docker` must be instance of DockerIdentifierMetabase.")
def yield_sequence_by_identifier(
self, docker: DockerIdentifierMedium, **kwargs: Any
    ) -> Generator[DockerSequenceMedium, None, None]:
        """Yield the next sequence :term:`docker` from the given identifier
parameter `docker`
Parameters
----------
docker
The input identifier :term:`docker`
kwargs
Compatibility
Yields
------
Sequence :term:`docker`
"""
if isinstance(docker, DockerIdentifierMetabase):
if isinstance(self.mediator, MediatorSequenceMetabase):
yield from self.__yield_sequence_by_identifier(docker, **kwargs)
else:
raise RuntimeError("`mediator` does not support such request.")
else:
raise TypeError("`docker` must be instance of DockerIdentifierMetabase.")
def yield_accession_by_identifier(
self, docker: DockerIdentifierMedium, **kwargs: Any
    ) -> Generator[DockerAccessionMedium, None, None]:
        """Yield the next accession :term:`docker` from the given identifier
parameter `docker`
Parameters
----------
docker
The input identifier :term:`docker`
kwargs
Compatibility
Yields
------
Accession :term:`docker`
"""
if isinstance(docker, DockerIdentifierMetabase):
if isinstance(self.mediator, MediatorAccessionMetabase):
yield from self.__yield_accession_by_identifier(docker, **kwargs)
else:
raise RuntimeError("`mediator` does not support such request.")
else:
raise TypeError("`docker` must be instance of DockerIdentifierMetabase.")
def yield_identifier_by_docker(self, docker, **kwargs):
"""Yield the next identifier :term:`docker` from any given parameter
`docker`. This method will automatically decide which result to produce
        depending on the assigned :term:`mediator` instance.
Parameters
----------
docker
The input :term:`docker`
kwargs
Compatibility
Yields
------
Any type of :term:`docker` depending on assigned :term:`mediator`
"""
if self.verify_docker(docker):
if isinstance(docker, DockerAccessionMetabase):
yield from self.__yield_identifier_by_accession(docker, **kwargs)
elif isinstance(docker, DockerPhylogenyMetabase):
yield from self.__yield_identifier_by_phylogeny(docker, **kwargs)
elif isinstance(docker, DockerTaxonomyMetabase):
yield from self.__yield_identifier_by_taxonomy(docker, **kwargs)
elif isinstance(docker, DockerSequenceMetabase):
yield from self.__yield_identifier_by_sequence(docker, **kwargs)
elif isinstance(docker, DockerIdentifierMetabase):
yield from iter([docker])
else:
raise RuntimeError
else:
raise TypeError("`docker` must be instance of DockerIdentifierMetabase.")
def __yield_accession_by_identifier(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_accession_by_identifier,
docker,
DockerAccessionMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_taxonomy_by_identifier(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_taxonomy_by_identifier,
docker,
DockerTaxonomyMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_sequence_by_identifier(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_sequence_by_identifier,
docker,
DockerSequenceMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_phylogeny_by_identifier(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_phylogeny_by_identifier,
docker,
DockerPhylogenyMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_identifier_by_accession(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_identifier_by_accession,
docker,
DockerIdentifierMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_identifier_by_taxonomy(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_identifier_by_taxonomy,
docker,
DockerIdentifierMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_identifier_by_sequence(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_identifier_by_sequence,
docker,
DockerIdentifierMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __yield_identifier_by_phylogeny(self, docker, chunksize=None, **kwargs):
return self.__process_chunks_by_docker_for_method(
self.mediator.get_identifier_by_phylogeny,
docker,
DockerIdentifierMedium,
chunksize=chunksize,
factor=self.factor,
**kwargs
)
def __process_chunks_by_docker_for_method(
self, method, docker, outlet, chunksize=None, **kwargs
):
"""Method to mine dockers in chunks."""
if docker.singleton:
return iter([method(docker, **kwargs)])
else:
if chunksize is not None:
tmp_chunks_gen = chunk_generator(docker.get_iterator(), chunksize)
tmp_docker_name = docker.name
tmp_docker_metadata = docker.metadata
tmp_docker_type = type(docker)
tmp_outlet = outlet
def chunk_products(**kwargs):
chunk_i = 0
for tmp_chunk in tmp_chunks_gen:
chunk_name = (
str(chunk_i)
if tmp_docker_name is None
else "{}-{}".format(tmp_docker_name, str(chunk_i))
)
tmp_chunk_dockers = {name: docker for name, docker in tmp_chunk}
tmp_docker_container = tmp_docker_type(
tmp_chunk_dockers,
name=chunk_name,
metadata=tmp_docker_metadata,
)
yield self.__process_recursive_by_docker_for_method(
method, tmp_docker_container, tmp_outlet, **kwargs
)
chunk_i += 1
return chunk_products(**kwargs)
else:
return iter(
[
self.__process_recursive_by_docker_for_method(
method, docker, outlet, **kwargs
)
]
)
def __process_recursive_by_docker_for_method(
self, method, docker_container, outlet, **kwargs
):
"""Method to mine non-singleton dockers recursively."""
def nested_parser(docker):
if docker.singleton:
return method(docker, **kwargs)
else:
product_dict = defaultdict(None)
product_metadata = defaultdict(dict)
for ix, sub_docker in docker.get_iterator():
tmp_parsed_docker = nested_parser(sub_docker)
product_dict[ix] = tmp_parsed_docker
product_metadata[ix] = tmp_parsed_docker.metadata
new_metadata = {
"verbose": dict(product_metadata),
"master": docker.wrap_meta(),
}
return outlet(
product_dict,
name=docker.name,
metadata=new_metadata,
_transit=docker_container,
)
return nested_parser(docker_container)
| 36.653968 | 88 | 0.596917 |
4f5fdb2dfe93659c1050cf200089b49db17cfc5e | 927 | py | Python | tests/modules/masked_layer_norm_test.py | jbrry/allennlp | d906175d953bebcc177567ec0157220c3bd1b9ad | [
"Apache-2.0"
] | 11,433 | 2017-06-27T03:08:46.000Z | 2022-03-31T18:14:33.000Z | tests/modules/masked_layer_norm_test.py | jbrry/allennlp | d906175d953bebcc177567ec0157220c3bd1b9ad | [
"Apache-2.0"
] | 4,006 | 2017-06-26T21:45:43.000Z | 2022-03-31T02:11:10.000Z | tests/modules/masked_layer_norm_test.py | jbrry/allennlp | d906175d953bebcc177567ec0157220c3bd1b9ad | [
"Apache-2.0"
] | 2,560 | 2017-06-26T21:16:53.000Z | 2022-03-30T07:55:46.000Z | import numpy as np
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.masked_layer_norm import MaskedLayerNorm
from allennlp.nn import util
class TestMaskedLayerNorm(AllenNlpTestCase):
def test_masked_layer_norm(self):
x_n = np.random.rand(2, 3, 7)
mask_n = np.array([[1, 1, 0], [1, 1, 1]])
x = torch.from_numpy(x_n).float()
mask = torch.from_numpy(mask_n).bool()
layer_norm = MaskedLayerNorm(7, gamma0=0.2)
normed_x = layer_norm(x, mask)
N = 7 * 5
mean = (x_n * np.expand_dims(mask_n, axis=-1)).sum() / N
std = np.sqrt(
(((x_n - mean) * np.expand_dims(mask_n, axis=-1)) ** 2).sum() / N
+ util.tiny_value_of_dtype(torch.float)
)
expected = 0.2 * (x_n - mean) / (std + util.tiny_value_of_dtype(torch.float))
assert np.allclose(normed_x.data.numpy(), expected)
| 31.965517 | 85 | 0.625674 |
4f5f2141dcff18fcd57c67e301c2fae8b25492bc | 1,384 | py | Python | lib/google/cloud/obsolete.py | Blue-IT-Marketing/blue-it-marketing | 51500378ba3b00acc1232fe1893ceeca10e9a491 | [
"MIT"
] | 2 | 2018-02-01T06:30:24.000Z | 2018-04-12T15:39:56.000Z | lib/google/cloud/obsolete.py | Blue-IT-Marketing/blue-it-marketing | 51500378ba3b00acc1232fe1893ceeca10e9a491 | [
"MIT"
] | 2 | 2021-02-08T20:18:59.000Z | 2021-04-30T20:35:11.000Z | lib/google/cloud/obsolete.py | Blue-IT-Marketing/blue-it-marketing | 51500378ba3b00acc1232fe1893ceeca10e9a491 | [
"MIT"
] | 1 | 2018-09-19T05:55:27.000Z | 2018-09-19T05:55:27.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for deprecated code and modules."""
import warnings
import pkg_resources
def complain(distribution_name):
"""Issue a warning if `distribution_name` is installed.
In a future release, this method will be updated to raise ImportError
rather than just send a warning.
Args:
distribution_name (str): The name of the obsolete distribution.
"""
try:
pkg_resources.get_distribution(distribution_name)
warnings.warn(
'The {pkg} distribution is now obsolete. '
'Please `pip uninstall {pkg}`. '
'In the future, this warning will become an ImportError.'.format(
pkg=distribution_name,
),
DeprecationWarning,
)
except pkg_resources.DistributionNotFound:
pass
| 32.186047 | 77 | 0.693642 |
4f5faf194ac6ca67b02a6bac3ee09d97e50fd479 | 5,004 | py | Python | tfx/components/trainer/rewriting/converters.py | joshtemple/tfx | 169519c8149534294f1f9bc39a6597d6a3fcefb2 | [
"Apache-2.0"
] | 1 | 2020-11-24T16:59:37.000Z | 2020-11-24T16:59:37.000Z | tfx/components/trainer/rewriting/converters.py | joshtemple/tfx | 169519c8149534294f1f9bc39a6597d6a3fcefb2 | [
"Apache-2.0"
] | null | null | null | tfx/components/trainer/rewriting/converters.py | joshtemple/tfx | 169519c8149534294f1f9bc39a6597d6a3fcefb2 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converters rewrite models using the provided rewriters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from typing import Text
import tensorflow.compat.v1 as tf
from tfx.components.trainer.rewriting import rewriter
def _invoke_rewriter(src: Text, dst: Text, rewriter_inst: rewriter.BaseRewriter,
src_model_type: rewriter.ModelType,
dst_model_type: rewriter.ModelType):
"""Converts the provided model by invoking the specified rewriters.
Args:
src: Path to the source model.
dst: Path where the destination model is to be written.
rewriter_inst: instance of the rewriter to invoke.
src_model_type: the `rewriter.ModelType` of the source model.
dst_model_type: the `rewriter.ModelType` of the destination model.
Raises:
ValueError: if the source path is the same as the destination path.
"""
if src == dst:
raise ValueError('Source path and destination path cannot match.')
original_model = rewriter.ModelDescription(src_model_type, src)
rewritten_model = rewriter.ModelDescription(dst_model_type, dst)
rewriter_inst.perform_rewrite(original_model, rewritten_model)
class RewritingExporter(tf.estimator.Exporter):
"""This class invokes the base exporter and a series of rewriters."""
def __init__(self, base_exporter: tf.estimator.Exporter,
rewriter_inst: rewriter.BaseRewriter):
"""Initializes the rewriting exporter.
Args:
base_exporter: The exporter of the original model.
rewriter_inst: The rewriter instance to invoke. Must inherit from
`rewriter.BaseRewriter`.
"""
self._base_exporter = base_exporter
self._rewriter_inst = rewriter_inst
@property
def name(self):
"""Name of the exporter."""
return self._base_exporter.name
def export(self, estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
"""Exports the given `Estimator` to a specific format.
    Performs the export as defined by the base_exporter and then invokes the
    specified rewriter.
Args:
estimator: the `Estimator` to export.
export_path: A string containing a directory where to write the export.
checkpoint_path: The checkpoint path to export.
eval_result: The output of `Estimator.evaluate` on this checkpoint.
is_the_final_export: This boolean is True when this is an export in the
end of training. It is False for the intermediate exports during the
training. When passing `Exporter` to `tf.estimator.train_and_evaluate`
`is_the_final_export` is always False if `TrainSpec.max_steps` is
`None`.
Returns:
The string path to the base exported directory or `None` if export is
skipped.
Raises:
RuntimeError: Unable to create a temporary rewrite directory.
"""
base_path = self._base_exporter.export(estimator, export_path,
checkpoint_path, eval_result,
is_the_final_export)
if not base_path:
return None
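    # Added comment: the rewrite is performed in a uniquely named temporary
    # directory next to the export; only after the rewriter succeeds is the
    # original SavedModel removed and the rewritten copy renamed into its
    # place, so a failed rewrite does not clobber the base export.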
tmp_rewrite_folder = 'tmp-rewrite-' + str(int(time.time()))
tmp_rewrite_path = os.path.join(export_path, tmp_rewrite_folder)
if tf.io.gfile.exists(tmp_rewrite_path):
raise RuntimeError('Unable to create a unique temporary rewrite path.')
tf.io.gfile.makedirs(tmp_rewrite_path)
_invoke_rewriter(base_path, tmp_rewrite_path, self._rewriter_inst,
rewriter.ModelType.SAVED_MODEL,
rewriter.ModelType.ANY_MODEL)
tf.io.gfile.rmtree(base_path)
tf.io.gfile.rename(tmp_rewrite_path, base_path)
return base_path
def rewrite_saved_model(
src: Text,
dst: Text,
rewriter_inst: rewriter.BaseRewriter,
dst_model_type: rewriter.ModelType = rewriter.ModelType.SAVED_MODEL):
"""Rewrites the provided SavedModel.
Args:
src: location of the saved_model to rewrite.
dst: location of the rewritten saved_model.
rewriter_inst: the rewriter instance to invoke. Must inherit from
`rewriter.BaseRewriter`.
dst_model_type: the `rewriter.ModelType` of the destination model.
"""
_invoke_rewriter(src, dst, rewriter_inst, rewriter.ModelType.SAVED_MODEL,
dst_model_type)
| 36.525547 | 80 | 0.718225 |
4f607351c5ee15f809b0de6469b70ba046c5d96f | 14,321 | py | Python | checkraid.py | edelwi/zabbix-lsi-status | 696fbbc8498c81aa2aa1427f4ccd6724a93a00c9 | [
"Apache-2.0"
] | null | null | null | checkraid.py | edelwi/zabbix-lsi-status | 696fbbc8498c81aa2aa1427f4ccd6724a93a00c9 | [
"Apache-2.0"
] | null | null | null | checkraid.py | edelwi/zabbix-lsi-status | 696fbbc8498c81aa2aa1427f4ccd6724a93a00c9 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python3
import re
import subprocess
import unicodedata
from os import remove, listdir, makedirs
from os.path import exists, join, isfile
LLD_JSON_PATH = '/var/local/lsi_zabbix'
LLD_METRICS_PATH = '/var/local/lsi_zabbix/metrics' # Be careful all files in this directory deleted automatically.
LLD_CONTROLLERS = 'controllers.json'
LLD_ARRAYS = 'arrays.json'
LLD_DISKS = 'disks.json'
MEGACLI = '/usr/bin/MegaCli64' # full path to MegaCli64
ADAPTER_INFO_CLI = [MEGACLI, '-AdpAllinfo', '-aALL']
DRIVES_INFO_CLI = [MEGACLI, '-LdPdInfo', '-aALL']
ARRAY_NAME_PATT = re.compile(r'(?P<array_name>Virtual Drive\s+#\d{1,2})')
PD_NAME_PATT = re.compile(r'(?P<pd_name>Physical Drive\s+#\d{1,2})')
ADAPTER_PATT = re.compile(r'^(?P<adp_name>Adapter\s+#(?P<adp_num>\d{1,2}))$')
VIRTUAL_DRIVE_PATT = re.compile(r'^Virtual Drive:\s+(?P<vd_id>\d{1,2})\s+\(Target Id:\s+(?P<tgt_id>\d{1,2})\)$')
PHYSICAL_DRIVE_PATT = re.compile(r'PD:\s+(?P<pd_id>\d{1,2})\s+Information$')
PARAM_PATT = re.compile(r'^\s*(?P<key>[^:]+)\s*:\s*(?P<value>.*)$')
# Data processing pipe.
def pipe(data, *fseq):
for fn in fseq:
data = fn(data)
return data
def debug(data):
print(data)
return data
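# Illustrative example (not part of the original script): `pipe` threads a
# value through the given functions from left to right, e.g.
# pipe(3, lambda x: x + 1, str) returns "4". The __main__ block below uses it
# to chain run_command -> parsers -> LLD/metric file writers.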
def run_command(cli: list) -> str:
"""Function for running command and get detailed output about controllers."""
p = subprocess.Popen(
cli,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, err) = p.communicate()
if err:
        raise ValueError(f'MegaCli call error {err}')
return output
def create_dir(full_dir_path):
if not exists(full_dir_path):
makedirs(full_dir_path)
def remove_file(full_file_name):
if exists(full_file_name):
remove(full_file_name)
def remove_all_metrics_files(data):
"""Function to remove all files in LLD_METRICS_PATH directory."""
if exists(LLD_METRICS_PATH):
[remove(join(LLD_METRICS_PATH, _)) for _ in listdir(LLD_METRICS_PATH) if isfile(join(LLD_METRICS_PATH, _))]
return data
# from https://stackoverflow.com/questions/4814040/allowed-characters-in-filename
def clean_name(name, replace_space_with=None):
"""
Remove invalid file name chars from the specified name
:param name: the file name
:param replace_space_with: if not none replace space with this string
:return: a valid name for Win/Mac/Linux
"""
# ref: https://en.wikipedia.org/wiki/Filename
# ref: https://stackoverflow.com/questions/4814040/allowed-characters-in-filename
# No control chars, no: /, \, ?, %, *, :, |, ", <, >
# remove control chars
name = ''.join(ch for ch in name if unicodedata.category(ch)[0] != 'C')
cleaned_name = re.sub(r'[/\\?%*:|"<>]', '_', name)
if replace_space_with is not None:
return cleaned_name.replace(' ', replace_space_with)
return cleaned_name
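# Illustrative examples (not part of the original script):
#   clean_name('a/b:c')                       -> 'a_b_c'
#   clean_name('a b', replace_space_with='-') -> 'a-b'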
def convert_to_dict(stdout: str):
lines = stdout.decode('utf-8').split("\n")
lines = list(filter(None, lines))
info_dict = _split_by_adapters(lines)
return info_dict
def convert_drive_info_to_dict(stdout: str):
lines = stdout.decode('utf-8').split("\n")
lines = list(filter(None, lines))
info_dict = _split_drive_info_by_adapters(lines)
return info_dict
def _split_by_params(params: list) -> dict:
params_dict = {}
for item in params:
if isinstance(item, str):
match = re.search(PARAM_PATT, item)
if match:
param_key = match.groupdict()['key'].strip()
param_value = match.groupdict()['value'].strip()
if param_key != 'Port' and param_value != 'Address':
params_dict[param_key] = param_value
return params_dict
def _split_by_sections(proto_section: list):
sections = {}
patams_delimiter = ' ================'
indexes = [i for i, e in enumerate(proto_section) if e == patams_delimiter]
start_index = 0
for index in indexes:
if start_index == 0:
start_index = index
continue
section_name = str(proto_section[start_index - 1]).strip(' :')
sections[section_name] = _split_by_params(proto_section[start_index + 1:index])
start_index = index
else:
index = len(proto_section) - 1
section_name = str(proto_section[start_index - 1]).strip()
sections[section_name] = _split_by_params(proto_section[start_index + 1:index])
return sections
def _split_by_adapters(lines: list):
adapters = {}
adapter_delimiter = '=' * 78
indexes = [i for i, e in enumerate(lines) if e == adapter_delimiter]
start_index = 0
for index in indexes:
if start_index == 0:
start_index = index
continue
adapter_name = lines[start_index - 1].strip()
adapters[adapter_name] = _split_by_sections(lines[start_index + 1:index])
start_index = index
else:
index = len(lines) - 1
adapter_name = lines[start_index - 1].strip()
adapters[adapter_name] = _split_by_sections(lines[start_index + 1:index])
return adapters
def _split_drive_info_by_adapters(lines: list):
adapters = {}
indexes = [i for i, e in enumerate(lines) if re.search(ADAPTER_PATT, e)]
start_index = 0
for index in indexes:
if start_index == 0:
start_index = index
adapters = _split_by_params(lines[0:start_index])
continue
adapters[lines[start_index]] = _split_drive_info_by_vd(lines[start_index + 1:index])
start_index = index
else:
index = len(lines) - 1
adapters[lines[start_index]] = _split_drive_info_by_vd(lines[start_index + 1:index])
return adapters
def _split_drive_info_by_vd(lines: list):
virt_drives = {}
indexes = [i for i, e in enumerate(lines) if re.search(VIRTUAL_DRIVE_PATT, e)]
start_index = 0
for index in indexes:
if start_index == 0:
start_index = index
virt_drives = _split_by_params(lines[0:start_index])
continue
match = re.search(VIRTUAL_DRIVE_PATT, lines[start_index])
if match:
vd_name = f'Virtual Drive #{match.groupdict()["vd_id"]}'
virt_drives[vd_name] = _split_drive_info_by_pd(lines[start_index + 1:index])
start_index = index
else:
index = len(lines)
match = re.search(VIRTUAL_DRIVE_PATT, lines[start_index])
if match:
vd_name = f'Virtual Drive #{match.groupdict()["vd_id"]}'
virt_drives[vd_name] = _split_drive_info_by_pd(lines[start_index + 1:index])
return virt_drives
def _split_drive_info_by_pd(lines: list):
physical_drives = {}
indexes = [i for i, e in enumerate(lines) if re.search(PHYSICAL_DRIVE_PATT, e)]
start_index = 0
for index in indexes:
if start_index == 0:
start_index = index
physical_drives = _split_by_params(lines[0:start_index])
continue
match = re.search(PHYSICAL_DRIVE_PATT, lines[start_index])
if match:
pd_name = f'Physical Drive #{match.groupdict()["pd_id"]}'
physical_drives[pd_name] = _split_by_params(lines[start_index + 1:index])
start_index = index
else:
index = len(lines)
match = re.search(PHYSICAL_DRIVE_PATT, lines[start_index])
if match:
pd_name = f'Physical Drive #{match.groupdict()["pd_id"]}'
physical_drives[pd_name] = _split_by_params(lines[start_index + 1:index])
return physical_drives
def pretty_print(info_dict, level=0):
"""Recursive function for printing dictionary with raid detailed information."""
indent = ' ' * 4
current_level = level
for k, v in info_dict.items():
if isinstance(v, str) or v is None:
print(f"{indent * current_level}{k}: {v}")
else:
print()
print(f"{indent * current_level}{k}:")
pretty_print(v, level=current_level + 1)
def lld_discovery_controllers(data):
"""Function for create LLD json with information about controllers."""
file_ = LLD_CONTROLLERS
discovery_file = join(LLD_JSON_PATH, file_)
create_dir(LLD_JSON_PATH)
remove_file(discovery_file)
controllers = data.keys()
json_data = ''
for item in controllers:
json_data = json_data + f'{{"{{#CTRLNAME}}":"{clean_name(item)}"}},'
json_data = json_data[:-1]
with open(discovery_file, 'w') as fl:
print(f'{{"data":[{json_data}]}}', file=fl)
return data
def lld_discovery_arrays(data):
"""Function for create LLD json with information about RAID arrays."""
file_ = LLD_ARRAYS
discovery_file = join(LLD_JSON_PATH, file_)
create_dir(LLD_JSON_PATH)
remove_file(discovery_file)
json_data = ''
for ctrl, ctrl_value in data.items():
if isinstance(ctrl_value, dict):
for ar_key, ar_value in ctrl_value.items():
match = re.search(ARRAY_NAME_PATT, ar_key)
if match:
ar_name = match.groupdict()['array_name']
json_data = json_data + f'{{"{{#CTRLNAME}}":"{clean_name(ctrl)}","' \
f'{{#ARRAYNAME}}":"{clean_name(ar_name)}"}},'
# else:
# print(ar_key)
json_data = json_data[:-1]
with open(discovery_file, 'w') as fl:
print(f'{{"data":[{json_data}]}}', file=fl)
return data
def lld_discovery_pds(data):
"""Function for create LLD json with information about RAID physical disks."""
file_ = LLD_DISKS
discovery_file = join(LLD_JSON_PATH, file_)
create_dir(LLD_JSON_PATH)
remove_file(discovery_file)
json_data = ''
for ctrl, ctrl_value in data.items():
if isinstance(ctrl_value, dict):
for ar_key, ar_value in ctrl_value.items():
match = re.search(ARRAY_NAME_PATT, ar_key)
if match:
ar_name = match.groupdict()['array_name']
for pd_key, pd_value in ar_value.items():
match2 = re.search(PD_NAME_PATT, pd_key)
if match2:
pd_name = match2.groupdict()['pd_name']
json_data = json_data + f'{{"{{#CTRLNAME}}":"{clean_name(ctrl)}","{{#ARRAYNAME}}":"' \
f'{clean_name(ar_name)}","{{#PDNAME}}":"{clean_name(pd_name)}"}},'
json_data = json_data[:-1]
with open(discovery_file, 'w') as fl:
print(f'{{"data":[{json_data}]}}', file=fl)
return data
def get_ctrl_metrics(data):
"""Function for create controllers metrics files."""
create_dir(LLD_METRICS_PATH)
for ctrl, ctrl_value in data.items():
file_name = clean_name(ctrl)
full_file_name = join(LLD_METRICS_PATH, file_name)
with open(full_file_name, 'w') as fl:
if isinstance(ctrl_value, dict):
for metric, value in ctrl_value.items():
if isinstance(value, str):
print(f"{metric}={value}", file=fl)
return data
def get_ctrl_sub_metrics(data):
"""Function for create controllers metrics files."""
create_dir(LLD_METRICS_PATH)
for ctrl, ctrl_value in data.items():
file_name = clean_name(ctrl)
full_file_name = join(LLD_METRICS_PATH, file_name)
with open(full_file_name, 'w') as fl:
if isinstance(ctrl_value, dict):
for ctl_cat, ctl_cat_val in ctrl_value.items():
if isinstance(ctl_cat_val, dict):
for metric, value in ctl_cat_val.items():
if isinstance(value, str):
print(f"{metric}={value}", file=fl)
return data
def get_array_metrics(data):
"""Function for create RAID arrays metrics files."""
create_dir(LLD_METRICS_PATH)
for ctrl, ctrl_value in data.items():
if isinstance(ctrl_value, dict):
for ar_key, ar_value in ctrl_value.items():
match = re.search(ARRAY_NAME_PATT, ar_key)
if match:
ar_name = match.groupdict()['array_name']
file_name = clean_name(f"{ctrl}__{ar_name}")
full_file_name = join(LLD_METRICS_PATH, file_name)
with open(full_file_name, 'w') as fl:
if isinstance(ar_value, dict):
for metric, value in ar_value.items():
if isinstance(value, str):
print(f"{metric}={value}", file=fl)
return data
def get_pd_metrics(data):
"""Function for create physical disks metrics files."""
create_dir(LLD_METRICS_PATH)
for ctrl, ctrl_value in data.items():
if isinstance(ctrl_value, dict):
for ar_key, ar_value in ctrl_value.items():
match = re.search(ARRAY_NAME_PATT, ar_key)
if match:
ar_name = match.groupdict()['array_name']
for pd_key, pd_value in ar_value.items():
match2 = re.search(PD_NAME_PATT, pd_key)
if match2:
pd_name = match2.groupdict()['pd_name']
file_name = clean_name(f"{ctrl}__{ar_name}__{pd_name}")
full_file_name = join(LLD_METRICS_PATH, file_name)
with open(full_file_name, 'w') as fl:
if isinstance(pd_value, dict):
for metric, value in pd_value.items():
if isinstance(value, str):
print(f"{metric}={value}", file=fl)
return data
if __name__ == '__main__':
pipe(
run_command(ADAPTER_INFO_CLI),
convert_to_dict,
remove_all_metrics_files,
get_ctrl_sub_metrics,
# pretty_print,
)
pipe(
run_command(DRIVES_INFO_CLI),
convert_drive_info_to_dict,
lld_discovery_controllers,
lld_discovery_arrays,
lld_discovery_pds,
get_array_metrics,
get_pd_metrics,
# pretty_print
)
| 36.073048 | 118 | 0.605824 |
4f5f1dfd29021637fb53794616233f98e366ba3a | 1,842 | py | Python | tests/fugue/dataframe/test_arrow_dataframe.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | tests/fugue/dataframe/test_arrow_dataframe.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | tests/fugue/dataframe/test_arrow_dataframe.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | [
"Apache-2.0"
] | null | null | null | import json
from datetime import datetime
from typing import Any
import numpy as np
import pandas as pd
from fugue.dataframe import ArrowDataFrame, PandasDataFrame
from fugue.dataframe.utils import _df_eq as df_eq
from fugue_test.dataframe_suite import DataFrameTests
from pytest import raises
from triad.collections.schema import Schema, SchemaError
from triad.exceptions import InvalidOperationError
class ArrowDataFrameTests(DataFrameTests.Tests):
def df(
self, data: Any = None, schema: Any = None, metadata: Any = None
) -> ArrowDataFrame:
return ArrowDataFrame(data, schema, metadata)
def test_init():
df = ArrowDataFrame(schema="a:str,b:int")
assert df.empty
assert df.schema == "a:str,b:int"
assert df.is_bounded
df = ArrowDataFrame(pd.DataFrame([], columns=["a", "b"]), schema="a:str,b:int")
assert df.empty
assert df.schema == "a:str,b:int"
assert df.is_bounded
data = [["a", "1"], ["b", "2"]]
df = ArrowDataFrame(data, "a:str,b:str")
assert [["a", "1"], ["b", "2"]] == df.as_array(type_safe=True)
data = [["a", 1], ["b", 2]]
df = ArrowDataFrame(data, "a:str,b:int")
assert [["a", 1.0], ["b", 2.0]] == df.as_array(type_safe=True)
df = ArrowDataFrame(data, "a:str,b:double")
assert [["a", 1.0], ["b", 2.0]] == df.as_array(type_safe=True)
ddf = ArrowDataFrame(df.native)
assert [["a", 1.0], ["b", 2.0]] == ddf.as_array(type_safe=True)
df = ArrowDataFrame(df.as_pandas(), "a:str,b:double")
assert [["a", 1.0], ["b", 2.0]] == df.as_array(type_safe=True)
df = ArrowDataFrame(df.as_pandas()["b"])
assert [[1.0], [2.0]] == df.as_array(type_safe=True)
df = ArrowDataFrame([], "x:str,y:double")
assert df.empty
assert df.is_local
assert df.is_bounded
raises(Exception, lambda: ArrowDataFrame(123))
| 32.892857 | 83 | 0.652009 |
4f5ffec669400e405fdc5a526b48ad05590c44e8 | 26,551 | py | Python | sonnet/python/modules/batch_norm_v2_test.py | lukas/sonnet | d7f936954d93660475bc4bdd02394ccfb34d76dd | [
"Apache-2.0"
] | 3 | 2019-07-31T12:36:26.000Z | 2020-12-16T14:37:19.000Z | sonnet/python/modules/batch_norm_v2_test.py | priyansh19/sonnet | b3ff3081de040776aa3dae536cd5fae483cb4997 | [
"Apache-2.0"
] | null | null | null | sonnet/python/modules/batch_norm_v2_test.py | priyansh19/sonnet | b3ff3081de040776aa3dae536cd5fae483cb4997 | [
"Apache-2.0"
] | 3 | 2019-07-29T08:55:20.000Z | 2019-07-30T06:36:56.000Z | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.batch_norm_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow as tf
def _add_fused_and_unknown_batch_params(test_case_parameters):
for params in test_case_parameters:
yield dict(fused=False, batch_unknown=False, **params)
yield dict(fused=True, batch_unknown=False, **params)
yield dict(fused=False, batch_unknown=True, **params)
yield dict(fused=True, batch_unknown=True, **params)
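# Added comment: every incoming test-case parameter dict is expanded into four
# variants, covering all combinations of fused in {False, True} and
# batch_unknown in {False, True}.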
class BatchNormV2Test(parameterized.TestCase, tf.test.TestCase):
def testConstruct(self):
inputs = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
batch_norm1 = snt.BatchNormV2(offset=False, scale=False, fused=False)
batch_norm1(inputs, is_training=True)
err = "Batch normalization doesn't have an offset, so no beta"
with self.assertRaisesRegexp(snt.Error, err):
_ = batch_norm1.beta
err = "Batch normalization doesn't have a scale, so no gamma"
with self.assertRaisesRegexp(snt.Error, err):
_ = batch_norm1.gamma
batch_norm2 = snt.BatchNormV2(offset=True, scale=False)
batch_norm2(inputs, is_training=True)
_ = batch_norm2.beta
batch_norm3 = snt.BatchNormV2(offset=False, scale=True)
batch_norm3(inputs, is_training=True)
_ = batch_norm3.gamma
batch_norm4 = snt.BatchNormV2(offset=True, scale=True)
batch_norm4(inputs, is_training=True)
_ = batch_norm4.beta
_ = batch_norm4.gamma
batch_norm4(inputs, is_training=True, test_local_stats=True)
batch_norm4(inputs,
is_training=tf.constant(True),
test_local_stats=tf.constant(True))
is_training_ph = tf.placeholder(tf.bool)
test_local_stats_ph = tf.placeholder(tf.bool)
batch_norm4(inputs,
is_training=is_training_ph,
test_local_stats=test_local_stats_ph)
@parameterized.parameters(
["NC", "NWC", "NHWC", "NDHWC", "NCW", "NCHW", "NCDHW"])
def testDataFormats(self, data_format):
"""Check that differing data formats give the correct output shape."""
dim_sizes = {
"N": None,
"D": 10,
"H": 64,
"W": 32,
"C": 3
}
inputs = tf.placeholder_with_default(
tf.zeros([dim_sizes[dim_name] or 5 for dim_name in data_format]),
[dim_sizes[dim_name] for dim_name in data_format])
bn_data_formats = [data_format]
if data_format.endswith("C"):
bn_data_formats.append(None)
for bn_data_format in bn_data_formats:
bn = snt.BatchNormV2(data_format=bn_data_format, offset=False)
bn(inputs, is_training=True)
mean_shape = bn.moving_mean.get_shape()
correct_mean_shape = [
dim_sizes["C"] if dim_name == "C" else 1 for dim_name in data_format
]
self.assertEqual(mean_shape, correct_mean_shape)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
for bn_data_format in "NC NWC NHWC NDHWC NCW NCHW NCDHW".split():
if len(data_format) != len(bn_data_format):
bn = snt.BatchNormV2(data_format=bn_data_format, offset=False)
err = r"Incorrect data format {} for input shape .*".format(
bn_data_format)
with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):
outputs = bn(inputs, is_training=True)
sess.run(outputs)
@parameterized.named_parameters(
("Float32", tf.float32),
)
def testDataType(self, dtype):
inputs = tf.placeholder(dtype, shape=[None, 64, 32, 3])
batch_norm = snt.BatchNormV2(offset=True, scale=True)
output = batch_norm(inputs, is_training=True)
self.assertEqual(dtype, output.dtype)
self.assertEqual(dtype, batch_norm.moving_mean.dtype.base_dtype)
self.assertEqual(dtype, batch_norm.moving_variance.dtype.base_dtype)
self.assertEqual(dtype, batch_norm.gamma.dtype.base_dtype)
self.assertEqual(dtype, batch_norm.beta.dtype.base_dtype)
@parameterized.named_parameters(
("Float16", tf.float16),
("BFloat16", tf.bfloat16),
)
def test16Bit(self, dtype):
inputs = tf.placeholder(dtype, shape=[None, 64, 32, 3])
batch_norm = snt.BatchNormV2(offset=True, scale=True, fused=False)
output = batch_norm(inputs, is_training=True)
self.assertEqual(dtype, output.dtype)
self.assertEqual(tf.float32, batch_norm.moving_mean.dtype.base_dtype)
self.assertEqual(tf.float32, batch_norm.moving_variance.dtype.base_dtype)
self.assertEqual(dtype, batch_norm.gamma.dtype.base_dtype)
self.assertEqual(dtype, batch_norm.beta.dtype.base_dtype)
def _get_inputs(self, dtype=tf.float32):
v = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=dtype.as_numpy_dtype)
input_v = np.array([v, v, v, v, v, v, v])
inputs = tf.constant(input_v)
return v, input_v, inputs
def testUpdateImproveStatistics(self):
"""Test that updating the moving_mean improves statistics."""
_, _, inputs = self._get_inputs()
# Use small decay_rate to update faster.
bn = snt.BatchNormV2(
offset=False,
scale=False,
decay_rate=0.1,
update_ops_collection=tf.GraphKeys.UPDATE_OPS)
out1 = bn(inputs, is_training=False, test_local_stats=False)
# Build the update ops.
bn(inputs, is_training=True)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
out_v = sess.run(out1)
# Before updating the moving_mean the results are off.
self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 2, 5)
sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
# After updating the moving_mean the results are better.
out_v = sess.run(out1)
self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 1, 2)
@parameterized.named_parameters(
("Float16", tf.float16),
("Float32", tf.float32),
)
def testCheckStatsDouble(self, dtype):
"""The correct statistics are being computed for double connection.
Connected in parallel, it's ill-defined what order the updates will happen
in. A double update could happen, or two sequential updates. E.g. If
decay_rate is 0.9, the start value is 1.0, and the target value is 0.0, the
value could progress as
1.00 -> 0.90 -> 0.81,
if the second update uses the fresh second value. Or as
1.00 -> 0.90 -> 0.80
if the second update uses the stale first value.
We fix this here by running them in sequential run calls to ensure that this
test is deterministic.
The two situations are minimally different, especially if decay_rate is
close to one (e.g. the default of 0.999).
Args:
dtype: TensorFlow datatype of input test batch.
"""
v, _, inputs = self._get_inputs(dtype)
bn = snt.BatchNormV2(
offset=False,
scale=False,
decay_rate=0.9,
update_ops_collection=tf.GraphKeys.UPDATE_OPS)
with tf.name_scope("net1"):
bn(inputs, is_training=True)
with tf.name_scope("net2"):
bn(inputs, is_training=True)
update_ops_1 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net1"))
self.assertLen(update_ops_1, 2)
update_ops_2 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net2"))
self.assertLen(update_ops_2, 2)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
self.assertAllClose(np.zeros([1, 6]), mm)
self.assertAllClose(np.ones([1, 6]), mv)
sess.run(update_ops_1)
sess.run(update_ops_2)
mm, mv = sess.run([bn.moving_mean,
bn.moving_variance])
correct_mm = (1.0 - bn._decay_rate) * v
correct_mm = (1.0 - bn._decay_rate) * v + bn._decay_rate * correct_mm
correct_mv = np.ones([1, 6]) * bn._decay_rate**2
atol = 1.e-2 if dtype == tf.float16 else 1.e-6
self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm, atol=atol)
self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv, atol=atol)
def testCheckStatsPython(self):
"""The correct normalization is being used for different Python flags."""
v, input_v, inputs = self._get_inputs()
bn = snt.BatchNormV2(
offset=False,
scale=False,
decay_rate=0.5,
update_ops_collection=tf.GraphKeys.UPDATE_OPS
)
out1 = bn(inputs, is_training=True, test_local_stats=True)
out2 = bn(inputs, is_training=False, test_local_stats=True)
out3 = bn(inputs, is_training=False, test_local_stats=False)
update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.assertLen(update_ops, 2)
with tf.control_dependencies(update_ops):
out1 = tf.identity(out1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
out_v = sess.run(out1)
mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
# Single moving average steps should have happened.
correct_mm = (1.0 - bn._decay_rate) * v
correct_mv = np.ones([1, 6]) * bn._decay_rate
self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm)
self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv)
self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5)
out2_, out3_ = sess.run([out2, out3])
# Out2: Tested using local batch stats.
# Better numerical precision due to using shifted estimators.
self.assertAllClose(np.zeros([7, 6]), out2_, rtol=1e-6, atol=1e-5)
# Out3: Tested using moving average stats.
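# At test time the expected output is
# (x - moving_mean) / sqrt(moving_variance + eps), as asserted below.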
self.assertAllClose(
(input_v - mm) / np.sqrt(mv + bn._eps),
out3_)
@parameterized.named_parameters(
("UseUpdateCollection", tf.GraphKeys.UPDATE_OPS),
("UseDifferentUpdateCollection", "my_update_ops"),
("UseControlDependencies", None),
)
def testCheckStatsInGraph(self, update_ops_collection):
"""The correct normalization is being used for different TF flags."""
v, input_v, inputs = self._get_inputs()
bn = snt.BatchNormV2(
offset=False,
scale=False,
decay_rate=0.5,
update_ops_collection=update_ops_collection)
is_training = tf.placeholder(tf.bool)
test_local_stats = tf.placeholder(tf.bool)
out = bn(inputs,
is_training=is_training,
test_local_stats=test_local_stats)
if update_ops_collection is not None:
update_ops = tuple(tf.get_collection(update_ops_collection))
self.assertLen(update_ops, 2)
with tf.control_dependencies(update_ops):
out = tf.identity(out)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
# Run with `is_training=True`, `test_local_stats=True`.
out_v = sess.run(out, feed_dict={is_training: True,
test_local_stats: True})
# Moving averages not updated until after calculation so shifted
# stats are poor.
self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5)
ops = (bn.moving_mean, bn.moving_variance)
mm1, mv1 = sess.run(ops)
# Single moving average step should have happened.
correct_mm = (1.0 - bn._decay_rate) * v
correct_mv = np.ones([1, 6]) * bn._decay_rate
self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm1)
self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv1)
# Run with `is_training=False`, `test_local_stats=True`.
# Should have used local batch stats.
out_v = sess.run(out, feed_dict={is_training: False,
test_local_stats: True})
# Moving averages should not have changed.
mm2, mv2 = sess.run(ops)
self.assertAllClose(mm1, mm2)
self.assertAllClose(mv1, mv2)
self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5)
# Run with `is_training=False`, `test_local_stats=False`.
# Should have used moving average stats.
out_v = sess.run(out, feed_dict={is_training: False,
test_local_stats: False})
# Moving averages should not have changed.
mm3, mv3 = sess.run(ops)
self.assertAllClose(mm1, mm3)
self.assertAllClose(mv1, mv3)
self.assertAllClose(
(input_v - mm3) / np.sqrt(mv3 + bn._eps),
out_v)
def testSharing(self):
"""Check that the correct number of variables are made when sharing."""
inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
bn = snt.BatchNormV2(
offset=True,
scale=True,
update_ops_collection=tf.GraphKeys.UPDATE_OPS)
bn(inputs1, is_training=True)
bn(inputs2, is_training=False)
self.assertLen(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), 4)
# We should have one set of update ops
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertLen(update_ops, 2)
def testUpdatesInsideCond(self):
"""Demonstrate that updates inside a cond fail."""
_, input_v, inputs = self._get_inputs()
bn = snt.BatchNormV2(
offset=False,
scale=False,
decay_rate=0.5,
update_ops_collection=tf.GraphKeys.UPDATE_OPS)
condition = tf.placeholder(tf.bool)
cond = tf.cond(condition,
lambda: bn(inputs, is_training=True),
lambda: inputs)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
out_v = sess.run(cond, feed_dict={condition: False})
self.assertAllClose(input_v, out_v)
out_v = sess.run(cond, feed_dict={condition: True})
self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-4, atol=1e-4)
# Variables are accessible outside the tf.cond()
mm, mv = sess.run([bn.moving_mean, bn.moving_variance])
self.assertAllClose(np.zeros([1, 6]), mm)
self.assertAllClose(np.ones([1, 6]), mv)
# Tensors are not accessible outside the tf.cond()
with self.assertRaisesRegexp(ValueError, "Operation"):
sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
def testVariableBatchSize(self):
"""Check the inputs batch_size can change."""
inputs_shape = [10, 10]
inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape)
bn = snt.BatchNormV2(
offset=False, scale=False)
# Outputs should be equal to inputs.
out = bn(inputs,
is_training=False,
test_local_stats=False)
init = tf.global_variables_initializer()
update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
with self.test_session() as sess:
sess.run(init)
for batch_size in [1, 3, 10]:
input_data = np.random.rand(batch_size, *inputs_shape)
out_v = sess.run(out, feed_dict={inputs: input_data})
self.assertAllClose(input_data / np.sqrt(1.0 + bn._eps), out_v)
sess.run(update_ops, feed_dict={inputs: input_data})
def testInvalidInitializerParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
snt.BatchNormV2(
initializers={"not_gamma": tf.contrib.layers.l1_regularizer(0.5)})
err = "Initializer for 'gamma' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.BatchNormV2(initializers={"gamma": tf.zeros([1, 2, 3])})
def testInvalidPartitionerParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"):
snt.BatchNormV2(
partitioners={"not_gamma": tf.contrib.layers.l1_regularizer(0.5)})
err = "Partitioner for 'gamma' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.BatchNormV2(partitioners={"gamma": tf.zeros([1, 2, 3])})
def testInvalidRegularizationParameters(self):
with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
snt.BatchNormV2(
regularizers={"not_gamma": tf.contrib.layers.l1_regularizer(0.5)})
err = "Regularizer for 'gamma' is not a callable function"
with self.assertRaisesRegexp(TypeError, err):
snt.BatchNormV2(regularizers={"gamma": tf.zeros([1, 2, 3])})
@parameterized.named_parameters(
("BNNoOffsetScale", False, True),
("BNNoOffsetNoScale", False, False),
("BNOffsetScale", True, True),
("BNOffsetNoScale", True, False),
)
def testInitializers(self, offset, scale):
initializers = {
"moving_mean": tf.constant_initializer(2.0),
"moving_variance": tf.constant_initializer(3.0),
}
if scale:
initializers["gamma"] = tf.constant_initializer(4.0)
if offset:
initializers["beta"] = tf.constant_initializer(5.0)
inputs_shape = [10, 10]
inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape)
bn = snt.BatchNormV2(
offset=offset,
scale=scale,
initializers=initializers)
self.assertEqual(bn.initializers, initializers)
bn(inputs, is_training=True)
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
ones_v = np.ones([1, 1, inputs_shape[-1]])
self.assertAllClose(bn.moving_mean.eval(), ones_v * 2.0)
self.assertAllClose(bn.moving_variance.eval(), ones_v * 3.0)
if scale:
self.assertAllClose(bn.gamma.eval(), ones_v * 4.0)
if offset:
self.assertAllClose(bn.beta.eval(), ones_v * 5.0)
@parameterized.named_parameters(
("BNNoOffsetScale", False, True),
("BNNoOffsetNoScale", False, False),
("BNOffsetScale", True, True),
("BNOffsetNoScale", True, False),
)
def testRegularizersInRegularizationLosses(self, offset, scale):
regularizers = {}
if offset:
regularizers["beta"] = tf.contrib.layers.l1_regularizer(scale=0.5)
if scale:
regularizers["gamma"] = tf.contrib.layers.l2_regularizer(scale=0.5)
inputs_shape = [10, 10]
inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape)
bn = snt.BatchNormV2(
offset=offset,
scale=scale,
regularizers=regularizers)
self.assertEqual(bn.regularizers, regularizers)
bn(inputs, is_training=True)
graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if not offset and not scale:
self.assertFalse(graph_regularizers)
if offset and not scale:
self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
if scale and not offset:
self.assertRegexpMatches(graph_regularizers[0].name, ".*l2_regularizer.*")
if scale and offset:
self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")
@parameterized.named_parameters(
("BNNoOffsetScale", False, True),
("BNNoOffsetNoScale", False, False),
("BNOffsetScale", True, True),
("BNOffsetNoScale", True, False),
)
def testPartitioners(self, offset, scale):
partitioners = {}
if scale:
partitioners["gamma"] = tf.fixed_size_partitioner(num_shards=2)
if offset:
partitioners["beta"] = tf.fixed_size_partitioner(num_shards=2)
inputs_shape = [10, 10]
inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape)
bn = snt.BatchNormV2(
offset=offset,
scale=scale,
partitioners=partitioners)
self.assertEqual(bn.partitioners, partitioners)
bn(inputs, is_training=True)
if scale:
self.assertLen(tf.global_variables("batch_norm/gamma"), 2)
if offset:
self.assertLen(tf.global_variables("batch_norm/beta"), 2)
@parameterized.named_parameters(
("IsTrainingBoolVal", True, False, False, True),
("IsTestingBoolVal", False, True, False, True),
("IsTestingBoolValMovingAverage", False, False, False, True),
("IsTrainingScaleBoolVal", True, False, True, True),
("IsTestingScaleBoolVal", False, True, True, True),
("IsTestingScaleBoolValMovingAverage", False, False, True, True),
("IsTrainingTensorVal", True, False, False, False),
("IsTestingTensorVal", False, True, False, False),
("IsTestingTensorValMovingAverage", False, False, False, False),
("IsTrainingScaleTensorVal", True, False, True, False),
("IsTestingScaleTensorVal", False, True, True, False),
("IsTestingScaleTensorValMovingAverage", False, False, True, False))
def testFusedBatchNormV2(self, is_training, test_local_stats, scale,
is_training_python_bool):
input_shape = (32, 9, 9, 8)
iterations = 5
x = tf.placeholder(tf.float32, shape=input_shape)
bn1 = snt.BatchNormV2(scale=scale)
bn2 = snt.BatchNormV2(fused=False, scale=scale)
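# The fused and non-fused implementations should agree on both the
# normalized outputs and the moving statistics they accumulate.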
xx = np.random.random(input_shape)
feed_dict = {x: xx}
if not is_training_python_bool:
is_training_node = tf.placeholder(tf.bool, shape=())
feed_dict.update({is_training_node: is_training})
is_training = is_training_node
test_local_stats_node = tf.placeholder(tf.bool, shape=())
feed_dict.update({test_local_stats_node: test_local_stats})
test_local_stats = test_local_stats_node
o1 = bn1(x, is_training=is_training, test_local_stats=test_local_stats)
o2 = bn2(x, is_training=is_training, test_local_stats=test_local_stats)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
params = [
o1, o2, bn1._moving_mean, bn1._moving_variance, bn2._moving_mean,
bn2._moving_variance
]
for _ in range(iterations):
y1, y2, mean1, var1, mean2, var2 = sess.run(params, feed_dict=feed_dict)
self.assertAllClose(y1, y2, atol=1e-4)
self.assertAllClose(mean1, mean2, atol=1e-4)
self.assertAllClose(var1, var2, atol=1e-4)
@parameterized.named_parameters(
("IsTraining", True, False),
("IsTesting", False, True),
("IsTestingMovingAverage", False, False))
def testFusedBatchNormFloat16(self, is_training, test_local_stats):
input_shape = (31, 7, 7, 5)
iterations = 3
x = tf.placeholder(tf.float16, shape=input_shape)
bn1 = snt.BatchNormV2(fused=False)
bn2 = snt.BatchNormV2()
feed_dict = {x: np.random.random(input_shape)}
o1 = bn1(x, is_training=is_training, test_local_stats=test_local_stats)
o2 = bn2(x, is_training=is_training, test_local_stats=test_local_stats)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
params = [
o1, o2, bn1._moving_mean, bn1._moving_variance, bn2._moving_mean,
bn2._moving_variance
]
for _ in range(iterations):
y1, y2, mean1, var1, mean2, var2 = sess.run(params, feed_dict=feed_dict)
self.assertAllClose(y1, y2, atol=1e-2)
self.assertAllClose(mean1, mean2, atol=1e-2)
self.assertAllClose(var1, var2, atol=1e-2)
def testCheckpointCompatibility(self):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
input_shape_1 = (31, 7, 7, 5)
input_shape_2 = (31, 5, 7, 7)
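# bn1 (NHWC) and bn2 (NCHW) both see 5 channels, so restoring bn1's
# checkpoint into bn2 works; bn3 (NCHW) sees 7 channels, so it must fail.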
x1 = tf.placeholder(tf.float32, shape=input_shape_1)
bn1 = snt.BatchNormV2(data_format="NHWC")
bn1(x1, is_training=True)
saver1 = snt.get_saver(bn1)
x2 = tf.placeholder(tf.float32, shape=input_shape_2)
bn2 = snt.BatchNormV2(data_format="NCHW")
bn2(x2, is_training=False)
saver2 = snt.get_saver(bn2)
x3 = tf.placeholder(tf.float32, shape=input_shape_1)
bn3 = snt.BatchNormV2(data_format="NCHW")
bn3(x3, is_training=False)
saver3 = snt.get_saver(bn3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
saver1.save(sess, save_path)
saver2.restore(sess, save_path)
with self.assertRaises(tf.errors.InvalidArgumentError):
saver3.restore(sess, save_path)
@parameterized.parameters(*_add_fused_and_unknown_batch_params([
{"shape": [2, 10], "data_format": "NC"},
{"shape": [2, None, 3], "data_format": "NWC"},
{"shape": [2, 64, None], "data_format": "NCW"},
{"shape": [8, None, None, 3], "data_format": "NHWC"},
{"shape": [8, 10, None, None], "data_format": "NCHW"},
{"shape": [4, None, None, None, 10], "data_format": "NDHWC"},
{"shape": [4, 42, None, None, None], "data_format": "NCDHW"},
# We also check that tensors which are fully defined work correctly, as
# the new codepath for unknown spatial size has a likelihood of causing
# bugs where the output shape is unknown, but it previously was known.
{"shape": [2, 640, 3], "data_format": "NWC"},
{"shape": [2, 64, 480], "data_format": "NCW"},
{"shape": [2, 32, 32, 3], "data_format": "NHWC"},
{"shape": [2, 3, 72, 96], "data_format": "NCHW"},
{"shape": [4, 84, 83, 82, 10], "data_format": "NDHWC"},
{"shape": [4, 42, 10, 48, 64], "data_format": "NCDHW"}]))
def testDynamicImageShape(self, shape, data_format, fused, batch_unknown):
"""Check that tensors with unknown spatial dimensions work."""
if batch_unknown:
shape[0] = None
input_ph = tf.placeholder(tf.float32, shape=shape)
bn = snt.BatchNormV2(data_format=data_format, fused=fused)
output_train = bn(input_ph, is_training=True)
output_test = bn(input_ph, is_training=False)
self.assertEqual(output_train.get_shape().as_list(),
output_test.get_shape().as_list())
# Check that no information about the shape has been erased from the input.
self.assertEqual(output_train.get_shape().as_list(),
input_ph.get_shape().as_list())
if __name__ == "__main__":
tf.test.main()
| 36.774238 | 80 | 0.669429 |
4f5fc66a2385d52dc1d9aa50e6c9cc96c77c3425 | 1,699 | py | Python | sdk/keyvault/azure-keyvault/azure/keyvault/v2016_10_01/models/key_import_parameters_py3.py | OlhaTkachenko/azure-sdk-for-python | 0e43638457cc0c44d4a5e4a287fb4e53674b3231 | ["MIT"] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/keyvault/azure-keyvault/azure/keyvault/v2016_10_01/models/key_import_parameters_py3.py | OlhaTkachenko/azure-sdk-for-python | 0e43638457cc0c44d4a5e4a287fb4e53674b3231 | ["MIT"] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/keyvault/azure-keyvault/azure/keyvault/v2016_10_01/models/key_import_parameters_py3.py | OlhaTkachenko/azure-sdk-for-python | 0e43638457cc0c44d4a5e4a287fb4e53674b3231 | ["MIT"] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyImportParameters(Model):
"""The key import parameters.
All required parameters must be populated in order to send to Azure.
:param hsm: Whether to import as a hardware key (HSM) or software key.
:type hsm: bool
:param key: Required. The Json web key
:type key: ~azure.keyvault.v2016_10_01.models.JsonWebKey
:param key_attributes: The key management attributes.
:type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_validation = {
'key': {'required': True},
}
_attribute_map = {
'hsm': {'key': 'Hsm', 'type': 'bool'},
'key': {'key': 'key', 'type': 'JsonWebKey'},
'key_attributes': {'key': 'attributes', 'type': 'KeyAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, key, hsm: bool=None, key_attributes=None, tags=None, **kwargs) -> None:
super(KeyImportParameters, self).__init__(**kwargs)
self.hsm = hsm
self.key = key
self.key_attributes = key_attributes
self.tags = tags
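# A minimal usage sketch (illustrative only; the `JsonWebKey` fields shown
# are placeholders, not a complete key):
#
#   key = JsonWebKey(kty="RSA", n=..., e=...)
#   params = KeyImportParameters(key=key, hsm=False, tags={"env": "test"})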
| 36.148936 | 97 | 0.602707 |