hexsha (string, len 40) | size (int64, 4-1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4-209) | max_stars_repo_name (string, len 5-121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (sequence, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4-209) | max_issues_repo_name (string, len 5-121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (sequence, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4-209) | max_forks_repo_name (string, len 5-121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (sequence, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17134739b8596a864d6e6e37035825d72ffbe045 | 3,119 | py | Python | securetea/lib/antivirus/scanner/yara_scanner.py | neerajv18/SecureTea-Project | e999cbe7c8e497c69b76b4c886de0d063169ea03 | ["MIT"] | 257 | 2018-03-28T12:43:20.000Z | 2022-03-29T07:07:23.000Z | securetea/lib/antivirus/scanner/yara_scanner.py | neerajv18/SecureTea-Project | e999cbe7c8e497c69b76b4c886de0d063169ea03 | ["MIT"] | 155 | 2018-03-31T14:57:46.000Z | 2022-03-17T18:12:41.000Z | securetea/lib/antivirus/scanner/yara_scanner.py | neerajv18/SecureTea-Project | e999cbe7c8e497c69b76b4c886de0d063169ea03 | ["MIT"] | 132 | 2018-03-27T06:25:20.000Z | 2022-03-28T11:32:45.000Z |
# -*- coding: utf-8 -*-
u"""Yara Scanner module for SecureTea AntiVirus.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 4 2019
Version: 1.4
Module: SecureTea
"""
from securetea.lib.antivirus.scanner.scanner_parent import Scanner
import sys
import os
yara_status = True
try:
import yara
except ImportError:
yara_status = False
print("[-] Yara not installed")
except AttributeError:
yara_status = False
print("[-] Yara not configured: libyara.so not found")
except Exception as e:
yara_status = False
print(e)
class YaraScanner(Scanner):
"""YaraScanner class."""
def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):
"""
Initialize YaraEngine.
Args:
debug (bool): Log on terminal or not
config_path (str): Configuration JSON file path
vt_api_key (str): VirusTotal API Key
file_list (list): List of files to scan
Raises:
None
Returns:
None
"""
# Initialize parent class
super().__init__(debug, config_path, file_list, vt_api_key)
if self.os_name:
try:
# Load threads
self._WORKERS = self.config_dict[self.os_name]["scanner"]["yara"]["threads"]
# Load Yara rules storage path
self._YARA_STORAGE = self.config_dict[self.os_name]["update"]["yara"]["storage"]
except KeyError:
self.logger.log(
"Could not load configuration for: {}".format(self.os_name),
logtype="error"
)
sys.exit(0)
else:
self.logger.log(
"Could not determine the OS",
logtype="error"
)
sys.exit(0)
def scan_file(self, file_path):
"""
Scan file using Yara rules.
Args:
file_path (str): Path of the file to scan
Raises:
None
Returns:
None
"""
if yara_status:
yara_files_list = os.listdir(self._YARA_STORAGE)
for yara_file in yara_files_list:
if yara_file.endswith(".yar") or yara_file.endswith(".yara"):
yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)
rule_compile = yara.compile(yara_file_path)
matches = rule_compile.match(file_path)
if matches:
self.logger.log(
"Possible malicious file detected: {0}".format(file_path),
logtype="warning"
)
if file_path not in self.malicious_file_list:
self.malicious_file_list.append(file_path)
super().check_virus_total(file_path)
return
return
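# Illustrative sketch, not part of the original SecureTea module: the minimal
# yara-python flow that YaraScanner.scan_file() above builds on. The rule and
# target paths are hypothetical placeholders.
def _example_yara_match(rule_path="/tmp/example.yar", target_path="/tmp/suspect.bin"):
    if not yara_status:
        return []
    rules = yara.compile(rule_path)       # compile a single .yar/.yara rule file
    matches = rules.match(target_path)    # returns a list of yara.Match objects
    return [match.rule for match in matches]  # names of the rules that matched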
| 30.281553 | 96 | 0.520359 |
20144ab955135ad084cf0fef939398cc14bdd008 | 4,305 | py | Python | server/app/services/products/views/products.py | goodfree/ActorCloud | e8db470830ea6f6f208ad43c2e56a2e8976bc468 | ["Apache-2.0"] | 173 | 2019-06-10T07:14:49.000Z | 2022-03-31T08:42:36.000Z | server/app/services/products/views/products.py | zlyz12345/ActorCloud | 9c34b371c23464981323ef9865d9913bde1fe09c | ["Apache-2.0"] | 27 | 2019-06-12T08:25:29.000Z | 2022-02-26T11:37:15.000Z | server/app/services/products/views/products.py | zlyz12345/ActorCloud | 9c34b371c23464981323ef9865d9913bde1fe09c | ["Apache-2.0"] | 67 | 2019-06-10T08:40:05.000Z | 2022-03-09T03:43:56.000Z |
from flask import jsonify
from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from actor_libs.database.orm import db
from actor_libs.errors import ReferencedError
from actor_libs.utils import get_delete_ids
from app import auth
from app.models import DataPoint, DataStream, Device, Product, User
from app.schemas import ProductSchema, UpdateProductSchema
from . import bp
@bp.route('/products')
@auth.login_required
def list_products():
code_list = ['cloudProtocol', 'productType']
records = Product.query.pagination(code_list=code_list)
# Count the number of devices, applications,
# data points, and data streams of the product
records_item = records['items']
records['items'] = records_item_count(records_item)
return jsonify(records)
@bp.route('/products/<int:product_id>')
@auth.login_required
def view_product(product_id):
code_list = ['cloudProtocol', 'productType']
record = Product.query \
.outerjoin(Device, Device.productID == Product.productID) \
.join(User, User.id == Product.userIntID) \
.with_entities(Product, User.username.label('createUser'),
func.count(Device.id).label('deviceCount')) \
.filter(Product.id == product_id) \
.group_by(Product.id, User.username).to_dict(code_list=code_list)
return jsonify(record)
@bp.route('/products', methods=['POST'])
@auth.login_required
def create_product():
request_dict = ProductSchema.validate_request()
product = Product()
created_product = product.create(request_dict)
record = created_product.to_dict()
return jsonify(record), 201
@bp.route('/products/<int:product_id>', methods=['PUT'])
@auth.login_required
def update_product(product_id):
product = Product.query.filter(Product.id == product_id).first_or_404()
request_dict = UpdateProductSchema.validate_request(obj=product)
updated_product = product.update(request_dict)
record = updated_product.to_dict()
return jsonify(record)
@bp.route('/products', methods=['DELETE'])
@auth.login_required
def delete_product():
delete_ids = get_delete_ids()
query_results = Product.query \
.filter(Product.id.in_(delete_ids)) \
.many(allow_none=False, expect_result=len(delete_ids))
# check device is included in the delete product
device_count = db.session.query(func.count(Device.id)) \
.join(Product, Device.productID == Product.productID) \
.filter(Product.id.in_(delete_ids)) \
.scalar()
if device_count:
raise ReferencedError(field='device')
try:
for product in query_results:
db.session.delete(product)
db.session.commit()
except IntegrityError:
raise ReferencedError()
return '', 204
def records_item_count(records_item):
product_dict = {
item['productID']: item['cloudProtocol']
for item in records_item
}
product_uids = product_dict.keys()
# Device count
query = db.session \
.query(Product.productID, func.count(Device.id)) \
.outerjoin(Device, Device.productID == Product.productID) \
.group_by(Product.productID) \
.filter(Product.productID.in_(product_uids)).all()
product_device_dict = dict(query)
# data_point,data_stream or product_item(lwm2m) count
query = db.session \
.query(Product.productID, func.count(DataPoint.id)) \
.outerjoin(DataPoint, DataPoint.productID == Product.productID) \
.group_by(Product.productID) \
.filter(Product.productID.in_(product_uids)) \
.all()
product_point_dict = dict(query)
query = db.session \
.query(Product.productID, func.count(DataStream.id)) \
.outerjoin(DataStream, DataStream.productID == Product.productID) \
.group_by(Product.productID) \
.filter(Product.productID.in_(product_uids)) \
.all()
product_stream_dict = dict(query)
for record in records_item:
record_product_uid = record['productID']
record['deviceCount'] = product_device_dict.get(record_product_uid, 0)
record['dataPointCount'] = product_point_dict.get(record_product_uid, 0)
record['dataStreamCount'] = product_stream_dict.get(record_product_uid, 0)
return records_item
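# Illustrative sketch, not part of the original ActorCloud module: how the
# per-product count dictionaries built above fold back into the paginated items;
# the product IDs and counts are made-up placeholders.
def _example_fold_counts():
    product_device_dict = {'product-a': 3, 'product-b': 1}
    records_item = [{'productID': 'product-a'}, {'productID': 'product-c'}]
    for record in records_item:
        # products without devices fall back to a count of 0
        record['deviceCount'] = product_device_dict.get(record['productID'], 0)
    return records_item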
| 36.483051 | 82 | 0.698955 |
a1f17c6dbe84da7d775bb580e842005296053491 | 1,640 | py | Python | hooks/post_gen_project.py | BrianPugh/cookiecutter-pypackage | ec8b51cb59d2436d77ca1d802991103dd37c9a95 | ["BSD-3-Clause"] | null | null | null | hooks/post_gen_project.py | BrianPugh/cookiecutter-pypackage | ec8b51cb59d2436d77ca1d802991103dd37c9a95 | ["BSD-3-Clause"] | null | null | null | hooks/post_gen_project.py | BrianPugh/cookiecutter-pypackage | ec8b51cb59d2436d77ca1d802991103dd37c9a95 | ["BSD-3-Clause"] | null | null | null |
import os
import os.path as osp
import subprocess
COOKIECUTTER_REPO_NAME = 'cookiecutter-pypackage'
par_dir_path = osp.normpath(osp.join(osp.abspath(osp.curdir), osp.pardir))
if osp.basename(par_dir_path) == COOKIECUTTER_REPO_NAME:
# This was most likely called `cookiecutter .`
cookiecutter_repo_path = par_dir_path
else:
    # This was most likely called as `cookiecutter git@bitbucket.org:geomagical/labtech-wrapper.git`
# This is the canonical location for the cached cookiecutter template
cookiecutter_repo_path = osp.join(os.environ['HOME'], '.cookiecutters', COOKIECUTTER_REPO_NAME)
# Obtain Cookiecutter repo path
cookiecutter_hash = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cookiecutter_repo_path)
cookiecutter_hash = cookiecutter_hash.strip().decode('utf-8')
cookiecutter_uri = subprocess.check_output(["git", "config", "--get", "remote.origin.url"], cwd=cookiecutter_repo_path)
cookiecutter_uri = cookiecutter_uri.strip().decode('utf-8')
#######################
# Setting up git repo #
#######################
shell_cmds = [
"""git init""",
"""git remote add origin git@github.com:{{cookiecutter.github_username}}/{{project_slug}}.git""",
"""git add *""",
"""git add .gitignore""",
f'''git commit -m "Initial commit from cookiecutter {cookiecutter_uri} commit {cookiecutter_hash}"''',
]
for cmd in shell_cmds:
subprocess.call(cmd, shell=True)
print("=======================================================================")
print("Project setup complete. If you are happy with the setup, run:")
print(" git push origin master")
| 39.047619 | 119 | 0.669512 |
272f9a294cfa1016c03db2b08daaf22b1cdf0748 | 10,110 | py | Python | sdk/python/pulumi_azure_native/devtestlab/policy.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/devtestlab/policy.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/devtestlab/policy.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = ['Policy']
class Policy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
evaluator_type: Optional[pulumi.Input[Union[str, 'PolicyEvaluatorType']]] = None,
fact_data: Optional[pulumi.Input[str]] = None,
fact_name: Optional[pulumi.Input[Union[str, 'PolicyFactName']]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_set_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PolicyStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
threshold: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A Policy.
API Version: 2018-09-15.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the policy.
:param pulumi.Input[Union[str, 'PolicyEvaluatorType']] evaluator_type: The evaluator type of the policy (i.e. AllowedValuesPolicy, MaxValuePolicy).
:param pulumi.Input[str] fact_data: The fact data of the policy.
:param pulumi.Input[Union[str, 'PolicyFactName']] fact_name: The fact name of the policy (e.g. LabVmCount, LabVmSize, MaxVmsAllowedPerLab, etc.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the policy.
:param pulumi.Input[str] policy_set_name: The name of the policy set.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'PolicyStatus']] status: The status of the policy.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] threshold: The threshold of the policy (i.e. a number for MaxValuePolicy, and a JSON array of values for AllowedValuesPolicy).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['evaluator_type'] = evaluator_type
__props__['fact_data'] = fact_data
__props__['fact_name'] = fact_name
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
if policy_set_name is None and not opts.urn:
raise TypeError("Missing required property 'policy_set_name'")
__props__['policy_set_name'] = policy_set_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['threshold'] = threshold
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab:Policy"), pulumi.Alias(type_="azure-native:devtestlab/latest:Policy"), pulumi.Alias(type_="azure-nextgen:devtestlab/latest:Policy"), pulumi.Alias(type_="azure-native:devtestlab/v20150521preview:Policy"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Policy"), pulumi.Alias(type_="azure-native:devtestlab/v20160515:Policy"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Policy"), pulumi.Alias(type_="azure-native:devtestlab/v20180915:Policy"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Policy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Policy, __self__).__init__(
'azure-native:devtestlab:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Policy':
"""
Get an existing Policy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["created_date"] = None
__props__["description"] = None
__props__["evaluator_type"] = None
__props__["fact_data"] = None
__props__["fact_name"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["status"] = None
__props__["tags"] = None
__props__["threshold"] = None
__props__["type"] = None
__props__["unique_identifier"] = None
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
"""
The creation date of the policy.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the policy.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="evaluatorType")
def evaluator_type(self) -> pulumi.Output[Optional[str]]:
"""
The evaluator type of the policy (i.e. AllowedValuesPolicy, MaxValuePolicy).
"""
return pulumi.get(self, "evaluator_type")
@property
@pulumi.getter(name="factData")
def fact_data(self) -> pulumi.Output[Optional[str]]:
"""
The fact data of the policy.
"""
return pulumi.get(self, "fact_data")
@property
@pulumi.getter(name="factName")
def fact_name(self) -> pulumi.Output[Optional[str]]:
"""
The fact name of the policy (e.g. LabVmCount, LabVmSize, MaxVmsAllowedPerLab, etc.
"""
return pulumi.get(self, "fact_name")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
The status of the policy.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def threshold(self) -> pulumi.Output[Optional[str]]:
"""
The threshold of the policy (i.e. a number for MaxValuePolicy, and a JSON array of values for AllowedValuesPolicy).
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
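# Illustrative sketch, not part of the generated SDK module: declaring the Policy
# resource defined above inside a Pulumi program. The resource name, lab, resource
# group and threshold values are hypothetical placeholders.
def _example_policy():
    return Policy(
        "maxVmCountPolicy",
        lab_name="my-lab",                        # required
        policy_set_name="default",                # required
        resource_group_name="my-resource-group",  # required
        fact_name="LabVmCount",
        evaluator_type="MaxValuePolicy",
        threshold="10",
        status="Enabled")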
| 41.950207 | 632 | 0.631256 |
419ed8ebc49f720d92ff045b931926cee71ebb7f | 22,924 | py | Python | tests/compute/test_sampling.py | LunaBlack/dgl | bd1e48a51e348b0e8e25622325adeb5ddea1c0ea | ["Apache-2.0"] | 2 | 2021-12-09T12:36:13.000Z | 2022-03-01T21:22:36.000Z | tests/compute/test_sampling.py | LunaBlack/dgl | bd1e48a51e348b0e8e25622325adeb5ddea1c0ea | ["Apache-2.0"] | null | null | null | tests/compute/test_sampling.py | LunaBlack/dgl | bd1e48a51e348b0e8e25622325adeb5ddea1c0ea | ["Apache-2.0"] | 2 | 2020-12-07T09:34:01.000Z | 2020-12-13T06:18:58.000Z |
import dgl
import backend as F
import numpy as np
import unittest
def check_random_walk(g, metapath, traces, ntypes, prob=None):
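    """Verify that every hop in ``traces`` follows an existing edge of ``metapath``.

    Each row of ``traces`` is one walk (node IDs) and ``ntypes[j]`` must match the
    source/destination node type of the j-th metapath edge; when ``prob`` names an
    edge-weight feature, hops across zero-weight edges are rejected as well.
    """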
traces = F.asnumpy(traces)
ntypes = F.asnumpy(ntypes)
for j in range(traces.shape[1] - 1):
assert ntypes[j] == g.get_ntype_id(g.to_canonical_etype(metapath[j])[0])
assert ntypes[j + 1] == g.get_ntype_id(g.to_canonical_etype(metapath[j])[2])
for i in range(traces.shape[0]):
for j in range(traces.shape[1] - 1):
assert g.has_edge_between(
traces[i, j], traces[i, j+1], etype=metapath[j])
if prob is not None and prob in g.edges[metapath[j]].data:
p = F.asnumpy(g.edges[metapath[j]].data['p'])
eids = g.edge_id(traces[i, j], traces[i, j+1], etype=metapath[j])
assert p[eids] != 0
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU random walk not implemented")
def test_random_walk():
g1 = dgl.heterograph({
('user', 'follow', 'user'): [(0, 1), (1, 2), (2, 0)]
})
g2 = dgl.heterograph({
('user', 'follow', 'user'): [(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)]
})
g3 = dgl.heterograph({
('user', 'follow', 'user'): [(0, 1), (1, 2), (2, 0)],
('user', 'view', 'item'): [(0, 0), (1, 1), (2, 2)],
('item', 'viewed-by', 'user'): [(0, 0), (1, 1), (2, 2)]})
g4 = dgl.heterograph({
('user', 'follow', 'user'): [(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)],
('user', 'view', 'item'): [(0, 0), (0, 1), (1, 1), (2, 2), (3, 2), (3, 1)],
('item', 'viewed-by', 'user'): [(0, 0), (1, 0), (1, 1), (2, 2), (2, 3), (1, 3)]})
g2.edata['p'] = F.tensor([3, 0, 3, 3, 3], dtype=F.float32)
g2.edata['p2'] = F.tensor([[3], [0], [3], [3], [3]], dtype=F.float32)
g4.edges['follow'].data['p'] = F.tensor([3, 0, 3, 3, 3], dtype=F.float32)
g4.edges['viewed-by'].data['p'] = F.tensor([1, 1, 1, 1, 1, 1], dtype=F.float32)
traces, ntypes = dgl.sampling.random_walk(g1, [0, 1, 2, 0, 1, 2], length=4)
check_random_walk(g1, ['follow'] * 4, traces, ntypes)
traces, ntypes = dgl.sampling.random_walk(g1, [0, 1, 2, 0, 1, 2], length=4, restart_prob=0.)
check_random_walk(g1, ['follow'] * 4, traces, ntypes)
traces, ntypes = dgl.sampling.random_walk(
g1, [0, 1, 2, 0, 1, 2], length=4, restart_prob=F.zeros((4,), F.float32, F.cpu()))
check_random_walk(g1, ['follow'] * 4, traces, ntypes)
traces, ntypes = dgl.sampling.random_walk(
g1, [0, 1, 2, 0, 1, 2], length=5,
restart_prob=F.tensor([0, 0, 0, 0, 1], dtype=F.float32))
check_random_walk(
g1, ['follow'] * 4, F.slice_axis(traces, 1, 0, 5), F.slice_axis(ntypes, 0, 0, 5))
assert (F.asnumpy(traces)[:, 5] == -1).all()
traces, ntypes = dgl.sampling.random_walk(
g2, [0, 1, 2, 3, 0, 1, 2, 3], length=4)
check_random_walk(g2, ['follow'] * 4, traces, ntypes)
traces, ntypes = dgl.sampling.random_walk(
g2, [0, 1, 2, 3, 0, 1, 2, 3], length=4, prob='p')
check_random_walk(g2, ['follow'] * 4, traces, ntypes, 'p')
try:
traces, ntypes = dgl.sampling.random_walk(
g2, [0, 1, 2, 3, 0, 1, 2, 3], length=4, prob='p2')
fail = False
except dgl.DGLError:
fail = True
assert fail
metapath = ['follow', 'view', 'viewed-by'] * 2
traces, ntypes = dgl.sampling.random_walk(
g3, [0, 1, 2, 0, 1, 2], metapath=metapath)
check_random_walk(g3, metapath, traces, ntypes)
metapath = ['follow', 'view', 'viewed-by'] * 2
traces, ntypes = dgl.sampling.random_walk(
g4, [0, 1, 2, 3, 0, 1, 2, 3], metapath=metapath)
check_random_walk(g4, metapath, traces, ntypes)
metapath = ['follow', 'view', 'viewed-by'] * 2
traces, ntypes = dgl.sampling.random_walk(
g4, [0, 1, 2, 3, 0, 1, 2, 3], metapath=metapath, prob='p')
check_random_walk(g4, metapath, traces, ntypes, 'p')
traces, ntypes = dgl.sampling.random_walk(
g4, [0, 1, 2, 3, 0, 1, 2, 3], metapath=metapath, prob='p', restart_prob=0.)
check_random_walk(g4, metapath, traces, ntypes, 'p')
traces, ntypes = dgl.sampling.random_walk(
g4, [0, 1, 2, 3, 0, 1, 2, 3], metapath=metapath, prob='p',
restart_prob=F.zeros((6,), F.float32, F.cpu()))
check_random_walk(g4, metapath, traces, ntypes, 'p')
traces, ntypes = dgl.sampling.random_walk(
g4, [0, 1, 2, 3, 0, 1, 2, 3], metapath=metapath + ['follow'], prob='p',
restart_prob=F.tensor([0, 0, 0, 0, 0, 0, 1], F.float32))
check_random_walk(g4, metapath, traces[:, :7], ntypes[:7], 'p')
assert (F.asnumpy(traces[:, 7]) == -1).all()
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU pack traces not implemented")
def test_pack_traces():
traces, types = (np.array(
[[ 0, 1, -1, -1, -1, -1, -1],
[ 0, 1, 1, 3, 0, 0, 0]], dtype='int64'),
np.array([0, 0, 1, 0, 0, 1, 0], dtype='int64'))
traces = F.zerocopy_from_numpy(traces)
types = F.zerocopy_from_numpy(types)
result = dgl.sampling.pack_traces(traces, types)
assert F.array_equal(result[0], F.tensor([0, 1, 0, 1, 1, 3, 0, 0, 0], dtype=F.int64))
assert F.array_equal(result[1], F.tensor([0, 0, 0, 0, 1, 0, 0, 1, 0], dtype=F.int64))
assert F.array_equal(result[2], F.tensor([2, 7], dtype=F.int64))
assert F.array_equal(result[3], F.tensor([0, 2], dtype=F.int64))
def test_pinsage_sampling():
def _test_sampler(g, sampler, ntype):
neighbor_g = sampler(F.tensor([0, 2], dtype=F.int64))
assert neighbor_g.ntypes == [ntype]
u, v = neighbor_g.all_edges(form='uv', order='eid')
uv = list(zip(F.asnumpy(u).tolist(), F.asnumpy(v).tolist()))
assert (1, 0) in uv or (0, 0) in uv
assert (2, 2) in uv or (3, 2) in uv
g = dgl.heterograph({
('item', 'bought-by', 'user'): [(0, 0), (0, 1), (1, 0), (1, 1), (2, 2), (2, 3), (3, 2), (3, 3)],
('user', 'bought', 'item'): [(0, 0), (1, 0), (0, 1), (1, 1), (2, 2), (3, 2), (2, 3), (3, 3)]})
sampler = dgl.sampling.PinSAGESampler(g, 'item', 'user', 4, 0.5, 3, 2)
_test_sampler(g, sampler, 'item')
sampler = dgl.sampling.RandomWalkNeighborSampler(g, 4, 0.5, 3, 2, ['bought-by', 'bought'])
_test_sampler(g, sampler, 'item')
sampler = dgl.sampling.RandomWalkNeighborSampler(g, 4, 0.5, 3, 2,
[('item', 'bought-by', 'user'), ('user', 'bought', 'item')])
_test_sampler(g, sampler, 'item')
g = dgl.graph([(0, 0), (0, 1), (1, 0), (1, 1), (2, 2), (2, 3), (3, 2), (3, 3)])
sampler = dgl.sampling.RandomWalkNeighborSampler(g, 4, 0.5, 3, 2)
_test_sampler(g, sampler, g.ntypes[0])
g = dgl.heterograph({
('A', 'AB', 'B'): [(0, 1), (2, 3)],
('B', 'BC', 'C'): [(1, 2), (3, 1)],
('C', 'CA', 'A'): [(2, 0), (1, 2)]})
sampler = dgl.sampling.RandomWalkNeighborSampler(g, 4, 0.5, 3, 2, ['AB', 'BC', 'CA'])
_test_sampler(g, sampler, 'A')
def _gen_neighbor_sampling_test_graph(hypersparse, reverse):
if hypersparse:
# should crash if allocated a CSR
card = 1 << 50
card2 = (1 << 50, 1 << 50)
else:
card = None
card2 = None
if reverse:
g = dgl.graph([(0,1),(0,2),(0,3),(1,0),(1,2),(1,3),(2,0)],
'user', 'follow', num_nodes=card)
g.edata['prob'] = F.tensor([.5, .5, 0., .5, .5, 0., 1.], dtype=F.float32)
g1 = dgl.bipartite([(0,0),(1,0),(2,1),(2,3)], 'game', 'play', 'user', num_nodes=card2)
g1.edata['prob'] = F.tensor([.8, .5, .5, .5], dtype=F.float32)
g2 = dgl.bipartite([(0,2),(1,2),(2,2),(0,1),(3,1),(0,0)], 'user', 'liked-by', 'game', num_nodes=card2)
g2.edata['prob'] = F.tensor([.3, .5, .2, .5, .1, .1], dtype=F.float32)
g3 = dgl.bipartite([(0,0),(0,1),(0,2),(0,3)], 'coin', 'flips', 'user', num_nodes=card2)
hg = dgl.hetero_from_relations([g, g1, g2, g3])
else:
g = dgl.graph([(1,0),(2,0),(3,0),(0,1),(2,1),(3,1),(0,2)],
'user', 'follow', num_nodes=card)
g.edata['prob'] = F.tensor([.5, .5, 0., .5, .5, 0., 1.], dtype=F.float32)
g1 = dgl.bipartite([(0,0),(0,1),(1,2),(3,2)], 'user', 'play', 'game', num_nodes=card2)
g1.edata['prob'] = F.tensor([.8, .5, .5, .5], dtype=F.float32)
g2 = dgl.bipartite([(2,0),(2,1),(2,2),(1,0),(1,3),(0,0)], 'game', 'liked-by', 'user', num_nodes=card2)
g2.edata['prob'] = F.tensor([.3, .5, .2, .5, .1, .1], dtype=F.float32)
g3 = dgl.bipartite([(0,0),(1,0),(2,0),(3,0)], 'user', 'flips', 'coin', num_nodes=card2)
hg = dgl.hetero_from_relations([g, g1, g2, g3])
return g, hg
def _gen_neighbor_topk_test_graph(hypersparse, reverse):
if hypersparse:
# should crash if allocated a CSR
card = 1 << 50
card2 = (1 << 50, 1 << 50)
else:
card = None
card2 = None
if reverse:
g = dgl.graph([(0,1),(0,2),(0,3),(1,0),(1,2),(1,3),(2,0)],
'user', 'follow')
g.edata['weight'] = F.tensor([.5, .3, 0., -5., 22., 0., 1.], dtype=F.float32)
g1 = dgl.bipartite([(0,0),(1,0),(2,1),(2,3)], 'game', 'play', 'user')
g1.edata['weight'] = F.tensor([.8, .5, .4, .5], dtype=F.float32)
g2 = dgl.bipartite([(0,2),(1,2),(2,2),(0,1),(3,1),(0,0)], 'user', 'liked-by', 'game')
g2.edata['weight'] = F.tensor([.3, .5, .2, .5, .1, .1], dtype=F.float32)
g3 = dgl.bipartite([(0,0),(0,1),(0,2),(0,3)], 'coin', 'flips', 'user')
g3.edata['weight'] = F.tensor([10, 2, 13, -1], dtype=F.float32)
hg = dgl.hetero_from_relations([g, g1, g2, g3])
else:
g = dgl.graph([(1,0),(2,0),(3,0),(0,1),(2,1),(3,1),(0,2)],
'user', 'follow')
g.edata['weight'] = F.tensor([.5, .3, 0., -5., 22., 0., 1.], dtype=F.float32)
g1 = dgl.bipartite([(0,0),(0,1),(1,2),(3,2)], 'user', 'play', 'game')
g1.edata['weight'] = F.tensor([.8, .5, .4, .5], dtype=F.float32)
g2 = dgl.bipartite([(2,0),(2,1),(2,2),(1,0),(1,3),(0,0)], 'game', 'liked-by', 'user')
g2.edata['weight'] = F.tensor([.3, .5, .2, .5, .1, .1], dtype=F.float32)
g3 = dgl.bipartite([(0,0),(1,0),(2,0),(3,0)], 'user', 'flips', 'coin')
g3.edata['weight'] = F.tensor([10, 2, 13, -1], dtype=F.float32)
hg = dgl.hetero_from_relations([g, g1, g2, g3])
return g, hg
def _test_sample_neighbors(hypersparse):
g, hg = _gen_neighbor_sampling_test_graph(hypersparse, False)
def _test1(p, replace):
for i in range(10):
subg = dgl.sampling.sample_neighbors(g, [0, 1], 2, prob=p, replace=replace)
assert subg.number_of_nodes() == g.number_of_nodes()
assert subg.number_of_edges() == 4
u, v = subg.edges()
assert set(F.asnumpy(F.unique(v))) == {0, 1}
assert F.array_equal(g.has_edges_between(u, v), F.ones((4,), dtype=F.int64))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
if not replace:
# check no duplication
assert len(edge_set) == 4
if p is not None:
assert not (3, 0) in edge_set
assert not (3, 1) in edge_set
_test1(None, True) # w/ replacement, uniform
_test1(None, False) # w/o replacement, uniform
_test1('prob', True) # w/ replacement
_test1('prob', False) # w/o replacement
def _test2(p, replace): # fanout > #neighbors
for i in range(10):
subg = dgl.sampling.sample_neighbors(g, [0, 2], 2, prob=p, replace=replace)
assert subg.number_of_nodes() == g.number_of_nodes()
num_edges = 4 if replace else 3
assert subg.number_of_edges() == num_edges
u, v = subg.edges()
assert set(F.asnumpy(F.unique(v))) == {0, 2}
assert F.array_equal(g.has_edges_between(u, v), F.ones((num_edges,), dtype=F.int64))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
if not replace:
# check no duplication
assert len(edge_set) == num_edges
if p is not None:
assert not (3, 0) in edge_set
_test2(None, True) # w/ replacement, uniform
_test2(None, False) # w/o replacement, uniform
_test2('prob', True) # w/ replacement
_test2('prob', False) # w/o replacement
def _test3(p, replace):
for i in range(10):
subg = dgl.sampling.sample_neighbors(hg, {'user' : [0,1], 'game' : 0}, 2, prob=p, replace=replace)
assert len(subg.ntypes) == 3
assert len(subg.etypes) == 4
assert subg['follow'].number_of_edges() == 4
assert subg['play'].number_of_edges() == 2 if replace else 1
assert subg['liked-by'].number_of_edges() == 4 if replace else 3
assert subg['flips'].number_of_edges() == 0
_test3(None, True) # w/ replacement, uniform
_test3(None, False) # w/o replacement, uniform
_test3('prob', True) # w/ replacement
_test3('prob', False) # w/o replacement
# test different fanouts for different relations
for i in range(10):
subg = dgl.sampling.sample_neighbors(
hg,
{'user' : [0,1], 'game' : 0},
{'follow': 1, 'play': 2, 'liked-by': 0, 'flips': 2},
replace=True)
assert len(subg.ntypes) == 3
assert len(subg.etypes) == 4
assert subg['follow'].number_of_edges() == 2
assert subg['play'].number_of_edges() == 2
assert subg['liked-by'].number_of_edges() == 0
assert subg['flips'].number_of_edges() == 0
def _test_sample_neighbors_outedge(hypersparse):
g, hg = _gen_neighbor_sampling_test_graph(hypersparse, True)
def _test1(p, replace):
for i in range(10):
subg = dgl.sampling.sample_neighbors(g, [0, 1], 2, prob=p, replace=replace, edge_dir='out')
assert subg.number_of_nodes() == g.number_of_nodes()
assert subg.number_of_edges() == 4
u, v = subg.edges()
assert set(F.asnumpy(F.unique(u))) == {0, 1}
assert F.array_equal(g.has_edges_between(u, v), F.ones((4,), dtype=F.int64))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
if not replace:
# check no duplication
assert len(edge_set) == 4
if p is not None:
assert not (0, 3) in edge_set
assert not (1, 3) in edge_set
_test1(None, True) # w/ replacement, uniform
_test1(None, False) # w/o replacement, uniform
_test1('prob', True) # w/ replacement
_test1('prob', False) # w/o replacement
def _test2(p, replace): # fanout > #neighbors
for i in range(10):
subg = dgl.sampling.sample_neighbors(g, [0, 2], 2, prob=p, replace=replace, edge_dir='out')
assert subg.number_of_nodes() == g.number_of_nodes()
num_edges = 4 if replace else 3
assert subg.number_of_edges() == num_edges
u, v = subg.edges()
assert set(F.asnumpy(F.unique(u))) == {0, 2}
assert F.array_equal(g.has_edges_between(u, v), F.ones((num_edges,), dtype=F.int64))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
if not replace:
# check no duplication
assert len(edge_set) == num_edges
if p is not None:
assert not (0, 3) in edge_set
_test2(None, True) # w/ replacement, uniform
_test2(None, False) # w/o replacement, uniform
_test2('prob', True) # w/ replacement
_test2('prob', False) # w/o replacement
def _test3(p, replace):
for i in range(10):
subg = dgl.sampling.sample_neighbors(hg, {'user' : [0,1], 'game' : 0}, 2, prob=p, replace=replace, edge_dir='out')
assert len(subg.ntypes) == 3
assert len(subg.etypes) == 4
assert subg['follow'].number_of_edges() == 4
assert subg['play'].number_of_edges() == 2 if replace else 1
assert subg['liked-by'].number_of_edges() == 4 if replace else 3
assert subg['flips'].number_of_edges() == 0
_test3(None, True) # w/ replacement, uniform
_test3(None, False) # w/o replacement, uniform
_test3('prob', True) # w/ replacement
_test3('prob', False) # w/o replacement
def _test_sample_neighbors_topk(hypersparse):
g, hg = _gen_neighbor_topk_test_graph(hypersparse, False)
def _test1():
subg = dgl.sampling.select_topk(g, 2, 'weight', [0, 1])
assert subg.number_of_nodes() == g.number_of_nodes()
assert subg.number_of_edges() == 4
u, v = subg.edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
assert edge_set == {(2,0),(1,0),(2,1),(3,1)}
_test1()
def _test2(): # k > #neighbors
subg = dgl.sampling.select_topk(g, 2, 'weight', [0, 2])
assert subg.number_of_nodes() == g.number_of_nodes()
assert subg.number_of_edges() == 3
u, v = subg.edges()
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert edge_set == {(2,0),(1,0),(0,2)}
_test2()
def _test3():
subg = dgl.sampling.select_topk(hg, 2, 'weight', {'user' : [0,1], 'game' : 0})
assert len(subg.ntypes) == 3
assert len(subg.etypes) == 4
u, v = subg['follow'].edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(hg['follow'].edge_ids(u, v), subg['follow'].edata[dgl.EID])
assert edge_set == {(2,0),(1,0),(2,1),(3,1)}
u, v = subg['play'].edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(hg['play'].edge_ids(u, v), subg['play'].edata[dgl.EID])
assert edge_set == {(0,0)}
u, v = subg['liked-by'].edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(hg['liked-by'].edge_ids(u, v), subg['liked-by'].edata[dgl.EID])
assert edge_set == {(2,0),(2,1),(1,0)}
assert subg['flips'].number_of_edges() == 0
_test3()
# test different k for different relations
subg = dgl.sampling.select_topk(
hg, {'follow': 1, 'play': 2, 'liked-by': 0, 'flips': 2}, 'weight', {'user' : [0,1], 'game' : 0})
assert len(subg.ntypes) == 3
assert len(subg.etypes) == 4
assert subg['follow'].number_of_edges() == 2
assert subg['play'].number_of_edges() == 1
assert subg['liked-by'].number_of_edges() == 0
assert subg['flips'].number_of_edges() == 0
def _test_sample_neighbors_topk_outedge(hypersparse):
g, hg = _gen_neighbor_topk_test_graph(hypersparse, True)
def _test1():
subg = dgl.sampling.select_topk(g, 2, 'weight', [0, 1], edge_dir='out')
assert subg.number_of_nodes() == g.number_of_nodes()
assert subg.number_of_edges() == 4
u, v = subg.edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
assert edge_set == {(0,2),(0,1),(1,2),(1,3)}
_test1()
def _test2(): # k > #neighbors
subg = dgl.sampling.select_topk(g, 2, 'weight', [0, 2], edge_dir='out')
assert subg.number_of_nodes() == g.number_of_nodes()
assert subg.number_of_edges() == 3
u, v = subg.edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(g.edge_ids(u, v), subg.edata[dgl.EID])
assert edge_set == {(0,2),(0,1),(2,0)}
_test2()
def _test3():
subg = dgl.sampling.select_topk(hg, 2, 'weight', {'user' : [0,1], 'game' : 0}, edge_dir='out')
assert len(subg.ntypes) == 3
assert len(subg.etypes) == 4
u, v = subg['follow'].edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(hg['follow'].edge_ids(u, v), subg['follow'].edata[dgl.EID])
assert edge_set == {(0,2),(0,1),(1,2),(1,3)}
u, v = subg['play'].edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(hg['play'].edge_ids(u, v), subg['play'].edata[dgl.EID])
assert edge_set == {(0,0)}
u, v = subg['liked-by'].edges()
edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
assert F.array_equal(hg['liked-by'].edge_ids(u, v), subg['liked-by'].edata[dgl.EID])
assert edge_set == {(0,2),(1,2),(0,1)}
assert subg['flips'].number_of_edges() == 0
_test3()
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU sample neighbors not implemented")
def test_sample_neighbors():
_test_sample_neighbors(False)
_test_sample_neighbors(True)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU sample neighbors not implemented")
def test_sample_neighbors_outedge():
_test_sample_neighbors_outedge(False)
_test_sample_neighbors_outedge(True)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU sample neighbors not implemented")
def test_sample_neighbors_topk():
_test_sample_neighbors_topk(False)
_test_sample_neighbors_topk(True)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU sample neighbors not implemented")
def test_sample_neighbors_topk_outedge():
_test_sample_neighbors_topk_outedge(False)
_test_sample_neighbors_topk_outedge(True)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU sample neighbors not implemented")
def test_sample_neighbors_with_0deg():
g = dgl.graph([], num_nodes=5)
sg = dgl.sampling.sample_neighbors(g, F.tensor([1, 2], dtype=F.int64), 2, edge_dir='in', replace=False)
assert sg.number_of_edges() == 0
sg = dgl.sampling.sample_neighbors(g, F.tensor([1, 2], dtype=F.int64), 2, edge_dir='in', replace=True)
assert sg.number_of_edges() == 0
sg = dgl.sampling.sample_neighbors(g, F.tensor([1, 2], dtype=F.int64), 2, edge_dir='out', replace=False)
assert sg.number_of_edges() == 0
sg = dgl.sampling.sample_neighbors(g, F.tensor([1, 2], dtype=F.int64), 2, edge_dir='out', replace=True)
assert sg.number_of_edges() == 0
if __name__ == '__main__':
test_random_walk()
test_pack_traces()
test_pinsage_sampling()
test_sample_neighbors()
test_sample_neighbors_outedge()
test_sample_neighbors_topk()
test_sample_neighbors_topk_outedge()
test_sample_neighbors_with_0deg()
| 47.659044 | 126 | 0.567658 |
6042c12cfef6c41795afaee5e6f92c8ab2598325 | 9,435 | py | Python | samples/roof_types/train_roof_model.py | mWollenhaupt/Mask_RCNN | 40366f4fb6e4853467293bfeb657e0d69585024f | ["MIT"] | null | null | null | samples/roof_types/train_roof_model.py | mWollenhaupt/Mask_RCNN | 40366f4fb6e4853467293bfeb657e0d69585024f | ["MIT"] | null | null | null | samples/roof_types/train_roof_model.py | mWollenhaupt/Mask_RCNN | 40366f4fb6e4853467293bfeb657e0d69585024f | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import skimage.io
from imgaug import augmenters as iaa
import imgaug as ia
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
#get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
class RoofTypeConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
NAME = "roof_types"
    # Train on 1 GPU and 4 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 4 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 4
BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT
#RPN_NMS_THRESHOLD = 0.75
LEARNING_RATE = 0.001
DETECTION_MAX_INSTANCES = 400
MAX_GT_INSTANCES = 400
# Number of classes (including background)
    NUM_CLASSES = 1 + 7 # background + 7 roof types
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
IMAGE_CHANNEL_COUNT = 3
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256) # anchor side in pixels
#RPN_ANCHOR_SCALES = (10, 20, 40, 80, 160) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 300
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 15
config = RoofTypeConfig()
config.display()
class DatasetLoader():
def __init__(self):
self.roofs = []
self.dataset_dir = None
def load_dataset(self, dataset_dir):
self.dataset_dir = dataset_dir
with open(os.path.join(dataset_dir, 'map.txt'), 'r') as file:
lines = file.readlines()
for line in lines:
split = line.split()
self.roofs.append(split)
def split_train_val_data(self, _train=.8, _test=.1, _val=.1, SEED=101010):
if not self.roofs:
print('Load Dataset before try to split data!')
return
files = self.roofs
count = len(files)
train_files = self.split_indices(files, _train, SEED)
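        # split_indices pops its sample out of `files`, so rescale the validation
        # fraction from the original count to the files that remain.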
validation_files = self.split_indices(files, _val/(len(files)/count), SEED)
test_files = files
dataset_train = RoofTypeDataset()
dataset_train.load_roof_data(train_files, self.dataset_dir)
dataset_train.prepare()
dataset_val = RoofTypeDataset()
dataset_val.load_roof_data(validation_files, self.dataset_dir)
dataset_val.prepare()
dataset_test = RoofTypeDataset()
dataset_test.load_roof_data(test_files, self.dataset_dir)
dataset_test.prepare()
return (dataset_train, dataset_val, dataset_test)
def split_indices(self, files, split, SEED=101010):
random.seed(SEED)
indices = random.sample(range(0, len(files)), int(len(files)*split))
indices.sort(reverse=True)
result = []
for idx in indices:
result.append(files.pop(idx))
return result
class RoofTypeDataset(utils.Dataset):
def __init__(self):
super().__init__()
self.roofs = []
self.types = {
'1000':1,
"2100":2,
"3100":3,
"3200":4,
"3300":5,
"3400":6,
"3500":7
}
def load_roof_data(self, data_list, dataset_dir):
"""Load a subset of the RoofType dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes
self.add_class("roof_types", 1, "1000")
self.add_class("roof_types", 2, "2100")
self.add_class("roof_types", 3, "3100")
self.add_class("roof_types", 4, "3200")
self.add_class("roof_types", 5, "3300")
self.add_class("roof_types", 6, "3400")
self.add_class("roof_types", 7, "3500")
self.dataset_dir = dataset_dir
for entry in data_list:
self.add_image(
"roof_types",
image_id=len(self.roofs),
path=os.path.join(dataset_dir, entry[0])
)
self.roofs.append(entry)
def load_mask(self, image_id):
"""Load instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
image_info = self.image_info[image_id]
if image_info["source"] != "roof_types":
return super(self.__class__, self).load_mask(image_id)
img = skimage.io.imread(image_info["path"])
mask_paths = self.roofs[image_id][1:]
masks = []
lbls = np.empty(0).astype(np.int)
for cnt, mask in enumerate(mask_paths):
path = os.path.join(self.dataset_dir, mask)
arr = skimage.io.imread(path).astype(np.bool)
masks.append(arr)
lbl = self.types[mask.split('\\')[1]]
lbls = np.append(lbls, lbl)
result = np.dstack(np.asarray(masks))
return result, lbls
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "roof_types":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
dataset_dir = r'C:\Users\MoritzWollenhaupt\Desktop\ArcGIS_Rooftype_Detection\data\bochum\tif\train\512\mrcnn\single_instances_augmented_sobel_min_max_uint16'
loader = DatasetLoader()
loader.load_dataset(dataset_dir)
dataset_train, dataset_val, dataset_test = loader.split_train_val_data()
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
#init_with = "coco" # imagenet, coco, or last
init_with = "last"
#init_with = "imagenet"
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last(), by_name=True)
# ### Augmentation
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
seqAug = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
        iaa.Flipud(0.2), # vertically flip 20% of all images
iaa.LinearContrast((0.75, 1.5)),
# crop images by -10% to 10% of their height/width
sometimes(iaa.CropAndPad(
percent=(-0.1, 0.1),
#pad_mode=ia.ALL,
pad_cval=0
)),
sometimes(iaa.Affine(
# scale images to 80-120% of their size, individually per axis
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
# # translate by -20 to +20 percent (per axis)
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-175, 175), # rotate by -175 to +175 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbor or bilinear interpolation (fast)
cval=0, # if mode is constant, use a cval = 0
#mode=ia.ALL # use any of scikit-image's warping modes
))
],
random_order=True
)
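# Illustrative helper, not part of the original training script: run the pipeline
# above on a single (H, W, 3) image array to eyeball the augmentations before
# enabling the commented-out `augmentation=seqAug` argument of model.train() below.
def preview_augmentation(image):
    return seqAug.augment_image(image)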
# ## Training
epochs = 400
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/100,
epochs=epochs,
layers='all',
#augmentation=seqAug
) | 33.221831 | 157 | 0.629889 |
766190045266006ca75ef8f58ba1f3bb115e5859 | 28,906 | py | Python | zvmsdk/tests/unit/test_api.py | wngzhe/feilong | 43aeb9c002214e2b150cb1173cf4a2bae239aaa7 | ["Apache-2.0"] | null | null | null | zvmsdk/tests/unit/test_api.py | wngzhe/feilong | 43aeb9c002214e2b150cb1173cf4a2bae239aaa7 | ["Apache-2.0"] | null | null | null | zvmsdk/tests/unit/test_api.py | wngzhe/feilong | 43aeb9c002214e2b150cb1173cf4a2bae239aaa7 | ["Apache-2.0"] | null | null | null |
# Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from zvmsdk import api
from zvmsdk import exception
from zvmsdk.tests.unit import base
from zvmsdk import vmops
class SDKAPITestCase(base.SDKTestCase):
"""Testcases for compute APIs."""
@classmethod
def setUpClass(cls):
super(SDKAPITestCase, cls).setUpClass()
cls.userid = 'TESTUID'
cls.userid_list = ["USERID1", "USERID2"]
def setUp(self):
super(SDKAPITestCase, self).setUp()
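        # Stub out the DB existence check so the API calls under test do not
        # require guests to be registered in the database.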
vmops.VMOps.check_guests_exist_in_db = mock.MagicMock()
self.api = api.SDKAPI()
def test_init_ComputeAPI(self):
self.assertTrue(isinstance(self.api, api.SDKAPI))
@mock.patch("zvmsdk.vmops.VMOps.get_power_state")
def test_guest_get_power_state_real(self, gstate):
self.api.guest_get_power_state_real(self.userid)
gstate.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.vmops.VMOps.get_power_state")
def test_guest_get_power_state(self, gstate, chk_uid):
chk_uid.return_value = True
self.api.guest_get_power_state(self.userid)
chk_uid.assert_called_once_with(self.userid)
gstate.assert_called_once_with(self.userid)
chk_uid.reset_mock()
gstate.reset_mock()
chk_uid.return_value = False
self.assertRaises(exception.SDKObjectNotExistError,
self.api.guest_get_power_state, self.userid)
chk_uid.assert_called_once_with(self.userid)
gstate.assert_not_called()
@mock.patch("zvmsdk.vmops.VMOps.get_info")
def test_guest_get_info(self, ginfo):
self.api.guest_get_info(self.userid)
ginfo.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.vmops.VMOps.get_definition_info")
def test_guest_get_user_direct_(self, ginfo):
ginfo.return_value = {'user_direct':
['CPU 00 BASE',
'USER USERID1 PASSWORD 4096m ']}
expected_value = {'user_direct':
['CPU 00 BASE',
'USER USERID1 ****** 4096m ']}
result = self.api.guest_get_user_direct(self.userid)
ginfo.assert_called_once_with(self.userid)
self.assertEqual(result, expected_value)
@mock.patch("zvmsdk.vmops.VMOps.get_adapters_info")
def test_guest_get_adapters_info(self, adapters_info):
self.api.guest_get_adapters_info(self.userid)
adapters_info.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.vmops.VMOps.guest_deploy")
def test_guest_deploy(self, guest_deploy):
user_id = 'fakevm'
image_name = 'fakeimg'
transportfiles = '/tmp/transport.tgz'
vdev = '0100'
self.api.guest_deploy(user_id, image_name,
transportfiles=transportfiles,
vdev=vdev)
guest_deploy.assert_called_with(user_id.upper(), image_name,
transportfiles, None, vdev,
None, False)
@mock.patch("zvmsdk.imageops.ImageOps.image_import")
def test_image_import(self, image_import):
image_name = '95a4da37-9f9b-4fb2-841f-f0bb441b7544'
url = "file:///install/temp/test.img"
image_meta = {'os_version': "rhel6.7"}
self.api.image_import(image_name, url, image_meta)
image_import.assert_called_once_with(image_name, url,
image_meta,
remote_host=None)
@mock.patch("zvmsdk.imageops.ImageOps.image_export")
def test_image_export(self, image_export):
image_name = '95a4da37-9f9b-4fb2-841f-f0bb441b7544'
dest_url = "file:///install/temp/test.img"
self.api.image_export(image_name, dest_url)
image_export.assert_called_once_with(image_name, dest_url,
None)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create(self, create_vm):
vcpus = 1
memory = 1024
disk_list = []
user_profile = 'profile'
max_cpu = 10
max_mem = '4G'
self.api.guest_create(self.userid, vcpus, memory, disk_list,
user_profile, max_cpu, max_mem)
create_vm.assert_called_once_with(self.userid, vcpus, memory,
disk_list, user_profile, max_cpu, max_mem,
'', '', '', [], {}, '', None)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_with_account(self, create_vm):
vcpus = 1
memory = 1024
disk_list = []
user_profile = 'profile'
max_cpu = 10
max_mem = '4G'
account = "dummy account"
self.api.guest_create(self.userid, vcpus, memory, disk_list,
user_profile, max_cpu, max_mem,
account=account)
create_vm.assert_called_once_with(self.userid, vcpus, memory,
disk_list, user_profile, max_cpu, max_mem,
'', '', '', [], {}, account, None)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_with_comment(self, create_vm):
vcpus = 1
memory = 1024
disk_list = []
user_profile = 'profile'
max_cpu = 10
max_mem = '4G'
comment_list = ["dummy account", "this is a test"]
self.api.guest_create(self.userid, vcpus, memory, disk_list,
user_profile, max_cpu, max_mem,
comment_list=comment_list)
create_vm.assert_called_once_with(self.userid, vcpus, memory,
disk_list, user_profile, max_cpu, max_mem,
'', '', '', [], {}, '', comment_list)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_with_default_profile(self, create_vm):
vcpus = 1
memory = 1024
disk_list = []
user_profile = ''
max_cpu = 10
max_mem = '4G'
base.set_conf('zvm', 'user_profile', 'abc')
self.api.guest_create(self.userid, vcpus, memory, disk_list,
user_profile, max_cpu, max_mem)
create_vm.assert_called_once_with(self.userid, vcpus, memory,
disk_list, 'abc', max_cpu, max_mem,
'', '', '', [], {}, '', None)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_with_no_disk_pool(self, create_vm):
disk_list = [{'size': '1g', 'is_boot_disk': True,
'disk_pool': 'ECKD: eckdpool1'},
{'size': '1g', 'format': 'ext3'},
{'size': '1g', 'format': 'swap'}]
vcpus = 1
memory = 1024
user_profile = 'profile'
max_cpu = 10
max_mem = '4G'
base.set_conf('zvm', 'disk_pool', None)
self.assertRaises(exception.SDKInvalidInputFormat,
self.api.guest_create, self.userid, vcpus,
memory, disk_list, user_profile,
max_cpu, max_mem)
create_vm.assert_not_called()
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_with_no_disk_pool_swap_only(self, create_vm):
disk_list = [{'size': '1g', 'format': 'swap'}]
vcpus = 1
memory = 1024
user_profile = 'profile'
base.set_conf('zvm', 'disk_pool', None)
base.set_conf('zvm', 'swap_force_mdisk', False)
self.api.guest_create(self.userid, vcpus, memory, disk_list,
user_profile)
create_vm.assert_called_once_with(self.userid, vcpus, memory,
disk_list, user_profile, 32, '64G',
'', '', '', [], {}, '', None)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_no_disk_pool_force_mdisk(self, create_vm):
disk_list = [{'size': '1g', 'is_boot_disk': True,
'disk_pool': 'ECKD: eckdpool1'},
{'size': '1g', 'format': 'ext3'},
{'size': '1g', 'format': 'swap'}]
vcpus = 1
memory = 1024
user_profile = 'profile'
max_cpu = 10
max_mem = '4G'
# should be no side effect at all
base.set_conf('zvm', 'swap_force_mdisk', True)
base.set_conf('zvm', 'disk_pool', None)
self.assertRaises(exception.SDKInvalidInputFormat,
self.api.guest_create, self.userid, vcpus,
memory, disk_list, user_profile,
max_cpu, max_mem)
create_vm.assert_not_called()
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_no_disk_pool_swap_only_force_mdisk(self, create_vm):
disk_list = [{'size': '1g', 'format': 'swap'}]
vcpus = 1
memory = 1024
user_profile = 'profile'
base.set_conf('zvm', 'disk_pool', None)
base.set_conf('zvm', 'swap_force_mdisk', True)
self.assertRaises(exception.SDKInvalidInputFormat,
self.api.guest_create, self.userid, vcpus,
memory, disk_list, user_profile)
@mock.patch("zvmsdk.vmops.VMOps.create_vm")
def test_guest_create_with_default_max_cpu_memory(self, create_vm):
vcpus = 1
memory = 1024
disk_list = []
user_profile = 'profile'
self.api.guest_create(self.userid, vcpus, memory, disk_list,
user_profile)
create_vm.assert_called_once_with(self.userid, vcpus, memory,
disk_list, user_profile, 32, '64G',
'', '', '', [], {}, '', None)
@mock.patch("zvmsdk.imageops.ImageOps.image_query")
def test_image_query(self, image_query):
imagekeyword = 'eae09a9f_7958_4024_a58c_83d3b2fc0aab'
self.api.image_query(imagekeyword)
image_query.assert_called_once_with(imagekeyword)
@mock.patch("zvmsdk.vmops.VMOps.delete_vm")
@mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db")
def test_guest_delete(self, cge, delete_vm):
cge.return_value = True
self.api.guest_delete(self.userid)
cge.assert_called_once_with(self.userid, raise_exc=False)
delete_vm.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.vmops.VMOps.delete_vm")
@mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db")
def test_guest_delete_userid_in_lower_case(self, cge, delete_vm):
cge.return_value = True
self.api.guest_delete('testuid')
cge.assert_called_once_with(self.userid, raise_exc=False)
delete_vm.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db")
def test_guest_delete_not_exist(self, cge, cue):
cge.return_value = False
cue.return_value = False
self.api.guest_delete(self.userid)
cge.assert_called_once_with(self.userid, raise_exc=False)
cue.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db")
def test_guest_delete_not_exist_in_db(self, cge, cue):
cge.return_value = False
cue.return_value = True
self.assertRaises(exception.SDKObjectNotExistError,
self.api.guest_delete, self.userid)
cge.assert_called_once_with(self.userid, raise_exc=False)
cue.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_stats")
def test_guest_inspect_cpus_list(self, inspect_stats):
self.api.guest_inspect_stats(self.userid_list)
inspect_stats.assert_called_once_with(self.userid_list)
@mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_stats")
def test_guest_inspect_cpus_single(self, inspect_stats):
self.api.guest_inspect_stats(self.userid)
inspect_stats.assert_called_once_with([self.userid])
@mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_vnics")
def test_guest_inspect_vnics_list(self, inspect_vnics):
self.api.guest_inspect_vnics(self.userid_list)
inspect_vnics.assert_called_once_with(self.userid_list)
@mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_vnics")
def test_guest_inspect_vnics_single(self, inspect_vnics):
self.api.guest_inspect_vnics(self.userid)
inspect_vnics.assert_called_once_with([self.userid])
@mock.patch("zvmsdk.vmops.VMOps.guest_stop")
def test_guest_stop(self, gs):
self.api.guest_stop(self.userid)
gs.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.vmops.VMOps.guest_stop")
def test_guest_stop_with_timeout(self, gs):
self.api.guest_stop(self.userid, timeout=300)
gs.assert_called_once_with(self.userid, timeout=300)
@mock.patch("zvmsdk.vmops.VMOps.guest_softstop")
def test_guest_softstop(self, gss):
self.api.guest_softstop(self.userid, timeout=300)
gss.assert_called_once_with(self.userid, timeout=300)
@mock.patch("zvmsdk.vmops.VMOps.guest_pause")
def test_guest_pause(self, gp):
self.api.guest_pause(self.userid)
gp.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.vmops.VMOps.guest_unpause")
def test_guest_unpause(self, gup):
self.api.guest_unpause(self.userid)
gup.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.vmops.VMOps.guest_config_minidisks")
def test_guest_process_additional_disks(self, config_disks):
disk_list = [{'vdev': '0101',
'format': 'ext3',
'mntdir': '/mnt/0101'}]
self.api.guest_config_minidisks(self.userid, disk_list)
config_disks.assert_called_once_with(self.userid, disk_list)
@mock.patch("zvmsdk.imageops.ImageOps.image_delete")
def test_image_delete(self, image_delete):
image_name = 'eae09a9f_7958_4024_a58c_83d3b2fc0aab'
self.api.image_delete(image_name)
image_delete.assert_called_once_with(image_name)
def test_set_vswitch(self):
self.assertRaises(exception.SDKInvalidInputFormat,
self.api.vswitch_set,
"vswitch_name", unknown='fake_id')
@mock.patch("zvmsdk.vmops.VMOps.create_disks")
def test_guest_add_disks(self, cds):
disk_list = [{'size': '1g'}]
self.api.guest_create_disks(self.userid, disk_list)
cds.assert_called_once_with(self.userid, disk_list)
@mock.patch("zvmsdk.vmops.VMOps.create_disks")
def test_guest_add_disks_no_disk_pool(self, cds):
disk_list = [{'size': '1g', 'is_boot_disk': True,
'disk_pool': 'ECKD: eckdpool1'},
{'size': '1g', 'format': 'ext3'}]
base.set_conf('zvm', 'disk_pool', None)
self.assertRaises(exception.SDKInvalidInputFormat,
self.api.guest_create_disks, self.userid, disk_list)
        cds.assert_not_called()
@mock.patch("zvmsdk.vmops.VMOps.create_disks")
def test_guest_add_disks_nothing_to_do(self, cds):
self.api.guest_create_disks('userid', [])
cds.assert_not_called()
@mock.patch("zvmsdk.vmops.VMOps.delete_disks")
def test_guest_delete_disks(self, dds):
vdev_list = ['0102', '0103']
self.api.guest_delete_disks(self.userid, vdev_list)
dds.assert_called_once_with(self.userid, vdev_list)
@mock.patch("zvmsdk.vmops.VMOps.live_resize_cpus")
def test_guest_live_resize_cpus(self, live_resize_cpus):
cpu_cnt = 3
self.api.guest_live_resize_cpus(self.userid, cpu_cnt)
live_resize_cpus.assert_called_once_with(self.userid, cpu_cnt)
@mock.patch("zvmsdk.vmops.VMOps.resize_cpus")
def test_guest_resize_cpus(self, resize_cpus):
cpu_cnt = 3
self.api.guest_resize_cpus(self.userid, cpu_cnt)
resize_cpus.assert_called_once_with(self.userid, cpu_cnt)
@mock.patch("zvmsdk.vmops.VMOps.live_resize_memory")
def test_guest_live_resize_mem(self, live_resize_memory):
size = "1024m"
self.api.guest_live_resize_mem(self.userid, size)
live_resize_memory.assert_called_once_with(self.userid, size)
@mock.patch("zvmsdk.vmops.VMOps.resize_memory")
def test_guest_resize_mem(self, resize_memory):
size = "2g"
self.api.guest_resize_mem(self.userid, size)
resize_memory.assert_called_once_with(self.userid, size)
@mock.patch("zvmsdk.vmops.VMOps.guest_grow_root_volume")
def test_guest_grow_root_volume(self, grow_root_volume):
os_version = "RHEL7.8"
self.api.guest_grow_root_volume(self.userid, os_version)
grow_root_volume.assert_called_once_with(self.userid, os_version)
@mock.patch("zvmsdk.networkops.NetworkOPS.grant_user_to_vswitch")
def test_vswitch_grant_user(self, guv):
self.api.vswitch_grant_user("testvsw", self.userid)
guv.assert_called_once_with("testvsw", self.userid)
@mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.attach_volume_to_instance")
def test_volume_attach(self, mock_attach):
connection_info = {'platform': 'x86_64',
'ip': '1.2.3.4',
'os_version': 'rhel7',
'multipath': False,
'target_wwpn': '1111',
'target_lun': '2222',
'zvm_fcp': 'b83c',
'assigner_id': 'user1'}
self.api.volume_attach(connection_info)
mock_attach.assert_called_once_with(connection_info)
@mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.volume_refresh_bootmap")
def test_refresh_bootmap(self, mock_attach):
fcpchannel = ['5d71']
wwpn = ['5005076802100c1b', '5005076802200c1b']
lun = '01000000000000'
self.api.volume_refresh_bootmap(fcpchannel, wwpn, lun)
mock_attach.assert_called_once_with(fcpchannel, wwpn, lun,
transportfiles=None, guest_networks=None)
@mock.patch("zvmsdk.volumeop.VolumeOperatorAPI."
"detach_volume_from_instance")
def test_volume_detach(self, mock_detach):
connection_info = {'platform': 'x86_64',
'ip': '1.2.3.4',
'os_version': 'rhel7',
'multipath': False,
'target_wwpn': '1111',
'target_lun': '2222',
'zvm_fcp': 'b83c',
'assigner_id': 'user1'}
self.api.volume_detach(connection_info)
mock_detach.assert_called_once_with(connection_info)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info")
@mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered")
@mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record")
def test_guest_register(self, networkdb_add, guestdb_reg,
get_adapters_info, chk_usr):
networkdb_add.return_value = ''
guestdb_reg.return_value = ''
adapters = [{'adapter_address': '1000',
'adapter_status': '02',
'lan_owner': 'SYSTEM',
'lan_name': 'VSC11590',
'mac_address': '02:55:36:EF:50:91',
'mac_ip_version': '4',
'mac_ip_address': '1.2.3.4'}]
get_adapters_info.return_value = adapters
chk_usr.return_value = True
meta_data = 'rhel7'
net_set = '1'
port_macs = {'EF5091': '6e2ecc4f-14a2-4f33-9f12-5ac4a42f97e7',
'69FCF1': '389dee5e-7b03-405c-b1e8-7c9c235d1425'
}
self.api.guest_register(self.userid, meta_data, net_set, port_macs)
networkdb_add.assert_called_once_with(self.userid, '1000',
'6e2ecc4f-14a2-4f33-9f12'
'-5ac4a42f97e7',
'VSC11590')
guestdb_reg.assert_called_once_with(self.userid, 'rhel7', '1')
get_adapters_info.assert_called_once_with(self.userid)
chk_usr.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info")
@mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered")
@mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record")
def test_guest_register_invalid_portmacs(self, networkdb_add, guestdb_reg,
get_adapters_info, chk_usr):
networkdb_add.return_value = ''
guestdb_reg.return_value = ''
adapters = [{'adapter_address': '1000',
'adapter_status': '02',
'lan_owner': 'SYSTEM',
'lan_name': 'VSC11590',
'mac_address': '02:55:36:EF:50:91',
'mac_ip_version': '4',
'mac_ip_address': '1.2.3.4'}]
get_adapters_info.return_value = adapters
chk_usr.return_value = True
meta_data = 'rhel7'
net_set = '1'
port_macs = '6e2ecc4f-14a2-4f33-9f12-5ac4a42f97e7'
self.assertRaises(exception.SDKInvalidInputFormat,
self.api.guest_register,
self.userid, meta_data, net_set, port_macs)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info")
@mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered")
@mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record")
def test_guest_register_no_port_macs(self, networkdb_add, guestdb_reg,
get_adapters_info, chk_usr):
networkdb_add.return_value = ''
guestdb_reg.return_value = ''
adapters = [{'adapter_address': '1000',
'adapter_status': '02',
'lan_owner': 'SYSTEM',
'lan_name': 'VSC11590',
'mac_address': '02:55:36:EF:50:91',
'mac_ip_version': '4',
'mac_ip_address': '1.2.3.4'}]
get_adapters_info.return_value = adapters
chk_usr.return_value = True
meta_data = 'rhel7'
net_set = '1'
self.api.guest_register(self.userid, meta_data, net_set)
networkdb_add.assert_called_once_with(self.userid, '1000',
None,
'VSC11590')
guestdb_reg.assert_called_once_with(self.userid, 'rhel7', '1')
get_adapters_info.assert_called_once_with(self.userid)
chk_usr.assert_called_once_with(self.userid)
@mock.patch("zvmsdk.utils.check_userid_exist")
@mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info")
@mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered")
@mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record")
@mock.patch("zvmsdk.database.GuestDbOperator.update_guest_by_userid")
@mock.patch("zvmsdk.database.GuestDbOperator.get_comments_by_userid")
@mock.patch("zvmsdk.database.GuestDbOperator.get_migrated_guest_list")
@mock.patch("zvmsdk.database.GuestDbOperator.get_guest_by_userid")
def test_guest_register_guest_in_db(self, get_guest, get_mig_guest,
get_comments, update_guest, networkdb_add,
guestdb_reg, get_adapters_info, chk_usr):
get_guest.return_value = 'fake_guest'
get_mig_guest.return_value = self.userid + ' other info'
get_comments.return_value = {'migrated': 1}
update_guest.return_value = ''
# Below mocks shall not be called
networkdb_add.return_value = ''
guestdb_reg.return_value = ''
get_adapters_info.return_value = []
chk_usr.return_value = True
meta_data = 'rhel7'
net_set = '1'
self.api.guest_register(self.userid, meta_data, net_set)
get_guest.assert_called_once_with(self.userid)
get_mig_guest.assert_called_once_with()
get_comments.assert_called_once_with(self.userid)
update_guest.assert_called_once_with(self.userid,
comments={'migrated': 0})
chk_usr.assert_called_once_with(self.userid)
networkdb_add.assert_not_called()
guestdb_reg.assert_not_called()
get_adapters_info.assert_not_called()
@mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db")
@mock.patch("zvmsdk.database.NetworkDbOperator."
"switch_delete_record_for_userid")
@mock.patch("zvmsdk.database.GuestDbOperator.delete_guest_by_userid")
def test_guest_deregister(self, guestdb_del, networkdb_del, chk_db):
guestdb_del.return_value = ''
networkdb_del.return_value = ''
chk_db.return_value = True
self.api.guest_deregister(self.userid)
guestdb_del.assert_called_once_with(self.userid)
networkdb_del.assert_called_once_with(self.userid)
chk_db.assert_called_once_with(self.userid, raise_exc=False)
@mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db")
@mock.patch("zvmsdk.database.NetworkDbOperator."
"switch_delete_record_for_userid")
@mock.patch("zvmsdk.database.GuestDbOperator.delete_guest_by_userid")
def test_guest_deregister_not_exists(self, guestdb_del,
networkdb_del, chk_db):
guestdb_del.return_value = ''
networkdb_del.return_value = ''
chk_db.return_value = False
self.api.guest_deregister(self.userid)
guestdb_del.assert_called_once_with(self.userid)
networkdb_del.assert_called_once_with(self.userid)
chk_db.assert_called_once_with(self.userid, raise_exc=False)
@mock.patch("zvmsdk.hostops.HOSTOps.guest_list")
def test_host_get_guest_list(self, guest_list):
self.api.host_get_guest_list()
guest_list.assert_called_once_with()
@mock.patch("zvmsdk.hostops.HOSTOps.diskpool_get_volumes")
def test_host_get_diskpool_volumes(self, diskpool_vols):
base.set_conf('zvm', 'disk_pool', None)
disk_pool = 'ECKD:IAS1PL'
result = self.api.host_get_diskpool_volumes(disk_pool)
diskpool_vols.assert_called_once_with('IAS1PL')
# Test disk_pool is None
disk_pool = None
try:
self.api.host_get_diskpool_volumes(disk_pool)
except Exception as exc:
errmsg = ("Invalid disk_pool input None, disk_pool should be"
" configured for sdkserver.")
result = errmsg in six.text_type(exc)
self.assertEqual(result, True)
pass
@mock.patch("zvmsdk.hostops.HOSTOps.get_volume_info")
def test_host_get_volume_info(self, volume_info):
volume = 'VOLUM1'
result = self.api.host_get_volume_info(volume)
volume_info.assert_called_once_with(volume)
# Test volume is None
volume = None
try:
self.api.host_get_volume_info(volume)
except Exception as exc:
errmsg = ("Invalid volume input None, volume"
" must be specified.")
result = errmsg in six.text_type(exc)
self.assertEqual(result, True)
pass
@mock.patch("zvmsdk.hostops.HOSTOps.diskpool_get_info")
def test_host_diskpool_get_info(self, dp_info):
base.set_conf('zvm', 'disk_pool', None)
results = self.api.host_diskpool_get_info()
self.assertEqual(results['disk_total'], 0)
self.assertEqual(results['disk_available'], 0)
self.assertEqual(results['disk_used'], 0)
        dp_info.assert_not_called()
| 44.064024 | 78 | 0.627551 |
08a44a4d5423bc24d10d60c34524642ea8970d9d | 1,120 | py | Python | persimmon/view/blocks/csvoutblock.py | AlvarBer/Persimmon | da08ed854dd0305d7e4684e97ee828acffd76b4d | [
"MIT"
] | 206 | 2016-11-02T20:45:48.000Z | 2022-02-07T05:43:18.000Z | persimmon/view/blocks/csvoutblock.py | mgbin088/Persimmon | da08ed854dd0305d7e4684e97ee828acffd76b4d | [
"MIT"
] | 6 | 2016-11-06T19:16:01.000Z | 2018-02-20T11:22:45.000Z | persimmon/view/blocks/csvoutblock.py | mgbin088/Persimmon | da08ed854dd0305d7e4684e97ee828acffd76b4d | [
"MIT"
] | 40 | 2017-03-08T21:01:53.000Z | 2020-12-29T16:43:56.000Z | from persimmon.view.pins import InputPin
from persimmon.view.util import FileDialog
from persimmon.view.blocks.block import Block
from kivy.properties import ObjectProperty, StringProperty
from kivy.lang import Builder
import numpy as np
import pandas as pd
Builder.load_file('persimmon/view/blocks/csvoutblock.kv')
class CSVOutBlock(Block):
in_1 = ObjectProperty()
path = StringProperty()
file_dialog = ObjectProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.file_dialog = FileDialog(dir='~', filters=['*.csv'],
size_hint=(0.8, 0.8))
        # Bind the two together: the dialog's chosen file updates this block's path
self.file_dialog.bind(file_chosen=self.setter('path'))
self.tainted = True
self.tainted_msg = 'File not chosen in block {}!'.format(self.title)
def function(self):
if type(self.in_1.val) == np.ndarray:
self.in_1.val = pd.DataFrame(self.in_1.val)
self.in_1.val.to_csv(path_or_buf=self.path, index=False)
def on_path(self, instance, value):
self.tainted = not value.endswith('.csv')
| 31.111111 | 76 | 0.665179 |
37100c3d52b29cc3756f66df886a7f5479039646 | 10,106 | py | Python | spacy_transformers/wrapper.py | maxtrem/spacy-transformers | 7458fc3466af0800617c3c106a4ff86fc0285f4d | [
"MIT"
] | null | null | null | spacy_transformers/wrapper.py | maxtrem/spacy-transformers | 7458fc3466af0800617c3c106a4ff86fc0285f4d | [
"MIT"
] | 1 | 2020-07-11T14:08:04.000Z | 2020-07-11T14:08:04.000Z | spacy_transformers/wrapper.py | maxtrem/spacy-transformers | 7458fc3466af0800617c3c106a4ff86fc0285f4d | [
"MIT"
] | 2 | 2020-06-04T18:38:34.000Z | 2022-02-19T19:23:19.000Z | from thinc.extra.wrappers import PyTorchWrapper, xp2torch, torch2xp
from transformers.optimization import AdamW
import transformers
import torch.autograd
import torch.nn.utils.clip_grad
import torch
from typing import Tuple, Callable, Any
from thinc.neural.optimizers import Optimizer
import numpy
import contextlib
from thinc.compat import BytesIO
from .util import get_model, Dropout
from .activations import RaggedArray, Activations
FINE_TUNE = True
CONFIG = {"output_hidden_states": True, "output_attentions": True}
class TransformersWrapper(PyTorchWrapper):
"""Wrap a Transformers model for use in Thinc.
The model will take as input a spacy_transformers.util.RaggedArray
object that will specify the input IDs and optionally the segment IDs. The
RaggedArray is basically a tuple (ids, lengths), where ids is concatenated
for a whole batch (this format allows the data to be contiguous even if
the sequences are different lengths). The segment IDs should be coded as
the different models expect them -- see
https://github.com/huggingface/transformers/blob/master/examples/utils_glue.py
"""
_model: Any
_optimizer: Any
cfg: dict
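    # Illustrative sketch of the input format described in the class docstring
    # above (not part of the original file; the token IDs are made up).
    # RaggedArray(data, lengths) is the constructor imported from .activations,
    # as used elsewhere in this module:
    #
    #     ids = numpy.array([101, 7592, 102, 101, 2088, 999, 102])  # a 3-token and a 4-token doc
    #     inputs = RaggedArray(ids, [3, 4])
    #     acts = wrapper.predict(inputs)  # Activations; acts.lh.data has one row per wordpiece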
@classmethod
def from_pretrained(cls, name):
model_cls = get_model(name)
model = model_cls.from_pretrained(name, **CONFIG)
self = cls(name, model.config.to_dict(), model)
self.cfg.update(self.transformers_model.config.to_dict())
return self
def __init__(self, name, config, model):
PyTorchWrapper.__init__(self, model)
self.cfg = dict(config)
@property
def nO(self):
if "hidden_size" in self.cfg:
# BERT
return self.cfg["hidden_size"]
elif "hidden_dim" in self.cfg:
# DistilBERT
return self.cfg["hidden_dim"] // 4
elif "n_embd" in self.cfg:
# GPT2
return self.cfg["n_embd"]
elif "d_model" in self.cfg:
# XLNet
return self.cfg["d_model"]
elif hasattr(self.transformers_model, "dim"):
# XLM
return self.transformers_model.dim
else:
keys = ", ".join(self.cfg.keys())
raise ValueError(f"Unexpected config. Keys: {keys}")
@property
def transformers_model(self):
return self._model
@property
def max_length(self):
# `n_positions` in GPT2 config
return self.cfg.get("max_position_embeddings", self.cfg.get("n_positions", 128))
def predict(self, inputs: RaggedArray):
self._model.eval()
model_kwargs = self.get_model_kwargs(inputs)
with torch.no_grad():
if hasattr(self._optimizer, "swap_swa_sgd"):
self._optimizer.swap_swa_sgd()
y_var = self._model(**model_kwargs)
if hasattr(self._optimizer, "swap_swa_sgd"):
self._optimizer.swap_swa_sgd()
return self.make_activations(y_var, inputs.lengths)
def begin_update(
self, inputs: RaggedArray, drop: Dropout = 0.0
) -> Tuple[Activations, Callable[..., None]]:
if drop is None:
# "drop is None" indicates prediction. It's one of the parts of
# Thinc's API I'm least happy with...
return self.predict(inputs), lambda dY, sgd=None: None
max_original = max(inputs.lengths, default=0)
model_kwargs = self.get_model_kwargs(inputs)
self._model.train()
# Prepare all the model arguments, including the attention mask
y_var = self._model(**model_kwargs)
output = self.make_activations(y_var, inputs.lengths)
assert output.lh.data.shape[0] == inputs.data.shape[0], (
output.lh.data.shape,
inputs.data.shape,
)
def backward_pytorch(d_output: Activations, sgd: Optimizer = None) -> None:
y_for_bwd = []
dy_for_bwd = []
if d_output.has_lh:
assert d_output.lh.data.shape[0] == sum(d_output.lh.lengths)
d_lh = d_output.lh.to_padded(to=max_original)
if self.max_length and d_lh.shape[1] >= self.max_length:
d_lh = d_lh[:, : self.max_length]
dy_for_bwd.append(xp2torch(d_lh))
y_for_bwd.append(y_var[0])
if d_output.has_po:
dy_for_bwd.append(xp2torch(d_output.po.data))
y_for_bwd.append(y_var[1])
if FINE_TUNE:
torch.autograd.backward(y_for_bwd, grad_tensors=dy_for_bwd)
if sgd is not None:
if self._optimizer is None:
self._optimizer = self._create_optimizer(sgd)
if sgd.max_grad_norm:
torch.nn.utils.clip_grad.clip_grad_norm_(
self._model.parameters(), sgd.max_grad_norm
)
optimizer = self._optimizer
for group in optimizer.param_groups:
group["lr"] = getattr(sgd, "trf_lr", sgd.alpha)
optimizer.step()
optimizer.zero_grad()
self._update_pytorch_averages(sgd)
return None
self._model.eval()
return output, backward_pytorch
@contextlib.contextmanager
def use_params(self, params):
key_prefix = f"pytorch_{self.id}_"
state_dict = {}
for k, v in params.items():
if hasattr(k, "startswith") and k.startswith(key_prefix):
state_dict[k.replace(key_prefix, "")] = xp2torch(v)
if state_dict:
backup = {k: v.clone() for k, v in self._model.state_dict().items()}
self._model.load_state_dict(state_dict)
yield
self._model.load_state_dict(backup)
else:
yield
def make_activations(self, fields, lengths) -> Activations:
"""Create Activations from the output tuples produced by PyTorch Transformers.
Includes converting torch tensors to xp, and handling missing values.
"""
fields = list(fields)
fields[0] = torch2xp(fields[0])
fields[0] = RaggedArray.from_padded(fields[0], lengths)
assert fields[0].data.shape[0] == sum(lengths)
# lh: last hidden
# po: pooler_output
# ah: all_hidden
# aa: all_attention
if len(fields) != 4:
lh = fields[0]
po = RaggedArray.blank()
else:
if isinstance(fields[1], tuple):
fields[1] = RaggedArray.blank()
else:
fields[1] = RaggedArray(torch2xp(fields[1]), [1] * len(lengths))
lh, po, _, _2 = fields
# Convert last_hidden_state to xp
return Activations(lh, po)
def get_model_kwargs(self, inputs):
padded = inputs.to_padded(value=-1)
if padded.ndim == 2:
padded = padded.reshape(padded.shape + (1,))
ids = padded[:, :, 0]
neg_idx = ids < 0
ids[neg_idx] = 0
ids = torch.as_tensor(ids, dtype=torch.int64)
if padded.shape[2] == 2:
segment_ids = padded[:, :, 1]
numpy.place(segment_ids, segment_ids<0, 0)
segment_ids = torch.as_tensor(segment_ids, dtype=torch.int64)
else:
segment_ids = torch.zeros_like(ids)
# Calculate "attention mask" for BERT and XLNet, but not GPT2 (sigh)
if isinstance(self._model, (transformers.BertModel, transformers.XLNetModel)):
mask = self.ops.xp.ones(ids.shape, dtype=numpy.int_)
mask[neg_idx] = 0
mask = xp2torch(mask)
return {
"input_ids": ids,
"attention_mask": mask,
"token_type_ids": segment_ids,
}
elif isinstance(self._model, (transformers.DistilBertModel)):
# Mask, but no token type IDs for DistilBert (sigh again...)
mask = self.ops.xp.ones(ids.shape, dtype=numpy.int_)
mask[neg_idx] = 0
mask = xp2torch(mask)
return {"input_ids": ids, "attention_mask": mask}
else:
return {"input_ids": ids, "token_type_ids": segment_ids}
def _create_optimizer(self, sgd):
optimizer = AdamW(
self._model.parameters(),
lr=getattr(sgd, "trf_lr", sgd.alpha),
eps=sgd.eps,
betas=(sgd.b1, sgd.b2),
weight_decay=getattr(sgd, "trf_weight_decay", 0.0),
)
optimizer.zero_grad()
return optimizer
def _update_pytorch_averages(self, sgd, *, init_steps=1):
if sgd.averages is None:
return
# Collect parameters if we don't have them
for name, param in self._model.state_dict().items():
key = f"pytorch_{self.id}_{name}"
sgd.nr_update[key] += 1
xp_param = torch2xp(param)
if key in sgd.averages:
self.ops.update_averages(
sgd.averages[key], xp_param, sgd.nr_update[key]
)
else:
sgd.averages[key] = xp_param.copy()
sgd.nr_update[key] = init_steps
def to_disk(self, path):
torch.save(self._model.state_dict(), str(path))
def from_disk(self, path):
if self.ops.device == "cpu":
map_location = "cpu"
else:
map_location = "cuda:0"
self._model.load_state_dict(torch.load(path, map_location=map_location))
self._model.to(map_location)
def to_bytes(self):
filelike = BytesIO()
torch.save(self._model.state_dict(), filelike)
filelike.seek(0)
return filelike.getvalue()
def from_bytes(self, data):
filelike = BytesIO(data)
filelike.seek(0)
if self.ops.device == "cpu":
map_location = "cpu"
else:
map_location = "cuda:0"
self._model.load_state_dict(torch.load(filelike, map_location=map_location))
self._model.to(map_location)
| 37.992481 | 88 | 0.590243 |
eb5237ee19b1366d3f8afcf05da7a4a80734d3e4 | 1,467 | py | Python | LED_Knight_Rider01.py | LekkerPrutsen/LED-matrix-experiments | 62bd8b18be842df7648d5a09a87b203933541524 | [
"MIT"
] | 4 | 2017-01-27T15:08:05.000Z | 2019-07-27T19:35:13.000Z | LED_Knight_Rider01.py | LekkerPrutsen/LED-matrix-experiments | 62bd8b18be842df7648d5a09a87b203933541524 | [
"MIT"
] | null | null | null | LED_Knight_Rider01.py | LekkerPrutsen/LED-matrix-experiments | 62bd8b18be842df7648d5a09a87b203933541524 | [
"MIT"
] | 1 | 2018-03-31T13:09:00.000Z | 2018-03-31T13:09:00.000Z | # -*- coding: utf-8 -*-
import time
from random import randint
import max7219.led as led
device = led.matrix(cascaded=4)
device.orientation(90)
print "Press Ctrl+C to stop"
#ASCII codes
symbol01 = 219
symbol02 = 178
symbol03 = 177
symbol04 = 176
duration = 0.2
try:
while True:
#position 1
device.letter(0, symbol01)
device.letter(1, symbol02)
device.letter(2, symbol04)
device.letter(3, symbol04)
time.sleep(duration)
#position 2
device.letter(0, symbol02)
device.letter(1, symbol01)
device.letter(2, symbol04)
device.clear(3)
time.sleep(duration)
#position 3
device.letter(0, symbol03)
device.letter(1, symbol02)
device.letter(2, symbol01)
device.clear(3)
time.sleep(duration)
#position 4
device.letter(0, symbol04)
device.letter(1, symbol03)
device.letter(2, symbol02)
device.letter(3, symbol01)
time.sleep(duration)
#position 5
device.clear(0)
device.letter(1, symbol04)
device.letter(2, symbol01)
device.letter(3, symbol02)
time.sleep(duration)
#position 6
device.clear(0)
device.letter(1, symbol01)
device.letter(2, symbol02)
device.letter(3, symbol03)
time.sleep(duration)
except KeyboardInterrupt:
device.clear()
| 18.807692 | 34 | 0.587594 |
17abb5660d5577fd08a3c9ddab481c55cf7d159c | 24,233 | py | Python | tests/test_taskgroups.py | byronformwalt/anyio | 35858dcd08d2522fee3f84b213d55d902ebbc2ff | [
"MIT"
] | null | null | null | tests/test_taskgroups.py | byronformwalt/anyio | 35858dcd08d2522fee3f84b213d55d902ebbc2ff | [
"MIT"
] | null | null | null | tests/test_taskgroups.py | byronformwalt/anyio | 35858dcd08d2522fee3f84b213d55d902ebbc2ff | [
"MIT"
] | null | null | null | import asyncio
import re
import sys
import time
import pytest
import trio
import anyio
from anyio import (
CancelScope, ExceptionGroup, create_task_group, current_effective_deadline, current_time,
fail_after, get_cancelled_exc_class, move_on_after, sleep, wait_all_tasks_blocked)
if sys.version_info < (3, 7):
current_task = asyncio.Task.current_task
else:
current_task = asyncio.current_task
pytestmark = pytest.mark.anyio
async def async_error(text, delay=0.1):
try:
if delay:
await sleep(delay)
finally:
raise Exception(text)
async def test_already_closed():
async with create_task_group() as tg:
pass
with pytest.raises(RuntimeError) as exc:
tg.start_soon(async_error, 'fail')
exc.match('This task group is not active; no new tasks can be started')
async def test_success():
async def async_add(value):
results.add(value)
results = set()
async with create_task_group() as tg:
tg.start_soon(async_add, 'a')
tg.start_soon(async_add, 'b')
assert results == {'a', 'b'}
@pytest.mark.parametrize('module', [
pytest.param(asyncio, id='asyncio'),
pytest.param(trio, id='trio')
])
def test_run_natively(module):
async def testfunc():
async with create_task_group() as tg:
tg.start_soon(sleep, 0)
if module is asyncio:
from anyio._backends._asyncio import native_run
try:
native_run(testfunc())
finally:
asyncio.set_event_loop(None)
else:
module.run(testfunc)
async def test_start_soon_while_running():
async def task_func():
tg.start_soon(sleep, 0)
async with create_task_group() as tg:
tg.start_soon(task_func)
async def test_start_soon_after_error():
with pytest.raises(ZeroDivisionError):
async with create_task_group() as tg:
a = 1 / 0 # noqa: F841
with pytest.raises(RuntimeError) as exc:
tg.start_soon(sleep, 0)
exc.match('This task group is not active; no new tasks can be started')
async def test_start_no_value():
async def taskfunc(*, task_status):
task_status.started()
async with create_task_group() as tg:
value = await tg.start(taskfunc)
assert value is None
async def test_start_with_value():
async def taskfunc(*, task_status):
task_status.started('foo')
async with create_task_group() as tg:
value = await tg.start(taskfunc)
assert value == 'foo'
async def test_start_crash_before_started_call():
async def taskfunc(*, task_status):
raise Exception('foo')
async with create_task_group() as tg:
with pytest.raises(Exception) as exc:
await tg.start(taskfunc)
exc.match('foo')
async def test_start_crash_after_started_call():
async def taskfunc(*, task_status):
task_status.started(2)
raise Exception('foo')
with pytest.raises(Exception) as exc:
async with create_task_group() as tg:
value = await tg.start(taskfunc)
exc.match('foo')
assert value == 2
async def test_start_no_started_call():
async def taskfunc(*, task_status):
pass
async with create_task_group() as tg:
with pytest.raises(RuntimeError) as exc:
await tg.start(taskfunc)
exc.match('hild exited')
async def test_start_cancelled():
async def taskfunc(*, task_status):
nonlocal started, finished
started = True
await sleep(2)
finished = True
started = finished = False
async with create_task_group() as tg:
tg.cancel_scope.cancel()
await tg.start(taskfunc)
assert started
assert not finished
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_start_native_host_cancelled():
async def taskfunc(*, task_status):
nonlocal started, finished
started = True
await sleep(2)
finished = True
async def start_another():
async with create_task_group() as tg:
await tg.start(taskfunc)
started = finished = False
task = asyncio.get_event_loop().create_task(start_another())
await wait_all_tasks_blocked()
task.cancel()
with pytest.raises(asyncio.CancelledError):
await task
assert started
assert not finished
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_start_native_child_cancelled():
async def taskfunc(*, task_status):
nonlocal task, finished
task = current_task()
await sleep(2)
finished = True
async def start_another():
async with create_task_group() as tg2:
await tg2.start(taskfunc)
task = None
finished = False
async with create_task_group() as tg:
tg.start_soon(start_another)
await wait_all_tasks_blocked()
task.cancel()
assert not finished
async def test_start_exception_delivery():
def task_fn(*, task_status):
task_status.started("hello")
async with anyio.create_task_group() as tg:
with pytest.raises(TypeError, match='to be synchronous$'):
await tg.start(task_fn)
async def test_host_exception():
async def set_result(value):
nonlocal result
await sleep(3)
result = value
result = None
with pytest.raises(Exception) as exc:
async with create_task_group() as tg:
tg.start_soon(set_result, 'a')
raise Exception('dummy error')
exc.match('dummy error')
assert result is None
async def test_edge_cancellation():
async def dummy():
nonlocal marker
marker = 1
# At this point the task has been cancelled so sleep() will raise an exception
await sleep(0)
# Execution should never get this far
marker = 2
marker = None
async with create_task_group() as tg:
tg.start_soon(dummy)
assert marker is None
tg.cancel_scope.cancel()
assert marker == 1
async def test_failing_child_task_cancels_host():
async def child():
await wait_all_tasks_blocked()
raise Exception('foo')
sleep_completed = False
with pytest.raises(Exception) as exc:
async with create_task_group() as tg:
tg.start_soon(child)
await sleep(0.5)
sleep_completed = True
exc.match('foo')
assert not sleep_completed
async def test_failing_host_task_cancels_children():
async def child():
nonlocal sleep_completed
await sleep(1)
sleep_completed = True
sleep_completed = False
with pytest.raises(Exception) as exc:
async with create_task_group() as tg:
tg.start_soon(child)
await wait_all_tasks_blocked()
raise Exception('foo')
exc.match('foo')
assert not sleep_completed
async def test_cancel_scope_in_another_task():
async def child():
nonlocal result, local_scope
with CancelScope() as local_scope:
await sleep(2)
result = True
local_scope = None
result = False
async with create_task_group() as tg:
tg.start_soon(child)
while local_scope is None:
await sleep(0)
local_scope.cancel()
assert not result
async def test_cancel_propagation():
async def g():
async with create_task_group():
await sleep(1)
assert False
async with create_task_group() as tg:
tg.start_soon(g)
await sleep(0)
tg.cancel_scope.cancel()
async def test_cancel_twice():
"""Test that the same task can receive two cancellations."""
async def cancel_group():
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
for _ in range(2):
async with create_task_group() as tg:
tg.start_soon(cancel_group)
await sleep(1)
pytest.fail('Execution should not reach this point')
async def test_cancel_exiting_task_group():
"""
Test that if a task group is waiting for subtasks to finish and it receives a cancellation, the
subtasks are also cancelled and the waiting continues.
"""
async def waiter():
nonlocal cancel_received
try:
await sleep(5)
finally:
cancel_received = True
async def subgroup():
async with create_task_group() as tg2:
tg2.start_soon(waiter)
cancel_received = False
async with create_task_group() as tg:
tg.start_soon(subgroup)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
assert cancel_received
async def test_exception_group_children():
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(async_error, 'task1')
tg.start_soon(async_error, 'task2', 0.15)
assert len(exc.value.exceptions) == 2
assert sorted(str(e) for e in exc.value.exceptions) == ['task1', 'task2']
assert exc.match('^2 exceptions were raised in the task group:\n')
assert exc.match(r'Exception: task\d\n----')
assert re.fullmatch(
r"<ExceptionGroup: Exception\('task[12]',?\), Exception\('task[12]',?\)>",
repr(exc.value))
async def test_exception_group_host():
with pytest.raises(ExceptionGroup) as exc:
async with create_task_group() as tg:
tg.start_soon(async_error, 'child', 2)
await wait_all_tasks_blocked()
raise Exception('host')
assert len(exc.value.exceptions) == 2
assert sorted(str(e) for e in exc.value.exceptions) == ['child', 'host']
assert exc.match('^2 exceptions were raised in the task group:\n')
assert exc.match(r'Exception: host\n----')
async def test_escaping_cancelled_exception():
async with create_task_group() as tg:
tg.cancel_scope.cancel()
await sleep(0)
async def test_cancel_scope_cleared():
with move_on_after(0.1):
await sleep(1)
await sleep(0)
@pytest.mark.parametrize('delay', [0, 0.1], ids=['instant', 'delayed'])
async def test_fail_after(delay):
with pytest.raises(TimeoutError):
with fail_after(delay) as scope:
await sleep(1)
assert scope.cancel_called
async def test_fail_after_no_timeout():
with fail_after(None) as scope:
assert scope.deadline == float('inf')
await sleep(0.1)
assert not scope.cancel_called
async def test_fail_after_after_cancellation():
event = anyio.Event()
async with anyio.create_task_group() as tg:
tg.cancel_scope.cancel()
await event.wait()
block_complete = False
with pytest.raises(TimeoutError):
with fail_after(0.1):
await anyio.sleep(0.5)
block_complete = True
assert not block_complete
@pytest.mark.parametrize('delay', [0, 0.1], ids=['instant', 'delayed'])
async def test_move_on_after(delay):
result = False
with move_on_after(delay) as scope:
await sleep(1)
result = True
assert not result
assert scope.cancel_called
async def test_move_on_after_no_timeout():
result = False
with move_on_after(None) as scope:
assert scope.deadline == float('inf')
await sleep(0.1)
result = True
assert result
assert not scope.cancel_called
async def test_nested_move_on_after():
sleep_completed = inner_scope_completed = False
with move_on_after(0.1) as outer_scope:
assert current_effective_deadline() == outer_scope.deadline
with move_on_after(1) as inner_scope:
assert current_effective_deadline() == outer_scope.deadline
await sleep(2)
sleep_completed = True
inner_scope_completed = True
assert not sleep_completed
assert not inner_scope_completed
assert outer_scope.cancel_called
assert not inner_scope.cancel_called
async def test_shielding():
async def cancel_when_ready():
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
inner_sleep_completed = outer_sleep_completed = False
async with create_task_group() as tg:
tg.start_soon(cancel_when_ready)
with move_on_after(10, shield=True) as inner_scope:
assert inner_scope.shield
await sleep(0.1)
inner_sleep_completed = True
await sleep(1)
outer_sleep_completed = True
assert inner_sleep_completed
assert not outer_sleep_completed
assert tg.cancel_scope.cancel_called
assert not inner_scope.cancel_called
async def test_cancel_from_shielded_scope():
async with create_task_group() as tg:
with CancelScope(shield=True) as inner_scope:
assert inner_scope.shield
tg.cancel_scope.cancel()
with pytest.raises(get_cancelled_exc_class()):
await sleep(0.01)
with pytest.raises(get_cancelled_exc_class()):
await sleep(0.01)
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_host_asyncgen():
async def host_task():
nonlocal done
async with create_task_group() as tg:
with CancelScope(shield=True) as inner_scope:
assert inner_scope.shield
tg.cancel_scope.cancel()
with pytest.raises(get_cancelled_exc_class()):
await sleep(0)
with pytest.raises(get_cancelled_exc_class()):
await sleep(0)
done = True
async def host_agen_fn():
await host_task()
yield
pytest.fail("host_agen_fn should only be __anext__ed once")
done = False
host_agen = host_agen_fn()
try:
await asyncio.get_event_loop().create_task(host_agen.__anext__())
finally:
await host_agen.aclose()
assert done
async def test_shielding_immediate_scope_cancelled():
async def cancel_when_ready():
await wait_all_tasks_blocked()
scope.cancel()
sleep_completed = False
async with create_task_group() as tg:
with CancelScope(shield=True) as scope:
tg.start_soon(cancel_when_ready)
await sleep(0.5)
sleep_completed = True
assert not sleep_completed
async def test_cancel_scope_in_child_task():
async def child():
nonlocal child_scope
with CancelScope() as child_scope:
await sleep(2)
child_scope = None
host_done = False
async with create_task_group() as tg:
tg.start_soon(child)
await wait_all_tasks_blocked()
child_scope.cancel()
await sleep(0.1)
host_done = True
assert host_done
assert not tg.cancel_scope.cancel_called
async def test_exception_cancels_siblings():
async def child(fail):
if fail:
raise Exception('foo')
else:
nonlocal sleep_completed
await sleep(1)
sleep_completed = True
sleep_completed = False
with pytest.raises(Exception) as exc:
async with create_task_group() as tg:
tg.start_soon(child, False)
await wait_all_tasks_blocked()
tg.start_soon(child, True)
exc.match('foo')
assert not sleep_completed
async def test_cancel_cascade():
async def do_something():
async with create_task_group() as tg2:
tg2.start_soon(sleep, 1)
raise Exception('foo')
async with create_task_group() as tg:
tg.start_soon(do_something)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
async def test_cancelled_parent():
async def child():
with CancelScope():
await sleep(1)
raise Exception('foo')
async def parent(tg):
await wait_all_tasks_blocked()
tg.start_soon(child)
async with create_task_group() as tg:
tg.start_soon(parent, tg)
tg.cancel_scope.cancel()
async def test_shielded_deadline():
with move_on_after(10):
with CancelScope(shield=True):
with move_on_after(1000):
assert current_effective_deadline() - current_time() > 900
async def test_deadline_reached_on_start():
with move_on_after(0):
await sleep(0)
pytest.fail('Execution should not reach this point')
async def test_deadline_moved():
with fail_after(0.1) as scope:
scope.deadline += 0.3
await sleep(0.2)
async def test_timeout_error_with_multiple_cancellations():
with pytest.raises(TimeoutError):
with fail_after(0.1):
async with create_task_group() as tg:
tg.start_soon(sleep, 2)
await sleep(2)
async def test_nested_fail_after():
async def killer(scope):
await wait_all_tasks_blocked()
scope.cancel()
async with create_task_group() as tg:
with CancelScope() as scope:
with CancelScope():
tg.start_soon(killer, scope)
with fail_after(1):
await sleep(2)
pytest.fail('Execution should not reach this point')
pytest.fail('Execution should not reach this point either')
pytest.fail('Execution should also not reach this point')
assert scope.cancel_called
async def test_nested_shield():
async def killer(scope):
await wait_all_tasks_blocked()
scope.cancel()
with pytest.raises(TimeoutError):
async with create_task_group() as tg:
with CancelScope() as scope:
with CancelScope(shield=True):
tg.start_soon(killer, scope)
with fail_after(0.2):
await sleep(2)
def test_task_group_in_generator(anyio_backend_name, anyio_backend_options):
async def task_group_generator():
async with create_task_group():
yield
gen = task_group_generator()
anyio.run(gen.__anext__, backend=anyio_backend_name, backend_options=anyio_backend_options)
pytest.raises(StopAsyncIteration, anyio.run, gen.__anext__, backend=anyio_backend_name,
backend_options=anyio_backend_options)
async def test_exception_group_filtering():
"""Test that CancelledErrors are filtered out of nested exception groups."""
async def fail(name):
try:
await anyio.sleep(.1)
finally:
raise Exception('%s task failed' % name)
async def fn():
async with anyio.create_task_group() as tg:
tg.start_soon(fail, 'parent')
async with anyio.create_task_group() as tg2:
tg2.start_soon(fail, 'child')
await anyio.sleep(1)
with pytest.raises(ExceptionGroup) as exc:
await fn()
assert len(exc.value.exceptions) == 2
assert str(exc.value.exceptions[0]) == 'parent task failed'
assert str(exc.value.exceptions[1]) == 'child task failed'
async def test_cancel_propagation_with_inner_spawn():
async def g():
async with anyio.create_task_group() as tg2:
tg2.start_soon(anyio.sleep, 10)
await anyio.sleep(1)
assert False
async with anyio.create_task_group() as tg:
tg.start_soon(g)
await wait_all_tasks_blocked()
tg.cancel_scope.cancel()
async def test_escaping_cancelled_error_from_cancelled_task():
"""Regression test for issue #88. No CancelledError should escape the outer scope."""
with CancelScope() as scope:
with move_on_after(0.1):
await sleep(1)
scope.cancel()
@pytest.mark.filterwarnings('ignore:"@coroutine" decorator is deprecated:DeprecationWarning')
def test_cancel_generator_based_task():
from asyncio import coroutine
async def native_coro_part():
with CancelScope() as scope:
scope.cancel()
@coroutine
def generator_part():
yield from native_coro_part()
anyio.run(generator_part, backend='asyncio')
async def test_suppress_exception_context():
"""
Test that the __context__ attribute has been cleared when the exception is re-raised in the
exception group. This prevents recursive tracebacks.
"""
with pytest.raises(ValueError) as exc:
async with create_task_group() as tg:
tg.cancel_scope.cancel()
async with create_task_group() as tg2:
tg2.start_soon(sleep, 1)
raise ValueError
assert exc.value.__context__ is None
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_native_future_tasks():
async def wait_native_future():
loop = asyncio.get_event_loop()
await loop.create_future()
async with anyio.create_task_group() as tg:
tg.start_soon(wait_native_future)
tg.cancel_scope.cancel()
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_native_future_tasks_cancel_scope():
async def wait_native_future():
with anyio.CancelScope():
loop = asyncio.get_event_loop()
await loop.create_future()
async with anyio.create_task_group() as tg:
tg.start_soon(wait_native_future)
tg.cancel_scope.cancel()
@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_completed_task():
loop = asyncio.get_event_loop()
old_exception_handler = loop.get_exception_handler()
exceptions = []
def exception_handler(*args, **kwargs):
exceptions.append((args, kwargs))
loop.set_exception_handler(exception_handler)
try:
async def noop():
pass
async with anyio.create_task_group() as tg:
tg.start_soon(noop)
tg.cancel_scope.cancel()
assert exceptions == []
finally:
loop.set_exception_handler(old_exception_handler)
async def test_task_in_sync_spawn_callback():
outer_task_id = anyio.get_current_task().id
inner_task_id = None
def task_wrap():
assert anyio.get_current_task().id == outer_task_id
async def corofn():
nonlocal inner_task_id
inner_task_id = anyio.get_current_task().id
return corofn()
async with create_task_group() as tg:
tg.start_soon(task_wrap)
assert inner_task_id is not None
assert inner_task_id != outer_task_id
async def test_shielded_cancel_sleep_time():
    """Test that cancelling a shielded task spends more time sleeping than cancelling."""
event = anyio.Event()
hang_time = 0.2
async def set_event():
await sleep(hang_time)
event.set()
async def never_cancel_task():
with CancelScope(shield=True):
await sleep(0.2)
await event.wait()
async with create_task_group() as tg:
tg.start_soon(set_event)
async with create_task_group() as tg:
tg.start_soon(never_cancel_task)
tg.cancel_scope.cancel()
process_time = time.process_time()
assert (time.process_time() - process_time) < hang_time
async def test_cancelscope_wrong_exit_order():
"""
Test that a RuntimeError is raised if the task tries to exit cancel scopes in the wrong order.
"""
scope1 = CancelScope()
scope2 = CancelScope()
scope1.__enter__()
scope2.__enter__()
pytest.raises(RuntimeError, scope1.__exit__, None, None, None)
async def test_cancelscope_exit_before_enter():
"""Test that a RuntimeError is raised if one tries to exit a cancel scope before entering."""
scope = CancelScope()
pytest.raises(RuntimeError, scope.__exit__, None, None, None)
@pytest.mark.parametrize('anyio_backend', ['asyncio']) # trio does not check for this yet
async def test_cancelscope_exit_in_wrong_task():
async def enter_scope(scope):
scope.__enter__()
async def exit_scope(scope):
scope.__exit__(None, None, None)
scope = CancelScope()
async with create_task_group() as tg:
tg.start_soon(enter_scope, scope)
with pytest.raises(RuntimeError):
async with create_task_group() as tg:
tg.start_soon(exit_scope, scope)
| 27.32018 | 99 | 0.653695 |
ebef1c101324744cadab3ce6c3e39ccb05c9efed | 1,934 | py | Python | Splunk_TA_paloalto/bin/Splunk_TA_paloalto_rh_cortex_xdr.py | moshekaplan/Splunk-Apps | fc95334aa2ee2209b221bd5b2a6a520ad9ecab4a | [
"0BSD"
] | 34 | 2016-03-25T08:09:05.000Z | 2020-07-23T05:04:16.000Z | Splunk_TA_paloalto/bin/Splunk_TA_paloalto_rh_cortex_xdr.py | moshekaplan/Splunk-Apps | fc95334aa2ee2209b221bd5b2a6a520ad9ecab4a | [
"0BSD"
] | 127 | 2020-08-07T21:56:58.000Z | 2022-03-30T18:24:53.000Z | Splunk_TA_paloalto/bin/Splunk_TA_paloalto_rh_cortex_xdr.py | moshekaplan/Splunk-Apps | fc95334aa2ee2209b221bd5b2a6a520ad9ecab4a | [
"0BSD"
] | 22 | 2016-03-26T09:39:19.000Z | 2020-07-27T21:17:55.000Z |
import splunk_ta_paloalto_declare
from splunktaucclib.rest_handler.endpoint import (
field,
validator,
RestModel,
DataInputModel,
)
from splunktaucclib.rest_handler import admin_external, util
from splunk_aoblib.rest_migration import ConfigMigrationHandler
util.remove_http_proxy_env_vars()
fields = [
field.RestField(
'interval',
required=True,
encrypted=False,
default=None,
validator=validator.Pattern(
regex=r"""^\-[1-9]\d*$|^\d*$""",
)
),
field.RestField(
'index',
required=True,
encrypted=False,
default='default',
validator=validator.String(
min_len=1,
max_len=80,
)
),
field.RestField(
'xdr_tenant',
required=True,
encrypted=False,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'xdr_region',
required=True,
encrypted=False,
default='us',
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'xdr_key_id',
required=True,
encrypted=True,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'xdr_key',
required=True,
encrypted=True,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'disabled',
required=False,
validator=None
)
]
model = RestModel(fields, name=None)
endpoint = DataInputModel(
'cortex_xdr',
model,
)
if __name__ == '__main__':
admin_external.handle(
endpoint,
handler=ConfigMigrationHandler,
)
| 19.535354 | 63 | 0.544467 |
430e28f567856a1115587b43a19d9f22689d789f | 752 | py | Python | Client/scraping.py | alejodiazg/DadaPoemGenerator | 798d6e4a80b3b79201e65e394f11748a5d25ea8a | [
"MIT"
] | null | null | null | Client/scraping.py | alejodiazg/DadaPoemGenerator | 798d6e4a80b3b79201e65e394f11748a5d25ea8a | [
"MIT"
] | null | null | null | Client/scraping.py | alejodiazg/DadaPoemGenerator | 798d6e4a80b3b79201e65e394f11748a5d25ea8a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from lxml import html
import requests
from random import shuffle
from re import search
print(" ")
print("***** INSTRUCCIONES PARA REALIZAR UN POEMA DADAÍSTA *****")
print("***** (Sin necesidad de periódicos o tijeras) *****")
print(" ")
url = raw_input("Introduzca el URL del artículo seleccionado: ")
page = requests.get(url)
tree = html.fromstring(page.content)
texto = tree.xpath('//*[@class="entry-content"]/p/text()')
text = []
for line in texto:
aux = line.split()
for word in aux:
word = word.encode('utf8')
objMatch = search(r'[\wÁÉÍÓÚÑáéíóúñ]+',word)
        if objMatch is not None:
text += [objMatch.group()]
shuffle(text)
shuffle(text)
f = open('prueba', 'w')
for word in text:
f.write(word)
f.write("\n")
| 24.258065 | 66 | 0.658245 |
5c1a11917529a867c7dfd9d48fa45816d5863d2a | 959 | py | Python | ML_and_DL/gpu_project.py | AshfakYeafi/AI_practice_code | 3d8a0b9382f5903e840ce59218ebb95ca962ab01 | [
"MIT"
] | null | null | null | ML_and_DL/gpu_project.py | AshfakYeafi/AI_practice_code | 3d8a0b9382f5903e840ce59218ebb95ca962ab01 | [
"MIT"
] | null | null | null | ML_and_DL/gpu_project.py | AshfakYeafi/AI_practice_code | 3d8a0b9382f5903e840ce59218ebb95ca962ab01 | [
"MIT"
] | null | null | null | import tensorflow as tf
import cv2
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
(x_train,y_train),(x_test,y_test)=tf.keras.datasets.cifar10.load_data()
classes=["airplane", "automobile", "bird","cat","deer","dog","frog","horse","ship","truck"]
x_train_scaled=x_train/255
x_test_scaled=x_test/255
print(x_train_scaled.shape)
y_train_catargorical=keras.utils.to_categorical(
y_train,num_classes=10
)
print(y_train_catargorical[0:5])
model=keras.Sequential([
keras.layers.Flatten(input_shape=(32,32,3)),
keras.layers.Dense(3000,activation="relu"),
keras.layers.Dense(1000,activation="relu"),
keras.layers.Dense(10,activation="sigmoid")
])
model.compile(optimizer="SGD",
loss="categorical_crossentropy",
metrics=['accuracy'])
model.fit(x_train_scaled,y_train_catargorical,epochs=50)
print(np.argmax(model.predict(x_test_scaled)[0]))
print(y_train[0][0])
| 18.09434 | 91 | 0.727842 |
7e938db1a1cb23f0ac8b5d897b9240c86e4f9287 | 955 | py | Python | stable_baselines3/common/recorder.py | offdroid/stable-baselines3 | 793bf44e11fe1e6735e8984add42442e5ab59d0f | [
"MIT"
] | null | null | null | stable_baselines3/common/recorder.py | offdroid/stable-baselines3 | 793bf44e11fe1e6735e8984add42442e5ab59d0f | [
"MIT"
] | null | null | null | stable_baselines3/common/recorder.py | offdroid/stable-baselines3 | 793bf44e11fe1e6735e8984add42442e5ab59d0f | [
"MIT"
] | null | null | null | from typing import List, Tuple
from enum import Enum
class ReplayMode(Enum):
"""No recording or replay"""
IGNORE = 1
"""Record the training buffer"""
RECORDING = 2
"""Replay a given buffer"""
REPLAYING = 3
class Recording:
def __init__(self, source: List) -> None:
self.pos = 0
self._history = source
def __iter__(self) -> "Recording":
self.pos = 0
return self
def __next__(self) -> Tuple:
        # Stop once every recorded entry has been yielded (pos == len(history)).
        if self.pos < len(self._history):
x = self._history[self.pos]
self.pos += 1
return x
else:
raise StopIteration
class Recorder:
def __init__(self) -> None:
self._recording = []
def append(self, new_obs, rewards, dones, infos, buffer_actions) -> None:
self._recording.append((new_obs, rewards, dones, infos, buffer_actions))
def freeze(self) -> Recording:
return Recording(self._recording)
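# Illustrative usage sketch (not part of the original module), assuming the
# classes above: capture the training buffer step by step, then freeze it and
# replay the recorded transitions in order (ReplayMode.REPLAYING).
#
#     recorder = Recorder()
#     recorder.append(new_obs, rewards, dones, infos, buffer_actions)  # once per collected step
#     recording = recorder.freeze()
#     for new_obs, rewards, dones, infos, buffer_actions in recording:
#         ...  # feed the recorded step to the consumer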
| 24.487179 | 80 | 0.597906 |
642cf8d3a51d63ae2db4a42fb7e03f804a6e2c42 | 571 | py | Python | qlib/contrib/online/__init__.py | lpd6375/qlib | 3a911bc09ba5136cd7c61c2c8dcca8a63339e738 | [
"MIT"
] | 1 | 2022-02-05T06:54:28.000Z | 2022-02-05T06:54:28.000Z | qlib/contrib/online/__init__.py | lpd6375/qlib | 3a911bc09ba5136cd7c61c2c8dcca8a63339e738 | [
"MIT"
] | null | null | null | qlib/contrib/online/__init__.py | lpd6375/qlib | 3a911bc09ba5136cd7c61c2c8dcca8a63339e738 | [
"MIT"
] | 1 | 2022-03-22T06:37:38.000Z | 2022-03-22T06:37:38.000Z | # pylint: skip-file
'''
TODO:
- Online requires that the model implement a method like the following:
    def get_data_with_date(self, date, **kwargs):
        """
        Will be called in the online module.
        Needs to return the data used to predict the label (score) of stocks at the given date.
:param
date: pd.Timestamp
predict date
:return:
            data: the input data used to predict the label (score) of stocks at the prediction date.
"""
raise NotImplementedError("get_data_with_date for this model is not implemented.")
'''
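# Hedged sketch (not part of the original module) of a model implementing the
# interface described in the TODO above; `self.dataset.prepare(...)` and the
# base class are hypothetical placeholders for however a concrete model loads
# its features:
#
#     class MyOnlineModel(SomeBaseModel):
#         def get_data_with_date(self, date, **kwargs):
#             # Return the feature data used to score stocks at `date`.
#             return self.dataset.prepare(segments=(date, date), **kwargs)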
| 27.190476 | 98 | 0.618214 |
8cb519dd73ef2509e17d878d0ec8f2d5b5f35250 | 7,086 | py | Python | test/functional/llmq-chainlocks.py | farsider350/AUTX-Core | 6d00d1e027a5a6dffb3b0815a155e4515ced007b | [
"MIT"
] | null | null | null | test/functional/llmq-chainlocks.py | farsider350/AUTX-Core | 6d00d1e027a5a6dffb3b0815a155e4515ced007b | [
"MIT"
] | null | null | null | test/functional/llmq-chainlocks.py | farsider350/AUTX-Core | 6d00d1e027a5a6dffb3b0815a155e4515ced007b | [
"MIT"
] | 1 | 2021-01-03T02:35:54.000Z | 2021-01-03T02:35:54.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The autx Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.mininode import *
from test_framework.test_framework import autxTestFramework
from test_framework.util import *
'''
llmq-chainlocks.py
Checks LLMQs based ChainLocks
'''
class LLMQChainLocksTest(autxTestFramework):
def set_test_params(self):
self.set_autx_test_params(6, 5, fast_dip3_enforcement=True)
def run_test(self):
self.log.info("Wait for dip0008 activation")
while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
self.nodes[0].generate(10)
sync_blocks(self.nodes, timeout=60*5)
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
self.log.info("Mining 4 quorums")
for i in range(4):
self.mine_quorum()
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
self.wait_for_sporks_same()
self.log.info("Mine single block, wait for chainlock")
self.nodes[0].generate(1)
self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
self.log.info("Mine many blocks, wait for chainlock")
self.nodes[0].generate(20)
# We need more time here due to 20 blocks being generated at once
self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30)
self.log.info("Assert that all blocks up until the tip are chainlocked")
for h in range(1, self.nodes[0].getblockcount()):
block = self.nodes[0].getblock(self.nodes[0].getblockhash(h))
assert(block['chainlock'])
self.log.info("Isolate node, mine on another, and reconnect")
isolate_node(self.nodes[0])
node0_mining_addr = self.nodes[0].getnewaddress()
node0_tip = self.nodes[0].getbestblockhash()
self.nodes[1].generatetoaddress(5, node0_mining_addr)
self.wait_for_chainlocked_block(self.nodes[1], self.nodes[1].getbestblockhash())
assert(self.nodes[0].getbestblockhash() == node0_tip)
reconnect_isolated_node(self.nodes[0], 1)
self.nodes[1].generatetoaddress(1, node0_mining_addr)
self.wait_for_chainlocked_block(self.nodes[0], self.nodes[1].getbestblockhash())
self.log.info("Isolate node, mine on both parts of the network, and reconnect")
isolate_node(self.nodes[0])
self.nodes[0].generate(5)
self.nodes[1].generatetoaddress(1, node0_mining_addr)
good_tip = self.nodes[1].getbestblockhash()
self.wait_for_chainlocked_block(self.nodes[1], good_tip)
assert(not self.nodes[0].getblock(self.nodes[0].getbestblockhash())["chainlock"])
reconnect_isolated_node(self.nodes[0], 1)
self.nodes[1].generatetoaddress(1, node0_mining_addr)
self.wait_for_chainlocked_block(self.nodes[0], self.nodes[1].getbestblockhash())
assert(self.nodes[0].getblock(self.nodes[0].getbestblockhash())["previousblockhash"] == good_tip)
assert(self.nodes[1].getblock(self.nodes[1].getbestblockhash())["previousblockhash"] == good_tip)
self.log.info("Keep node connected and let it try to reorg the chain")
good_tip = self.nodes[0].getbestblockhash()
self.log.info("Restart it so that it forgets all the chainlocks from the past")
self.stop_node(0)
self.start_node(0)
connect_nodes(self.nodes[0], 1)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Now try to reorg the chain")
self.nodes[0].generate(2)
time.sleep(6)
assert(self.nodes[1].getbestblockhash() == good_tip)
self.nodes[0].generate(2)
time.sleep(6)
assert(self.nodes[1].getbestblockhash() == good_tip)
self.log.info("Now let the node which is on the wrong chain reorg back to the locked chain")
self.nodes[0].reconsiderblock(good_tip)
assert(self.nodes[0].getbestblockhash() != good_tip)
self.nodes[1].generatetoaddress(1, node0_mining_addr)
self.wait_for_chainlocked_block(self.nodes[0], self.nodes[1].getbestblockhash())
assert(self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash())
self.log.info("Enable LLMQ bases InstantSend, which also enables checks for \"safe\" transactions")
self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
self.wait_for_sporks_same()
self.log.info("Isolate a node and let it create some transactions which won't get IS locked")
isolate_node(self.nodes[0])
txs = []
for i in range(3):
txs.append(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
txs += self.create_chained_txs(self.nodes[0], 1)
self.log.info("Assert that after block generation these TXs are NOT included (as they are \"unsafe\")")
self.nodes[0].generate(1)
for txid in txs:
tx = self.nodes[0].getrawtransaction(txid, 1)
assert("confirmations" not in tx)
time.sleep(1)
assert(not self.nodes[0].getblock(self.nodes[0].getbestblockhash())["chainlock"])
self.log.info("Disable LLMQ based InstantSend for a very short time (this never gets propagated to other nodes)")
self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 4070908800)
self.log.info("Now the TXs should be included")
self.nodes[0].generate(1)
self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
self.log.info("Assert that TXs got included now")
for txid in txs:
tx = self.nodes[0].getrawtransaction(txid, 1)
assert("confirmations" in tx and tx["confirmations"] > 0)
# Enable network on first node again, which will cause the blocks to propagate and IS locks to happen retroactively
# for the mined TXs, which will then allow the network to create a CLSIG
self.log.info("Reenable network on first node and wait for chainlock")
reconnect_isolated_node(self.nodes[0], 1)
self.wait_for_chainlocked_block(self.nodes[0], self.nodes[0].getbestblockhash(), timeout=30)
def create_chained_txs(self, node, amount):
txid = node.sendtoaddress(node.getnewaddress(), amount)
tx = node.getrawtransaction(txid, 1)
inputs = []
valueIn = 0
for txout in tx["vout"]:
inputs.append({"txid": txid, "vout": txout["n"]})
valueIn += txout["value"]
outputs = {
node.getnewaddress(): round(float(valueIn) - 0.0001, 6)
}
rawtx = node.createrawtransaction(inputs, outputs)
rawtx = node.signrawtransaction(rawtx)
rawtxid = node.sendrawtransaction(rawtx["hex"])
return [txid, rawtxid]
if __name__ == '__main__':
LLMQChainLocksTest().main()
| 45.716129 | 123 | 0.670336 |
8e5ee16141e43d4fa74f17f6c4f49a59d45501bc | 6,950 | py | Python | steps.py | undoingtech/Sticky-Steps | fdb3af0c4c7e0ccf3689204d298d0794c2f99dc6 | [
"MIT"
] | null | null | null | steps.py | undoingtech/Sticky-Steps | fdb3af0c4c7e0ccf3689204d298d0794c2f99dc6 | [
"MIT"
] | null | null | null | steps.py | undoingtech/Sticky-Steps | fdb3af0c4c7e0ccf3689204d298d0794c2f99dc6 | [
"MIT"
] | null | null | null | """ TODO: cosmetics
- get rid of the lines from html_label
"""
""" TODO: functionality
- new temporary input file (opens editor)
- save temporary input file (opens save dialog)
- text resizing / zoom
- remember text size / zoom on close
- open file from url
- copy button for codeblocks and code
- remember file and step on close
- remember window position and size on close
- color preferences
- add step before current step
- add step after current step
- add note to step
"""
"""TODO: bug fixes
"""
"""TODO: non-functionality
- Readme
- video link
- description of each file
- Help / tutorial default first open md file
- Example md instruction files
"""
from tkinter import *
from tkinter import filedialog
from tkinter import simpledialog
from tkinter import messagebox
from tkhtmlview import HTMLLabel
import markdown
import editor
# SOURCE: https://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python
import inspect, os.path
class Steps:
def __init__(self, file_location):
md_file = open(file_location)
md_text = md_file.read()
md_file.close()
html = markdown.markdown(md_text)
# variables that don't change after init
self.step_list = html.split("<hr />")
self.step_count = len(self.step_list)
self.file_location = file_location
# - blue #a9edf1 SOURCE: https://www.color-hex.com/color-palette/104537
# - yellow #f1f58f SOURCE: https://www.color-hex.com/color-palette/104537
# - purple #CB94FE - formerly #9985ff
# - pink #e095f9
self.colors = ["#f1f58f", "#a9edf1", "#CB94FE", "#e095f9"]
# variables that do change after init
self.number = 1
self.html = self.step_list[0]
self.color = self.colors[0]
def goto_step_number(self, step_number):
# if requested step number is invalid, return the current step
if step_number in range(1, self.step_count + 1):
self.number = step_number
self.html = self.step_list[step_number - 1]
self.color = self.get_step_color(step_number)
return self.html
def get_step_color(self, step_number):
color_index = step_number - 1
if step_number >= len(self.colors):
color_index = (step_number - 1) % len(self.colors)
return self.colors[color_index]
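# Hedged worked example for Steps.get_step_color above: with the four colours
# defined in __init__, step 1 -> colors[0], step 4 -> colors[3], and
# step 5 wraps around to colors[(5 - 1) % 4] = colors[0], and so on.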
class StickySteps:
root = Tk()
root.title("Sticky Steps")
widgets = dict()
width = 300
height = 200
y = 10
step = None
def __init__(self):
# make the sticky sized window appear in the top right corner
x = self.root.winfo_screenwidth() - self.width - 10
self.root.geometry("%dx%d+%d+%d" % (self.width, self.height, x, self.y))
# add gui elements
self.widgets["counter"] = Label(self.root, text = "")
self.widgets["counter"].pack()
self.widgets["html_label"] = HTMLLabel(self.root, html="")
self.widgets["html_label"].pack(fill="both", expand=True)
self.widgets["html_label"].fit_height()
self.widgets["bottomButtons"] = Frame(self.root)
self.widgets["bottomButtons"].pack(side = BOTTOM)
# make buttons to paginate through step list
self.widgets["prev_button"] = Button(self.widgets["bottomButtons"], text="<", command=self.prev_step)
self.widgets["prev_button"].grid(row = 0, column = 0)
self.widgets["open_button"] = Button(self.widgets["bottomButtons"], text="o", command=self.open_file)
self.widgets["open_button"].grid(row = 0, column = 1)
self.widgets["next_button"] = Button(self.widgets["bottomButtons"], text=">", command=self.next_step)
self.widgets["next_button"].grid(row = 0, column = 2)
self.root["background"] = "#f1f58f"
for widget in self.widgets:
#print("widget: %s - widget type: %s" % (widget, type(widget)))
self.widgets[widget].configure(bg="#f1f58f", bd=0, relief=FLAT)
# because html_label only picks up color after the configure for some reason
self.widgets["html_label"].set_html("")
self.root.bind("<h>", lambda e:self.help_message())
self.root.bind("<o>", lambda e:self.open_file())
self.root.bind("<e>", lambda e:self.edit_file())
self.root.bind("<Right>", lambda e:self.next_step())
self.root.bind("<Left>", lambda e:self.prev_step())
self.root.bind("<g>", lambda e:self.goto_step_number())
self.root.bind("<Control-q>", lambda e:self.root.destroy())
self.keybindings = dict()
self.keybindings["h"] = "Show keybindings"
self.keybindings["o"] = "Open local file"
self.keybindings["e"] = "Edit file"
self.keybindings["Right"] = "Go to next step"
self.keybindings["Left"] = "Go to previous step"
self.keybindings["g"] = "Go to step [number]"
self.keybindings["Control-q"] = "Quit"
def help_message(self):
# Oneliner SOURCE: https://stackoverflow.com/questions/44689546/how-to-print-out-a-dictionary-nicely-in-python
message = "\n".join("{}\t{}".format(k, v) for k, v in self.keybindings.items())
messagebox.showinfo("Key bindings", message)
def open_file(self, file_location=None):
if file_location is None:
file_location = filedialog.askopenfilename(filetypes=[("markdown files", "*.md")])
if type(file_location) is not str or file_location == "":
return
self.step = Steps(file_location)
self.widgets["html_label"].set_html(self.step.html)
self.update_counter()
def update_counter(self):
self.widgets["counter"].config(text = "%d / %d" % (self.step.number, self.step.step_count))
def update_color(self):
self.root["background"] = self.step.color
for widget in self.widgets:
#print("widget: %s - widget type: %s" % (widget, type(widget)))
self.widgets[widget].configure(bg=self.step.color)
def update_widgets(self):
self.update_counter()
self.update_color()
def goto_step_number(self):
if self.step is None:
return
step_number = simpledialog.askinteger("Input", "Go to step", parent=self.root)
html = self.step.goto_step_number(step_number)
# must set html after update widgets so html has same color
self.update_widgets()
self.widgets["html_label"].set_html(html)
def goto_step_increment(self, increment):
if self.step is None:
return
html = self.step.goto_step_number(self.step.number + increment)
# must set html after update widgets so html has same color
self.update_widgets()
self.widgets["html_label"].set_html(html)
def prev_step(self):
self.goto_step_increment(-1)
def next_step(self):
self.goto_step_increment(1)
def edit_file(self):
if self.step is None:
return
target_file = self.step.file_location
editor(filename=target_file)
self.open_file(target_file)
def run(self):
# SOURCE for getting file location: https://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
# SOURCE for joining path: https://stackoverflow.com/questions/7132861/build-the-full-path-filename-in-python
test_file = os.path.join(path, "test.md")
self.open_file(test_file)
self.root.mainloop()
stickysteps = StickySteps()
stickysteps.run() | 33.253589 | 142 | 0.71554 |
87ece73966885a4b54b7c9a3af968b9f263fb60d | 620 | py | Python | pyvac/tests/mocks/celery.py | sayoun/pyvac | 45ade8de2f29864d500e0358e38ebcbd2674a06d | [
"BSD-3-Clause"
] | 21 | 2015-11-19T17:36:46.000Z | 2021-07-02T15:48:21.000Z | pyvac/tests/mocks/celery.py | sayoun/pyvac | 45ade8de2f29864d500e0358e38ebcbd2674a06d | [
"BSD-3-Clause"
] | 28 | 2015-07-03T07:54:48.000Z | 2022-03-21T22:16:23.000Z | pyvac/tests/mocks/celery.py | sayoun/pyvac | 45ade8de2f29864d500e0358e38ebcbd2674a06d | [
"BSD-3-Clause"
] | 13 | 2015-07-03T07:30:04.000Z | 2020-07-03T15:22:51.000Z | # -*- coding: utf-8 -*-
""" Mock classses for Celery subtask method and Task class. """
def subtask(task):
return task
class DummyTask(object):
_ids = {
'worker_approved': 10,
'worker_accepted': 20,
'worker_denied': 30,
}
def __init__(self, task=None):
self.task = task
@property
def task_id(self):
return self._ids[self.task]
@property
def name(self):
return self.task
def delay(self, **kwargs):
return self
def apply_async(self, **kwargs):
return self
def send(self, **kwargs):
return True
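# Hedged usage sketch (not part of the original mock; the worker name below is
# one of the keys in DummyTask._ids). In a test, code that normally builds a
# celery subtask can be pointed at these stand-ins instead:
def _example_usage():
    task = DummyTask('worker_approved')
    assert subtask(task) is task        # subtask() simply hands the task back
    assert task.delay().task_id == 10   # delay()/apply_async() return the mock itself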
| 16.756757 | 63 | 0.570968 |
6acba325333dfee5254429a311e85022dc800296 | 7,075 | py | Python | ucsb/repository/asset_repository.py | jasunchen/agmonitor_backend | 2eea26732dea09080af2a7e700c24ef7f9300f2c | [
"MIT"
] | null | null | null | ucsb/repository/asset_repository.py | jasunchen/agmonitor_backend | 2eea26732dea09080af2a7e700c24ef7f9300f2c | [
"MIT"
] | null | null | null | ucsb/repository/asset_repository.py | jasunchen/agmonitor_backend | 2eea26732dea09080af2a7e700c24ef7f9300f2c | [
"MIT"
] | null | null | null | from ucsb.models import user_asset, user
from rest_framework.response import Response
from rest_framework.decorators import api_view
from ucsb.repository.asset_data_repository import delete_asset_data_helper
from django.forms.models import model_to_dict
from ucsb.repository.helpers import *
@api_view(['POST'])
def add_asset(request):
params = ["email", "name", "description", "type_of_asset"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
email = request.data.get('email')
tmp_user = user(user_email=email)
name = request.data.get('name')
desc = request.data.get('description')
if request.data.get("type_of_asset", None) == "generation":
params = ["declination", "azimuth", "modules_power"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
declination = request.data.get('declination')
azimuth = request.data.get('azimuth')
modules_power = request.data.get('modules_power')
asset = user_asset(user=tmp_user, asset_name=name, description=desc, declination=declination, azimuth=azimuth, modules_power=modules_power, type_of_asset="generation")
asset.save()
elif request.data.get("type_of_asset", None) == "flexible":
params = ["start_charge_time", "end_charge_time", "demand", "duration"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
start_time = request.data.get('start_charge_time')
end_time = request.data.get('end_charge_time')
dmd = request.data.get('demand')
dur = request.data.get('duration')
asset = user_asset(user=tmp_user, asset_name=name, description=desc, start_charge_time=start_time, end_charge_time=end_time, type_of_asset="flexible",demand=dmd, duration=dur)
asset.save()
else:
asset = user_asset(user=tmp_user, asset_name=name, description=desc)
asset.save()
return Response({"detail":"Asset created successfully"})
@api_view(['POST'])
def update_asset(request):
params = ["id", "name", "description", "type_of_asset"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
id = request.data.get('id')
name = request.data.get('name')
desc = request.data.get('description')
try:
asset = user_asset.objects.get(id=id)
except:
return Response({"detail":"Asset does not exist"}, status=400)
asset.asset_name = name
asset.description = desc
if asset.type_of_asset == "generation":
params = ["declination", "azimuth", "modules_power"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
declination = request.data.get('declination')
azimuth = request.data.get('azimuth')
modules_power = request.data.get('modules_power')
asset.declination = declination
asset.azimuth = azimuth
asset.modules_power = modules_power
elif asset.type_of_asset == "flexible":
params = ["start_charge_time", "end_charge_time", "demand", "duration"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
start_time = request.data.get('start_charge_time')
end_time = request.data.get('end_charge_time')
dmd = request.data.get('demand')
dur = request.data.get('duration')
asset.start_charge_time = start_time
asset.end_charge_time = end_time
asset.demand = dmd
asset.duration = dur
asset.save()
return Response({"detail":"Asset updated successfully"})
@api_view(['DELETE'])
def delete_asset(request):
params = ["id"]
#Check for Required Fields
for p in params:
if request.data.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.data, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
id = request.data.get('id')
try:
asset = user_asset.objects.get(id=id)
except:
return Response({"detail":"Asset does not exist"}, status=400)
delete_asset_data_helper(id)
user_asset.objects.filter(id=id).delete()
return Response({"detail": "Asset deleted successfully"})
@api_view(['GET'])
def get_all_assets(request):
params = ["email"]
#Check for Required Fields
for p in params:
if request.query_params.get(p, None) == None:
return Response(
{"message": "Missing Required Parameters: {}".format(p)},
status = 400)
#Check for Invalid Parameters
if verify(request.query_params, params):
return Response(
{"message": "Request has invalid parameter not in {}".format(params)},
status = 400)
email = request.query_params.get('email')
try:
tmp_user = user.objects.get(user_email=email)
except:
return Response({"detail": "Error: User does not exist"}, status=400)
bases = user_asset.objects.filter(user=tmp_user, type_of_asset='base').values('id', 'asset_name', 'description')
generations = user_asset.objects.filter(user=tmp_user, type_of_asset='generation').values('id', 'asset_name', 'description', 'declination', 'azimuth', 'modules_power')
    flexible_assets = user_asset.objects.filter(user=tmp_user, type_of_asset='flexible').values('id', 'asset_name', 'description', 'start_charge_time', 'end_charge_time', 'duration', 'demand')
    result = {"base": bases, "generation": generations, "flexible": flexible_assets}
return Response(result)
@api_view(['GET'])
def get_single_asset(request):
id = request.query_params.get('id')
try:
asset = user_asset.objects.get(id=id)
except:
return Response({"detail":"Asset does not exist"}, status=400)
return Response(model_to_dict(asset)) | 38.243243 | 192 | 0.619788 |
a574e9c80a438acde205cffa5ed7393322181f4a | 1,839 | py | Python | src/tic_tac_toe_game/__main__.py | alexistli/tic-tac-toe-game | f6689d5ea722c3ea52c06c8a3433cf052543297f | [
"MIT"
] | 1 | 2021-12-11T09:10:33.000Z | 2021-12-11T09:10:33.000Z | src/tic_tac_toe_game/__main__.py | alexistli/tic-tac-toe-game | f6689d5ea722c3ea52c06c8a3433cf052543297f | [
"MIT"
] | 39 | 2021-11-01T23:18:43.000Z | 2022-03-31T23:27:55.000Z | src/tic_tac_toe_game/__main__.py | alexistli/tic-tac-toe-game | f6689d5ea722c3ea52c06c8a3433cf052543297f | [
"MIT"
] | null | null | null | """Command-line interface."""
import click
from tic_tac_toe_game import engine
@click.command()
@click.version_option()
def main() -> None:
"""Tic Tac Toe Game."""
click.secho("hello", fg="green")
if click.confirm("Do you want to play a game?", abort=True):
click.echo("Let's play a game...")
# player_1_mark = click.prompt(
# "Player 1, please pick your mark",
# type=click.Choice(["X", "O"], case_sensitive=False),
# default="X",
# )
#
# player_2_type = click.prompt(
# "Player 2, are you Human (H) or a Bot (B)?",
# type=click.Choice(["H", "B"], case_sensitive=False),
# default="B",
# )
game = engine.build_game()
# game = engine.Engine(player_1_mark, player_2_type)
# game.board = Board()
# TODO: Upgrade with any possible strategy
# game.players_match.update_ai_algorithm(move)
finished = False
while not finished:
player = game.players_match.current()
print("\n\n")
print(f"{player.name}, it is your turn!")
print("Current grid: \n")
print(f"{game.board.framed_grid()}\n")
if isinstance(player, engine.HumanPlayer):
played_cell = click.prompt(
"Please pick a cell xy", type=click.Tuple([int, int])
)
else:
played_cell = game.get_move()
game.board.make_move(coord=played_cell, value=player.get_mark())
if game.board.is_winning_move(played_cell, player.get_mark()):
print(f"Player {player.name} won!")
finished = True
elif game.board.is_full():
print("Players tied!")
finished = True
else:
game.players_match.switch()
if __name__ == "__main__":
main(prog_name="tic-tac-toe-game") # pragma: no cover
| 28.292308 | 72 | 0.584013 |
3359fef0e98cc83f7723cb3fe70bb7cba5b8a9ab | 1,749 | py | Python | movie.py | Roger-tn-su/javbus_crawler | ebc163a778df2210f8a01749f84057f8fdcb9229 | [
"MIT"
] | null | null | null | movie.py | Roger-tn-su/javbus_crawler | ebc163a778df2210f8a01749f84057f8fdcb9229 | [
"MIT"
] | null | null | null | movie.py | Roger-tn-su/javbus_crawler | ebc163a778df2210f8a01749f84057f8fdcb9229 | [
"MIT"
] | 1 | 2021-10-30T18:30:56.000Z | 2021-10-30T18:30:56.000Z |
class Movie:
""" class for each movie in javbus website"""
def __init__(self, avNum, title, coverImgUrl, date):
self._avNum = avNum
self._title = title
self._cover_img = coverImgUrl
self._release_date = date
@property
def avNum(self):
return self._avNum
@property
def title(self):
return self._title
@property
def cover_img(self):
return self._cover_img
@property
def release_date(self):
return self._release_date
def __repr__(self):
return '{} {} {} {}'.format(self._avNum, self._title, self._cover_img, self._release_date)
class Link:
""" Class for magnet link"""
def __init__(self, avNum, magnet, size):
self._avNum = avNum
self._magnet = magnet
self._size = size
@property
def size(self):
return self._size
@property
def magnet(self):
return self._magnet
@property
def av_num(self):
return self._avNum
def __repr__(self):
return '{} {} {}'.format(self._avNum, self._magnet, self._size)
class Counter:
""" Class for counter"""
def __init__(self):
self._parsing_time = 0
self._page_skip = 0
self._movie_skip = 0
@property
def parsing_time(self):
return self._parsing_time
@property
def page_skip(self):
return self._page_skip
@property
def movie_skip(self):
return self._movie_skip
def reset_movie_skip(self):
self._movie_skip = 0
def increment_movie_skip(self):
self._movie_skip += 1
def increment_page_skip(self):
self._page_skip += 1
def increment_parse(self):
self._parsing_time += 1
| 20.576471 | 98 | 0.608348 |
fa64d027993ad0602ee242b438a5dab7c4abc5aa | 2,767 | py | Python | setup.py | kadeve/assistant-sdk-python | d8f6143f03851a0c2f236e12c2f1051f8f77178c | [
"Apache-2.0"
] | null | null | null | setup.py | kadeve/assistant-sdk-python | d8f6143f03851a0c2f236e12c2f1051f8f77178c | [
"Apache-2.0"
] | null | null | null | setup.py | kadeve/assistant-sdk-python | d8f6143f03851a0c2f236e12c2f1051f8f77178c | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import io
install_requires = [
'googleapis-common-protos==1.5.2',
'grpcio==1.2.1',
]
auth_helpers_requires = [
'google-auth-oauthlib==0.0.1',
'urllib3[secure]==1.20',
]
audio_helpers_requires = [
'sounddevice==0.3.7',
]
samples_requires = [
'click==6.7',
'tenacity==4.1.0',
] + auth_helpers_requires + audio_helpers_requires
with io.open('README.rst', 'r') as fh:
long_description = fh.read()
setup(
name='google-assistant-sdk',
version='0.2.1',
author='Google Assistant SDK team',
author_email='proppy+assistant-sdk@google.com',
description='Samples and bindings for the Google Assistant API',
long_description=long_description,
url='https://github.com/googlesamples/assistant-sdk-python',
packages=find_packages(exclude=['tests*']),
namespace_packages=[
'google',
'google.assistant',
'google.assistant.embedded',
'googlesamples',
],
install_requires=install_requires,
extras_require={
'samples': samples_requires,
'auth_helpers': auth_helpers_requires,
'audio_helpers': audio_helpers_requires,
},
entry_points={
'console_scripts': [
'googlesamples-assistant'
'=googlesamples.assistant.__main__:main [samples]',
'googlesamples-assistant-auth'
'=googlesamples.assistant.auth_helpers.__main__:main [samples]',
],
},
license='Apache 2.0',
keywords='google assistant api sdk sample',
classifiers=(
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
),
)
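# Hedged usage note (not part of the original file): the extras_require block
# above lets optional dependency groups be installed on demand, e.g.
#   pip install google-assistant-sdk[samples]
#   pip install google-assistant-sdk[auth_helpers,audio_helpers]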
| 31.804598 | 76 | 0.653415 |
621879993257357bd94ae2ab02c4f42840705bd9 | 6,686 | py | Python | funsor/joint.py | bogiebro/funsor | c15eaf7019e34c647630ed3da89001e620a972fa | [
"Apache-2.0"
] | null | null | null | funsor/joint.py | bogiebro/funsor | c15eaf7019e34c647630ed3da89001e620a972fa | [
"Apache-2.0"
] | null | null | null | funsor/joint.py | bogiebro/funsor | c15eaf7019e34c647630ed3da89001e620a972fa | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
from collections import OrderedDict
from functools import reduce
from typing import Tuple, Union
from multipledispatch import dispatch
from multipledispatch.variadic import Variadic
import funsor.ops as ops
from funsor.cnf import Contraction, GaussianMixture
from funsor.delta import Delta
from funsor.domains import bint
from funsor.gaussian import Gaussian, align_gaussian
from funsor.ops import AssociativeOp
from funsor.tensor import Tensor, align_tensor
from funsor.terms import Funsor, Independent, Number, Reduce, Unary, eager, moment_matching, normalize
@dispatch(str, str, Variadic[(Gaussian, GaussianMixture)])
def eager_cat_homogeneous(name, part_name, *parts):
assert parts
output = parts[0].output
inputs = OrderedDict([(part_name, None)])
for part in parts:
assert part.output == output
assert part_name in part.inputs
inputs.update(part.inputs)
int_inputs = OrderedDict((k, v) for k, v in inputs.items() if v.dtype != "real")
real_inputs = OrderedDict((k, v) for k, v in inputs.items() if v.dtype == "real")
inputs = int_inputs.copy()
inputs.update(real_inputs)
discretes = []
info_vecs = []
precisions = []
for part in parts:
inputs[part_name] = part.inputs[part_name]
int_inputs[part_name] = inputs[part_name]
shape = tuple(d.size for d in int_inputs.values())
if isinstance(part, Gaussian):
discrete = None
gaussian = part
elif issubclass(type(part), GaussianMixture): # TODO figure out why isinstance isn't working
discrete, gaussian = part.terms[0], part.terms[1]
discrete = ops.expand(align_tensor(int_inputs, discrete), shape)
else:
raise NotImplementedError("TODO")
discretes.append(discrete)
info_vec, precision = align_gaussian(inputs, gaussian)
info_vecs.append(ops.expand(info_vec, shape + (-1,)))
precisions.append(ops.expand(precision, shape + (-1, -1)))
if part_name != name:
del inputs[part_name]
del int_inputs[part_name]
dim = 0
info_vec = ops.cat(dim, *info_vecs)
precision = ops.cat(dim, *precisions)
inputs[name] = bint(info_vec.shape[dim])
int_inputs[name] = inputs[name]
result = Gaussian(info_vec, precision, inputs)
if any(d is not None for d in discretes):
for i, d in enumerate(discretes):
if d is None:
discretes[i] = ops.new_zeros(info_vecs[i], info_vecs[i].shape[:-1])
discrete = ops.cat(dim, *discretes)
result = result + Tensor(discrete, int_inputs)
return result
#################################
# patterns for moment-matching
#################################
@moment_matching.register(Contraction, AssociativeOp, AssociativeOp, frozenset, Variadic[object])
def moment_matching_contract_default(*args):
return None
@moment_matching.register(Contraction, ops.LogAddExpOp, ops.AddOp, frozenset, (Number, Tensor), Gaussian)
def moment_matching_contract_joint(red_op, bin_op, reduced_vars, discrete, gaussian):
approx_vars = frozenset(k for k in reduced_vars if k in gaussian.inputs
and gaussian.inputs[k].dtype != 'real')
exact_vars = reduced_vars - approx_vars
if exact_vars and approx_vars:
return Contraction(red_op, bin_op, exact_vars, discrete, gaussian).reduce(red_op, approx_vars)
if approx_vars and not exact_vars:
discrete += gaussian.log_normalizer
        new_discrete = discrete.reduce(ops.logaddexp, approx_vars.intersection(discrete.inputs))
num_elements = reduce(ops.mul, [
gaussian.inputs[k].num_elements for k in approx_vars.difference(discrete.inputs)], 1)
if num_elements != 1:
new_discrete -= math.log(num_elements)
int_inputs = OrderedDict((k, d) for k, d in gaussian.inputs.items() if d.dtype != 'real')
probs = (discrete - new_discrete.clamp_finite()).exp()
old_loc = Tensor(ops.cholesky_solve(ops.unsqueeze(gaussian.info_vec, -1), gaussian._precision_chol).squeeze(-1),
int_inputs)
new_loc = (probs * old_loc).reduce(ops.add, approx_vars)
old_cov = Tensor(ops.cholesky_inverse(gaussian._precision_chol), int_inputs)
diff = old_loc - new_loc
outers = Tensor(ops.unsqueeze(diff.data, -1) * ops.unsqueeze(diff.data, -2), diff.inputs)
new_cov = ((probs * old_cov).reduce(ops.add, approx_vars) +
(probs * outers).reduce(ops.add, approx_vars))
# Numerically stabilize by adding bogus precision to empty components.
total = probs.reduce(ops.add, approx_vars)
mask = ops.unsqueeze(ops.unsqueeze((total.data == 0), -1), -1)
new_cov.data = new_cov.data + mask * ops.new_eye(new_cov.data, new_cov.data.shape[-1:])
new_precision = Tensor(ops.cholesky_inverse(ops.cholesky(new_cov.data)), new_cov.inputs)
new_info_vec = (new_precision.data @ ops.unsqueeze(new_loc.data, -1)).squeeze(-1)
new_inputs = new_loc.inputs.copy()
new_inputs.update((k, d) for k, d in gaussian.inputs.items() if d.dtype == 'real')
new_gaussian = Gaussian(new_info_vec, new_precision.data, new_inputs)
new_discrete -= new_gaussian.log_normalizer
return new_discrete + new_gaussian
return None
####################################################
# Patterns for normalizing
####################################################
@eager.register(Reduce, ops.AddOp, Unary[ops.ExpOp, Funsor], frozenset)
def eager_reduce_exp(op, arg, reduced_vars):
# x.exp().reduce(ops.add) == x.reduce(ops.logaddexp).exp()
log_result = arg.arg.reduce(ops.logaddexp, reduced_vars)
if log_result is not normalize(Reduce, ops.logaddexp, arg.arg, reduced_vars):
return log_result.exp()
return None
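# Hedged numeric illustration (not used by funsor itself) of the identity the
# pattern above relies on: sum_i exp(x_i) == exp(logsumexp(x)).
def _logsumexp_identity_example():
    xs = [0.1, -2.0, 1.5]
    lse = math.log(sum(math.exp(x) for x in xs))
    assert abs(sum(math.exp(x) for x in xs) - math.exp(lse)) < 1e-12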
@eager.register(Independent,
(Contraction[ops.NullOp, ops.AddOp, frozenset, Tuple[Delta, Union[Number, Tensor], Gaussian]],
Contraction[ops.NullOp, ops.AddOp, frozenset, Tuple[Delta, Union[Number, Tensor, Gaussian]]]),
str, str, str)
def eager_independent_joint(joint, reals_var, bint_var, diag_var):
if diag_var not in joint.terms[0].fresh:
return None
delta = Independent(joint.terms[0], reals_var, bint_var, diag_var)
new_terms = (delta,) + tuple(t.reduce(ops.add, bint_var) for t in joint.terms[1:])
return reduce(joint.bin_op, new_terms)
| 42.858974 | 120 | 0.667963 |
b5cca9bc3246e5b87c5752b1652216acb8c65c7b | 216 | py | Python | yabul/__init__.py | timodonnell/yabul | d2ad2fbf934b375f9ef2b6f2d9c2ab9c157260d2 | [
"Apache-2.0"
] | 2 | 2021-03-01T20:09:20.000Z | 2021-03-02T05:52:34.000Z | yabul/__init__.py | timodonnell/yabul | d2ad2fbf934b375f9ef2b6f2d9c2ab9c157260d2 | [
"Apache-2.0"
] | null | null | null | yabul/__init__.py | timodonnell/yabul | d2ad2fbf934b375f9ef2b6f2d9c2ab9c157260d2 | [
"Apache-2.0"
] | null | null | null | """
Yet Another Bioinformatics Utility Library
"""
__version__ = "0.0.3"
from .fasta import read_fasta, write_fasta
from .align import align_pair
__all__ = [
"read_fasta",
"write_fasta",
"align_pair",
]
| 16.615385 | 42 | 0.699074 |
596289ba8a1eb9f05ef70c8af8d9347678a59cb9 | 1,871 | py | Python | src/preprocess/displace_matrix.py | MelvinYin/Defined_Proteins | 75da20be82a47d85d27176db29580ab87d52b670 | [
"BSD-3-Clause"
] | 2 | 2021-01-05T02:55:57.000Z | 2021-04-16T15:49:08.000Z | src/preprocess/displace_matrix.py | MelvinYin/Defined_Proteins | 75da20be82a47d85d27176db29580ab87d52b670 | [
"BSD-3-Clause"
] | null | null | null | src/preprocess/displace_matrix.py | MelvinYin/Defined_Proteins | 75da20be82a47d85d27176db29580ab87d52b670 | [
"BSD-3-Clause"
] | 1 | 2021-01-05T08:12:38.000Z | 2021-01-05T08:12:38.000Z | """
Displace a matrix file to the left or right, filling remaining spots with the
composition frequencies.
"""
import matplotlib.pyplot as plt
import os
from config import paths
from utils import seq_logo
from preprocess import crop_matrix
def displace(displacement, matrix_file, output_file,
composition_file=paths.COMPOSITION):
matrix = []
with open(matrix_file, 'r') as file:
for line in file:
matrix.append((line.strip().split(" ")))
composition = []
with open(composition_file, 'r') as file:
for line in file:
split_line = line.strip().split(" ")
if len(split_line) != 2:
continue
composition.append(split_line[1])
output_matrix = []
assert len(composition) == len(matrix[0])
if displacement > 0:
# shift right
for i in range(displacement):
output_matrix.append(composition)
for i in range(len(matrix) - displacement):
output_matrix.append(matrix[i])
else:
# shift left
displacement *= -1
for i in range(displacement, len(matrix)):
output_matrix.append(matrix[i])
for i in range(displacement):
output_matrix.append(composition)
assert len(output_matrix) == len(matrix)
with open(output_file, 'w') as file:
for line in output_matrix:
file.write(" ".join(line) + "\n")
def test_displace():
direct_from_nbdb = os.path.join(paths.USER_INPUT, "GxxGxG_pssm.txt")
cropped = os.path.join(paths.USER_INPUT, "GxxGxG_pssm_cropped.txt")
test_output = os.path.join(paths.TEST, 'test.txt')
crop_matrix.crop(direct_from_nbdb, cropped)
displace(-2, cropped, test_output, paths.COMPOSITION)
seq_logo.build_logo_nbdb(test_output)
os.remove(cropped)
os.remove(test_output)
plt.show()
| 31.183333 | 77 | 0.644041 |
db91d1a194430276df4a1258b130857333228a1e | 5,715 | py | Python | test_net.py | R2D2oid/slowfast_feature_extractor | a5f2f0bdeee964ffd3b5e5950d84c22b93ab8467 | [
"MIT"
] | 63 | 2019-11-08T14:03:26.000Z | 2022-03-06T05:28:40.000Z | test_net.py | xujinglin/slowfast_feature_extractor | be219c1dbf24511a189e458cc8d4ab5c0292a335 | [
"MIT"
] | 15 | 2019-11-11T09:16:30.000Z | 2022-03-08T14:56:21.000Z | test_net.py | xujinglin/slowfast_feature_extractor | be219c1dbf24511a189e458cc8d4ab5c0292a335 | [
"MIT"
] | 17 | 2019-11-11T07:38:59.000Z | 2021-11-19T19:29:09.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Modified to process a list of videos
"""Extract features for videos using pre-trained networks"""
import numpy as np
import torch
import os
import time
from tqdm import tqdm
import av
from moviepy.video.io.VideoFileClip import VideoFileClip
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.misc as misc
from models import build_model
from datasets import VideoSet
logger = logging.get_logger(__name__)
def calculate_time_taken(start_time, end_time):
hours = int((end_time - start_time) / 3600)
minutes = int((end_time - start_time) / 60) - (hours * 60)
seconds = int((end_time - start_time) % 60)
return hours, minutes, seconds
@torch.no_grad()
def perform_inference(test_loader, model, cfg):
"""
    Perform multi-view testing that samples a segment of frames from a video
and extract features from a pre-trained model.
Args:
test_loader (loader): video testing loader.
model (model): the pretrained video model to test.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Enable eval mode.
model.eval()
feat_arr = None
for inputs in tqdm(test_loader):
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
# Perform the forward pass.
preds, feat = model(inputs)
# Gather all the predictions across all the devices to perform ensemble.
if cfg.NUM_GPUS > 1:
preds, feat = du.all_gather([preds, feat])
feat = feat.cpu().numpy()
if feat_arr is None:
feat_arr = feat
else:
feat_arr = np.concatenate((feat_arr, feat), axis=0)
return feat_arr
def test(cfg):
"""
Perform multi-view testing/feature extraction on the pretrained video model.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Print config.
logger.info("Test with config:")
logger.info(cfg)
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=False)
cu.load_test_checkpoint(cfg, model)
vid_root = os.path.join(cfg.DATA.PATH_TO_DATA_DIR, cfg.DATA.PATH_PREFIX)
videos_list_file = os.path.join(cfg.DATA.PATH_TO_DATA_DIR, "vid_list.csv")
print("Loading Video List ...")
with open(videos_list_file) as f:
videos = sorted([x.strip() for x in f.readlines() if len(x.strip()) > 0])
print("Done")
print("----------------------------------------------------------")
if cfg.DATA.READ_VID_FILE:
rejected_vids = []
print("{} videos to be processed...".format(len(videos)))
print("----------------------------------------------------------")
start_time = time.time()
for vid_no, vid in enumerate(videos):
# Create video testing loaders.
path_to_vid = os.path.join(vid_root, os.path.split(vid)[0])
vid_id = os.path.split(vid)[1]
if cfg.DATA.READ_VID_FILE:
try:
_ = VideoFileClip(
os.path.join(path_to_vid, vid_id) + cfg.DATA.VID_FILE_EXT,
audio=False,
fps_source="fps",
)
except Exception as e:
print("{}. {} cannot be read with error {}".format(vid_no, vid, e))
print("----------------------------------------------------------")
rejected_vids.append(vid)
continue
out_path = os.path.join(cfg.OUTPUT_DIR, os.path.split(vid)[0])
out_file = vid_id.split(".")[0] + "_{}.npy".format(cfg.DATA.NUM_FRAMES)
if os.path.exists(os.path.join(out_path, out_file)):
print("{}. {} already exists".format(vid_no, out_file))
print("----------------------------------------------------------")
continue
print("{}. Processing {}...".format(vid_no, vid))
dataset = VideoSet(
cfg, path_to_vid, vid_id, read_vid_file=cfg.DATA.READ_VID_FILE
)
test_loader = torch.utils.data.DataLoader(
dataset,
batch_size=cfg.TEST.BATCH_SIZE,
shuffle=False,
sampler=None,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False,
)
# Perform multi-view test on the entire dataset.
feat_arr = perform_inference(test_loader, model, cfg)
os.makedirs(out_path, exist_ok=True)
np.save(os.path.join(out_path, out_file), feat_arr)
print("Done.")
print("----------------------------------------------------------")
if cfg.DATA.READ_VID_FILE:
print("Rejected Videos: {}".format(rejected_vids))
end_time = time.time()
hours, minutes, seconds = calculate_time_taken(start_time, end_time)
print(
"Time taken: {} hour(s), {} minute(s) and {} second(s)".format(
hours, minutes, seconds
)
)
print("----------------------------------------------------------")
| 32.657143 | 83 | 0.584427 |
3c3513a3ef5992c12d88cee0ae7da54721bec955 | 7,841 | py | Python | train.py | ruyueshi/crnn.pytorch-1 | 2272d4defc328c033a889b9ca3279795f268b52d | [
"MIT"
] | 2,122 | 2017-03-23T02:43:38.000Z | 2022-03-30T22:48:43.000Z | train.py | Salamit/crnn.pytorch | d3a47f91691b31b8d5336e2ed5932e6cf65142f0 | [
"MIT"
] | 242 | 2017-04-02T07:05:24.000Z | 2022-03-15T08:35:22.000Z | train.py | Salamit/crnn.pytorch | d3a47f91691b31b8d5336e2ed5932e6cf65142f0 | [
"MIT"
] | 646 | 2017-03-04T15:15:40.000Z | 2022-03-30T02:32:48.000Z | from __future__ import print_function
from __future__ import division
import argparse
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
from warpctc_pytorch import CTCLoss
import os
import utils
import dataset
import models.crnn as crnn
parser = argparse.ArgumentParser()
parser.add_argument('--trainRoot', required=True, help='path to dataset')
parser.add_argument('--valRoot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image to network')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image to network')
parser.add_argument('--nh', type=int, default=256, help='size of the lstm hidden state')
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
# TODO(meijieru): epoch -> iter
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--pretrained', default='', help="path to pretrained model (to continue training)")
parser.add_argument('--alphabet', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz')
parser.add_argument('--expr_dir', default='expr', help='Where to store samples and models')
parser.add_argument('--displayInterval', type=int, default=500, help='Interval to be displayed')
parser.add_argument('--n_test_disp', type=int, default=10, help='Number of samples to display when test')
parser.add_argument('--valInterval', type=int, default=500, help='Interval to be displayed')
parser.add_argument('--saveInterval', type=int, default=500, help='Interval to be displayed')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate for Critic, not used by adadealta')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--adadelta', action='store_true', help='Whether to use adadelta (default is rmsprop)')
parser.add_argument('--keep_ratio', action='store_true', help='whether to keep ratio for image resize')
parser.add_argument('--manualSeed', type=int, default=1234, help='reproduce experiemnt')
parser.add_argument('--random_sample', action='store_true', help='whether to sample the dataset with random sampler')
opt = parser.parse_args()
print(opt)
if not os.path.exists(opt.expr_dir):
os.makedirs(opt.expr_dir)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
train_dataset = dataset.lmdbDataset(root=opt.trainRoot)
assert train_dataset
if not opt.random_sample:
sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batchSize,
    shuffle=(sampler is None), sampler=sampler,  # shuffle and sampler are mutually exclusive
num_workers=int(opt.workers),
collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))
test_dataset = dataset.lmdbDataset(
    root=opt.valRoot, transform=dataset.resizeNormalize((100, 32)))
nclass = len(opt.alphabet) + 1
nc = 1
converter = utils.strLabelConverter(opt.alphabet)
criterion = CTCLoss()
# custom weights initialization called on crnn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
crnn = crnn.CRNN(opt.imgH, nc, nclass, opt.nh)
crnn.apply(weights_init)
if opt.pretrained != '':
print('loading pretrained model from %s' % opt.pretrained)
crnn.load_state_dict(torch.load(opt.pretrained))
print(crnn)
image = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)
text = torch.IntTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)
if opt.cuda:
crnn.cuda()
crnn = torch.nn.DataParallel(crnn, device_ids=range(opt.ngpu))
image = image.cuda()
criterion = criterion.cuda()
image = Variable(image)
text = Variable(text)
length = Variable(length)
# loss averager
loss_avg = utils.averager()
# setup optimizer
if opt.adam:
optimizer = optim.Adam(crnn.parameters(), lr=opt.lr,
betas=(opt.beta1, 0.999))
elif opt.adadelta:
optimizer = optim.Adadelta(crnn.parameters())
else:
optimizer = optim.RMSprop(crnn.parameters(), lr=opt.lr)
def val(net, dataset, criterion, max_iter=100):
print('Start val')
for p in crnn.parameters():
p.requires_grad = False
net.eval()
data_loader = torch.utils.data.DataLoader(
dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
val_iter = iter(data_loader)
i = 0
n_correct = 0
loss_avg = utils.averager()
max_iter = min(max_iter, len(data_loader))
for i in range(max_iter):
data = val_iter.next()
i += 1
cpu_images, cpu_texts = data
batch_size = cpu_images.size(0)
utils.loadData(image, cpu_images)
t, l = converter.encode(cpu_texts)
utils.loadData(text, t)
utils.loadData(length, l)
preds = crnn(image)
preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
cost = criterion(preds, text, preds_size, length) / batch_size
loss_avg.add(cost)
_, preds = preds.max(2)
preds = preds.squeeze(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
for pred, target in zip(sim_preds, cpu_texts):
if pred == target.lower():
n_correct += 1
raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]
for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):
print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))
accuracy = n_correct / float(max_iter * opt.batchSize)
    print('Test loss: %f, accuracy: %f' % (loss_avg.val(), accuracy))
def trainBatch(net, criterion, optimizer):
data = train_iter.next()
cpu_images, cpu_texts = data
batch_size = cpu_images.size(0)
utils.loadData(image, cpu_images)
t, l = converter.encode(cpu_texts)
utils.loadData(text, t)
utils.loadData(length, l)
preds = crnn(image)
preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
cost = criterion(preds, text, preds_size, length) / batch_size
crnn.zero_grad()
cost.backward()
optimizer.step()
return cost
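# Hedged note on the CTC call used above (shapes as this script prepares them;
# see the warp-ctc bindings for the authoritative signature):
#   preds      - (T, batch, nclass) activations produced by the CRNN
#   text       - 1-D IntTensor holding all label sequences of the batch, concatenated
#   preds_size - IntTensor of length batch, each entry equal to T
#   length     - IntTensor of length batch, the label length of each sample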
for epoch in range(opt.nepoch):
train_iter = iter(train_loader)
i = 0
while i < len(train_loader):
for p in crnn.parameters():
p.requires_grad = True
crnn.train()
cost = trainBatch(crnn, criterion, optimizer)
loss_avg.add(cost)
i += 1
if i % opt.displayInterval == 0:
print('[%d/%d][%d/%d] Loss: %f' %
(epoch, opt.nepoch, i, len(train_loader), loss_avg.val()))
loss_avg.reset()
if i % opt.valInterval == 0:
val(crnn, test_dataset, criterion)
# do checkpointing
if i % opt.saveInterval == 0:
torch.save(
crnn.state_dict(), '{0}/netCRNN_{1}_{2}.pth'.format(opt.expr_dir, epoch, i))
| 36.640187 | 117 | 0.693406 |
9a1837e2f9dd3fc1a3e0d38816c4c8288db421e0 | 197 | py | Python | leonardo_package_index/apps/package_index_api.py | leonardo-modules/leonardo-package-index | ed4dd006055fe99d841ce09776507a2cd31c2b83 | [
"BSD-3-Clause"
] | null | null | null | leonardo_package_index/apps/package_index_api.py | leonardo-modules/leonardo-package-index | ed4dd006055fe99d841ce09776507a2cd31c2b83 | [
"BSD-3-Clause"
] | 11 | 2015-08-28T23:00:15.000Z | 2016-09-17T19:31:00.000Z | leonardo_package_index/apps/package_index_api.py | leonardo-modules/leonardo-package-index | ed4dd006055fe99d841ce09776507a2cd31c2b83 | [
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import *
urlpatterns = patterns('leonardo_package_index.apps.views',
url(r'^', include('leonardo_package_index.api.urls'),),
)
| 28.142857 | 78 | 0.57868 |
69eb164733d6ec8bd04522102a81225cce0bf977 | 2,268 | py | Python | src/utils/glove.py | LaudateCorpus1/hermes-5 | d9b50452379fe636da96c2bad2d286afa15cd7b9 | [
"Apache-2.0"
] | 135 | 2015-11-17T09:04:37.000Z | 2022-01-14T07:00:34.000Z | src/utils/glove.py | cacan/hermes | d9b50452379fe636da96c2bad2d286afa15cd7b9 | [
"Apache-2.0"
] | 16 | 2015-11-19T18:04:13.000Z | 2016-11-19T00:30:12.000Z | src/utils/glove.py | cacan/hermes | d9b50452379fe636da96c2bad2d286afa15cd7b9 | [
"Apache-2.0"
] | 68 | 2015-11-13T22:51:57.000Z | 2022-01-26T01:51:09.000Z | import numpy as np
class Glove(object):
"""Load a GloVe model and provide access to the vectors and vector space.
Provides access to word vectors as if it were a dictionary:
glove_instance = Glove('file')
glove['word']
Unrecognized words will return the Null vector (all 0).
Also provides a way to find the closest word to a vector in the vector
space.
"""
def __init__(self, glove_file):
"""Set up the GloVe class by reading in a vector file.
Args:
glove_file (str): Location of the plain text GloVe vector file.
"""
self.__model = {}
self.__line_to_word = {}
space = []
# Load the GloVe data from a file
with open(glove_file, 'r') as open_file:
for line_number, line in enumerate(open_file):
sline = line.split()
key_word = sline[0]
vector = np.array([float(i) for i in sline[1:]])
self.__model[key_word] = vector
self.__line_to_word[line_number] = key_word
space.append(vector)
# Set up a vector space so we can quickly find the closest vector
self.__vector_space = np.array(space)
# Null vector for unrecognized words
self.vector_size = len(vector)
self.__null_vector = np.zeros(self.vector_size)
def __getitem__(self, key):
"""Return the vector representation of a word.
Args:
key (str): A word to locate in the vector space.
Returns:
numpy array: The location of the word in the vector space, or the
null (0) vector if the word is not found.
"""
return self.__model.get(key, self.__null_vector)
def closest_word(self, vector):
"""Return the closest word to a given vector.
Args:
vector (numpy array): A vector of the same dimension as the vector
space.
Returns:
str: The closest word to the input vector in the vector space.
"""
squares = (self.__vector_space - vector)**2
distances = np.sum(squares, axis=1)
line_number = np.argmin(distances)
return self.__line_to_word[line_number]
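# Hedged usage sketch (the file name is a placeholder; any plain-text GloVe
# file with "word v1 v2 ..." rows works):
#
#     glove = Glove("glove.6B.50d.txt")
#     vec = glove["king"]              # the null (all-zero) vector for unknown words
#     print(glove.closest_word(vec))   # -> "king"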
| 31.5 | 78 | 0.59612 |
35b101f2c6f3c7d3d6daf2d23908f82c5f60f15f | 2,388 | py | Python | python/cuda_linux_demo/model_test.py | windstamp/Paddle-Inference-Demo | de773a0864eb12911d2cdcbc8f1f036911541c60 | [
"Apache-2.0"
] | 115 | 2020-05-06T09:47:08.000Z | 2022-03-31T08:47:18.000Z | python/cuda_linux_demo/model_test.py | windstamp/Paddle-Inference-Demo | de773a0864eb12911d2cdcbc8f1f036911541c60 | [
"Apache-2.0"
] | 79 | 2020-05-06T09:51:45.000Z | 2022-03-27T00:23:29.000Z | python/cuda_linux_demo/model_test.py | windstamp/Paddle-Inference-Demo | de773a0864eb12911d2cdcbc8f1f036911541c60 | [
"Apache-2.0"
] | 81 | 2020-05-06T09:47:11.000Z | 2022-03-23T07:29:32.000Z | import numpy as np
import argparse
import cv2
from paddle.inference import Config
from paddle.inference import create_predictor
from paddle.inference import PrecisionType
from img_preprocess import preprocess
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
type=str,
default="",
help=
"Model dir, If you load a non-combined model, specify the directory of the model."
)
parser.add_argument(
"--model_file",
type=str,
default="",
help="Model filename, Specify this when your model is a combined model."
)
parser.add_argument(
"--params_file",
type=str,
default="",
help=
"Parameter filename, Specify this when your model is a combined model."
)
parser.add_argument("--img_path", type=str, default="", help="Input image path.")
parser.add_argument("--threads",
type=int,
default=1,
help="Whether use gpu.")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
assert (args.model_dir != "") or \
(args.model_file != "" and args.params_file != ""), \
"Set model path error."
assert args.img_path != "", "Set img_path error."
# Init config
if args.model_dir == "":
config = Config(args.model_file, args.params_file)
else:
config = Config(args.model_dir)
config.enable_use_gpu(500, 0)
config.switch_ir_optim()
config.enable_memory_optim()
config.enable_tensorrt_engine(workspace_size=1 << 30, precision_mode=PrecisionType.Float32,max_batch_size=1, min_subgraph_size=5, use_static=False, use_calib_mode=False)
# Create predictor
predictor = create_predictor(config)
# Set input
img = cv2.imread(args.img_path)
img = preprocess(img)
input_names = predictor.get_input_names()
input_tensor = predictor.get_input_handle(input_names[0])
input_tensor.reshape(img.shape)
input_tensor.copy_from_cpu(img.copy())
# Run
predictor.run()
# Set output
output_names = predictor.get_output_names()
output_tensor = predictor.get_output_handle(output_names[0])
output_data = output_tensor.copy_to_cpu()
print("Predict class index: ", np.argmax(output_data))
| 31.421053 | 173 | 0.646566 |
2c26c233741c290e55f23f5fb8856be3cd70b909 | 2,068 | py | Python | motorodm/documents/meta_document.py | rob-blackbourn/motorodm | c77e79bd2b11c896b9a971ba6a9c4947dce96163 | [
"Apache-2.0"
] | null | null | null | motorodm/documents/meta_document.py | rob-blackbourn/motorodm | c77e79bd2b11c896b9a971ba6a9c4947dce96163 | [
"Apache-2.0"
] | null | null | null | motorodm/documents/meta_document.py | rob-blackbourn/motorodm | c77e79bd2b11c896b9a971ba6a9c4947dce96163 | [
"Apache-2.0"
] | null | null | null | from ..fields.field import Field
from ..fields import ObjectIdField
from ..query_sets.query_set import QuerySet
class MetaEmbeddedDocument(type):
def __new__(cls, name, bases, dct):
dct['_fields'] = {}
dct['_db_name_map'] = {}
dct['_indices'] = []
for base in bases:
for field_name, field in filter(lambda x: isinstance(x[1], Field), base.__dict__.items()):
cls.add_field(dct, field_name, field)
for field_name, field in filter(lambda x: isinstance(x[1], Field), dct.items()):
field.name = field_name
cls.add_field(dct, field_name, field)
dct['_values'] = {}
dct['_dirty_fields'] = set()
return super().__new__(cls, name, bases, dct)
@classmethod
def add_field(cls, dct, field_name, field):
if field_name in dct['_fields']:
raise KeyError(f"Field '{field_name}' already exists")
if not field.db_name:
field.db_name = field_name
if field.db_name in dct['_db_name_map']:
raise KeyError(f"Field '{field_name}' already exists")
field.name = field_name
dct['_fields'][field_name] = field
dct['_db_name_map'][field.db_name] = field_name
if field.unique:
dct['_indices'].append(field_name)
class MetaDocument(MetaEmbeddedDocument):
def __new__(cls, name, bases, dct):
if '_root' in dct and dct['_root']:
return super().__new__(cls, name, bases, dct)
if dct.get('__collection__', None) is None:
dct['__collection__'] = name
klass = super().__new__(cls, name, bases, dct)
if '_id' not in klass._db_name_map:
if 'id' in klass.__dict__:
raise Exception('Unable to set id field - already exists')
field = ObjectIdField(name='id', db_name='_id')
klass.id = field
klass._fields[field.name] = field
klass._db_name_map[field.db_name] = field.name
klass.qs = QuerySet()
return klass
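# Hedged illustration (assumes a Document base class elsewhere in motorodm that
# uses MetaDocument, and a hypothetical StringField subclass of Field):
#
#     class User(Document):
#         email = StringField(unique=True)
#
# After class creation the metaclasses above would have populated:
#     User._fields       -> {'email': <StringField>, 'id': <ObjectIdField>}
#     User._db_name_map  -> {'email': 'email', '_id': 'id'}
#     User._indices      -> ['email']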
| 31.815385 | 102 | 0.597195 |
d27a7cd8fb1a33aefe639f54dcde93489cfbe955 | 6,982 | py | Python | JobScript/wind_to_db/wind_stock_daily_import.py | zuoziji/transaction | 7a59817a699d9df32e13d43edda630520af7860d | [
"Apache-2.0"
] | null | null | null | JobScript/wind_to_db/wind_stock_daily_import.py | zuoziji/transaction | 7a59817a699d9df32e13d43edda630520af7860d | [
"Apache-2.0"
] | 9 | 2021-02-08T20:19:53.000Z | 2022-03-11T23:16:46.000Z | JobScript/wind_to_db/wind_stock_daily_import.py | zuoziji/transaction | 7a59817a699d9df32e13d43edda630520af7860d | [
"Apache-2.0"
] | 2 | 2019-03-03T14:27:54.000Z | 2019-07-22T09:00:35.000Z | from datetime import date, datetime, timedelta
import pandas as pd
import numpy as np
from config_fh import get_db_engine, get_db_session, STR_FORMAT_DATE, UN_AVAILABLE_DATE, WIND_REST_URL
from fh_tools.windy_utils_rest import WindRest
from fh_tools.fh_utils import get_last, get_first
import logging
from sqlalchemy.types import String, Date, Float, Integer
DATE_BASE = datetime.strptime('2005-01-01', STR_FORMAT_DATE).date()
ONE_DAY = timedelta(days=1)
def get_datelist(startdate, enddate):
datelist = w.tdays(startdate, enddate)
datelist = datelist.Data[0]
datelist = [i.strftime(STR_FORMAT_DATE) for i in datelist]
return datelist
def get_stockcodes(targerdate):
codesinfo = w.wset("sectorconstituent", "date=%s;windcode=881001.WI" % targerdate)
codes = codesinfo.Data[1]
names = codesinfo.Data[2]
return codes, names
def get_tradeinfo(stockcode, stockname, startdate, enddate):
wind_indictor_str = "open,high,low,close,adjfactor,volume,amt,pct_chg,maxupordown," + \
"swing,turn,free_turn,trade_status,susp_days"
stock_tradeinfo = w.wsd(stockcode, wind_indictor_str, startdate, enddate)
stock_times = stock_tradeinfo.Times
stock_data = stock_tradeinfo.Data
stockre = pd.DataFrame()
stockre['Trade_Date'] = [i.strftime('%Y-%m-%d') for i in stock_times]
stockcode_list = [stockcode] * len(stock_data[0])
stockname_list = [stockname] * len(stock_data[0])
stockre['Stock_Code'] = stockcode_list
stockre['Stock_Name'] = stockname_list
wind_list = wind_indictor_str.split(',')
for index, wincode in enumerate(wind_list):
stockre[wincode] = stock_data[index]
    # Drop rows whose close price is NaN
open_tmp = stockre['close']
open_tmp_nan = np.isnan(open_tmp)
stockre = stockre[open_tmp_nan != 1]
return stockre
def save_df2db(stockre, indexnames, conn):
stockre.to_sql('stock_tradeinfo', conn, if_exists='append', flavor='mysql',
index_label=['Trade_Date', 'Stock_Code', 'Stock_Name'])
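# Incrementally pull daily quote data (open/high/low/close, volume, etc.) from
# the Wind REST API for every stock, covering the span from its last stored
# trade date (or its IPO) up to yesterday / its delist date, then bulk-insert
# the result into the `wind_stock_daily` table.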
def import_stock_daily():
w = WindRest(WIND_REST_URL)
engine = get_db_engine()
with get_db_session(engine) as session:
        # Get the latest stored trade date for each stock
sql_str = 'select wind_code, max(Trade_date) from wind_stock_daily group by wind_code'
table = session.execute(sql_str)
stock_trade_date_latest_dic = dict(table.fetchall())
        # Get the market's valid trading dates
sql_str = "select trade_date from wind_trade_date where trade_date > '2005-1-1'"
table = session.execute(sql_str)
trade_date_sorted_list = [t[0] for t in table.fetchall()]
trade_date_sorted_list.sort()
        # Get each stock's IPO date and delist date
table = session.execute('SELECT wind_code, ipo_date, delist_date FROM wind_stock_info')
stock_date_dic = {wind_code: (ipo_date, delist_date if delist_date is None or delist_date > UN_AVAILABLE_DATE else None) for
wind_code, ipo_date, delist_date in table.fetchall()}
today_t_1 = date.today() - ONE_DAY
data_df_list = []
try:
for wind_code, date_pair in stock_date_dic.items():
date_ipo, date_delist = date_pair
            # Determine date_from
if wind_code in stock_trade_date_latest_dic:
date_latest_t1 = stock_trade_date_latest_dic[wind_code] + ONE_DAY
date_from = max([date_latest_t1, DATE_BASE, date_ipo])
else:
date_from = max([DATE_BASE, date_ipo])
date_from = get_first(trade_date_sorted_list, lambda x: x >= date_from)
            # Determine date_to
if date_delist is None:
date_to = today_t_1
else:
date_to = min([date_delist, today_t_1])
date_to = get_last(trade_date_sorted_list, lambda x: x <= date_to)
if date_from is None or date_to is None or date_from > date_to:
continue
            # Fetch price/volume quote data for the stock
wind_indictor_str = "open,high,low,close,adjfactor,volume,amt,pct_chg,maxupordown," + \
"swing,turn,free_turn,trade_status,susp_days"
data_df = w.wsd(wind_code, wind_indictor_str, date_from, date_to)
if data_df is None:
logging.warning('%s has no data during %s %s', wind_code, date_from, date_to)
continue
logging.info('%d data of %s', data_df.shape[0], wind_code)
data_df['wind_code'] = wind_code
data_df_list.append(data_df)
finally:
        # Write the collected data into the database
if len(data_df_list) > 0:
data_df_all = pd.concat(data_df_list)
data_df_all.index.rename('trade_date', inplace=True)
data_df_all.reset_index(inplace=True)
data_df_all.set_index(['wind_code', 'trade_date'], inplace=True)
data_df_all.to_sql('wind_stock_daily', engine, if_exists='append',
dtype={
'wind_code': String(20),
'trade_date': Date,
'open': Float,
'high': Float,
'low': Float,
'close': Float,
'adjfactor': Float,
'volume': Float,
'amt': Float,
'pct_chg': Float,
'maxupordown': Integer,
'swing': Float,
'turn': Float,
'free_turn': Float,
'trade_status': String(20),
'susp_days': Integer,
}
)
logging.info('%d data imported', data_df_all.shape[0])
if __name__ == '__main__':
import_stock_daily()
# startdate = '2005-01-03'
# enddate = '2014-12-31'
# stockcodes, stocknames = get_stockcodes(enddate)
# stockloc = 1085
# costtime = 0
# stockcodes = stockcodes[stockloc:]
# stocknames = stocknames[stockloc:]
# with get_db_session() as session:
# for stockcode, stockname in zip(stockcodes, stocknames):
# timestart = time.time()
# stockre = get_tradeinfo(stockcode, stockname, startdate, enddate)
# stockre.set_index(['Trade_Date', 'Stock_Code', 'Stock_Name'], inplace=True) #
# indexnames = ['Trade_Date', 'Stock_Code', 'Stock_Name']
# save_df2db(stockre, indexnames, session)
# timeend = time.time()
# costtime = costtime + timeend - timestart
# # conn.close()
# print('Success Transfer %s, %s' % (stockcode, stockname),
# "本次耗时:%d" % round(timeend - timestart), "累计耗时:%d" % costtime)
| 44.75641 | 133 | 0.584073 |
7e99ec6791ef449bd15d88bc40e47be5465426ce | 2,149 | py | Python | bot_4u/shop.py | Gametz/Helper_Bot | c48c8258a71782ecc4621b0a0eac3f9de4d461b7 | [
"Apache-2.0"
] | null | null | null | bot_4u/shop.py | Gametz/Helper_Bot | c48c8258a71782ecc4621b0a0eac3f9de4d461b7 | [
"Apache-2.0"
] | null | null | null | bot_4u/shop.py | Gametz/Helper_Bot | c48c8258a71782ecc4621b0a0eac3f9de4d461b7 | [
"Apache-2.0"
] | null | null | null | import json
def shop():
return "Магазин:" \
"\n" \
"\n 🚗 Машины" \
"\n 📱 Телефоны" \
"\n 🏡 Дома" \
"\n 🎞 Видеокарты" \
"\n ₿ Биткоины" \
"\n" \
"\n📌 Для просмотра категории используйте ее название"
def sell():
return "Продажа:" \
"\n" \
"\n 🚗 Пмашину - Продать свою машину" \
"\n 📱 Птел - Продать свой телефон" \
"\n 🏡 Пдом - Продать свой дом" \
"\n 🎞 Пкарту - Продать свю видеокарту"
def cars():
return "🚗 Машины:" \
"\n" \
"\n 💎 1. ВАЗ 2115 | 2.000$" \
"\n 💎 2. LADA Vesta | 4.000$" \
"\n 💎 3. Audi Q7 | 8.000$" \
"\n 💎 4. BMW M8 | 15.000$" \
"\n 💎 5. Range Rover | 50.000$" \
"\n 💎 6. Rolls-Royce | 150.000$" \
"\n" \
"\n📌 Для покупки транспорта используйте 'кмашину [номер]'\n" \
"Например: кмашину 1"
def phones():
return "📱 Телефоны:" \
"\n" \
"\n 💎 1. Fly Ezzy Flip | 200$" \
"\n 💎 2. Sony Xperia XA1 | 1.000$" \
"\n 💎 3. Xiaomi Mi 11 | 10.000$" \
"\n 💎 4. Samsung Galaxy S21 | 50.000$" \
"\n 💎 5. iPhone 12 | 200.000$" \
"\n" \
"\n📌 Для покупки телефона используйте 'ктел [номер]'\n" \
"Например: ктел 1"
def homes():
return "🏡 Дома:" \
"\n" \
"\n 💎 1. Картонная коробка | 100$" \
"\n 💎 2. Дом на дереве | 2.000$" \
"\n 💎 3. Деревянный дом | 10.000$" \
"\n 💎 4. Квартира в новостройке | 50.000$" \
"\n 💎 5. Особняк | 150.000$" \
"\n 💎 6. Дом на Рублёвке | 300.000$" \
"\n 💎 7. Личный остров | 500.000$" \
"\n 💎 8. Дворец в Геленджике | 1.000.000$" \
"\n" \
"\n📌 Для покупки транспорта используйте 'кдом [номер]'\n" \
"Например: кдом 1"
| 35.229508 | 73 | 0.442531 |
02f488e6ce3fe201ae71bfea566b158c06d9e203 | 5,026 | py | Python | dataset_processing/process_modelnet.py | Mingy2018/SwitchVAE | cf9c06ce3af50a559d79b9cba14851472e43a70b | [
"MIT"
] | 1 | 2021-07-22T00:46:06.000Z | 2021-07-22T00:46:06.000Z | dataset_processing/process_modelnet.py | Mingy2018/SwitchVAE | cf9c06ce3af50a559d79b9cba14851472e43a70b | [
"MIT"
] | null | null | null | dataset_processing/process_modelnet.py | Mingy2018/SwitchVAE | cf9c06ce3af50a559d79b9cba14851472e43a70b | [
"MIT"
] | 1 | 2021-12-07T17:10:19.000Z | 2021-12-07T17:10:19.000Z | import os
import numpy as np
from utils import binvox_rw
import glob
if __name__ == '__main__':
ModelNet10_ROOT = '/home/zmy/Datasets/ModelNet10/ModelNet10'
ModelNet40_ROOT = '/home/zmy/Datasets/ModelNet40'
image_ROOT = '/home/zmy/mmi_dataset/ModelNet40_images/modelnet40_images_new_12x'
ModelNet10_CLASSES = ['bathtub', 'bed', 'chair', 'desk', 'dresser',
'monitor', 'night_stand', 'sofa', 'table', 'toilet']
ModelNet40_CLASSES = [ 'airplane', 'bowl', 'table', 'chair', 'vase', 'glass_box', 'bathtub', 'toilet', 'range_hood',
'flower_pot', 'laptop', 'plant', 'cup', 'person', 'tent', 'sofa', 'monitor', 'keyboard',
'desk', 'mantel', 'curtain', 'bed', 'lamp', 'bench', 'dresser','car', 'sink',
'night_stand', 'stool', 'door', 'guitar', 'stairs', 'radio', 'tv_stand', 'cone', 'xbox',
'wardrobe', 'bookshelf', 'bottle', 'piano']
# ---------------Block1---------------------------
# X = {'train': [], 'test': []}
# y = {'train': [], 'test': []}
#
# for label, cl in enumerate(ModelNet10_CLASSES):
# for split in ['train', 'test']:
# examples_dir = os.path.join(ModelNet10_ROOT, cl, split)
# for example in os.listdir(examples_dir):
# if 'binvox' in example: # Ignore OFF files
# with open(os.path.join(examples_dir, example), 'rb') as file:
# data = np.int32(binvox_rw.read_as_3d_array(file).data)
# X[split].append(data)
# y[split].append(label)
# X['train']=np.expand_dims(X['train'], axis=1)
# X['test'] = np.expand_dims(X['test'], axis=1)
#
# np.savez_compressed('/home/zmy/Datasets/modelnet10.npz',
# X_train=X['train'],
# X_test=X['test'],
# y_train=y['train'],
# y_test=y['test'])
#----------------------------------------------------
# -----------------------Block2--------------------------
# X = {'train': [], 'test': []}
# y = {'train': [], 'test': []}
# for label, cl in enumerate(ModelNet40_CLASSES):
# for split in ['train', 'test']:
# examples_dir = os.path.join(ModelNet40_ROOT, cl, split)
# for example in os.listdir(examples_dir):
# if 'binvox' in example: # Ignore OFF files
# with open(os.path.join(examples_dir, example), 'rb') as file:
# data = np.int32(binvox_rw.read_as_3d_array(file).data)
# X[split].append(data)
# y[split].append(label)
#
# X['train'] = np.expand_dims(X['train'], axis=1)
# X['test'] = np.expand_dims(X['test'], axis=1)
# np.savez_compressed('/home/zmy/Datasets/modelnet40.npz',
# X_train=X['train'],
# X_test=X['test'],
# y_train=y['train'],
# y_test=y['test'])
#-------------------------------------------------------
#-------------------------------------------------------------
# X = {'train': [], 'test': []}
# y = {'train': [], 'test': []}
#
# for label, cl in enumerate(ModelNet10_CLASSES):
# for split in ['train', 'test']:
# examples_dir = os.path.join(image_ROOT, cl, split)
# file_list = os.listdir(examples_dir)
# id_list = [name.split('.')[0] for name in file_list if not name.startswith('.')]
# unique_id_list = list(set(id_list))
# X[split]+= unique_id_list
# y[split]+= [label] * len(unique_id_list)
#
# np.savez_compressed('/home/zmy/mmi_dataset/modelnet10_image.npz',
# X_train=X['train'],
# X_test=X['test'],
# y_train=y['train'],
# y_test=y['test'])
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------
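    # Active branch: collect the unique model IDs and integer labels per split
    # from the rendered ModelNet40 image folders and save them as a compressed .npz.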
X = {'train': [], 'test': []}
y = {'train': [], 'test': []}
for label, cl in enumerate(ModelNet40_CLASSES):
for split in ['train', 'test']:
examples_dir = os.path.join(image_ROOT, cl, split)
file_list = os.listdir(examples_dir)
id_list = [name.split('.')[0] for name in file_list if not name.startswith('.')]
unique_id_list = list(set(id_list))
X[split]+= unique_id_list
y[split]+= [label] * len(unique_id_list)
np.savez_compressed('/home/zmy/mmi_dataset/modelnet40_image.npz',
X_train=X['train'],
X_test=X['test'],
y_train=y['train'],
y_test=y['test'])
#-------------------------------------------------------------------------------------
| 45.690909 | 120 | 0.449065 |
1c51aa15373779b06273296a27d913c070079f41 | 857 | py | Python | python/paddle/fluid/contrib/slim/quantization/__init__.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | 1 | 2022-03-26T11:44:07.000Z | 2022-03-26T11:44:07.000Z | python/paddle/fluid/contrib/slim/quantization/__init__.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/slim/quantization/__init__.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | 1 | 2022-03-26T11:44:12.000Z | 2022-03-26T11:44:12.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import quantization_pass
from .quantization_pass import *
from . import quantization_strategy
from .quantization_strategy import *
__all__ = quantization_pass.__all__ + quantization_strategy.__all__
| 37.26087 | 74 | 0.787631 |
7be7a563b31adbb4c9c6a38a319557746106dc98 | 2,409 | py | Python | day20/part2.py | mtn/advent18 | 634ad20f02c321d6a38583077ee6b7f84a8848e5 | [
"MIT"
] | 1 | 2018-12-01T20:58:37.000Z | 2018-12-01T20:58:37.000Z | day20/part2.py | mtn/advent18 | 634ad20f02c321d6a38583077ee6b7f84a8848e5 | [
"MIT"
] | null | null | null | day20/part2.py | mtn/advent18 | 634ad20f02c321d6a38583077ee6b7f84a8848e5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from collections import defaultdict
open_parens = []
start_end = {} # open paren to close paren ind
alt_starts = {} # alternation to opening "("
alts = {} # maps "(" ind to [alternations]
with open("input.txt") as f:
inp = f.read().strip()
for i, ch in enumerate(inp):
if ch == "$":
break
if ch == "(":
open_parens.append(i)
elif ch == ")":
last_open = open_parens.pop()
start_end[last_open] = i
if ch == "|":
alt_starts[i] = open_parens[-1]
if open_parens[-1] in alts:
alts[open_parens[-1]].append(i)
else:
alts[open_parens[-1]] = [i]
assert not open_parens # all opened parens should be closed
g = defaultdict(set) # graph represented as adjacency lists
visited = set() # what we've visited so we don't cycle in inp
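# Walk the regex from index `ind`, adding a bidirectional edge to `g` for every
# N/E/S/W step; recurses into each alternative of a "(...)" group and, on "|",
# jumps past the enclosing group's ")".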
def run(point, ind):
global g
while True:
if inp[ind] == "$" or (point, ind) in visited:
break
visited.add((point, ind))
if inp[ind] == "N":
new = (point[0], point[1] - 1)
g[point].add(new)
g[new].add(point)
ind += 1
point = new
elif inp[ind] == "E":
new = (point[0] + 1, point[1])
g[point].add(new)
g[new].add(point)
ind += 1
point = new
elif inp[ind] == "S":
new = (point[0], point[1] + 1)
g[point].add(new)
g[new].add(point)
ind += 1
point = new
elif inp[ind] == "W":
new = (point[0] - 1, point[1])
g[point].add(new)
g[new].add(point)
ind += 1
point = new
elif inp[ind] == "|":
# jump to the end of the alternation
ind = start_end[alt_starts[ind]] + 1
elif inp[ind] == "(":
for alt in alts[ind]:
run(point, alt + 1)
ind += 1 # the first branch wasn't parsed as part of the alts
elif inp[ind] == ")":
ind += 1
run((0, 0), 1)
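# Distance search over the room graph from the origin: count the rooms that are
# at least 1000 doors away.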
q = [((0, 0), 0)]
distances = {}
count = 0
while q:
(x, y), dist = q.pop()
if (x, y) in distances and distances[(x, y)] <= dist:
continue
if dist >= 1000:
count += 1
distances[(x, y)] = dist
for neighbor in g[(x, y)]:
q.append((neighbor, dist + 1))
print(count)
| 24.333333 | 74 | 0.479867 |
c045e6c776ebf0200b59b2bd61fbe2c462c08bca | 6,031 | py | Python | pydown/downloader.py | qorost/pydown | b8ab1ff12c20f0e9e3f7af30bea31f4288025d7e | [
"BSD-2-Clause"
] | null | null | null | pydown/downloader.py | qorost/pydown | b8ab1ff12c20f0e9e3f7af30bea31f4288025d7e | [
"BSD-2-Clause"
] | null | null | null | pydown/downloader.py | qorost/pydown | b8ab1ff12c20f0e9e3f7af30bea31f4288025d7e | [
"BSD-2-Clause"
] | null | null | null | import sys
import urllib
import urllib2
#import requests
import os.path
import thread
import threading
import argparse
def print_progress(iteration,total,prefix='Progress: ',suffix='Complete',decimals = 2, barlen = 100):
"""
http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
"""
    filledlen = int(round(barlen * iteration / float(total)))
percents = round(100.00*(iteration/float(total)),decimals)
bar = '#' * filledlen + '-' * (barlen - filledlen)
try:
sys.stdout.write("%s [%s] %s%s %s\r" % (prefix,bar,percents,'%',suffix))
sys.stdout.flush()
except Exception,e:
print str(e)
print prefix,bar,percents,suffix
print type(percents),type(bar)
if iteration == total:
print("\n")
class LinkFile():
def __init__(self,filename="links.txt"):
self.filename = filename
def saveto(self,links):
try:
fp = open(self.filename,"w")
for i in links:
fp.write(i)
fp.write("\n")
fp.close()
except Exception,e:
sys.stderr.write("Error in Writing: " + str(e))
def extractfrom(self,filename=None):
links = set()
if filename is not None:
tmp = filename
else:
tmp = self.filename
try:
fp = open(tmp,"r")
texts = fp.read().splitlines()
for i in texts:
links.add(i)
fp.close()
return links
except Exception,e:
sys.stderr.write("Error while reading: " + str(e))
class Downloader():
def __init__(self,url,filename,overite=False):
self.url = url
self.filename = filename
def run(self,overite=False) :
msg= self.url
i = 0
url = self.url
file_name = self.filename
if os.path.exists(os.path.abspath(file_name)) and overite == False:
print "File Already Existed, skip downloading..."
return 1
try:
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
print_progress(file_size_dl,file_size)
#status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
#status = status + chr(8)*(len(status)+1)
#print status
f.close()
return 0
except Exception,e:
            print 'Exception in %s: %s' % (url, str(e))
return -1
class MyDownLoadThread(threading.Thread) :
def __init__(self,url, filename = None):
threading.Thread.__init__(self)
self.fileurl = url
if filename is not None:
self.filename = filename
else:
if url.find('/') >= 0:
self.filename = url.split('/')[-1]
else :
self.filename ='test.pdf'
def run(self) :
msg= 'Thread downloading %s started!\n From url: %s' %(self.filename,self.fileurl)
try :
urllib.urlretrieve(self.fileurl, self.filename,None)
msg= 'File %s downloaded!' % self.filename
except:
msg= 'failed to download'
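# Downloads a batch of URLs into a target directory, either sequentially with
# progress reporting and a summary, or concurrently with one MyDownLoadThread
# per file.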
class MyFilesDownloader():
def __init__(self, urls, dir='.'):
self.downurls = urls
self.threads = []
self.dir = dir
def startDownloadingFiles(self, multiThreading = False):
if multiThreading == True :
msg= 'In MULTITHREAD mode \nStart Downloading file into directory %s...' % self.dir
if self.downurls is not None:
for url in self.downurls :
if url.find('/') >= 0:
filename = url.split('/')[-1]
else :
filename ="test.pdf"
filename =os.path.join(self.dir, filename)
t = MyDownLoadThread(url, filename)
self.threads.append(t)
t.start()
else :
msg= 'In NORMAL mode \nStart Downloading file into directory %s...' % self.dir
if self.downurls is not None:
i = 1
failures = 0
skipped = 0
success = 0
num = len(self.downurls)
for url in self.downurls :
if url.find('/') >= 0:
filename = url.split('/')[-1]
else :
filename ="test.pdf"
print "(%d/%d) URL: %s" %(i,num,url)
filename =os.path.join(self.dir, filename)
filedownloader = Downloader(url, filename)
result = filedownloader.run()
i += 1
if result == 1:
skipped += 1
elif result == 0:
success += 1
else:
failures += 1
print "\nDownloading finished, (Suc:%d,Fails:%d,Skipped,%d,Total:%d)" %(success,failures,skipped,num)
def test_download():
filename = "5MB5MB.zip"
url = "http://download.thinkbroadband.com/5MB.zip"
xdown = Downloader(url,filename)
xdown.run()
if __name__ == '__main__':
test_download()
#
| 31.910053 | 113 | 0.507213 |
d8ffa1d38fd38bef012c335812ecb17304bf1ace | 5,266 | py | Python | raymon/types.py | pbonte/raymon | 83912d7a5ff22d61289688828169a7178fa34a2d | [
"MIT"
] | 21 | 2021-06-14T08:37:22.000Z | 2022-03-08T05:41:54.000Z | raymon/types.py | pbonte/raymon | 83912d7a5ff22d61289688828169a7178fa34a2d | [
"MIT"
] | 57 | 2021-01-30T08:45:13.000Z | 2022-02-21T16:15:00.000Z | raymon/types.py | pbonte/raymon | 83912d7a5ff22d61289688828169a7178fa34a2d | [
"MIT"
] | 1 | 2021-06-18T09:53:58.000Z | 2021-06-18T09:53:58.000Z | import json
import io
from abc import ABC, abstractmethod
from pydoc import locate
import msgpack
import numpy as np
import pandas as pd
import base64
import ast
from PIL import Image as PILImage
from raymon.globals import Serializable
class RaymonDataType(Serializable, ABC):
def to_json(self):
return json.dumps(self.to_jcr())
def to_msgpack(self):
return msgpack.packb(self.to_jcr())
def class2str(self):
module = str(self.__class__.__module__)
classname = str(self.__class__.__name__)
return f"{module}.{classname}"
class Image(RaymonDataType):
def __init__(self, data, lossless=False):
self.validate(data=data, lossless=lossless)
self.data = data
self.lossless = lossless
def validate(self, data, lossless):
# Validate 3 channels
if not isinstance(data, PILImage.Image):
raise ValueError("Image shoud be a PIL Image")
if not isinstance(lossless, bool):
raise ValueError("lossless should be boolean")
return True
def to_jcr(self):
img_byte_arr = io.BytesIO()
if self.lossless:
self.data.save(img_byte_arr, format="png")
else:
            # We'll save the image as JPEG. This is not lossless, but it is saved at the highest JPEG quality. This is 25 times faster than dumping as lossless PNG and results in a file only 1/5th the size, before b64 encoding.
# Measurements: PNG: 3.767667055130005s, 4008037 bytes -- PNG: 3.767667055130005s, 4008037 bytes
# For impact on algorithms see "On the Impact of Lossy Image and Video Compression on the Performance of Deep Convolutional Neural Network Architectures" (https://arxiv.org/abs/2007.14314), although this paper takes jpeg quality 95 as highest quality.
self.data.save(img_byte_arr, format="jpeg", quality=95)
img_byte_arr = img_byte_arr.getvalue()
b64 = base64.b64encode(img_byte_arr).decode()
data = {"type": self.class2str(), "params": {"data": b64, "lossless": self.lossless}}
return data
@classmethod
def from_jcr(cls, params):
b64 = params["data"]
img_byte_arr = io.BytesIO(base64.decodebytes(b64.encode()))
img = PILImage.open(img_byte_arr)
return cls(data=img)
class Numpy(RaymonDataType):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
if not isinstance(data, np.ndarray):
raise ValueError(f"Data must bu of type numpy.ndarray, not {type(data)}.")
return True
def to_jcr(self):
b64 = base64.b64encode(self.data).decode()
shape = self.data.shape
dtype = self.data.dtype
data = {"type": self.class2str(), "params": {"data": b64, "shape": str(shape), "dtype": str(dtype)}}
return data
@classmethod
def from_jcr(cls, params):
shape = ast.literal_eval(params["shape"])
dtype = params["dtype"]
b64 = params["data"]
nprest = np.frombuffer(base64.decodebytes(b64.encode()), dtype=str(dtype)).reshape(shape)
return cls(data=nprest)
class Series(RaymonDataType):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
if not isinstance(data, pd.Series):
raise ValueError("Data should be a Pandas Series")
return True
def to_jcr(self):
data = {
"type": self.class2str(),
"params": {
"data": json.loads(self.data.to_json()),
},
}
return data
@classmethod
def from_jcr(cls, jcr):
series = pd.Series(**jcr)
return cls(series)
class DataFrame(RaymonDataType):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
if not isinstance(data, pd.DataFrame):
raise ValueError("Data should be a Pandas DataFrame")
return True
def to_jcr(self):
data = {
"type": self.class2str(),
"params": {
"data": json.loads(self.data.to_json()),
},
}
return data
@classmethod
def from_jcr(cls, jcr):
frame = pd.read_json(json.dumps(jcr["data"]))
return cls(frame)
class Native(RaymonDataType):
def __init__(self, data):
self.validate(data)
self.data = data
def validate(self, data):
try:
json.dumps(data)
except TypeError as exc:
raise ValueError(f"{exc}")
return True
def to_jcr(self):
data = {
"type": self.class2str(),
"params": {
"data": self.data,
},
}
return data
@classmethod
def from_jcr(cls, jcr):
return cls(jcr["data"])
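# Deserialization helpers: `load_jcr` resolves the stored dotted class name and
# rebuilds the matching RaymonDataType; `from_msgpack` unpacks the bytes first.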
def load_jcr(jcr):
params = jcr["params"]
dtype = jcr["type"]
type_class = locate(dtype)
if type_class is None:
raise NameError(f"Could not locate {dtype}")
loaded = type_class.from_jcr(params)
return loaded
def from_msgpack(data):
loaded_data = msgpack.unpackb(data, raw=False)
return load_jcr(loaded_data)
| 29.418994 | 263 | 0.6109 |
4e04e12ca888d485002ae7196472f3182c8a4d8a | 640 | py | Python | release/cs_submitter/mainform/views.py | kvswim/kv_jhucs_coursesubmit | a00f2d1ca52204857bdf34271e13d97b424fcfca | [
"MIT"
] | null | null | null | release/cs_submitter/mainform/views.py | kvswim/kv_jhucs_coursesubmit | a00f2d1ca52204857bdf34271e13d97b424fcfca | [
"MIT"
] | null | null | null | release/cs_submitter/mainform/views.py | kvswim/kv_jhucs_coursesubmit | a00f2d1ca52204857bdf34271e13d97b424fcfca | [
"MIT"
] | null | null | null | #Kyle Verdeyen
#Independent Study, Summer 2017
#Joanne Selinski
#View for the mainform. Very basic validation checks.
from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponse
from .forms import MainFormModelForm
from django.urls import reverse
# Create your views here.
def index(request):
if request.method == "POST":
form = MainFormModelForm(request.POST)
if form.is_valid():
post = form.save(commit = False)
post.save()
return redirect('/table')
else:
form = MainFormModelForm()
return render(request, 'form/index.html', {'form' : form})
#return reverse('table:table')
| 30.47619 | 65 | 0.75 |
ae70b469d9ff51872b0638d41b15693be95ad731 | 7,398 | py | Python | mars/services/scheduling/supervisor/tests/test_queue_balance.py | ConanoutlooklvTBS/mars | 7030566fd9e9fc02b6b4064ef7bd86f6c24a2f60 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/services/scheduling/supervisor/tests/test_queue_balance.py | ConanoutlooklvTBS/mars | 7030566fd9e9fc02b6b4064ef7bd86f6c24a2f60 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/services/scheduling/supervisor/tests/test_queue_balance.py | ConanoutlooklvTBS/mars | 7030566fd9e9fc02b6b4064ef7bd86f6c24a2f60 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import pytest
from typing import Tuple, List
from ..... import oscar as mo
from ....cluster import ClusterAPI
from ....cluster.core import NodeRole, NodeStatus
from ....cluster.uploader import NodeInfoUploaderActor
from ....cluster.supervisor.locator import SupervisorPeerLocatorActor
from ....cluster.supervisor.node_info import NodeInfoCollectorActor
from ....subtask import Subtask
from ...supervisor import AssignerActor, \
SubtaskManagerActor, SubtaskQueueingActor, GlobalSlotManagerActor
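# Fake cluster-info actor: reports three worker bands as READY and drops a band
# from the ready set once its node is marked STOPPING.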
class MockNodeInfoCollectorActor(NodeInfoCollectorActor):
def __init__(self, timeout=None, check_interval=None):
super().__init__(timeout=timeout, check_interval=check_interval)
self.ready_nodes = {('address0', 'numa-0'): 2,
('address1', 'numa-0'): 2,
('address2', 'numa-0'): 2}
async def update_node_info(self, address, role, env=None,
resource=None, detail=None, status=None):
if 'address' in address and status == NodeStatus.STOPPING:
del self.ready_nodes[(address, 'numa-0')]
await super().update_node_info(address, role, env,
resource, detail, status)
def get_all_bands(self, role=None, statuses=None):
if statuses == {NodeStatus.READY}:
return self.ready_nodes
else:
return {('address0', 'numa-0'): 2,
('address1', 'numa-0'): 2,
('address2', 'numa-0'): 2}
class FakeClusterAPI(ClusterAPI):
@classmethod
async def create(cls, address: str, **kw):
dones, _ = await asyncio.wait([
mo.create_actor(SupervisorPeerLocatorActor, 'fixed', address,
uid=SupervisorPeerLocatorActor.default_uid(),
address=address),
mo.create_actor(MockNodeInfoCollectorActor,
uid=NodeInfoCollectorActor.default_uid(),
address=address),
mo.create_actor(NodeInfoUploaderActor, NodeRole.WORKER,
interval=kw.get('upload_interval'),
band_to_slots=kw.get('band_to_slots'),
use_gpu=kw.get('use_gpu', False),
uid=NodeInfoUploaderActor.default_uid(),
address=address),
])
for task in dones:
try:
task.result()
except mo.ActorAlreadyExist: # pragma: no cover
pass
api = await super().create(address=address)
await api.mark_node_ready()
return api
class MockSlotsActor(mo.Actor):
def apply_subtask_slots(self, band: Tuple, session_id: str,
subtask_ids: List[str], subtask_slots: List[int]):
return subtask_ids
class MockAssignerActor(mo.Actor):
def assign_subtasks(self, subtasks: List[Subtask]):
return [subtask.expect_bands[0] for subtask in subtasks]
def reassign_subtasks(self, band_num_queued_subtasks):
if len(band_num_queued_subtasks.keys()) == 1:
[(band, _)] = band_num_queued_subtasks.items()
return {band: 0}
return {('address1', 'numa-0'): -8, ('address0', 'numa-0'): 0,
('address2', 'numa-0'): 8}
class MockSubtaskManagerActor(mo.Actor):
def __init__(self):
self._subtask_ids, self._bands = [], []
@mo.extensible
def submit_subtask_to_band(self, subtask_id: str, band: Tuple):
self._subtask_ids.append(subtask_id)
self._bands.append(band)
def dump_data(self):
return self._subtask_ids, self._bands
@pytest.fixture
async def actor_pool():
pool = await mo.create_actor_pool('127.0.0.1', n_process=0)
async with pool:
session_id = 'test_session'
cluster_api = await FakeClusterAPI.create(pool.external_address)
# create assigner actor
await mo.create_actor(MockAssignerActor,
uid=AssignerActor.gen_uid(session_id),
address=pool.external_address)
# create queueing actor
manager_ref = await mo.create_actor(MockSubtaskManagerActor,
uid=SubtaskManagerActor.gen_uid(session_id),
address=pool.external_address)
# create slots actor
slots_ref = await mo.create_actor(MockSlotsActor,
uid=GlobalSlotManagerActor.default_uid(),
address=pool.external_address)
# create queueing actor
queueing_ref = await mo.create_actor(SubtaskQueueingActor,
session_id, 1,
uid=SubtaskQueueingActor.gen_uid(session_id),
address=pool.external_address)
yield pool, session_id, cluster_api, queueing_ref, slots_ref, manager_ref
await mo.destroy_actor(queueing_ref)
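# Helper: enqueue `num_subtasks` subtasks that all expect the same band, with
# monotonically increasing priorities.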
async def _queue_subtasks(num_subtasks, expect_bands, queueing_ref):
if not num_subtasks:
return
subtasks = [Subtask(expect_bands[0] + '-' + str(i)) for i in range(num_subtasks)]
for subtask in subtasks:
subtask.expect_bands = [expect_bands]
priorities = [(i,) for i in range(num_subtasks)]
await queueing_ref.add_subtasks(subtasks, priorities)
@pytest.mark.asyncio
async def test_subtask_queueing(actor_pool):
_pool, session_id, cluster_api, queueing_ref, slots_ref, manager_ref = actor_pool
nums_subtasks = [9, 8, 1]
expects_bands = [('address0', 'numa-0'), ('address1', 'numa-0'),
('address2', 'numa-0')]
for num_subtasks, expect_bands in zip(nums_subtasks, expects_bands):
await _queue_subtasks(num_subtasks, expect_bands, queueing_ref)
await cluster_api.set_node_status(
node='address1', role=NodeRole.WORKER, status=NodeStatus.STOPPING)
# 9 subtasks on ('address0', 'numa-0')
await queueing_ref.submit_subtasks(band=('address0', 'numa-0'), limit=10)
commited_subtask_ids, _commited_bands = await manager_ref.dump_data()
assert len(commited_subtask_ids) == 9
# 0 subtasks on ('address1', 'numa-0')
await queueing_ref.submit_subtasks(band=('address1', 'numa-0'), limit=10)
commited_subtask_ids, _commited_bands = await manager_ref.dump_data()
assert len(commited_subtask_ids) == 9
# 9 subtasks on ('address2', 'numa-0')
await queueing_ref.submit_subtasks(band=('address2', 'numa-0'), limit=10)
commited_subtask_ids, _commited_bands = await manager_ref.dump_data()
assert len(commited_subtask_ids) == 18
| 40.872928 | 90 | 0.628008 |
1bd69ceec19c6af6752dbe20cdaa51daebdac417 | 183 | py | Python | mindhome_alpha/erpnext/patches/v5_0/rename_customer_issue.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v5_0/rename_customer_issue.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v5_0/rename_customer_issue.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.table_exists("Customer Issue"):
frappe.rename_doc("DocType", "Customer Issue", "Warranty Claim")
| 26.142857 | 66 | 0.775956 |
3385ae91238131c0f2ff872285f9cab3f21c3557 | 2,189 | py | Python | fynance/models/xgb.py | ArthurBernard/Fynance | efd9a2e6f8eddcff017d828972236312f6f24084 | [
"MIT"
] | 19 | 2018-12-13T18:52:51.000Z | 2021-09-03T00:33:47.000Z | fynance/models/xgb.py | ArthurBernard/Fynance | efd9a2e6f8eddcff017d828972236312f6f24084 | [
"MIT"
] | null | null | null | fynance/models/xgb.py | ArthurBernard/Fynance | efd9a2e6f8eddcff017d828972236312f6f24084 | [
"MIT"
] | 6 | 2019-05-31T16:51:51.000Z | 2021-07-29T21:31:25.000Z | #!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: arthur.bernard.92@gmail.com
# @Date: 2019-04-23 19:15:05
# @Last modified by: ArthurBernard
# @Last modified time: 2019-09-25 14:14:47
# Built-in packages
# Third party packages
# import xgboost as xgb
# Local packages
__all__ = ['XGB', 'XGBData']
class XGB:
# TODO : train method, predict method
def __init__(self, X, y, **kwargs):
""" Setting data to XGBoot model.
Parameters
----------
X, y : np.ndarray[ndim=2, dtype=np.float64]
Respectively features with shape `(T, N)` and target with shape
`(T, 1)` of the model.
kwargs : dict, optional
Parameters of DMatrix object, cf XGBoost documentation [1]_.
References
----------
.. [1] https://xgboost.readthedocs.io/en/latest/python/python_api.html
"""
self.data = XGBData(X, label=y, **kwargs)
def run(self, n, s, **params):
# TODO : to remove
train = self.data[:-n]
estim = self.data[: s]
# bst = xgb.train(params, train)
# return bst.predict(estim)
class XGBData: # (xgb.DMatrix):
""" Set data for XGBoost models. """
def __getitem__(self, key):
""" Slice the DMatrix and return a new DMatrix that only contains `key`.
Parameters
----------
key : slice
Slice to be selected.
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices.
"""
start = 0 if key.start is None else key.start
step = 1 if key.step is None else key.step
stop = self.num_row() if key.stop is None else key.stop
if step < 0:
stop, start = start - 1, stop + 1
if stop < 0:
stop += self.num_row() + 1
return self.slice(list(range(start, stop, step)))
def train_xgb(params, dtrain, bst=None, **kwargs):
""" Train a XGBoost model """
if bst is None:
pass
# return xgb.train(params, dtrain, **kwargs)
else:
pass
# return xgb.train(params, dtrain, xgb_model=bst, **kwargs)
| 24.595506 | 80 | 0.562814 |
79e4955fbc2717ea605a117d3b035dbae454bceb | 36,864 | py | Python | tensorflow/python/autograph/impl/api_test.py | AdaAlarm/tensorflow | e0db063159751276a92d88a4ad6d481b1199318c | [
"Apache-2.0"
] | 10 | 2021-05-25T17:43:04.000Z | 2022-03-08T10:46:09.000Z | tensorflow/python/autograph/impl/api_test.py | AdaAlarm/tensorflow | e0db063159751276a92d88a4ad6d481b1199318c | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/python/autograph/impl/api_test.py | AdaAlarm/tensorflow | e0db063159751276a92d88a4ad6d481b1199318c | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import functools
import gc
import imp
import os
import re
import sys
import textwrap
import types
import numpy as np
import six
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import _errors_test_helper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors as tf_errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
global_n = 2
DEFAULT_RECURSIVE = converter.ConversionOptions(recursive=True)
class TestResource(object):
def __init__(self):
self.x = 3
class ApiTest(test.TestCase):
@contextlib.contextmanager
def assertPrints(self, expected, not_expected):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
yield
self.assertIn(expected, out_capturer.getvalue())
self.assertNotIn(not_expected, out_capturer.getvalue())
finally:
sys.stdout = sys.__stdout__
def assertNoMemoryLeaks(self, f):
object_ids_before = {id(o) for o in gc.get_objects()}
f()
gc.collect()
objects_after = tuple(
o for o in gc.get_objects() if id(o) not in object_ids_before)
self.assertEmpty(
tuple(o for o in objects_after if isinstance(o, TestResource)))
def test_converted_call_kwonly_args(self):
def test_fn(*, a):
return a
x = api.converted_call(
test_fn, (), {'a': constant_op.constant(-1)}, options=DEFAULT_RECURSIVE)
self.assertEqual(-1, self.evaluate(x))
def test_super_with_no_arg(self):
test_case_self = self
class TestBase:
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def no_arg(self, x):
return super().plus_three(x)
tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(5, tc.no_arg(2))
def test_converted_call_avoids_triggering_operators(self):
test_self = self
class Pair(collections.namedtuple('Pair', ['a', 'b'])):
def __call__(self):
return self.a + self.b
def __eq__(self, other):
test_self.fail('Triggered operator')
p = Pair(constant_op.constant(1), constant_op.constant(2))
x = api.converted_call(p, (), {}, options=DEFAULT_RECURSIVE)
self.assertIsNotNone(self.evaluate(x), 3)
@test_util.run_deprecated_v1
def test_decorator_recursive(self):
class TestClass(object):
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while math_ops.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
@test_util.run_deprecated_v1
def test_decorator_not_recursive(self):
class TestClass(object):
def called_member(self, a):
return math_ops.negative(a)
@api.convert(recursive=False)
def test_method(self, x, s, a):
while math_ops.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
@test_util.run_deprecated_v1
def test_convert_then_do_not_convert(self):
class TestClass(object):
@api.do_not_convert
def called_member(self, a):
return math_ops.negative(a)
@api.convert(recursive=True)
def test_method(self, x, s, a):
while math_ops.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant((2, 4)), constant_op.constant(1),
constant_op.constant(-2))
self.assertAllEqual((0, 1), self.evaluate(x))
@test_util.run_deprecated_v1
def test_decorator_calls_decorated(self):
class TestClass(object):
@api.convert()
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while math_ops.reduce_sum(x) > s:
x //= self.called_member(a)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_decorator_preserves_argspec(self):
class TestClass(object):
def test_method(self, a):
if a < 0:
a = -a
return a
test_method_converted = api.convert()(test_method)
tc = TestClass()
self.assertListEqual(
list(tf_inspect.getfullargspec(tc.test_method)),
list(tf_inspect.getfullargspec(tc.test_method_converted)))
def test_do_not_convert_argspec(self):
class TestClass(object):
def test_method(self, x, y):
z = x + y
return z
test_method_allowlisted = api.do_not_convert(test_method)
tc = TestClass()
self.assertTrue(tf_inspect.ismethod(tc.test_method_allowlisted))
# Because the wrapped function is not generated, we can't preserve its
# arg spec.
self.assertEqual((),
tuple(function_utils.fn_args(tc.test_method_allowlisted)))
def test_do_not_convert_callable_object(self):
class TestClass(object):
def __call__(self):
return 1
tc = TestClass()
self.assertEqual(1, api.do_not_convert(tc)())
@test_util.run_deprecated_v1
def test_convert_call_site_decorator(self):
class TestClass(object):
def called_member(self, a):
if a < 0:
a = -a
return a
@api.convert(recursive=True)
def test_method(self, x, s, a):
while math_ops.reduce_sum(x) > s:
x //= api.converted_call(
self.called_member, (a,), None, options=DEFAULT_RECURSIVE)
return x
tc = TestClass()
x = tc.test_method(
constant_op.constant([2, 4]), constant_op.constant(1),
constant_op.constant(-2))
self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_converted_call_builtin(self):
x = api.converted_call(range, (3,), None, options=DEFAULT_RECURSIVE)
self.assertEqual((0, 1, 2), tuple(x))
x = api.converted_call(
re.compile, ('mnas_v4_a.*\\/.*(weights|kernel):0$',),
None,
options=DEFAULT_RECURSIVE)
self.assertIsNotNone(x.match('mnas_v4_a/weights:0'))
def test_converted_call_function(self):
def test_fn(x):
if x < 0:
return -x
return x
x = api.converted_call(
test_fn, (constant_op.constant(-1),), None, options=DEFAULT_RECURSIVE)
self.assertEqual(1, self.evaluate(x))
@test_util.run_v1_only('b/120545219')
def test_converted_call_functools_partial(self):
def test_fn(x, y, z):
if x < 0:
return -x, -y, -z
return x, y, z
x = api.converted_call(
functools.partial(test_fn, constant_op.constant(-1), z=-3),
(constant_op.constant(-2),),
None,
options=DEFAULT_RECURSIVE)
self.assertEqual((1, 2, 3), self.evaluate(x))
x = api.converted_call(
functools.partial(
functools.partial(test_fn, constant_op.constant(-1)), z=-3),
(constant_op.constant(-2),),
None,
options=DEFAULT_RECURSIVE)
self.assertEqual((1, 2, 3), self.evaluate(x))
@test_util.run_v1_only('b/120545219')
def test_converted_call_functools_partial_kwarg_mutation(self):
def test_fn(x, y, z):
if x < 0:
return -x, -y, -z
return x, y, z
partial_fn = functools.partial(test_fn, constant_op.constant(-1), z=-3)
# Call using kwargs to assign y first to ensure that partial_fn.keywords is
# not mutated for subsequent calls (where y is assign through args).
x = api.converted_call(
partial_fn,
args=(),
kwargs={
'y': constant_op.constant(-2),
},
options=DEFAULT_RECURSIVE)
self.assertEqual((1, 2, 3), self.evaluate(x))
x = api.converted_call(
partial_fn,
args=(constant_op.constant(-4),),
kwargs=None,
options=DEFAULT_RECURSIVE)
self.assertEqual((1, 4, 3), self.evaluate(x))
def test_converted_call_method(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(1, self.evaluate(x))
def test_converted_call_synthetic_method(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_function(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
test_method = types.MethodType(test_function, tc)
x = api.converted_call(test_method, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(1, self.evaluate(x))
def test_converted_call_method_wrapper(self):
class TestClass(object):
def foo(self):
pass
tc = TestClass()
# `method.__get__()` returns a so-called method-wrapper.
wrapper = api.converted_call(
tc.foo.__get__, (tc,), None, options=DEFAULT_RECURSIVE)
self.assertEqual(wrapper, tc.foo)
def test_converted_call_method_as_object_attribute(self):
class AnotherClass(object):
def __init__(self):
self.another_class_attr = constant_op.constant(1)
def method(self):
if self.another_class_attr > 0:
return self.another_class_attr + 1
return self.another_class_attr + 10
class TestClass(object):
def __init__(self, another_obj_method):
self.another_obj_method = another_obj_method
obj = AnotherClass()
tc = TestClass(obj.method)
x = api.converted_call(
tc.another_obj_method, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(self.evaluate(x), 2)
def test_converted_call_method_converts_recursively(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def other_method(self):
if self.x < 0:
return -self.x
return self.x
def test_method(self):
return self.other_method()
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(1, self.evaluate(x))
def test_converted_call_method_by_class(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def test_method(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(
TestClass.test_method, (tc,), None, options=DEFAULT_RECURSIVE)
self.assertEqual(1, self.evaluate(x))
def test_converted_call_callable_object(self):
class TestClass(object):
def __init__(self, x):
self.x = x
def __call__(self):
if self.x < 0:
return -self.x
return self.x
tc = TestClass(constant_op.constant(-1))
x = api.converted_call(tc, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(1, self.evaluate(x))
def test_converted_call_callable_metaclass(self):
test_self = self
class TestMetaclass(type):
def __call__(cls):
self.assertTrue(converter_testing.is_inside_generated_code())
inst = object.__new__(cls)
inst.__init__()
def instance_call(unused_self):
test_self.fail(
'The class-bound __call__ should be called, not the instance'
' bound one.')
inst.__call__ = instance_call
return inst
tmc = TestMetaclass('TestClass', (), {})
tc = api.converted_call(tmc, (), None, options=DEFAULT_RECURSIVE)
self.assertIsInstance(tc, tmc)
def test_converted_call_callable_abc(self):
test_self = self
@six.add_metaclass(abc.ABCMeta)
class TestBase(object):
@abc.abstractmethod
def __call__(self):
test_self.fail('This should not be called')
class TestSubclass(TestBase):
def __init__(self):
test_self.assertFalse(converter_testing.is_inside_generated_code())
def __call__(self, expected):
test_self.assertTrue(expected)
test_self.assertTrue(converter_testing.is_inside_generated_code())
tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)
api.converted_call(tc, (True,), None, options=DEFAULT_RECURSIVE)
@test_util.run_deprecated_v1
def test_converted_call_constructor(self):
test_self = self
class TestClass(object):
def __init__(self):
test_self.assertFalse(converter_testing.is_inside_generated_code())
tc = api.converted_call(TestClass, (), None, options=DEFAULT_RECURSIVE)
self.assertIsInstance(tc, TestClass)
def test_converted_call_mangled_properties(self):
class TestClass(object):
def __init__(self):
self.__private = constant_op.constant(-1)
def test_method(self):
return self.__private
tc = TestClass()
with self.assertRaisesRegex(
errors.UnsupportedLanguageElementError, 'mangled names'):
api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
# TODO(mdan): Refactor to avoid this use of global state.
ag_logging.set_verbosity(0, True)
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '0'
with self.assertPrints('could not transform', 'bug'):
api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
ag_logging.set_verbosity(0, False)
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
def test_converted_call_partial_of_allowlisted_function(self):
def test_fn(_):
self.assertFalse(converter_testing.is_inside_generated_code())
converter_testing.allowlist(test_fn)
api.converted_call(
functools.partial(test_fn, None), (), None, options=DEFAULT_RECURSIVE)
def test_converted_call_already_converted(self):
def f(x):
return x == 0
x = api.converted_call(
f, (constant_op.constant(0),), None, options=DEFAULT_RECURSIVE)
self.assertTrue(self.evaluate(x))
converted_f = api.to_graph(
f, experimental_optional_features=converter.Feature.ALL)
x = api.converted_call(
converted_f, (constant_op.constant(0),),
None,
options=DEFAULT_RECURSIVE)
self.assertTrue(self.evaluate(x))
def test_converted_call_then_already_converted_dynamic(self):
@api.convert()
def g(x):
if x > 0:
return x
else:
return -x
def f(g, x):
return g(x)
x = api.converted_call(
f, (g, constant_op.constant(1)), None, options=DEFAULT_RECURSIVE)
self.assertEqual(self.evaluate(x), 1)
def test_converted_call_forced_when_explicitly_allowlisted(self):
@api.do_not_convert()
def f(x):
return x + 1
opts = converter.ConversionOptions(recursive=True, user_requested=True)
x = api.converted_call(f, (constant_op.constant(0),), None, options=opts)
self.assertTrue(self.evaluate(x))
converted_f = api.to_graph(
f, experimental_optional_features=converter.Feature.ALL)
x = api.converted_call(converted_f, (0,), None, options=DEFAULT_RECURSIVE)
self.assertEqual(x, 1)
@test_util.run_deprecated_v1
def test_converted_call_no_user_code(self):
def f(x):
return len(x)
opts = converter.ConversionOptions(internal_convert_user_code=False)
# f should not be converted, causing len to error out.
with self.assertRaisesRegex(Exception, 'len is not well defined'):
api.converted_call(f, (constant_op.constant([0]),), None, options=opts)
# len on the other hand should work fine.
x = api.converted_call(
len, (constant_op.constant([0]),), None, options=opts)
# The constant has static shape so the result is a primitive not a Tensor.
self.assertEqual(x, 1)
def test_converted_call_no_kwargs_allowed(self):
def f(*args):
# Note: np.broadcast rejects any **kwargs, even *{}
return np.broadcast(args[:1])
opts = converter.ConversionOptions(internal_convert_user_code=False)
self.assertIsNotNone(
api.converted_call(f, (1, 2, 3, 4), None, options=opts))
def test_converted_call_allowlisted_method(self):
class TestClass(object):
def method(self):
return converter_testing.is_inside_generated_code()
obj = TestClass()
converter_testing.allowlist(obj.method.__func__)
self.assertFalse(
api.converted_call(obj.method, (), {}, options=DEFAULT_RECURSIVE))
def test_converted_call_allowlisted_method_via_owner(self):
class TestClass(object):
def method(self):
return converter_testing.is_inside_generated_code()
converter_testing.allowlist(TestClass)
obj = TestClass()
self.assertFalse(
api.converted_call(obj.method, (), {}, options=DEFAULT_RECURSIVE))
def test_converted_call_numpy(self):
x = api.converted_call(np.arange, (5,), None, options=DEFAULT_RECURSIVE)
self.assertAllEqual(x, list(range(5)))
def test_converted_call_tf_op_forced(self):
# TODO(mdan): Add the missing level of support to LOGICAL_EXPRESSIONS.
opts = converter.ConversionOptions(
user_requested=True, optional_features=None)
x = api.converted_call(math_ops.add, (1, 1), None, options=opts)
self.assertAllEqual(self.evaluate(x), 2)
def test_converted_call_exec_generated_code(self):
temp_mod = imp.new_module('test_module')
dynamic_code = """
def foo(x):
return x + 1
"""
exec(textwrap.dedent(dynamic_code), temp_mod.__dict__) # pylint:disable=exec-used
opts = converter.ConversionOptions(optional_features=None)
x = api.converted_call(temp_mod.foo, (1,), None, options=opts)
self.assertAllEqual(x, 2)
def test_converted_call_namedtuple(self):
x = api.converted_call(
collections.namedtuple, ('TestNamedtuple', ('a', 'b')),
None,
options=DEFAULT_RECURSIVE)
self.assertTrue(inspect_utils.isnamedtuple(x))
def test_converted_call_namedtuple_via_collections(self):
x = api.converted_call(
collections.namedtuple, ('TestNamedtuple', ('a', 'b')),
None,
options=DEFAULT_RECURSIVE)
self.assertTrue(inspect_utils.isnamedtuple(x))
def test_converted_call_namedtuple_subclass_bound_method(self):
class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
def test_method(self, x):
while math_ops.reduce_sum(x) > self.a:
x //= self.b
return x
obj = TestClass(5, 2)
x = api.converted_call(
obj.test_method, (constant_op.constant([2, 4]),),
None,
options=DEFAULT_RECURSIVE)
self.assertAllEqual(self.evaluate(x), [1, 2])
def test_converted_call_namedtuple_method(self):
class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
pass
obj = TestClass(5, 2)
# _asdict is a documented method of namedtuple.
x = api.converted_call(obj._asdict, (), None, options=DEFAULT_RECURSIVE)
self.assertDictEqual(x, {'a': 5, 'b': 2})
def test_converted_call_namedtuple_subclass_unbound_method(self):
class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
def test_method(self, x):
while math_ops.reduce_sum(x) > self.a:
x //= self.b
return x
obj = TestClass(5, 2)
x = api.converted_call(
TestClass.test_method, (obj, constant_op.constant([2, 4])),
None,
options=DEFAULT_RECURSIVE)
self.assertAllEqual(self.evaluate(x), [1, 2])
def test_converted_call_lambda(self):
l = lambda x: x == 0
x = api.converted_call(
l, (constant_op.constant(0),), None, options=DEFAULT_RECURSIVE)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(True, self.evaluate(x))
def test_converted_call_defun_object_method(self):
# pylint:disable=method-hidden
class TestClass(object):
def method(self):
return 1
def prepare(self):
self.method = function.defun(self.method)
# pylint:enable=method-hidden
tc = TestClass()
tc.prepare()
x = api.converted_call(tc.method, (), None, options=DEFAULT_RECURSIVE)
self.assertAllEqual(1, self.evaluate(x))
def test_converted_call_native_binding(self):
x = api.converted_call(np.power, (2, 2), None, options=DEFAULT_RECURSIVE)
self.assertAllEqual(x, 4)
def test_converted_call_native_binding_errorneous(self):
class FaultyBinding(object):
def __array__(self):
raise ValueError('fault')
bad_obj = FaultyBinding()
def fail_if_warning(*_):
self.fail('No warning should be issued')
with test.mock.patch.object(ag_logging, 'warn', fail_if_warning):
with self.assertRaisesRegex(ValueError, 'fault'):
api.converted_call(
np.power, (bad_obj, 2), None, options=DEFAULT_RECURSIVE)
def test_converted_call_through_tf_dataset(self):
def other_fn(x):
if x > 0:
return x
return -x
def f():
return dataset_ops.Dataset.range(-3, 3).map(other_fn)
# Dataset iteration only works inside math_ops.
@def_function.function
def graph_fn():
ds = api.converted_call(f, (), None, options=DEFAULT_RECURSIVE)
itr = iter(ds)
return next(itr), next(itr), next(itr)
self.assertAllEqual(self.evaluate(graph_fn()), (3, 2, 1))
def test_converted_call_no_leaks_via_closure(self):
def test_fn():
res = TestResource()
def f(y):
return res.x + y
api.converted_call(f, (1,), None, options=DEFAULT_RECURSIVE)
self.assertNoMemoryLeaks(test_fn)
def test_converted_call_no_leaks_via_inner_function_closure(self):
def test_fn():
res = TestResource()
def f(y):
def inner_f():
return res.x + y
return inner_f
api.converted_call(f, (1,), None, options=DEFAULT_RECURSIVE)()
self.assertNoMemoryLeaks(test_fn)
def test_converted_call_no_caching_on_abort(self):
def test_fn(needs_autograph):
if needs_autograph:
if constant_op.constant(True):
x = constant_op.constant(1)
else:
x = constant_op.constant(2)
else:
x = 3
return x
def call_in_disabled_context():
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
return api.converted_call(
test_fn, (False,), None, options=DEFAULT_RECURSIVE)
def call_in_default_context():
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED):
return api.converted_call(
test_fn, (True,), None, options=DEFAULT_RECURSIVE)
# Note: this is an invariant, not a test (see above).
assert call_in_disabled_context() == 3
# If api.convert placed test_fn in the unconverted cache, this second
# invocation would fail.
self.assertEqual(self.evaluate(call_in_default_context()), 1)
def test_converted_call_caching_of_allowlisted_bound_methods(self):
class TestClass(object):
def __init__(self):
self.__private = constant_op.constant(-1)
def test_method(self):
return self.__private
# TODO(mdan): Refactor to avoid this use of global state.
cache_size_before = len(conversion._ALLOWLIST_CACHE)
# First invocation with fallback on, to allow recording it into cache.
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '0'
tc = TestClass()
api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
# Entry should be added to the allowlist cache.
self.assertEqual(len(conversion._ALLOWLIST_CACHE), cache_size_before + 1)
# A second invocation should go through even with fallback off.
tc = TestClass()
api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
# No new entries should appear in the allowlist cache.
self.assertEqual(len(conversion._ALLOWLIST_CACHE), cache_size_before + 1)
def test_context_tracking_direct_calls(self):
@api.do_not_convert()
def unconverted_fn():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.DISABLED)
@api.convert()
def converted_fn():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.ENABLED)
unconverted_fn()
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.ENABLED)
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
converted_fn()
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
@api.call_with_unspecified_conversion_status
def unspecified_fn():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
unspecified_fn()
def test_to_graph_basic(self):
def test_fn(x, s):
while math_ops.reduce_sum(x) > s:
x //= 2
return x
compiled_fn = api.to_graph(test_fn)
with ops.Graph().as_default():
x = compiled_fn(constant_op.constant((4, 8)), 4)
self.assertAllEqual(self.evaluate(x), (1, 2))
@test_util.run_deprecated_v1
def test_to_graph_with_defaults(self):
foo = 4
def test_fn(x, s=foo):
while math_ops.reduce_sum(x) > s:
x //= 2
return x
compiled_fn = api.to_graph(test_fn)
x = compiled_fn(constant_op.constant([4, 8]))
self.assertListEqual([1, 2], self.evaluate(x).tolist())
def test_to_graph_with_globals(self):
def test_fn(x):
global global_n
global_n = x + global_n
return global_n
converted_fn = api.to_graph(test_fn)
prev_val = global_n
converted_fn(10)
self.assertGreater(global_n, prev_val)
def test_to_graph_with_kwargs_clashing_converted_call(self):
def called_fn(**kwargs):
return kwargs['f'] + kwargs['owner']
def test_fn():
# These arg names intentionally match converted_call's
return called_fn(f=1, owner=2)
compiled_fn = api.to_graph(test_fn)
self.assertEqual(compiled_fn(), 3)
def test_to_graph_with_kwargs_clashing_unconverted_call(self):
@api.do_not_convert
def called_fn(**kwargs):
return kwargs['f'] + kwargs['owner']
def test_fn():
# These arg names intentionally match _call_unconverted's
return called_fn(f=1, owner=2)
compiled_fn = api.to_graph(test_fn)
self.assertEqual(compiled_fn(), 3)
def test_to_graph_caching(self):
def test_fn(x):
if x > 0:
return x
else:
return -x
converted_functions = tuple(api.to_graph(test_fn) for _ in (-1, 0, 1))
# All outputs are from the same module. We can't use __module__ because
# that's reset when we instantiate the function (see conversion.py).
# TODO(mdan): Can and should we overwrite __module__ instead?
module_names = frozenset(f.ag_module for f in converted_functions)
self.assertEqual(len(module_names), 1)
self.assertNotIn('__main__', module_names)
self.assertEqual(len(frozenset(id(f) for f in converted_functions)), 3)
def test_to_graph_caching_different_options(self):
def called_fn():
pass
def test_fn():
return called_fn()
converted_recursive = api.to_graph(test_fn, recursive=True)
converted_non_recursive = api.to_graph(test_fn, recursive=False)
self.assertNotEqual(converted_recursive.ag_module,
converted_non_recursive.ag_module)
self.assertRegex(
tf_inspect.getsource(converted_recursive),
'FunctionScope(.*recursive=True.*)')
self.assertRegex(
tf_inspect.getsource(converted_non_recursive),
'FunctionScope(.*recursive=False.*)')
def test_to_graph_preserves_bindings(self):
y = 3
def test_fn():
return y
converted = api.to_graph(test_fn)
self.assertEqual(converted(), 3)
y = 7
self.assertEqual(converted(), 7)
def test_to_graph_source_map(self):
def test_fn(y):
return y**2
self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
def test_to_graph_sets_conversion_context(self):
def g():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.ENABLED)
return 0
    # Note: the autograph=False sets the context to Status.DISABLED. The test
# verifies that to_graph overrides that.
@def_function.function(autograph=False)
def f():
converted_g = api.to_graph(g)
converted_g()
f()
def test_to_code_basic(self):
def test_fn(x, s):
while math_ops.reduce_sum(x) > s:
x /= 2
return x
# Just check that the output is parseable Python code.
self.assertIsNotNone(parser.parse(api.to_code(test_fn)))
def test_to_code_with_wrapped_function(self):
@def_function.function
def test_fn(x, s):
while math_ops.reduce_sum(x) > s:
x /= 2
return x
with self.assertRaisesRegex(Exception, 'try passing.*python_function'):
api.to_code(test_fn)
def test_tf_convert_overrides_current_context(self):
def f(expect_converted):
self.assertEqual(
converter_testing.is_inside_generated_code(), expect_converted)
@api.do_not_convert
def test_fn(ctx, expect_converted):
return api.tf_convert(f, ctx)(expect_converted)
test_fn(
ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED), True)
test_fn(
ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED), False)
def test_tf_convert_unspecified_not_converted_by_default(self):
def f():
self.assertEqual(ag_ctx.control_status_ctx().status,
ag_ctx.Status.UNSPECIFIED)
self.assertFalse(converter_testing.is_inside_generated_code())
@def_function.function
def test_fn(ctx):
return api.tf_convert(f, ctx, convert_by_default=False)()
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED))
def test_tf_convert_allowlisted_method(self):
if six.PY2:
      self.skipTest('Test bank not compatible with Python 2.')
class TestClass(object):
def method(self):
return converter_testing.is_inside_generated_code()
converter_testing.allowlist(TestClass.method)
obj = TestClass()
converted_call = api.tf_convert(
obj.method, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))
_, converted_target = tf_decorator.unwrap(converted_call)
self.assertIs(converted_target.__func__, obj.method.__func__)
def test_tf_convert_tf_decorator_unwrapping_context_enabled(self):
def f():
self.assertTrue(converter_testing.is_inside_generated_code())
@functools.wraps(f)
def wrapper(*args, **kwargs):
return wrapper.__wrapped__(*args, **kwargs)
decorated_f = tf_decorator.make_decorator(f, wrapper)
def test_fn(ctx):
return api.tf_convert(decorated_f, ctx)()
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))
def test_tf_convert_tf_decorator_unwrapping_context_disabled(self):
def f():
self.assertFalse(converter_testing.is_inside_generated_code())
@functools.wraps(f)
def wrapper(*args, **kwargs):
return wrapper.__wrapped__(*args, **kwargs)
decorated_f = tf_decorator.make_decorator(f, wrapper)
def test_fn(ctx):
return api.tf_convert(decorated_f, ctx)()
test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
def test_tf_convert_tf_decorator_allowlist_method(self):
def wrap(f):
def wrapper(*args, **kwargs):
return wrapper.__wrapped__(*args, **kwargs)
return tf_decorator.make_decorator(f, wrapper)
class TestClass(object):
@wrap
def method(self):
return converter_testing.is_inside_generated_code()
converter_testing.allowlist(TestClass.method)
obj = TestClass()
# It's intended that tf_convert modifies the original method in this case.
# This is not desirable, but options are limited.
converted = api.tf_convert(
obj.method, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))
self.assertTrue(converted())
self.assertTrue(obj.method())
def test_super_with_one_arg(self):
test_case_self = self
class TestBase(object):
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def one_arg(self, x):
test_base_unbound = super(TestSubclass)
test_base = test_base_unbound.__get__(self, TestSubclass)
return test_base.plus_three(x)
tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(5, tc.one_arg(2))
def test_super_with_two_args(self):
test_case_self = self
class TestBase(object):
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def two_args(self, x):
return super(TestSubclass, self).plus_three(x)
tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)
self.assertEqual(5, tc.two_args(2))
def test_raise_from_func_graph(self):
@def_function.function
def raise_from_tf_function(n):
_errors_test_helper.TestRaiseFromStatus(n)
for code, expected_exception in [
(1, tf_errors.CancelledError),
(2, tf_errors.UnknownError),
(3, tf_errors.InvalidArgumentError),
(4, tf_errors.DeadlineExceededError),
(5, tf_errors.NotFoundError),
(6, tf_errors.AlreadyExistsError),
(7, tf_errors.PermissionDeniedError),
(16, tf_errors.UnauthenticatedError),
(8, tf_errors.ResourceExhaustedError),
(9, tf_errors.FailedPreconditionError),
(10, tf_errors.AbortedError),
(11, tf_errors.OutOfRangeError),
(12, tf_errors.UnimplementedError),
(13, tf_errors.InternalError),
(14, tf_errors.UnavailableError),
(15, tf_errors.DataLossError),
]:
with self.assertRaises(expected_exception) as error:
raise_from_tf_function(code)
self.assertEqual(error.exception.experimental_payloads['key1'], 'value1')
self.assertEqual(error.exception.experimental_payloads['key2'], 'value2')
if __name__ == '__main__':
os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
test.main()
| 28.400616 | 86 | 0.678792 |
dc2fc7e67bc880962a42b7655e4b7d26a8f72a0d | 5,474 | py | Python | com/xiumei/etl/process_finance_data.py | struggle3014/aqf | d0477075bd6d25d0de82acd9796a5a4e9e056b2e | [
"Apache-2.0"
] | 2 | 2020-02-07T15:08:12.000Z | 2020-04-14T09:48:07.000Z | com/xiumei/etl/process_finance_data.py | struggle3014/aqf | d0477075bd6d25d0de82acd9796a5a4e9e056b2e | [
"Apache-2.0"
] | null | null | null | com/xiumei/etl/process_finance_data.py | struggle3014/aqf | d0477075bd6d25d0de82acd9796a5a4e9e056b2e | [
"Apache-2.0"
] | null | null | null | # -*- coding=utf-8 -*-
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import seaborn
import tushare as ts
import scipy.stats as stats
# 1.1- Get the price data of a single security
def get_single_stock_data(stock, start_date, end_date):
    # Fetch the security's prices; the returned type is pandas.core.frame.DataFrame
data = ts.get_k_data(stock, start=start_date, end=end_date)
    # 1- Set the 'date' column as the index
data.set_index('date', inplace=True)
    # Convert the string-formatted date index to datetime
data.index = pd.to_datetime(data.index)
return data
# 1.2- Get the price data of multiple securities
def get_multi_stock_data(stocks, start_date, end_date):
datas = map(get_single_stock_data, stocks, fill_list(start_date, len(stocks)), fill_list(end_date, len(stocks)))
return pd.concat(datas, keys=stocks, names=['Ticker', 'Date'])
# Build a list of the given length filled with the same value
def fill_list(thing, length):
result = []
for i in range(length):
result.append(thing)
return result
# 2.1- Visualize the financial data
def finance_data_visual():
stocks = get_multi_stock_data(['600030', '000001', '600426'], '2019-05-05', '2019-06-06')
    # 1- Reset the index
close_price = stocks[['close']].reset_index()
print(close_price.head())
    # 2- Pivot table: put the close prices of all stocks into one table
daily_close = close_price.pivot(index='Date', columns='Ticker', values='close')
print(daily_close.head())
    # 3- Plot
daily_close.plot(subplots=True, figsize=(10, 8))
plt.show()
# 3.1- Financial calculations: daily returns
def calculate_daily_profit():
stocks = get_multi_stock_data(['600030', '000001', '600426'], '2019-05-05', '2019-06-06')
    # 1- Reset the index
close_price = stocks[['close']].reset_index()
    # 2- Pivot table: put the close prices of all stocks into one table
daily_close = close_price.pivot(index='Date', columns='Ticker', values='close')
    # 3- Use shift() to compute returns; shift moves every column down by n rows.
price_change = daily_close / daily_close.shift(1) - 1
# print(price_change.ix[:, 0:4].head())
print(price_change.head())
    # 4- Replace NaN with 0
price_change.fillna(0, inplace=True)
print(price_change.head())
# 3.2- Financial calculations: cumulative returns
def calculate_accu_profit():
stocks = get_multi_stock_data(['600030', '000001', '600426'], '2019-05-05', '2019-06-06')
    # 1- Reset the index
close_price = stocks[['close']].reset_index()
    # 2- Pivot table: put the close prices of all stocks into one table
daily_close = close_price.pivot(index='Date', columns='Ticker', values='close')
    # 3- Use shift() to compute returns; shift moves every column down by n rows.
price_change = daily_close / daily_close.shift(1) - 1
# print(price_change.ix[:, 0:4].head())
    # 4- Replace NaN with 0
price_change.fillna(0, inplace=True)
cum_daily_return = (1 + price_change).cumprod()
print(cum_daily_return.head())
cum_daily_return.plot(figsize=(8, 6))
plt.show()
# 4- Analyze the return distribution
# 4.1- Histogram
def plot_hist():
# stocks = get_multi_stock_data(['600030', '000001', '600426'], '2019-05-05', '2019-06-06')
stocks = get_multi_stock_data(['600030', '600426'], '2019-05-05', '2019-06-06')
    # 1- Reset the index
close_price = stocks[['close']].reset_index()
    # 2- Pivot table: put the close prices of all stocks into one table
daily_close = close_price.pivot(index='Date', columns='Ticker', values='close')
    # 3- Use shift() to compute returns; shift moves every column down by n rows.
price_change = daily_close / daily_close.shift(1) - 1
# print(price_change.ix[:, 0:4].head())
    # 4- Replace NaN with 0
price_change.fillna(0, inplace=True)
    # 5- Plot the histogram of the 600030 returns
price_change['600030'].hist(bins=30, figsize=(4, 3))
plt.show()
    # 6- Plot the return distributions of all stocks
price_change.hist(bins=20, sharex=True, figsize=(12, 8))
plt.show()
# 4.2- QQ-Plots
# Use a QQ plot to check the distribution of stock returns
def plot_qq():
# stocks = get_multi_stock_data(['600030', '000001', '600426'], '2019-05-05', '2019-06-06')
stocks = get_multi_stock_data(['600030', '600426'], '2019-05-05', '2019-06-06')
    # 1- Reset the index
close_price = stocks[['close']].reset_index()
    # 2- Pivot table: put the close prices of all stocks into one table
daily_close = close_price.pivot(index='Date', columns='Ticker', values='close')
    # 3- Use shift() to compute returns; shift moves every column down by n rows.
price_change = daily_close / daily_close.shift(1) - 1
# print(price_change.ix[:, 0:4].head())
    # 4- Replace NaN with 0
price_change.fillna(0, inplace=True)
    # 5- Draw the QQ plot
fig = plt.figure(figsize=(7, 5))
stats.probplot(price_change['600030'], dist='norm', plot=fig.add_subplot(111))
plt.show()
# 5- Stock price correlations
def plot_stocks_coors():
    # 1- Get the HS300 index price data
hs300_data = get_single_stock_data('hs300', '2016-01-01', '2017-07-01')
hs300_return = hs300_data.close.pct_change().fillna(0)
    # 2- Get the price data of the other stocks
stocks = get_multi_stock_data(['600030', '000001', '600426'], '2016-01-01', '2017-07-01')
close_price = stocks[['close']].reset_index()
    # Pivot table: put the close prices of all stocks into one table
daily_close = close_price.pivot(index='Date', columns='Ticker', values='close')
    # 3- Merge the data
return_all = pd.concat([hs300_return, daily_close.pct_change().fillna(0)], axis=1)
return_all.rename(columns={'close': 'hs300'}, inplace=True)
print(return_all.head())
    # 4- Compute cumulative returns
cum_return_all = (1 + return_all).cumprod()
print(cum_return_all.head())
    # 5- Plot the cumulative returns
cum_return_all[['hs300', '600030', '600426']].plot(figsize=(8, 6))
# plt.show()
    # 6- Compute the correlation matrix with corr() and show it as a heatmap
corrs = return_all.corr()
seaborn.heatmap(corrs)
plt.show()
if __name__ == '__main__':
# get_single_stock_data()
# result = get_multi_stock_data(['600030', '000001'], '2019-06-05', '2019-06-06')
# finance_data_visual()
# calculate_daily_profit()
# calculate_accu_profit()
# plot_hist()
# plot_qq()
plot_stocks_coors()
| 34 | 116 | 0.658385 |
8ada3766ccb0b1de8ad1debab6da32648e1e988f | 1,679 | py | Python | myshkin/mixins/model.py | jakesnell/myshkin | cea0a625b1913627e27d66d0ada9155402f57d33 | [
"MIT"
] | null | null | null | myshkin/mixins/model.py | jakesnell/myshkin | cea0a625b1913627e27d66d0ada9155402f57d33 | [
"MIT"
] | null | null | null | myshkin/mixins/model.py | jakesnell/myshkin | cea0a625b1913627e27d66d0ada9155402f57d33 | [
"MIT"
] | null | null | null | import os
import glob
import yaml
import numpy as np
import keras
class Model(object):
def save_conf(self, out_file):
with open(out_file, 'w') as f:
conf_dict = {'model': self.__class__.__name__,
'opts': dict(self.opts._asdict())}
f.write(yaml.dump(conf_dict, default_flow_style=False))
def save_weights(self, out_dir, verbose=False):
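        # Recursively walk the (possibly nested) component tree and write each
        # Keras model's weights to its own HDF5 file, named by its dotted path.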
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
def _save_weights(out_dir, name, component):
if isinstance(component, keras.models.Model):
if verbose:
print "saving {:s}...".format(name)
component.save_weights(os.path.join(out_dir, name + ".h5"), overwrite=True)
else:
for k, subcomponent in component.components.iteritems():
_save_weights(out_dir, name + "." + k, subcomponent)
for k, component in self.components.iteritems():
_save_weights(out_dir, k, component)
def get_component(self, specs):
cur = self
for spec in specs:
cur = cur.components[spec]
return cur
def load_weights(self, weights_dir, verbose=False):
weight_files = glob.glob(os.path.join(weights_dir, '*.h5'))
for weight_file in weight_files:
component = self.get_component(os.path.basename(weight_file).split(".")[:-1])
if verbose:
print "loading from {:s}...".format(os.path.basename(weight_file))
component.load_weights(weight_file)
def __repr__(self):
return "{:s}({:s})".format(self.__class__.__name__, self.opts)
| 35.723404 | 91 | 0.596188 |
4a84eff221ec4d84cf0a858d30cc9ef4061428fe | 1,867 | py | Python | commands.py | Bocom/LSP-typescript | c2fbd5f756ff0fe36142b00a31100bf7505bdbc1 | [
"MIT"
] | null | null | null | commands.py | Bocom/LSP-typescript | c2fbd5f756ff0fe36142b00a31100bf7505bdbc1 | [
"MIT"
] | null | null | null | commands.py | Bocom/LSP-typescript | c2fbd5f756ff0fe36142b00a31100bf7505bdbc1 | [
"MIT"
] | null | null | null | from .protocol import Call, CallsDirection, CallsRequestParams, CallsResponse
from LSP.plugin import Request
from LSP.plugin import Session
from LSP.plugin.core.protocol import LocationLink
from LSP.plugin.core.registry import LspTextCommand
from LSP.plugin.core.typing import Optional
from LSP.plugin.core.views import text_document_position_params
from LSP.plugin.locationpicker import LocationPicker
import functools
import sublime
SESSION_NAME = "LSP-typescript"
class LspTypescriptCallsCommand(LspTextCommand):
session_name = SESSION_NAME
def is_enabled(self) -> bool:
selection = self.view.sel()
return len(selection) > 0 and super().is_enabled()
def run(self, edit: sublime.Edit, direction: CallsDirection) -> None:
session = self.session_by_name(self.session_name)
if session is None:
return
position_params = text_document_position_params(self.view, self.view.sel()[0].b)
params = {
'textDocument': position_params['textDocument'],
'position': position_params['position'],
'direction': direction
} # type: CallsRequestParams
session.send_request(Request("textDocument/calls", params), functools.partial(self.on_result_async, session))
def on_result_async(self, session: Session, result: Optional[CallsResponse]) -> None:
if not result:
return
def to_location_link(call: Call) -> LocationLink:
return {
'targetUri': call['location']['uri'],
'targetSelectionRange': call['location']['range'],
}
locations = list(map(to_location_link, result['calls']))
self.view.run_command("add_jump_record", {"selection": [(r.a, r.b) for r in self.view.sel()]})
LocationPicker(self.view, session, locations, side_by_side=False)
| 38.102041 | 117 | 0.688806 |
020af2a1bce83b829f5f549f2229002dcd2274fc | 9,491 | py | Python | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/hbase.py | hmcl/ambari-apache | 87423d64f54d896c62d1a9245eb03a97763e35a4 | [
"Apache-2.0"
] | 1 | 2021-05-06T06:24:04.000Z | 2021-05-06T06:24:04.000Z | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/hbase.py | hmcl/ambari-apache | 87423d64f54d896c62d1a9245eb03a97763e35a4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/hbase.py | hmcl/ambari-apache | 87423d64f54d896c62d1a9245eb03a97763e35a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from ambari_commons import OSConst
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.resources.template_config import TemplateConfig
from resource_management.libraries.functions.format import format
from resource_management.core.source import Template, InlineTemplate
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hbase(name=None, action = None):
import params
Directory(params.hbase_conf_dir,
owner = params.hadoop_user,
create_parents = True
)
Directory(params.hbase_tmp_dir,
create_parents = True,
owner = params.hadoop_user
)
Directory (os.path.join(params.local_dir, "jars"),
owner = params.hadoop_user,
create_parents = True
)
XmlConfig("hbase-site.xml",
conf_dir = params.hbase_conf_dir,
configurations = params.config['configurations']['ams-hbase-site'],
configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
owner = params.hadoop_user
)
if 'ams-hbase-policy' in params.config['configurations']:
XmlConfig("hbase-policy.xml",
conf_dir = params.hbase_conf_dir,
configurations = params.config['configurations']['ams-hbase-policy'],
configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
owner = params.hadoop_user
)
# Manually overriding ownership of file installed by hadoop package
else:
File(os.path.join(params.hbase_conf_dir, "hbase-policy.xml"),
owner = params.hadoop_user
)
# Metrics properties
File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
owner = params.hbase_user,
content=Template("hadoop-metrics2-hbase.properties.j2")
)
hbase_TemplateConfig('regionservers', user=params.hadoop_user)
if params.security_enabled:
hbase_TemplateConfig(format("hbase_{name}_jaas.conf"), user=params.hadoop_user)
if name != "client":
Directory (params.hbase_log_dir,
owner = params.hadoop_user,
create_parents = True
)
if (params.hbase_log4j_props != None):
File(os.path.join(params.hbase_conf_dir, "log4j.properties"),
owner=params.hadoop_user,
content=params.hbase_log4j_props
)
elif (os.path.exists(os.path.join(params.hbase_conf_dir,"log4j.properties"))):
File(os.path.join(params.hbase_conf_dir,"log4j.properties"),
owner=params.hadoop_user
)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hbase(name=None # 'master' or 'regionserver' or 'client'
, action=None):
import params
Directory(params.hbase_conf_dir,
owner = params.hbase_user,
group = params.user_group,
create_parents = True,
recursive_ownership = True,
)
Directory (params.hbase_tmp_dir,
owner = params.hbase_user,
cd_access="a",
create_parents = True,
recursive_ownership = True,
)
Directory (os.path.join(params.local_dir, "jars"),
owner = params.hbase_user,
group = params.user_group,
cd_access="a",
mode=0775,
create_parents = True
)
merged_ams_hbase_site = {}
merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
if params.security_enabled:
merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
if not params.is_hbase_distributed:
File(format("{hbase_conf_dir}/core-site.xml"),
action='delete',
owner=params.hbase_user)
File(format("{hbase_conf_dir}/hdfs-site.xml"),
action='delete',
owner=params.hbase_user)
XmlConfig("hbase-site.xml",
conf_dir = params.hbase_conf_dir,
configurations = merged_ams_hbase_site,
configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
owner = params.hbase_user,
group = params.user_group
)
# Phoenix spool file dir if not /tmp
if not os.path.exists(params.phoenix_server_spool_dir):
Directory(params.phoenix_server_spool_dir,
owner=params.ams_user,
mode = 0755,
group=params.user_group,
cd_access="a",
create_parents = True
)
pass
if 'ams-hbase-policy' in params.config['configurations']:
XmlConfig("hbase-policy.xml",
conf_dir = params.hbase_conf_dir,
configurations = params.config['configurations']['ams-hbase-policy'],
configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
owner = params.hbase_user,
group = params.user_group
)
# Manually overriding ownership of file installed by hadoop package
else:
File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
owner = params.hbase_user,
group = params.user_group
)
File(format("{hbase_conf_dir}/hbase-env.sh"),
owner = params.hbase_user,
content=InlineTemplate(params.hbase_env_sh_template)
)
# Metrics properties
File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
owner = params.hbase_user,
group = params.user_group,
content=Template("hadoop-metrics2-hbase.properties.j2")
)
# hbase_TemplateConfig( params.metric_prop_file_name,
# tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
# )
hbase_TemplateConfig('regionservers', user=params.hbase_user)
if params.security_enabled:
hbase_TemplateConfig( format("hbase_{name}_jaas.conf"), user=params.hbase_user)
hbase_TemplateConfig( format("hbase_client_jaas.conf"), user=params.hbase_user)
hbase_TemplateConfig( format("ams_zookeeper_jaas.conf"), user=params.hbase_user)
if name != "client":
Directory( params.hbase_pid_dir,
owner = params.hbase_user,
create_parents = True,
cd_access = "a",
mode = 0755,
)
Directory (params.hbase_log_dir,
owner = params.hbase_user,
create_parents = True,
cd_access = "a",
mode = 0755,
)
if name == "master":
if not params.is_local_fs_rootdir:
# If executing Stop All, HDFS is probably down
if action != 'stop':
params.HdfsResource(params.hbase_root_dir,
type="directory",
action="create_on_execute",
owner=params.hbase_user,
mode=0775,
dfs_type=params.dfs_type
)
params.HdfsResource(params.hbase_staging_dir,
type="directory",
action="create_on_execute",
owner=params.hbase_user,
mode=0711,
dfs_type=params.dfs_type
)
params.HdfsResource(None, action="execute")
if params.is_hbase_distributed:
#Workaround for status commands not aware of operating mode
File(format("{params.hbase_pid_dir}/distributed_mode"), action="create", mode=0644, owner=params.hbase_user)
pass
else:
local_root_dir = params.hbase_root_dir
#cut protocol name
if local_root_dir.startswith("file://"):
local_root_dir = local_root_dir[7:]
#otherwise assume dir name is provided as is
Directory(local_root_dir,
owner = params.hbase_user,
cd_access="a",
create_parents = True,
recursive_ownership = True
)
File(format("{params.hbase_pid_dir}/distributed_mode"), action="delete", owner=params.hbase_user)
if params.hbase_log4j_props is not None:
File(format("{params.hbase_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.hbase_user,
content=params.hbase_log4j_props
)
elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
File(format("{params.hbase_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.hbase_user
)
def hbase_TemplateConfig(name, tag=None, user=None):
import params
TemplateConfig( os.path.join(params.hbase_conf_dir, name),
owner = user,
template_tag = tag
)
| 34.638686 | 116 | 0.660837 |
cf922ee9a428e5e2e14e71ebf5fb49ed68e89d24 | 1,271 | py | Python | setup.py | laterpay/djtranslationchecker | 6589ed3472193a795d78504a9f2337cd045b29f2 | [
"MIT"
] | 1 | 2015-03-25T09:30:55.000Z | 2015-03-25T09:30:55.000Z | setup.py | laterpay/djtranslationchecker | 6589ed3472193a795d78504a9f2337cd045b29f2 | [
"MIT"
] | null | null | null | setup.py | laterpay/djtranslationchecker | 6589ed3472193a795d78504a9f2337cd045b29f2 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from setuptools import find_packages, setup
import codecs
import os
#import time
#_version = "0.10.dev%s" % int(time.time())
_version = "0.10.0"
_packages = find_packages('.', exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
if os.path.exists('README.rst'):
_long_description = codecs.open('README.rst', 'r', 'utf-8').read()
else:
_long_description = ""
setup(
name='djtranslationchecker',
version=_version,
description="Check your Django translation files",
long_description=_long_description,
author="LaterPay GmbH",
author_email="support@laterpay.net",
url="https://github.com/laterpay/djtranslationchecker",
license='MIT',
keywords="Django translation check gettext",
#test_suite="tests",
packages=_packages,
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
),
)
| 28.244444 | 84 | 0.638867 |
ca071c293474dd0ee380b182a0684ea4dd7fde3b | 170 | py | Python | persist.py | leosantos16/sample-market-maker | 4006b1a7fadf0db3821202cfb5681768af33d711 | [
"Apache-2.0"
] | null | null | null | persist.py | leosantos16/sample-market-maker | 4006b1a7fadf0db3821202cfb5681768af33d711 | [
"Apache-2.0"
] | null | null | null | persist.py | leosantos16/sample-market-maker | 4006b1a7fadf0db3821202cfb5681768af33d711 | [
"Apache-2.0"
] | null | null | null | from subprocess import Popen
import sys
filename = 'run.py'
while True:
print("\nStarting " + filename)
p = Popen("python " + filename, shell=True)
p.wait()
| 18.888889 | 47 | 0.658824 |
9e511e5d1d9bdf7f3fa6fac8215605f4269c1137 | 1,771 | py | Python | cafe/drivers/unittest/config.py | melissa-kam/opencafe | af90c228084d479afa60b8b06a6b5d4d1adf2b8e | [
"Apache-2.0"
] | null | null | null | cafe/drivers/unittest/config.py | melissa-kam/opencafe | af90c228084d479afa60b8b06a6b5d4d1adf2b8e | [
"Apache-2.0"
] | null | null | null | cafe/drivers/unittest/config.py | melissa-kam/opencafe | af90c228084d479afa60b8b06a6b5d4d1adf2b8e | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cafe.engine.models.data_interfaces import (
ConfigSectionInterface, _get_path_from_env)
class DriverConfig(ConfigSectionInterface):
"""
Unittest driver configuration values.
This config section is intended to supply values and configuration that can
not be programatically identified to the unittest driver.
"""
SECTION_NAME = 'drivers.unittest'
def __init__(self, config_file_path=None):
config_file_path = config_file_path or _get_path_from_env(
'CAFE_ENGINE_CONFIG_FILE_PATH')
super(DriverConfig, self).__init__(config_file_path=config_file_path)
@property
def ignore_empty_datasets(self):
"""
Identify whether empty datasets should change suite results.
        An empty dataset provided to a suite should result in the suite failing. This
value provides a mechanism to modify that behavior in the case of
        suites with intentionally included empty datasets. If this is set to
'True' empty datasets will not cause suite failures. This defaults
to 'False'.
"""
return self.get_boolean(
item_name="ignore_empty_datasets",
default=False)
| 37.680851 | 79 | 0.726708 |
d28f307af02ce3bac54ba47c296eef107a99687c | 4,432 | py | Python | src/103_signal_processing.py | j20232/kaggle_earthquake | 47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b | [
"MIT"
] | null | null | null | src/103_signal_processing.py | j20232/kaggle_earthquake | 47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b | [
"MIT"
] | null | null | null | src/103_signal_processing.py | j20232/kaggle_earthquake | 47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b | [
"MIT"
] | null | null | null | """Extract signal processing features
Reference: https://www.kaggle.com/gpreda/lanl-earthquake-eda-and-prediction
"""
import sys
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
import competition as cc
from common import stop_watch
TRAIN_CSV_DIRECTORY_PATH = cc.INPUT_PATH / sys.argv[1]
TRAIN_CSV_LIST = list(TRAIN_CSV_DIRECTORY_PATH.glob('**/*.csv'))
@stop_watch
def extract_features(csv_list, feature_dir_path):
df = pd.DataFrame()
Path.mkdir(feature_dir_path, exist_ok=True, parents=True)
for index, each_csv in enumerate(tqdm(sorted(csv_list))):
seg = pd.read_csv(each_csv, dtype=cc.DTYPES)
seg_id = each_csv.split("/")[-1].split(".")[0]
df.loc[index, "seg_id"] = seg_id
xc = pd.Series(seg['acoustic_data'].values)
# Regression
df.loc[index, 'trend'] = add_trend_feature(xc)
df.loc[index, 'abs_trend'] = add_trend_feature(xc, abs_values=True)
# classic_sta_lta (the definition is written in this file)
df.loc[index, 'classic_sta_lta1_mean'] = classic_sta_lta(xc, 500, 10000).mean()
df.loc[index, 'classic_sta_lta2_mean'] = classic_sta_lta(xc, 5000, 100000).mean()
df.loc[index, 'classic_sta_lta3_mean'] = classic_sta_lta(xc, 3333, 6666).mean()
df.loc[index, 'classic_sta_lta4_mean'] = classic_sta_lta(xc, 10000, 25000).mean()
# moving average
df.loc[index, 'Moving_average_700_mean'] = xc.rolling(window=700).mean().mean(skipna=True)
df.loc[index, 'Moving_average_1500_mean'] = xc.rolling(window=1500).mean().mean(skipna=True)
df.loc[index, 'Moving_average_3000_mean'] = xc.rolling(window=3000).mean().mean(skipna=True)
df.loc[index, 'Moving_average_6000_mean'] = xc.rolling(window=6000).mean().mean(skipna=True)
# ema moving average
ewma = pd.Series.ewm
df.loc[index, 'exp_Moving_average_300_mean'] = ewma(xc, span=300).mean().mean(skipna=True)
df.loc[index, 'exp_Moving_average_3000_mean'] = ewma(xc, span=3000).mean().mean(skipna=True)
df.loc[index, 'exp_Moving_average_30000_mean'] = ewma(xc, span=6000).mean().mean(skipna=True)
# moving average by correction with std
no_of_std = 2
df.loc[index, 'MA_400MA_std_mean'] = xc.rolling(window=400).std().mean()
df.loc[index, 'MA_400MA_BB_high_mean'] = (df.loc[index, 'Moving_average_700_mean'] + no_of_std * df.loc[index, 'MA_400MA_std_mean']).mean()
df.loc[index, 'MA_400MA_BB_low_mean'] = (df.loc[index, 'Moving_average_700_mean'] - no_of_std * df.loc[index, 'MA_400MA_std_mean']).mean()
df.loc[index, 'MA_700MA_std_mean'] = xc.rolling(window=700).std().mean()
df.loc[index, 'MA_700MA_BB_high_mean'] = (df.loc[index, 'Moving_average_700_mean'] + no_of_std * df.loc[index, 'MA_700MA_std_mean']).mean()
df.loc[index, 'MA_700MA_BB_low_mean'] = (df.loc[index, 'Moving_average_700_mean'] - no_of_std * df.loc[index, 'MA_700MA_std_mean']).mean()
df.loc[index, 'MA_1000MA_std_mean'] = xc.rolling(window=1000).std().mean()
print("Aggregation output is belows:")
print(df.head(3))
df.to_csv(feature_dir_path / "{}.csv".format(cc.PREF), index=False)
def add_trend_feature(arr, abs_values=False):
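    # Fit a straight line to the (sample index, value) series and return its
    # slope, i.e. the overall linear trend of the segment; with abs_values=True
    # the fit is done on |arr| instead.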
idx = np.array(range(len(arr)))
if abs_values:
arr = np.abs(arr)
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), arr)
return lr.coef_[0]
def classic_sta_lta(x, length_sta, length_lta):
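    # Classic STA/LTA trigger from seismology: the ratio of a short-term
    # average of the signal energy (x ** 2) to a long-term average of the same.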
sta = np.cumsum(x ** 2)
# Convert to float
sta = np.require(sta, dtype=np.float)
# Copy for LTA
lta = sta.copy()
# Compute the STA and the LTA
sta[length_sta:] = sta[length_sta:] - sta[:-length_sta]
sta /= length_sta
lta[length_lta:] = lta[length_lta:] - lta[:-length_lta]
lta /= length_lta
# Pad zeros
sta[:length_lta - 1] = 0
# Avoid division by zero by setting zero values to tiny float
dtiny = np.finfo(0.0).tiny
idx = lta < dtiny
lta[idx] = dtiny
return sta / lta
if __name__ == "__main__":
train_csv_path = cc.FEATURE_PATH / "{}".format(sys.argv[1])
train_csv_l = [str(item) for item in TRAIN_CSV_LIST]
extract_features(train_csv_l, train_csv_path)
test_csv_path = cc.FEATURE_PATH / "test"
test_csv_l = [str(item) for item in cc.TEST_CSV_LIST]
extract_features(test_csv_l, test_csv_path)
| 43.45098 | 147 | 0.678475 |
7d6fa3b3374606aeeb424c0e983aa60cabcb2ff0 | 1,209 | py | Python | dgi_gcn/models/dgi.py | Guo-lab/Graph | c4c5fbc8fb5d645c16da20351b9746019cf75aab | [
"MIT"
] | null | null | null | dgi_gcn/models/dgi.py | Guo-lab/Graph | c4c5fbc8fb5d645c16da20351b9746019cf75aab | [
"MIT"
] | null | null | null | dgi_gcn/models/dgi.py | Guo-lab/Graph | c4c5fbc8fb5d645c16da20351b9746019cf75aab | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from layers import GCN, AvgReadout, Discriminator
class DGI(nn.Module):
def __init__(self, n_in, n_h, activation):
super(DGI, self).__init__()
self.gcn = GCN(n_in, n_h, activation)
self.read = AvgReadout()
self.sigm = nn.Sigmoid()
self.disc = Discriminator(n_h)
def forward(self, seq1, seq2, adj, sparse, msk, samp_bias1, samp_bias2):
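        # Deep Graph Infomax forward pass: encode the original features (seq1)
        # and the corrupted features (seq2) with the same GCN, pool the positive
        # embeddings into a graph summary vector c, and let the discriminator
        # score (node embedding, summary) pairs as positive or negative.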
h_1 = self.gcn(seq1, adj, sparse)
#//print("h1 gat OK")
#//print("h1 shape", h_1.shape)
#//print("seq shape, adj shape", seq1.shape, adj.shape)
c = self.read(h_1, msk)
#//print("AvgReadout OK")
#//print("c shape", c.shape)
c = self.sigm(c)
#//print("seq2 shape, adj shape", seq2.shape, adj.shape)
h_2 = self.gcn(seq2, adj, sparse)
#//print(h_2.shape)
#//print("h2 gat OK")
ret = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)
return ret
# Detach the return variables
def embed(self, seq, adj, sparse, msk):
h_1 = self.gcn(seq, adj, sparse)
c = self.read(h_1, msk)
return h_1.detach(), c.detach() | 29.487805 | 76 | 0.557486 |
c570097cf8e08e24570debaf9bdf54543aca13c6 | 6,331 | py | Python | lab/weather.py | AbdullahNoori/replace | 1dffd3668ca467a1e5acf20c5fe6f992e483eb49 | [
"MIT"
] | null | null | null | lab/weather.py | AbdullahNoori/replace | 1dffd3668ca467a1e5acf20c5fe6f992e483eb49 | [
"MIT"
] | null | null | null | lab/weather.py | AbdullahNoori/replace | 1dffd3668ca467a1e5acf20c5fe6f992e483eb49 | [
"MIT"
] | null | null | null |
class Subject:
# Both of the following two methods take an
# observer as an argument; that is, the observer
    # to be registered or removed.
def registerObserver(observer):
pass
def removeObserver(observer):
pass
# This method is called to notify all observers
# when the Subject's state (measurements) have changed.
def notifyObservers():
pass
# The observer class is implemented by all observers,
# so they all have to implemented the update() method. Here
# we're following Mary and Sue's lead and
# passing the measurements to the observers.
class Observer:
def update(self, temp, humidity, pressure):
pass
# WeatherData now implements the subject interface.
class WeatherData(Subject):
def __init__(self):
self.observers = []
self.temperature = 0
self.humidity = 0
self.pressure = 0
def registerObserver(self, observer):
# When an observer registers, we just
# add it to the end of the list.
self.observers.append(observer)
def removeObserver(self, observer):
# When an observer wants to un-register,
# we just take it off the list.
self.observers.remove(observer)
def notifyObservers(self):
# We notify the observers when we get updated measurements
# from the Weather Station.
for ob in self.observers:
ob.update(self.temperature, self.humidity, self.pressure)
def measurementsChanged(self):
self.notifyObservers()
def setMeasurements(self, temperature, humidity, pressure):
self.temperature = temperature
self.humidity = humidity
self.pressure = pressure
self.measurementsChanged()
# other WeatherData methods here.
class CurrentConditionsDisplay(Observer):
def __init__(self, weatherData):
self.temperature = 0
self.humidity = 0
self.pressure = 0
self.weatherData = weatherData # save the ref in an attribute.
weatherData.registerObserver(self) # register the observer
# so it gets data updates.
def update(self, temperature, humidity, pressure):
        self.temperature = temperature
self.humidity = humidity
self.pressure = pressure
self.display()
def display(self):
print("Current conditions:", self.temperature,
"F degrees and", self.humidity,"[%] humidity",
"and pressure", self.pressure)
# TODO: implement StatisticsDisplay class and ForecastDisplay class.
# The StatisticsDisplay class should keep track of the min/average/max
# measurements and display them.
class StatisticsDisplay(Observer):
def __init__(self, weatherData):
self.temperatures = []
self.humidities = []
self.pressures = []
self.weatherData = weatherData
weatherData.registerObserver(self)
def update(self, temperature, humidity, pressure):
self.temperatures.append(temperature)
self.humidities.append(humidity)
self.pressures.append(pressure)
self.display()
def getStats(self, units):
"""Returns min, max, and avg of a list"""
if units == "[%]":
values = self.temperatures
measurement = "temp"
elif units == "F degrees":
values = self.humidities
measurement = "humidity"
else:
values = self.pressures
measurement = "pressure"
# calculate results
result = (
f"Min: {measurement}: {min(values)} {units}, "
+ f"Avg {measurement}: {sum(values) / len(values)} {units}, "
+ f"Max {measurement}: {max(values)} {units}"
)
return result
def display(self):
        # displays temperatures
if self.temperatures:
print(self.getStats("F degrees"))
else:
print("No temperature stats")
# displays humidities
if self.humidities:
print(self.getStats("[%]"))
else:
print("No humidity stats")
# displays pressures
if self.pressures:
print(self.getStats(""), '\n')
else:
print("No pressure stats", '\n')
# ForecastDisplay class. Also, we register them to the concrete instance
# of the Subject class so that they retrieve the measurements' updates.
class ForecastDisplay(Observer):
def __init__(self, weatherData):
self.weatherData = weatherData
weatherData.registerObserver(self)
self.forecast_temp = 0
self.forecast_humidity = 0
self.forecast_pressure = 0
# The ForecastDisplay class shows the weather forecast based on the current
    # temperature, humidity and pressure. Use the following formulas:
def update(self, temperature, humidity, pressure):
self.forecast_temp = temperature + 0.11 * humidity + 0.2 * pressure
self.forecast_humidity = humidity - 0.9 * humidity
self.forecast_pressure = pressure + 0.1 * temperature - 0.21 * pressure
self.display()
def display(self):
print("Forecast conditions:", self.forecast_temp, "F degrees and",
self.forecast_humidity, "[%] humidity",
"and pressure", self.forecast_pressure,)
class WeatherStation:
def main(self):
weatherData = WeatherData()
current_display = CurrentConditionsDisplay(weatherData)
# TODO: Create two objects from StatisticsDisplay class and
# ForecastDisplay class. Also, register them to the concrete instance
# of the Subject class so they get the measurements' updates.
stats_display = StatisticsDisplay(weatherData)
forecast_display = ForecastDisplay(weatherData)
weatherData.setMeasurements(80, 65,30.4)
weatherData.setMeasurements(82, 70,29.2)
weatherData.setMeasurements(78, 90,29.2)
# un-register the observer
weatherData.removeObserver(current_display)
weatherData.setMeasurements(120, 100,1000)
if __name__ == "__main__":
w = WeatherStation()
w.main()
| 32.973958 | 79 | 0.623282 |
dfa52052899c80b0f08aa247e02dd629e0153ad3 | 180 | py | Python | start_notebook_server.py | ftschindler-work/proceedings-mbour-2017-lrbms-control | 0fa8b63b223f3ce8bdfa0f010266a7719a574091 | [
"BSD-2-Clause"
] | null | null | null | start_notebook_server.py | ftschindler-work/proceedings-mbour-2017-lrbms-control | 0fa8b63b223f3ce8bdfa0f010266a7719a574091 | [
"BSD-2-Clause"
] | null | null | null | start_notebook_server.py | ftschindler-work/proceedings-mbour-2017-lrbms-control | 0fa8b63b223f3ce8bdfa0f010266a7719a574091 | [
"BSD-2-Clause"
] | 1 | 2020-09-23T12:51:00.000Z | 2020-09-23T12:51:00.000Z | #!/bin/bash
export NOTEBOOK_PATH=$PWD
export NOTEBOOK_PORT=${EXPOSED_PORT:-18881}
jupyter-notebook --ip 0.0.0.0 --no-browser --notebook-dir=$NOTEBOOK_PATH --port=$NOTEBOOK_PORT
| 22.5 | 94 | 0.755556 |
13bd82f006456ce483eba276ce72a8df6bcd6f25 | 1,054 | py | Python | solutions/day11/p1/main.py | tosmun/AdventOfCode | 62f4f3a8cc3761ee5d5eaf682ae9c2c985cd80b5 | [
"Apache-2.0"
] | 1 | 2017-07-15T19:01:03.000Z | 2017-07-15T19:01:03.000Z | solutions/day11/p1/main.py | tosmun/Python-AdventOfCode | 62f4f3a8cc3761ee5d5eaf682ae9c2c985cd80b5 | [
"Apache-2.0"
] | null | null | null | solutions/day11/p1/main.py | tosmun/Python-AdventOfCode | 62f4f3a8cc3761ee5d5eaf682ae9c2c985cd80b5 | [
"Apache-2.0"
] | null | null | null | import copy, re
encoding="UTF-8"
input=bytearray('hepxcrrq',encoding)
max_byte=bytearray('z', encoding)[0]
min_byte=bytearray('a', encoding)[0]
invalid_chars=bytearray('iol', encoding)
double_char_pattern=re.compile(r'(.|^)(?!\1)(.)\2(?!\2)')
new=copy.copy(input)
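# Keep incrementing the candidate password (lowercase a-z, skipping 'i', 'o'
# and 'l') until it contains at least two non-overlapping doubled letters and
# a straight of three consecutive characters (Advent of Code 2015, day 11).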
while True:
#Work backwards
for i in range(len(new)-1,-1,-1):
if new[i] == max_byte:
new[i] = min_byte
#Detect rollover
if i == 0:
#TODO more pythonic way?
for i in range(0, len(new)):
new[i] = min_byte
break
else:
new[i] = new[i] + 1
#Ensure valid char
while(new[i] in invalid_chars):
new[i] = new[i] + 1
break
#Check for two overlapping pairs
new_str = new.decode(encoding)
if len(double_char_pattern.findall(new_str)) < 2:
continue
buffer = [ new[0] ]
for i in range(1, len(new)):
if len(buffer) == 3:
break
elif buffer[-1] != new[i]-1:
buffer = [ new[i] ]
else:
buffer.append(new[i])
if len(buffer) == 3:
print(new.decode(encoding))
break
if new == input:
raise Exception("No suitable new password found")
| 23.422222 | 57 | 0.63852 |
a89b95f6ef51c58e817351f96c53f0273a56013a | 4,112 | py | Python | zfused_maya/zfused_maya/tool/rigging/transferuv.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | 2 | 2019-02-22T03:33:26.000Z | 2019-02-23T03:29:26.000Z | zfused_maya/zfused_maya/tool/rigging/transferuv.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | null | null | null | zfused_maya/zfused_maya/tool/rigging/transferuv.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
# --author-- 张伟
# cmds.polyTransfer('zero:p_zero_luoxuanjiang_1', v = 0, vc = 0, uv = 1, ao = 'zero_172:p_zero_luoxuanjiang_1')
from PySide2 import QtWidgets, QtCore
import maya.OpenMayaUI as oi
import shiboken2 as shiboken
import zfused_maya.widgets.window as win
import maya.cmds as cmds
class PolyTransfer():
def __init__(self):
self.showUI()
self.__uiwindow = oi.MQtUtil.findWindow(u"传递UV")
self.__uiwindow = shiboken.wrapInstance(long(self.__uiwindow), QtWidgets.QWidget)
# pass
def _returnwindow(self):
return self.__uiwindow
def showUI(self):
windowName = u'传递UV'
if cmds.window(windowName, q = True, exists = True):
cmds.deleteUI(windowName)
if cmds.windowPref(windowName, exists = True) == True:
cmds.windowPref(windowName,remove = True)
cmds.window(windowName,t = windowName,sizeable = True,w = 250)
PrimaryLayout = cmds.columnLayout(adj = True, bgc = [0.15,0.15,0.15])
cmds.separator(h = 5)
cmds.text(l = u'方 法 一', bgc = [0.2,0.2,0.2], height = 22)
cmds.text(l = u'选择模型,单击\'<<<\'选定', h = 20)
true_InfoGatherLayout = cmds.rowLayout(nc = 3, adjustableColumn = 2, p = PrimaryLayout)
cmds.text(l = u"UV正确模型 : ")
trueUV_Name = cmds.textField(w = 200)
trueUV_Assign = cmds.button(l = '<<<', c = lambda *args: self.true_AssignBtnCmd())
cmds.setParent(PrimaryLayout)
false_InfoGatherLayout = cmds.rowLayout(nc = 3, adjustableColumn = 2, p = PrimaryLayout)
cmds.text(l = u"UV错误模型 : ")
falseUV_Name = cmds.textField(w = 200)
falseUV_Assign = cmds.button(l = '<<<', c = lambda *agrs: self.false_AssignBtnCmd())
cmds.setParent(PrimaryLayout)
assignButton = cmds.button(l = u'传递', c = lambda *args: self.transferUV())
cmds.setParent(PrimaryLayout)
cmds.separator(h = 10, bgc = [0.2,0.2,0.2])
cmds.separator(h = 10, bgc = [0.2,0.2,0.2])
cmds.text(l = u'方 法 二', bgc = [0.2,0.2,0.2], height = 22)
cmds.text(l = u'先选正确UV模型,后选错误UV模型', h = 20)
cmds.button(l = u'传递', c = lambda *args: self.secTransferUV())
cmds.separator(h = 5)
self.trueUV_Name = trueUV_Name
self.falseUV_Name = falseUV_Name
def setTextField(self, textFieldToSet, Value):
cmds.textField(textFieldToSet, e = True, text = Value)
def getTrueModel(self):
selection = cmds.ls(sl = True)[0]
return selection
def true_AssignBtnCmd(self):
trueUV_Name = self.trueUV_Name
trueModelName = self.getTrueModel()
self.setTextField(trueUV_Name, trueModelName)
def getFalseModel(self):
selection = cmds.ls(sl = True)[0]
return selection
def false_AssignBtnCmd(self):
falseUV_Name = self.falseUV_Name
falseModelName = self.getFalseModel()
self.setTextField(falseUV_Name, falseModelName)
def transferUV(self):
trueUV_Name = self.trueUV_Name
falseUV_Name = self.falseUV_Name
trueName = cmds.textField(trueUV_Name, q = True, text = True)
falseName = cmds.textField(falseUV_Name, q = True, text = True)
cmds.polyTransfer(falseName, v = 0, vc = 0, uv = 1, ao = trueName)
def secTransferUV(self):
selects = cmds.ls(sl = True)
cmds.polyTransfer(selects[1], v = 0, vc = 0, uv = 1, ao = selects[0])
def UI(self):
# self.showUI()
# _uiwindow = oi.MQtUtil.findWindow(u"传递UV")
# _uiwindow = shiboken.wrapInstance(long(_uiwindow), QtWidgets.QWidget)
mainWindow = win.Window()
mainWindow.set_central_widget(self._returnwindow())
mainWindow.set_title_name(u"传递UV")
#mainWindow.setFixedSize(500,286)
mainWindow.resize(500,286)
mainWindow.show()
if __name__ == "__main__":
polytransfer = PolyTransfer()
polytransfer.UI() | 39.538462 | 111 | 0.604086 |
af1f95eda2e8845d622db272b4eee154b56d8c9d | 4,932 | py | Python | test/record/parser/test_response_ccwhois_verisign_grs_com_cc_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_ccwhois_verisign_grs_com_cc_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_ccwhois_verisign_grs_com_cc_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/ccwhois.verisign-grs.com/cc/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestCcwhoisVerisignGrsComCcStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/ccwhois.verisign-grs.com/cc/status_registered.txt"
host = "ccwhois.verisign-grs.com"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
eq_(self.record.domain, "google.cc")
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 4)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.google.com")
eq_(self.record.nameservers[0].ipv4, None)
eq_(self.record.nameservers[0].ipv6, None)
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.google.com")
eq_(self.record.nameservers[1].ipv4, None)
eq_(self.record.nameservers[1].ipv6, None)
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "ns3.google.com")
eq_(self.record.nameservers[2].ipv4, None)
eq_(self.record.nameservers[2].ipv6, None)
eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[3].name, "ns4.google.com")
eq_(self.record.nameservers[3].ipv4, None)
eq_(self.record.nameservers[3].ipv6, None)
def test_registered(self):
eq_(self.record.registered, True)
def test_referral_whois(self):
eq_(self.record.referral_whois, "whois.markmonitor.com")
def test_created_on(self):
eq_(self.record.created_on.__class__.__name__, 'datetime')
eq_(self.record.created_on, time_parse('1999-06-07 00:00:00 UTC'))
def test_registrar(self):
eq_(self.record.registrar.__class__.__name__, 'Registrar')
eq_(self.record.registrar.id, "292")
eq_(self.record.registrar.name, "MARKMONITOR INC.")
eq_(self.record.registrar.organization, None)
eq_(self.record.registrar.url, "http://www.markmonitor.com")
def test_referral_url(self):
eq_(self.record.referral_url, "http://www.markmonitor.com")
def test_updated_on(self):
eq_(self.record.updated_on.__class__.__name__, 'datetime')
eq_(self.record.updated_on, time_parse('2013-05-06 05:17:44 UTC'))
def test_domain_id(self):
eq_(self.record.domain_id, "86420657")
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2014-06-07 00:00:00 UTC'))
def test_disclaimer(self):
eq_(self.record.disclaimer, "TERMS OF USE: You are not authorized to access or query our Whois database through the use of electronic processes that are high-volume and automated except as reasonably necessary to register domain names or modify existing registrations; the Data in VeriSign's (\"VeriSign\") Whois database is provided by VeriSign for information purposes only, and to assist persons in obtaining information about or related to a domain name registration record. VeriSign does not guarantee its accuracy. By submitting a Whois query, you agree to abide by the following terms of use: You agree that you may use this Data only for lawful purposes and that under no circumstances will you use this Data to: (1) allow, enable, or otherwise support the transmission of mass unsolicited, commercial advertising or solicitations via e-mail, telephone, or facsimile; or (2) enable high volume, automated, electronic processes that apply to VeriSign (or its computer systems). The compilation, repackaging, dissemination or other use of this Data is expressly prohibited without the prior written consent of VeriSign. You agree not to use electronic processes that are automated and high-volume to access or query the Whois database except as reasonably necessary to register domain names or modify existing registrations. VeriSign reserves the right to restrict your access to the Whois database in its sole discretion to ensure operational stability. VeriSign may restrict or terminate your access to the Whois database for failure to abide by these terms of use. VeriSign reserves the right to modify these terms at any time.")
| 57.348837 | 1,647 | 0.727494 |
d762a376f57716369303f18337fb37d8543ff2f8 | 2,615 | py | Python | arithmetic_analysis/in_static_equilibrium.py | Study-Repos-Forks/Python | c86aa72cfa0467bd9a5711d7b5a77ed8243e49f1 | [
"MIT"
] | 1 | 2022-03-18T12:11:26.000Z | 2022-03-18T12:11:26.000Z | arithmetic_analysis/in_static_equilibrium.py | abdussalam02/Python | a80e5aadf30817251989378e8d908ca18f733a2f | [
"MIT"
] | null | null | null | arithmetic_analysis/in_static_equilibrium.py | abdussalam02/Python | a80e5aadf30817251989378e8d908ca18f733a2f | [
"MIT"
] | null | null | null | """
Checks if a system of forces is in static equilibrium.
"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(
magnitude: float, angle: float, radian_mode: bool = False
) -> list[float]:
"""
Resolves force along rectangular components.
(force, angle) => (force_x, force_y)
>>> import math
>>> force = polar_force(10, 45)
>>> math.isclose(force[0], 7.071067811865477)
True
>>> math.isclose(force[1], 7.0710678118654755)
True
>>> polar_force(10, 3.14, radian_mode=True)
[-9.999987317275396, 0.01592652916486828]
"""
if radian_mode:
return [magnitude * cos(angle), magnitude * sin(angle)]
return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(
forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
"""
Check if a system is in equilibrium.
It takes two numpy.array objects.
forces ==> [
[force1_x, force1_y],
[force2_x, force2_y],
....]
location ==> [
[x1, y1],
[x2, y2],
....]
>>> force = array([[1, 1], [-1, 2]])
>>> location = array([[1, 0], [10, 0]])
>>> in_static_equilibrium(force, location)
False
"""
# summation of moments is zero
moments: NDArray[float64] = cross(location, forces)
sum_moments: float = sum(moments)
return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 28.423913 | 78 | 0.545315 |
03c0e45d005d4bdc28d9e1488c2e81136cd0e8d7 | 4,719 | py | Python | modules/tools/common/message_manager.py | zarmars/apollo | 2c71e68118fdfc8ea4327e6a0fdc93b428882a8b | [
"Apache-2.0"
] | 1 | 2021-12-04T08:02:09.000Z | 2021-12-04T08:02:09.000Z | modules/tools/common/message_manager.py | Mrrabbitan/apollo | ff6bb065eb343689603a0827828728ed4fa1a699 | [
"Apache-2.0"
] | null | null | null | modules/tools/common/message_manager.py | Mrrabbitan/apollo | ff6bb065eb343689603a0827828728ed4fa1a699 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from modules.localization.proto import localization_pb2
from modules.perception.proto import perception_obstacle_pb2
from modules.perception.proto import traffic_light_detection_pb2
from modules.planning.proto import planning_internal_pb2
from modules.planning.proto import planning_pb2
from modules.prediction.proto import prediction_obstacle_pb2
from modules.routing.proto import routing_pb2
from modules.control.proto import control_cmd_pb2
from modules.canbus.proto import chassis_pb2
from modules.common.proto import drive_event_pb2
from modules.map.relative_map.proto import navigation_pb2
from modules.guardian.proto import guardian_pb2
from google.protobuf import text_format
from modules.tools.common import proto_utils
class MessageType:
def __init__(self, name, topic, msg_type):
self.name = name
self.topic = topic
self.msg_type = msg_type
def instance(self):
        return self.msg_type()
def parse_file(self, filename):
value = self.instance()
if not proto_utils.get_pb_from_file(filename, value):
print("Failed to parse file %s" % filename)
return None
else:
return value
topic_pb_list = [
MessageType("planning", "/apollo/planning", planning_pb2.ADCTrajectory),
MessageType("control", "/apollo/control", control_cmd_pb2.ControlCommand),
MessageType("chassis", "/apollo/canbus/chassis", chassis_pb2.Chassis),
MessageType("prediction", "/apollo/prediction",
prediction_obstacle_pb2.PredictionObstacles),
MessageType("perception", "/apollo/perception/obstacles",
perception_obstacle_pb2.PerceptionObstacles),
MessageType("routing_response", "/apollo/routing_response",
routing_pb2.RoutingResponse),
MessageType("routing_request", "/apollo/routing_request",
routing_pb2.RoutingRequest),
MessageType("localization", "/apollo/localization/pose",
localization_pb2.LocalizationEstimate),
MessageType("traffic_light", "/apollo/perception/traffic_light",
traffic_light_detection_pb2.TrafficLightDetection),
MessageType("drive_event", "/apollo/drive_event",
drive_event_pb2.DriveEvent),
MessageType("relative_map", "/apollo/relative_map", navigation_pb2.MapMsg),
MessageType("navigation", "/apollo/navigation",
navigation_pb2.NavigationInfo),
MessageType("guardian", "/apollo/guardian", guardian_pb2.GuardianCommand),
]
class PbMessageManager:
def __init__(self):
self.__topic_dict = {}
self.__name_dict = {}
for msg in topic_pb_list:
self.__topic_dict[msg.topic] = msg
self.__name_dict[msg.name] = msg
def topic_dict(self):
return self.__topic_dict
def get_msg_meta_by_topic(self, topic):
if topic in self.__topic_dict:
return self.__topic_dict[topic]
else:
return None
def get_msg_meta_by_name(self, name):
if name in self.__name_dict:
return self.__name_dict[name]
else:
return None
def name_dict(self):
return self.__name_dict
def parse_topic_file(self, topic, filename):
if topic not in self.__topic_dict:
print("topic %s is not registered in topic_pb_list" % topic)
return None
meta_msg = self.__topic_dict[topic]
return meta_msg.parse_file(filename)
def parse_file(self, filename):
"""parse a file by guessing topic type"""
for topic, meta_msg in self.__topic_dict.items():
try:
message = meta_msg.parse_file(filename)
if message:
print("identified topic %s" % topic)
return (meta_msg, message)
except text_format.ParseError as e:
print("Tried %s, failed" % (topic))
continue
return (None, None)
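# Illustrative usage sketch, not part of the original Apollo module: it shows
# how PbMessageManager is typically driven: look up metadata for a known
# topic, build an empty protobuf of that type, or let parse_file() guess the
# topic of an on-disk text-format dump.  The dump path below is a placeholder.
def _example_manager_usage(dump_file='/tmp/planning.pb.txt'):
    manager = PbMessageManager()
    meta = manager.get_msg_meta_by_topic('/apollo/planning')
    empty_msg = meta.instance() if meta else None  # empty ADCTrajectory message
    meta_msg, message = manager.parse_file(dump_file)  # (None, None) on failure
    return empty_msg, meta_msg, message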
| 38.365854 | 79 | 0.665395 |
73c11134fd43b5ccb5c236067011b23c61ff39d3 | 13,997 | py | Python | niftynet/application/regression_application.py | josemariamoreira/NiftyNet | d3ed1404fed9b8b69a3b60ae5b398045ae121d34 | [
"Apache-2.0"
] | 1 | 2018-05-13T14:54:47.000Z | 2018-05-13T14:54:47.000Z | niftynet/application/regression_application.py | josemariamoreira/NiftyNet | d3ed1404fed9b8b69a3b60ae5b398045ae121d34 | [
"Apache-2.0"
] | null | null | null | niftynet/application/regression_application.py | josemariamoreira/NiftyNet | d3ed1404fed9b8b69a3b60ae5b398045ae121d34 | [
"Apache-2.0"
] | 2 | 2018-05-13T14:54:48.000Z | 2018-05-26T16:08:09.000Z | import tensorflow as tf
import os
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import ApplicationNetFactory
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import NETWORK_OUTPUT
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.engine.sampler_grid import GridSampler
from niftynet.engine.sampler_resize import ResizeSampler
from niftynet.engine.sampler_uniform import UniformSampler
from niftynet.engine.sampler_weighted import WeightedSampler
from niftynet.engine.sampler_balanced import BalancedSampler
from niftynet.engine.windows_aggregator_grid import GridSamplesAggregator
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.crop import CropLayer
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.loss_regression import LossFunction
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.layer.post_processing import PostProcessingLayer
from niftynet.layer.rand_flip import RandomFlipLayer
from niftynet.layer.rand_rotation import RandomRotationLayer
from niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer
from niftynet.evaluation.regression_evaluator import RegressionEvaluator
SUPPORTED_INPUT = set(['image', 'output', 'weight', 'sampler', 'inferred'])
class RegressionApplication(BaseApplication):
REQUIRED_CONFIG_SECTION = "REGRESSION"
def __init__(self, net_param, action_param, action):
BaseApplication.__init__(self)
tf.logging.info('starting regression application')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.regression_param = None
self.data_param = None
self.SUPPORTED_SAMPLING = {
'uniform': (self.initialise_uniform_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'weighted': (self.initialise_weighted_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'resize': (self.initialise_resize_sampler,
self.initialise_resize_sampler,
self.initialise_resize_aggregator),
'balanced': (self.initialise_balanced_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
}
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
self.data_param = data_param
self.regression_param = task_param
file_lists = self.get_file_lists(data_partitioner)
# read each line of csv files into an instance of Subject
if self.is_training:
self.readers = []
for file_list in file_lists:
reader = ImageReader({'image', 'output', 'weight', 'sampler'})
reader.initialise(data_param, task_param, file_list)
self.readers.append(reader)
elif self.is_inference:
inference_reader = ImageReader(['image'])
file_list = data_partitioner.inference_files
inference_reader.initialise(data_param, task_param, file_lists[0])
self.readers = [inference_reader]
elif self.is_evaluation:
file_list = data_partitioner.inference_files
reader = ImageReader({'image', 'output', 'inferred'})
reader.initialise(data_param, task_param, file_lists[0])
self.readers = [reader]
else:
raise ValueError('Action `{}` not supported. Expected one of {}'
.format(self.action, self.SUPPORTED_ACTIONS))
mean_var_normaliser = MeanVarNormalisationLayer(
image_name='image')
histogram_normaliser = None
if self.net_param.histogram_ref_file:
histogram_normaliser = HistogramNormalisationLayer(
image_name='image',
modalities=vars(task_param).get('image'),
model_filename=self.net_param.histogram_ref_file,
norm_type=self.net_param.norm_type,
cutoff=self.net_param.cutoff,
name='hist_norm_layer')
normalisation_layers = []
if self.net_param.normalisation:
normalisation_layers.append(histogram_normaliser)
if self.net_param.whitening:
normalisation_layers.append(mean_var_normaliser)
augmentation_layers = []
if self.is_training:
if self.action_param.random_flipping_axes != -1:
augmentation_layers.append(RandomFlipLayer(
flip_axes=self.action_param.random_flipping_axes))
if self.action_param.scaling_percentage:
augmentation_layers.append(RandomSpatialScalingLayer(
min_percentage=self.action_param.scaling_percentage[0],
max_percentage=self.action_param.scaling_percentage[1]))
if self.action_param.rotation_angle:
augmentation_layers.append(RandomRotationLayer())
augmentation_layers[-1].init_uniform_angle(
self.action_param.rotation_angle)
volume_padding_layer = []
if self.net_param.volume_padding_size:
volume_padding_layer.append(PadLayer(
image_name=SUPPORTED_INPUT,
border=self.net_param.volume_padding_size))
for reader in self.readers:
reader.add_preprocessing_layers(volume_padding_layer +
normalisation_layers +
augmentation_layers)
def initialise_uniform_sampler(self):
self.sampler = [[UniformSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_weighted_sampler(self):
self.sampler = [[WeightedSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_resize_sampler(self):
self.sampler = [[ResizeSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
shuffle_buffer=self.is_training,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_sampler(self):
self.sampler = [[GridSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
spatial_window_size=self.action_param.spatial_window_size,
window_border=self.action_param.border,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_balanced_sampler(self):
self.sampler = [[BalancedSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_aggregator(self):
self.output_decoder = GridSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order)
def initialise_resize_aggregator(self):
self.output_decoder = ResizeSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order)
def initialise_sampler(self):
if self.is_training:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()
elif self.is_inference:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()
def initialise_aggregator(self):
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]()
def initialise_network(self):
w_regularizer = None
b_regularizer = None
reg_type = self.net_param.reg_type.lower()
decay = self.net_param.decay
if reg_type == 'l2' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l2_regularizer(decay)
b_regularizer = regularizers.l2_regularizer(decay)
elif reg_type == 'l1' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l1_regularizer(decay)
b_regularizer = regularizers.l1_regularizer(decay)
self.net = ApplicationNetFactory.create(self.net_param.name)(
num_classes=1,
w_regularizer=w_regularizer,
b_regularizer=b_regularizer,
acti_func=self.net_param.activation_function)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
def switch_sampler(for_training):
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0][0 if for_training else -1]
return sampler.pop_batch_op()
if self.is_training:
if self.action_param.validation_every_n > 0:
data_dict = tf.cond(tf.logical_not(self.is_validation),
lambda: switch_sampler(True),
lambda: switch_sampler(False))
else:
data_dict = switch_sampler(for_training=True)
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=self.is_training)
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.action_param.lr)
loss_func = LossFunction(
loss_type=self.action_param.loss_type)
crop_layer = CropLayer(
border=self.regression_param.loss_border, name='crop-88')
prediction = crop_layer(net_out)
ground_truth = crop_layer(data_dict.get('output', None))
weight_map = None if data_dict.get('weight', None) is None \
else crop_layer(data_dict.get('weight', None))
data_loss = loss_func(prediction=prediction,
ground_truth=ground_truth,
weight_map=weight_map)
reg_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if self.net_param.decay > 0.0 and reg_losses:
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
else:
loss = data_loss
grads = self.optimiser.compute_gradients(loss)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=data_loss, name='Loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=data_loss, name='Loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
elif self.is_inference:
data_dict = switch_sampler(for_training=False)
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=self.is_training)
crop_layer = CropLayer(border=0, name='crop-88')
post_process_layer = PostProcessingLayer('IDENTITY')
net_out = post_process_layer(crop_layer(net_out))
outputs_collector.add_to_collection(
var=net_out, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
self.initialise_aggregator()
def interpret_output(self, batch_output):
if self.is_inference:
return self.output_decoder.decode_batch(
batch_output['window'], batch_output['location'])
else:
return True
def initialise_evaluator(self, eval_param):
self.eval_param = eval_param
self.evaluator = RegressionEvaluator(self.readers[0],
self.regression_param,
eval_param)
def add_inferred_output(self, data_param, task_param):
return self.add_inferred_output_like(data_param, task_param, 'output')
| 45.444805 | 78 | 0.650639 |
13acb83397ddc86f7d6751b0180565940b1d779a | 713 | py | Python | cplex_gurobi_projects/00_Initial/first_example.py | hpaucar/autonomous-system-repo | b86b62c23fe9a05694fcb5a106457454ff9976fb | [
"MIT"
] | null | null | null | cplex_gurobi_projects/00_Initial/first_example.py | hpaucar/autonomous-system-repo | b86b62c23fe9a05694fcb5a106457454ff9976fb | [
"MIT"
] | null | null | null | cplex_gurobi_projects/00_Initial/first_example.py | hpaucar/autonomous-system-repo | b86b62c23fe9a05694fcb5a106457454ff9976fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
First Example from CPLEX
"""
import pyomo.environ as pyo
from pyomo.environ import *
from pyomo.opt import SolverFactory
model = pyo.ConcreteModel()
model.c = pyo.Var(bounds=(0, None), domain=Integers)
model.p = pyo.Var(bounds=(0, None), domain=Integers)
c = model.c
p = model.p
model.obj = pyo.Objective(expr = 3000*c + 5000*p, sense=maximize)
model.C1 = pyo.Constraint(expr = 2*c + 3*p <= 30)
model.C2 = pyo.Constraint(expr = 4*c + 8*p <= 70)
opt = SolverFactory('gurobi')
opt.solve(model)
model.pprint()
print('------------------------------------------------------')
print('Num houses: ', pyo.value(c))
print('Num buildings: ', pyo.value(p))
print('Profit: ', pyo.value(model.obj)) | 23.766667 | 65 | 0.619916 |
5fb99e5517c1f752028d731a23e50fc1001ed510 | 9,634 | py | Python | lib/core/function.py | kuldeepbrd1/deep-high-resolution-net.pytorch | aece4d855edb4f43c968218a294a72e23304b4b1 | [
"MIT"
] | 1 | 2021-12-17T08:37:38.000Z | 2021-12-17T08:37:38.000Z | lib/core/function.py | kuldeepbrd1/HRNet-spacecraft-pose | 13992450423449b9abf3b02741e699d5e2ae2875 | [
"MIT"
] | null | null | null | lib/core/function.py | kuldeepbrd1/HRNet-spacecraft-pose | 13992450423449b9abf3b02741e699d5e2ae2875 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import numpy as np
import torch
from core.evaluate import accuracy
from core.inference import get_final_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images
# ---- Added all heatmap save util function ---- 28/02
from sat_pose_utils.utils import save_all_val_heatmaps
logger = logging.getLogger(__name__)
def train(config, train_loader, model, criterion, optimizer, epoch,
output_dir, tb_log_dir, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# compute output
outputs = model(input)
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
if isinstance(outputs, list):
loss = criterion(outputs[0], target, target_weight)
for output in outputs[1:]:
loss += criterion(output, target, target_weight)
else:
output = outputs
loss = criterion(output, target, target_weight)
# loss = criterion(output, target, target_weight)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
_, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
target.detach().cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0)/batch_time.val,
data_time=data_time, loss=losses, acc=acc)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
save_debug_images(config, input, meta, target, pred*4, output,
prefix)
def validate(config, val_loader, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None):
batch_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to evaluate mode
model.eval()
num_samples = len(val_dataset)
all_preds = np.zeros(
(num_samples, config.MODEL.NUM_JOINTS, 3),
dtype=np.float32
)
all_boxes = np.zeros((num_samples, 6))
image_path = []
filenames = []
imgnums = []
idx = 0
with torch.no_grad():
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(val_loader):
# compute output
outputs = model(input)
if isinstance(outputs, list):
output = outputs[-1]
else:
output = outputs
if config.TEST.FLIP_TEST:
input_flipped = input.flip(3)
outputs_flipped = model(input_flipped)
if isinstance(outputs_flipped, list):
output_flipped = outputs_flipped[-1]
else:
output_flipped = outputs_flipped
output_flipped = flip_back(output_flipped.cpu().numpy(),
val_dataset.flip_pairs)
output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
# feature is not aligned, shift flipped heatmap for higher accuracy
if config.TEST.SHIFT_HEATMAP:
output_flipped[:, :, :, 1:] = \
output_flipped.clone()[:, :, :, 0:-1]
output = (output + output_flipped) * 0.5
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
loss = criterion(output, target, target_weight)
num_images = input.size(0)
# measure accuracy and record loss
losses.update(loss.item(), num_images)
_, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
target.cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
c = meta['center'].numpy()
s = meta['scale'].numpy()
score = meta['score'].numpy()
#print(f"idx: {i}")
preds, maxvals = get_final_preds(
config, output.clone().cpu().numpy(), c, s)
all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
all_preds[idx:idx + num_images, :, 2:3] = maxvals
# double check this all_boxes parts
all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
all_boxes[idx:idx + num_images, 5] = score
image_path.extend(meta['image'])
idx += num_images
# ----------------Save all heatmaps from test/val : 28/02 -----
save_all_val_heatmaps(config, input, meta, target, pred*4, output,output_dir)
if i % config.PRINT_FREQ == 0:
msg = 'Test: [{0}/{1}]\t' \
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
loss=losses, acc=acc)
logger.info(msg)
prefix = '{}_{}'.format(
os.path.join(output_dir, config.DATASET.TEST_SET), i
)
save_debug_images(config, input, meta, target, pred*4, output,
prefix)
name_values, perf_indicator = val_dataset.evaluate(
config, all_preds, output_dir, all_boxes, image_path,
filenames, imgnums
)
model_name = config.MODEL.NAME
if isinstance(name_values, list):
for name_value in name_values:
_print_name_value(name_value, model_name)
else:
_print_name_value(name_values, model_name)
if writer_dict:
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar(
'valid_loss',
losses.avg,
global_steps
)
writer.add_scalar(
'valid_acc',
acc.avg,
global_steps
)
if isinstance(name_values, list):
for name_value in name_values:
writer.add_scalars(
'valid',
dict(name_value),
global_steps
)
else:
writer.add_scalars(
'valid',
dict(name_values),
global_steps
)
writer_dict['valid_global_steps'] = global_steps + 1
return perf_indicator
# markdown format output
def _print_name_value(name_value, full_arch_name):
names = name_value.keys()
values = name_value.values()
num_values = len(name_value)
logger.info(
'| Arch ' +
' '.join(['| {}'.format(name) for name in names]) +
' |'
)
logger.info('|---' * (num_values+1) + '|')
if len(full_arch_name) > 15:
full_arch_name = full_arch_name[:8] + '...'
logger.info(
'| ' + full_arch_name + ' ' +
' '.join(['| {:.3f}'.format(value) for value in values]) +
' |'
)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
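# Illustrative sketch, not part of the original file: update(val, n) treats
# val as the mean over n samples, so avg becomes the sample-weighted running
# average used by the loss/accuracy meters above.
def _example_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=4)  # e.g. mean batch loss 0.5 over 4 samples
    meter.update(1.0, n=2)  # mean batch loss 1.0 over 2 samples
    # sum = 0.5*4 + 1.0*2 = 4.0 and count = 6, so avg = 4.0/6 ~ 0.667
    return meter.val, meter.avg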
| 34.284698 | 89 | 0.531659 |
86cd1aac404ea65ff2fa02d832bb61f296deba69 | 4,509 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/rax_cdb_database.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/rax_cdb_database.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/rax_cdb_database.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: rax_cdb_database
short_description: 'create / delete a database in the Cloud Databases'
description:
- create / delete a database in the Cloud Databases.
options:
cdb_id:
type: str
description:
- The databases server UUID
required: yes
name:
type: str
description:
- Name to give to the database
required: yes
character_set:
type: str
description:
- Set of symbols and encodings
default: 'utf8'
collate:
type: str
description:
- Set of rules for comparing characters in a character set
default: 'utf8_general_ci'
state:
type: str
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: "Simon JAILLET (@jails)"
extends_documentation_fragment:
- community.general.rackspace
- community.general.rackspace.openstack
'''
EXAMPLES = '''
- name: Build a database in Cloud Databases
tasks:
- name: Database build request
local_action:
module: rax_cdb_database
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
name: db1
state: present
register: rax_db_database
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
def find_database(instance, name):
try:
database = instance.get_database(name)
except Exception:
return False
return database
def save_database(module, cdb_id, name, character_set, collate):
cdb = pyrax.cloud_databases
try:
instance = cdb.get(cdb_id)
except Exception as e:
module.fail_json(msg='%s' % e.message)
changed = False
database = find_database(instance, name)
if not database:
try:
database = instance.create_database(name=name,
character_set=character_set,
collate=collate)
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
module.exit_json(changed=changed, action='create',
database=rax_to_dict(database))
def delete_database(module, cdb_id, name):
cdb = pyrax.cloud_databases
try:
instance = cdb.get(cdb_id)
except Exception as e:
module.fail_json(msg='%s' % e.message)
changed = False
database = find_database(instance, name)
if database:
try:
database.delete()
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
module.exit_json(changed=changed, action='delete',
database=rax_to_dict(database))
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
# act on the state
if state == 'present':
save_database(module, cdb_id, name, character_set, collate)
elif state == 'absent':
delete_database(module, cdb_id, name)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
cdb_id=dict(type='str', required=True),
name=dict(type='str', required=True),
character_set=dict(type='str', default='utf8'),
collate=dict(type='str', default='utf8_general_ci'),
state=dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
cdb_id = module.params.get('cdb_id')
name = module.params.get('name')
character_set = module.params.get('character_set')
collate = module.params.get('collate')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_cdb_database(module, state, cdb_id, name, character_set, collate)
if __name__ == '__main__':
main()
| 26.063584 | 146 | 0.645154 |
a82f4fe5cd0e915c94eb16e20bf17f63bc9186f1 | 420 | py | Python | kernal_rom.py | folkertvanheusden/pyc64 | 90d7fc117427fc3f7b7a65a5a26ed0d1d2ad8941 | [
"Apache-2.0"
] | null | null | null | kernal_rom.py | folkertvanheusden/pyc64 | 90d7fc117427fc3f7b7a65a5a26ed0d1d2ad8941 | [
"Apache-2.0"
] | null | null | null | kernal_rom.py | folkertvanheusden/pyc64 | 90d7fc117427fc3f7b7a65a5a26ed0d1d2ad8941 | [
"Apache-2.0"
] | null | null | null | # (C) 2020 by Folkert van Heusden <mail@vanheusden.com>
# License: Apache License v2.0
from typing import List
from bus_device import bus_device
class kernal_rom(bus_device):
def __init__(self):
fh = open('kernal.901227-01.bin', 'rb')
self.rom: List[int] = [ int(b) for b in fh.read() ]
fh.close()
def write_through(self):
return True
def read(self, addr):
return self.rom[addr - 0xe000]
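# Illustrative sketch, not part of the original module: the KERNAL ROM is
# mapped at $E000-$FFFF, which is why read() subtracts 0xe000 from the bus
# address.  For example, the 6502 reset vector is stored at $FFFC/$FFFD.
def _example_read_reset_vector():
    rom = kernal_rom()  # needs kernal.901227-01.bin in the working directory
    lo = rom.read(0xfffc)
    hi = rom.read(0xfffd)
    return hi * 256 + lo  # address the CPU starts executing after a reset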
| 24.705882 | 59 | 0.628571 |
0b1440e94e2481ce39569cfabc7511ea13f6f245 | 68,112 | py | Python | taxcalc/utils.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | 1 | 2021-02-23T21:03:43.000Z | 2021-02-23T21:03:43.000Z | taxcalc/utils.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | null | null | null | taxcalc/utils.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | null | null | null | """
PUBLIC low-level utility functions for Tax-Calculator.
"""
# CODING-STYLE CHECKS:
# pycodestyle utils.py
# pylint --disable=locally-disabled utils.py
#
# pylint: disable=too-many-lines
import os
import math
import json
import collections
import pkg_resources
import six
import numpy as np
import pandas as pd
import bokeh.io as bio
import bokeh.plotting as bp
from bokeh.models import PrintfTickFormatter
from taxcalc.utilsprvt import (weighted_count_lt_zero,
weighted_count_gt_zero,
weighted_count, weighted_mean,
wage_weighted, agi_weighted,
expanded_income_weighted)
# Items in the DIST_TABLE_COLUMNS list below correspond to the items in the
# DIST_TABLE_LABELS list below; this correspondence allows us to use this
# labels list to map a label to the correct column in a distribution table.
DIST_VARIABLES = ['expanded_income', 'c00100', 'aftertax_income', 'standard',
'c04470', 'c04600', 'c04800', 'taxbc', 'c62100', 'c09600',
'c05800', 'othertaxes', 'refund', 'c07100',
'iitax', 'payrolltax', 'combined', 's006', 'ubi',
'benefit_cost_total', 'benefit_value_total']
DIST_TABLE_COLUMNS = ['s006',
'c00100',
'num_returns_StandardDed',
'standard',
'num_returns_ItemDed',
'c04470',
'c04600',
'c04800',
'taxbc',
'c62100',
'num_returns_AMT',
'c09600',
'c05800',
'c07100',
'othertaxes',
'refund',
'iitax',
'payrolltax',
'combined',
'ubi',
'benefit_cost_total',
'benefit_value_total',
'expanded_income',
'aftertax_income']
DIST_TABLE_LABELS = ['Returns',
'AGI',
'Standard Deduction Filers',
'Standard Deduction',
'Itemizers',
'Itemized Deduction',
'Personal Exemption',
'Taxable Income',
'Regular Tax',
'AMTI',
'AMT Filers',
'AMT',
'Tax before Credits',
'Non-refundable Credits',
'Other Taxes',
'Refundable Credits',
'Individual Income Tax Liabilities',
                     'Payroll Tax Liabilities',
'Combined Payroll and Individual Income Tax Liabilities',
'Universal Basic Income',
'Total Cost of Benefits',
'Consumption Value of Benefits',
'Expanded Income',
'After-Tax Expanded Income']
# Items in the DIFF_TABLE_COLUMNS list below correspond to the items in the
# DIFF_TABLE_LABELS list below; this correspondence allows us to use this
# labels list to map a label to the correct column in a difference table.
DIFF_VARIABLES = ['expanded_income', 'c00100', 'aftertax_income',
'iitax', 'payrolltax', 'combined', 's006', 'ubi',
'benefit_cost_total', 'benefit_value_total']
DIFF_TABLE_COLUMNS = ['count',
'tax_cut',
'perc_cut',
'tax_inc',
'perc_inc',
'mean',
'tot_change',
'share_of_change',
'ubi',
'benefit_cost_total',
'benefit_value_total',
'pc_aftertaxinc']
DIFF_TABLE_LABELS = ['All Tax Units',
'Tax Units with Tax Cut',
'Percent with Tax Cut',
'Tax Units with Tax Increase',
'Percent with Tax Increase',
'Average Tax Change',
'Total Tax Difference',
'Share of Overall Change',
'Universal Basic Income',
'Total Cost of Benefits',
'Consumption Value of Benefits',
'% Change in After-Tax Income']
DECILE_ROW_NAMES = ['0-10n', '0-10z', '0-10p',
'10-20', '20-30', '30-40', '40-50',
'50-60', '60-70', '70-80', '80-90', '90-100',
'ALL',
'90-95', '95-99', 'Top 1%']
STANDARD_ROW_NAMES = ['<$0K', '=$0K', '$0-10K', '$10-20K', '$20-30K',
'$30-40K', '$40-50K', '$50-75K', '$75-100K',
'$100-200K', '$200-500K', '$500-1000K', '>$1000K', 'ALL']
STANDARD_INCOME_BINS = [-9e99, -1e-9, 1e-9, 10e3, 20e3, 30e3, 40e3, 50e3,
75e3, 100e3, 200e3, 500e3, 1e6, 9e99]
SOI_AGI_BINS = [-9e99, 1.0, 5e3, 10e3, 15e3, 20e3, 25e3, 30e3, 40e3, 50e3,
75e3, 100e3, 200e3, 500e3, 1e6, 1.5e6, 2e6, 5e6, 10e6, 9e99]
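# Illustrative sketch added for clarity (not part of the original module).
# The comments above describe a positional correspondence between the column
# and label lists; it can be used directly to map a label to its column name.
def _example_label_to_column(label='AGI'):
    mapping = dict(zip(DIST_TABLE_LABELS, DIST_TABLE_COLUMNS))
    return mapping[label]  # 'AGI' maps to 'c00100'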
def unweighted_sum(pdf, col_name):
"""
Return unweighted sum of Pandas DataFrame col_name items.
"""
return pdf[col_name].sum()
def weighted_sum(pdf, col_name):
"""
Return weighted sum of Pandas DataFrame col_name items.
"""
return (pdf[col_name] * pdf['s006']).sum()
def add_quantile_table_row_variable(pdf, income_measure, num_quantiles,
decile_details=False,
weight_by_income_measure=False):
"""
    Add a variable to the specified Pandas DataFrame, pdf, that specifies the
    table row and is called 'table_row'. The rows hold an equal number of
    filing units when weight_by_income_measure=False or an equal number of
    income dollars when weight_by_income_measure=True. Assumes that the
    specified pdf contains columns for the specified income_measure and
    for sample weights, s006. When num_quantiles is 10 and decile_details
    is True, the bottom decile is broken up into three subgroups (negative,
    zero, and positive income_measure) and the top decile is broken into
    three subgroups (90-95, 95-99, and top 1%).
"""
assert isinstance(pdf, pd.DataFrame)
assert income_measure in pdf
if decile_details and num_quantiles != 10:
msg = 'decile_details is True when num_quantiles is {}'
raise ValueError(msg.format(num_quantiles))
pdf.sort_values(by=income_measure, inplace=True)
if weight_by_income_measure:
pdf['cumsum_temp'] = np.cumsum(np.multiply(pdf[income_measure].values,
pdf['s006'].values))
min_cumsum = pdf['cumsum_temp'].values[0]
else:
pdf['cumsum_temp'] = np.cumsum(pdf['s006'].values)
min_cumsum = 0. # because s006 values are non-negative
max_cumsum = pdf['cumsum_temp'].values[-1]
cumsum_range = max_cumsum - min_cumsum
bin_width = cumsum_range / float(num_quantiles)
bin_edges = list(min_cumsum +
np.arange(0, (num_quantiles + 1)) * bin_width)
bin_edges[-1] = 9e99 # raise top of last bin to include all observations
bin_edges[0] = -9e99 # lower bottom of 1st bin to include all observations
num_bins = num_quantiles
if decile_details:
assert bin_edges[1] > 1e-9 # bin_edges[1] is top of bottom decile
bin_edges.insert(1, 1e-9) # top of zeros
bin_edges.insert(1, -1e-9) # top of negatives
bin_edges.insert(-1, bin_edges[-2] + 0.5 * bin_width) # top of 90-95
bin_edges.insert(-1, bin_edges[-2] + 0.4 * bin_width) # top of 95-99
num_bins += 4
labels = range(1, (num_bins + 1))
pdf['table_row'] = pd.cut(pdf['cumsum_temp'], bin_edges,
right=False, labels=labels)
pdf.drop('cumsum_temp', axis=1, inplace=True)
return pdf
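# Illustrative sketch added for clarity (not part of the original module).
# A small synthetic DataFrame, with made-up incomes and equal sample weights,
# shows what the function adds: a categorical 'table_row' column labeled
# 1..num_quantiles, each label covering roughly an equal share of total weight.
def _example_quantile_rows():
    synth = pd.DataFrame({'expanded_income': np.linspace(0., 500e3, num=1000),
                          's006': np.ones(1000)})
    synth = add_quantile_table_row_variable(synth, 'expanded_income',
                                            num_quantiles=10)
    return synth['table_row'].value_counts().sort_index()  # ~100 units per row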
def add_income_table_row_variable(pdf, income_measure, bin_edges):
"""
Add a variable to specified Pandas DataFrame, pdf, that specifies the
table row and is called 'table_row'. The rows are defined by the
specified bin_edges function argument. Note that the bin groupings
are LEFT INCLUSIVE, which means that bin_edges=[1,2,3,4] implies these
three bin groupings: [1,2), [2,3), [3,4).
Parameters
----------
pdf: Pandas DataFrame
the object to which we are adding bins
income_measure: String
specifies income variable used to construct bins
bin_edges: list of scalar bin edges
Returns
-------
pdf: Pandas DataFrame
the original input plus the added 'table_row' column
"""
assert isinstance(pdf, pd.DataFrame)
assert income_measure in pdf
assert isinstance(bin_edges, list)
pdf['table_row'] = pd.cut(pdf[income_measure], bin_edges, right=False)
return pdf
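# Illustrative sketch added for clarity (not part of the original module).
# Demonstrates the left-inclusive binning described above using the module's
# STANDARD_INCOME_BINS; the five income values are made up.
def _example_income_rows():
    synth = pd.DataFrame({'expanded_income': [-5e3, 0., 25e3, 60e3, 2e6],
                          's006': np.ones(5)})
    synth = add_income_table_row_variable(synth, 'expanded_income',
                                          STANDARD_INCOME_BINS)
    # for example, 25e3 lands in [20e3, 30e3) and 2e6 in [1e6, 9e99)
    return synth[['expanded_income', 'table_row']]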
def get_sums(pdf):
"""
Compute unweighted sum of items in each column of Pandas DataFrame, pdf.
Returns
-------
Pandas Series object containing column sums indexed by pdf column names.
"""
sums = dict()
for col in pdf.columns.values.tolist():
if col != 'table_row':
sums[col] = pdf[col].sum()
return pd.Series(sums, name='ALL')
def create_distribution_table(vdf, groupby, income_measure):
"""
Get results from vdf, sort them by expanded_income based on groupby,
and return them as a table.
Parameters
----------
vdf : Pandas DataFrame including columns named in DIST_TABLE_COLUMNS list
for example, an object returned from the Calculator class
distribution_table_dataframe method
groupby : String object
options for input: 'weighted_deciles' or
'standard_income_bins' or 'soi_agi_bins'
determines how the rows in the resulting Pandas DataFrame are sorted
income_measure: String object
options for input: 'expanded_income' or 'expanded_income_baseline'
determines which variable is used to sort rows
Returns
-------
distribution table as a Pandas DataFrame with DIST_TABLE_COLUMNS and
groupby rows.
NOTE: when groupby is 'weighted_deciles', the returned table has three
extra rows containing top-decile detail consisting of statistics
for the 0.90-0.95 quantile range (bottom half of top decile),
for the 0.95-0.99 quantile range, and
for the 0.99-1.00 quantile range (top one percent); and the
returned table splits the bottom decile into filing units with
negative (denoted by a 0-10n row label),
zero (denoted by a 0-10z row label), and
positive (denoted by a 0-10p row label) values of the
specified income_measure.
"""
# pylint: disable=too-many-statements,too-many-branches
# nested function that returns calculated column statistics as a DataFrame
def stat_dataframe(gpdf):
"""
Returns calculated distribution table column statistics derived from
the specified grouped Dataframe object, gpdf.
"""
unweighted_columns = ['s006', 'num_returns_StandardDed',
'num_returns_ItemDed', 'num_returns_AMT']
sdf = pd.DataFrame()
for col in DIST_TABLE_COLUMNS:
if col in unweighted_columns:
sdf[col] = gpdf.apply(unweighted_sum, col)
else:
sdf[col] = gpdf.apply(weighted_sum, col)
return sdf
# main logic of create_distribution_table
assert isinstance(vdf, pd.DataFrame)
assert (groupby == 'weighted_deciles' or
groupby == 'standard_income_bins' or
groupby == 'soi_agi_bins')
assert (income_measure == 'expanded_income' or
income_measure == 'expanded_income_baseline')
assert income_measure in vdf
assert 'table_row' not in list(vdf.columns.values)
# sort the data given specified groupby and income_measure
if groupby == 'weighted_deciles':
pdf = add_quantile_table_row_variable(vdf, income_measure,
10, decile_details=True)
elif groupby == 'standard_income_bins':
pdf = add_income_table_row_variable(vdf, income_measure,
STANDARD_INCOME_BINS)
elif groupby == 'soi_agi_bins':
pdf = add_income_table_row_variable(vdf, income_measure,
SOI_AGI_BINS)
# construct grouped DataFrame
gpdf = pdf.groupby('table_row', as_index=False)
dist_table = stat_dataframe(gpdf)
del pdf['table_row']
# compute sum row
sum_row = get_sums(dist_table)[dist_table.columns]
# handle placement of sum_row in table
if groupby == 'weighted_deciles':
# compute top-decile row
lenindex = len(dist_table.index)
assert lenindex == 14 # rows should be indexed from 0 to 13
topdec_row = get_sums(dist_table[11:lenindex])[dist_table.columns]
# move top-decile detail rows to make room for topdec_row and sum_row
dist_table = dist_table.reindex(index=range(0, lenindex + 2))
dist_table.iloc[15] = dist_table.iloc[13]
dist_table.iloc[14] = dist_table.iloc[12]
dist_table.iloc[13] = dist_table.iloc[11]
dist_table.iloc[12] = sum_row
dist_table.iloc[11] = topdec_row
del topdec_row
else:
dist_table = dist_table.append(sum_row)
del sum_row
# set print display format for float table elements
pd.options.display.float_format = '{:8,.0f}'.format
# ensure dist_table columns are in correct order
assert dist_table.columns.values.tolist() == DIST_TABLE_COLUMNS
# add row names to table if using weighted_deciles or standard_income_bins
if groupby == 'weighted_deciles':
rownames = DECILE_ROW_NAMES
elif groupby == 'standard_income_bins':
rownames = STANDARD_ROW_NAMES
else:
rownames = None
if rownames:
assert len(dist_table.index) == len(rownames)
dist_table.index = rownames
del rownames
# delete intermediate Pandas DataFrame objects
del gpdf
del pdf
# return table as Pandas DataFrame
vdf.sort_index(inplace=True)
return dist_table
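# Illustrative usage sketch added for clarity (not part of the original
# module).  It assumes calc is a Calculator object whose taxes have already
# been computed; distribution_table_dataframe is the method referenced in the
# docstring above, and the no-argument call here is only an assumption about
# its signature.
def _example_distribution_table(calc):
    vdf = calc.distribution_table_dataframe()  # columns in DIST_TABLE_COLUMNS
    return create_distribution_table(vdf, groupby='weighted_deciles',
                                     income_measure='expanded_income')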
def create_difference_table(vdf1, vdf2, groupby, tax_to_diff):
"""
Get results from two different vdf, construct tax difference results,
and return the difference statistics as a table.
Parameters
----------
vdf1 : Pandas DataFrame including columns named in DIFF_VARIABLES list
        for example, object returned from a dataframe(DIFF_VARIABLES) call
        on the baseline Calculator object
    vdf2 : Pandas DataFrame including columns in the DIFF_VARIABLES list
        for example, object returned from a dataframe(DIFF_VARIABLES) call
on the reform Calculator object
groupby : String object
options for input: 'weighted_deciles' or
'standard_income_bins' or 'soi_agi_bins'
determines how the rows in the resulting Pandas DataFrame are sorted
tax_to_diff : String object
options for input: 'iitax', 'payrolltax', 'combined'
specifies which tax to difference
Returns
-------
difference table as a Pandas DataFrame with DIFF_TABLE_COLUMNS and
groupby rows.
NOTE: when groupby is 'weighted_deciles', the returned table has three
extra rows containing top-decile detail consisting of statistics
for the 0.90-0.95 quantile range (bottom half of top decile),
for the 0.95-0.99 quantile range, and
for the 0.99-1.00 quantile range (top one percent); and the
returned table splits the bottom decile into filing units with
negative (denoted by a 0-10n row label),
zero (denoted by a 0-10z row label), and
positive (denoted by a 0-10p row label) values of the
specified income_measure.
"""
# pylint: disable=too-many-statements,too-many-locals
# nested function that creates dataframe containing additive statistics
def additive_stats_dataframe(gpdf):
"""
Nested function that returns additive stats DataFrame derived from gpdf
"""
sdf = pd.DataFrame()
sdf['count'] = gpdf.apply(weighted_count)
sdf['tax_cut'] = gpdf.apply(weighted_count_lt_zero, 'tax_diff')
sdf['tax_inc'] = gpdf.apply(weighted_count_gt_zero, 'tax_diff')
sdf['tot_change'] = gpdf.apply(weighted_sum, 'tax_diff')
sdf['ubi'] = gpdf.apply(weighted_sum, 'ubi')
sdf['benefit_cost_total'] = gpdf.apply(weighted_sum,
'benefit_cost_total')
sdf['benefit_value_total'] = gpdf.apply(weighted_sum,
'benefit_value_total')
sdf['atinc1'] = gpdf.apply(weighted_sum, 'atinc1')
sdf['atinc2'] = gpdf.apply(weighted_sum, 'atinc2')
return sdf
# main logic of create_difference_table
assert isinstance(vdf1, pd.DataFrame)
assert isinstance(vdf2, pd.DataFrame)
assert np.allclose(vdf1['s006'], vdf2['s006']) # check rows in same order
assert (groupby == 'weighted_deciles' or
groupby == 'standard_income_bins' or
groupby == 'soi_agi_bins')
assert 'expanded_income' in vdf1
assert (tax_to_diff == 'iitax' or
tax_to_diff == 'payrolltax' or
tax_to_diff == 'combined')
assert 'table_row' not in list(vdf1.columns.values)
assert 'table_row' not in list(vdf2.columns.values)
baseline_expanded_income = 'expanded_income_baseline'
vdf2[baseline_expanded_income] = vdf1['expanded_income']
vdf2['tax_diff'] = vdf2[tax_to_diff] - vdf1[tax_to_diff]
vdf2['atinc1'] = vdf1['aftertax_income']
vdf2['atinc2'] = vdf2['aftertax_income']
# add table_row column to vdf2 given specified groupby and income_measure
if groupby == 'weighted_deciles':
pdf = add_quantile_table_row_variable(vdf2, baseline_expanded_income,
10, decile_details=True)
elif groupby == 'standard_income_bins':
pdf = add_income_table_row_variable(vdf2, baseline_expanded_income,
STANDARD_INCOME_BINS)
elif groupby == 'soi_agi_bins':
pdf = add_income_table_row_variable(vdf2, baseline_expanded_income,
SOI_AGI_BINS)
# create grouped Pandas DataFrame
gpdf = pdf.groupby('table_row', as_index=False)
del pdf['table_row']
# create additive difference table statistics from gpdf
diff_table = additive_stats_dataframe(gpdf)
# calculate additive statistics on sums row
sum_row = get_sums(diff_table)[diff_table.columns]
# handle placement of sum_row in table
if groupby == 'weighted_deciles':
# compute top-decile row
lenindex = len(diff_table.index)
assert lenindex == 14 # rows should be indexed from 0 to 13
topdec_row = get_sums(diff_table[11:lenindex])[diff_table.columns]
# move top-decile detail rows to make room for topdec_row and sum_row
diff_table = diff_table.reindex(index=range(0, lenindex + 2))
diff_table.iloc[15] = diff_table.iloc[13]
diff_table.iloc[14] = diff_table.iloc[12]
diff_table.iloc[13] = diff_table.iloc[11]
diff_table.iloc[12] = sum_row
diff_table.iloc[11] = topdec_row
del topdec_row
else:
diff_table = diff_table.append(sum_row)
# delete intermediate Pandas DataFrame objects
del gpdf
del pdf
# compute non-additive stats in each table cell
count = diff_table['count']
diff_table['perc_cut'] = np.where(count > 0.,
100 * diff_table['tax_cut'] / count,
0.)
diff_table['perc_inc'] = np.where(count > 0.,
100 * diff_table['tax_inc'] / count,
0.)
diff_table['mean'] = np.where(count > 0.,
diff_table['tot_change'] / count,
0.)
total_change = sum_row['tot_change']
diff_table['share_of_change'] = np.where(total_change == 0.,
np.nan,
(100 * diff_table['tot_change'] /
total_change))
diff_table['pc_aftertaxinc'] = np.where(diff_table['atinc1'] == 0.,
np.nan,
(100 * (diff_table['atinc2'] /
diff_table['atinc1'] - 1)))
# delete intermediate Pandas DataFrame objects
del diff_table['atinc1']
del diff_table['atinc2']
del count
del sum_row
# set print display format for float table elements
pd.options.display.float_format = '{:10,.2f}'.format
# put diff_table columns in correct order
diff_table = diff_table.reindex(columns=DIFF_TABLE_COLUMNS)
# add row names to table if using weighted_deciles or standard_income_bins
if groupby == 'weighted_deciles':
rownames = DECILE_ROW_NAMES
elif groupby == 'standard_income_bins':
rownames = STANDARD_ROW_NAMES
else:
rownames = None
if rownames:
assert len(diff_table.index) == len(rownames)
diff_table.index = rownames
del rownames
# return table as Pandas DataFrame
vdf1.sort_index(inplace=True)
vdf2.sort_index(inplace=True)
return diff_table
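# Illustrative usage sketch added for clarity (not part of the original
# module).  calc1 and calc2 stand for baseline and reform Calculator objects
# with taxes already computed; dataframe() is the method referenced in the
# docstring above.
def _example_difference_table(calc1, calc2):
    vdf1 = calc1.dataframe(DIFF_VARIABLES)
    vdf2 = calc2.dataframe(DIFF_VARIABLES)
    return create_difference_table(vdf1, vdf2,
                                   groupby='standard_income_bins',
                                   tax_to_diff='combined')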
def create_diagnostic_table(vdf, year):
"""
Extract single-year diagnostic table from Pandas DataFrame object
derived from a Calculator object using the dataframe(DIST_VARIABLES)
method.
Parameters
----------
vdf : Pandas DataFrame object containing the variables
year : calendar year for which variables were drawn from Calculator object
Returns
-------
Pandas DataFrame object containing the diagnostic table
"""
# pylint: disable=too-many-statements
def diagnostic_table_odict(recs):
"""
Nested function that extracts diagnostic table dictionary from
        the specified Pandas DataFrame object, recs.
        Parameters
        ----------
        recs : Pandas DataFrame object containing the variables
Returns
-------
ordered dictionary of variable names and aggregate weighted values
"""
# aggregate weighted values expressed in millions or billions
in_millions = 1.0e-6
in_billions = 1.0e-9
odict = collections.OrderedDict()
# total number of filing units
wghts = vdf['s006']
odict['Returns (#m)'] = wghts.sum() * in_millions
# adjusted gross income
agi = vdf['c00100']
odict['AGI ($b)'] = (agi * wghts).sum() * in_billions
# number of itemizers
num = (wghts[vdf['c04470'] > 0.].sum())
odict['Itemizers (#m)'] = num * in_millions
# itemized deduction
ided1 = vdf['c04470'] * wghts
val = ided1[vdf['c04470'] > 0.].sum()
odict['Itemized Deduction ($b)'] = val * in_billions
# number of standard deductions
num = wghts[vdf['standard'] > 0.].sum()
odict['Standard Deduction Filers (#m)'] = num * in_millions
# standard deduction
sded1 = recs.standard * wghts
val = sded1[vdf['standard'] > 0.].sum()
odict['Standard Deduction ($b)'] = val * in_billions
# personal exemption
val = (vdf['c04600'] * wghts).sum()
odict['Personal Exemption ($b)'] = val * in_billions
# taxable income
val = (vdf['c04800'] * wghts).sum()
odict['Taxable Income ($b)'] = val * in_billions
# regular tax liability
val = (vdf['taxbc'] * wghts).sum()
odict['Regular Tax ($b)'] = val * in_billions
# AMT taxable income
odict['AMT Income ($b)'] = ((vdf['c62100'] * wghts).sum() *
in_billions)
# total AMT liability
odict['AMT Liability ($b)'] = ((vdf['c09600'] * wghts).sum() *
in_billions)
# number of people paying AMT
odict['AMT Filers (#m)'] = (wghts[vdf['c09600'] > 0.].sum() *
in_millions)
# tax before credits
val = (vdf['c05800'] * wghts).sum()
odict['Tax before Credits ($b)'] = val * in_billions
# refundable credits
val = (vdf['refund'] * wghts).sum()
odict['Refundable Credits ($b)'] = val * in_billions
# nonrefundable credits
val = (vdf['c07100'] * wghts).sum()
odict['Nonrefundable Credits ($b)'] = val * in_billions
# reform surtaxes (part of federal individual income tax liability)
val = (vdf['surtax'] * wghts).sum()
odict['Reform Surtaxes ($b)'] = val * in_billions
# other taxes on Form 1040
val = (vdf['othertaxes'] * wghts).sum()
odict['Other Taxes ($b)'] = val * in_billions
# federal individual income tax liability
val = (vdf['iitax'] * wghts).sum()
odict['Ind Income Tax ($b)'] = val * in_billions
# OASDI+HI payroll tax liability (including employer share)
val = (vdf['payrolltax'] * wghts).sum()
odict['Payroll Taxes ($b)'] = val * in_billions
# combined income and payroll tax liability
val = (vdf['combined'] * wghts).sum()
odict['Combined Liability ($b)'] = val * in_billions
# number of tax units with non-positive income tax liability
num = (wghts[vdf['iitax'] <= 0]).sum()
odict['With Income Tax <= 0 (#m)'] = num * in_millions
# number of tax units with non-positive combined tax liability
num = (wghts[vdf['combined'] <= 0]).sum()
odict['With Combined Tax <= 0 (#m)'] = num * in_millions
return odict
# tabulate diagnostic table
odict = diagnostic_table_odict(vdf)
pdf = pd.DataFrame(data=odict, index=[year], columns=odict.keys())
pdf = pdf.transpose()
pd.options.display.float_format = '{:8,.1f}'.format
del odict
return pdf
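# Illustrative usage sketch added for clarity (not part of the original
# module).  calc stands for a Calculator object with taxes already computed
# for the year of interest; note that the nested tabulation above also reads
# a 'surtax' column, which is not in DIST_VARIABLES and so is added here.
def _example_diagnostic_table(calc, year):
    vdf = calc.dataframe(DIST_VARIABLES + ['surtax'])
    return create_diagnostic_table(vdf, year)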
def mtr_graph_data(vdf, year,
mars='ALL',
mtr_measure='combined',
mtr_variable='e00200p',
alt_e00200p_text='',
mtr_wrt_full_compen=False,
income_measure='expanded_income',
dollar_weighting=False):
"""
Prepare marginal tax rate data needed by xtr_graph_plot utility function.
Parameters
----------
vdf : a Pandas DataFrame object containing variables and marginal tax rates
(See Calculator.mtr_graph method for required elements of vdf.)
year : integer
specifies calendar year of the data in vdf
mars : integer or string
specifies which filing status subgroup to show in the graph
- 'ALL': include all filing units in sample
- 1: include only single filing units
- 2: include only married-filing-jointly filing units
- 3: include only married-filing-separately filing units
- 4: include only head-of-household filing units
mtr_measure : string
specifies which marginal tax rate to show on graph's y axis
- 'itax': marginal individual income tax rate
- 'ptax': marginal payroll tax rate
- 'combined': sum of marginal income and payroll tax rates
mtr_variable : string
any string in the Calculator.VALID_MTR_VARS set
specifies variable to change in order to compute marginal tax rates
alt_e00200p_text : string
text to use in place of mtr_variable when mtr_variable is 'e00200p';
if empty string then use 'e00200p'
mtr_wrt_full_compen : boolean
see documentation of Calculator.mtr() argument wrt_full_compensation
(value has an effect only if mtr_variable is 'e00200p')
income_measure : string
specifies which income variable to show on the graph's x axis
- 'wages': wage and salary income (e00200)
- 'agi': adjusted gross income, AGI (c00100)
- 'expanded_income': sum of AGI, non-taxable interest income,
non-taxable social security benefits, and employer share of
FICA taxes.
dollar_weighting : boolean
False implies both income_measure percentiles on x axis and
mtr values for each percentile on the y axis are computed
without using dollar income_measure weights (just sampling weights);
True implies both income_measure percentiles on x axis and
mtr values for each percentile on the y axis are computed
using dollar income_measure weights (in addition to sampling weights).
Specifying True produces a graph x axis that shows income_measure
(not filing unit) percentiles.
Returns
-------
dictionary object suitable for passing to xtr_graph_plot utility function
"""
# pylint: disable=too-many-arguments,too-many-statements
# pylint: disable=too-many-locals,too-many-branches
# check validity of function arguments
# . . check income_measure value
weighting_function = weighted_mean
if income_measure == 'wages':
income_var = 'e00200'
income_str = 'Wage'
if dollar_weighting:
weighting_function = wage_weighted
elif income_measure == 'agi':
income_var = 'c00100'
income_str = 'AGI'
if dollar_weighting:
weighting_function = agi_weighted
elif income_measure == 'expanded_income':
income_var = 'expanded_income'
income_str = 'Expanded-Income'
if dollar_weighting:
weighting_function = expanded_income_weighted
else:
msg = ('income_measure="{}" is neither '
'"wages", "agi", nor "expanded_income"')
raise ValueError(msg.format(income_measure))
# . . check mars value
if isinstance(mars, six.string_types):
if mars != 'ALL':
msg = 'string value of mars="{}" is not "ALL"'
raise ValueError(msg.format(mars))
elif isinstance(mars, int):
if mars < 1 or mars > 4:
msg = 'integer mars="{}" is not in [1,4] range'
raise ValueError(msg.format(mars))
else:
msg = 'mars="{}" is neither a string nor an integer'
raise ValueError(msg.format(mars))
# . . check mars value if mtr_variable is e00200s
if mtr_variable == 'e00200s' and mars != 2:
msg = 'mtr_variable == "e00200s" but mars != 2'
raise ValueError(msg)
# . . check mtr_measure value
if mtr_measure == 'itax':
mtr_str = 'Income-Tax'
elif mtr_measure == 'ptax':
mtr_str = 'Payroll-Tax'
elif mtr_measure == 'combined':
mtr_str = 'Income+Payroll-Tax'
else:
msg = ('mtr_measure="{}" is neither '
'"itax" nor "ptax" nor "combined"')
raise ValueError(msg.format(mtr_measure))
# . . check vdf
assert isinstance(vdf, pd.DataFrame)
# create 'table_row' column given specified income_var and dollar_weighting
dfx = add_quantile_table_row_variable(
vdf, income_var, 100, weight_by_income_measure=dollar_weighting)
# split dfx into groups specified by 'table_row' column
gdfx = dfx.groupby('table_row', as_index=False)
# apply the weighting_function to percentile-grouped mtr values
mtr1_series = gdfx.apply(weighting_function, 'mtr1')
mtr2_series = gdfx.apply(weighting_function, 'mtr2')
# construct DataFrame containing the two mtr?_series
lines = pd.DataFrame()
lines['base'] = mtr1_series
lines['reform'] = mtr2_series
# construct dictionary containing merged data and auto-generated labels
data = dict()
data['lines'] = lines
if dollar_weighting:
income_str = 'Dollar-weighted {}'.format(income_str)
mtr_str = 'Dollar-weighted {}'.format(mtr_str)
data['ylabel'] = '{} MTR'.format(mtr_str)
xlabel_str = 'Baseline {} Percentile'.format(income_str)
if mars != 'ALL':
xlabel_str = '{} for MARS={}'.format(xlabel_str, mars)
data['xlabel'] = xlabel_str
var_str = '{}'.format(mtr_variable)
if mtr_variable == 'e00200p' and alt_e00200p_text != '':
var_str = '{}'.format(alt_e00200p_text)
if mtr_variable == 'e00200p' and mtr_wrt_full_compen:
var_str = '{} wrt full compensation'.format(var_str)
title_str = 'Mean Marginal Tax Rate for {} by Income Percentile'
title_str = title_str.format(var_str)
if mars != 'ALL':
title_str = '{} for MARS={}'.format(title_str, mars)
title_str = '{} for {}'.format(title_str, year)
data['title'] = title_str
return data
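# Hedged usage sketch (not part of the original module): the dictionary
# returned above is meant to flow into xtr_graph_plot and then
# write_graph_file, both defined later in this module.  The vdf argument is
# assumed to already contain the columns required by mtr_graph_data
# (including 'mtr1' and 'mtr2'); keyword names are taken from the docstring.
def _example_mtr_graph_workflow(vdf, year):
    """Illustrative sketch: build an MTR graph and write it to an HTML file."""
    gdata = mtr_graph_data(vdf, year,
                           mars='ALL',
                           mtr_measure='combined',
                           mtr_variable='e00200p',
                           alt_e00200p_text='',
                           mtr_wrt_full_compen=False,
                           income_measure='expanded_income',
                           dollar_weighting=False)
    gplot = xtr_graph_plot(gdata)
    write_graph_file(gplot, 'mtr-graph.html', 'MTR by Income Percentile')
    return gplot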
def atr_graph_data(vdf, year,
mars='ALL',
atr_measure='combined'):
"""
Prepare average tax rate data needed by xtr_graph_plot utility function.
Parameters
----------
vdf : a Pandas DataFrame object containing variables and tax liabilities
(See Calculator.atr_graph method for required elements of vdf.)
year : integer
specifies calendar year of the data in vdf
mars : integer or string
specifies which filing status subgroup to show in the graph
- 'ALL': include all filing units in sample
- 1: include only single filing units
- 2: include only married-filing-jointly filing units
- 3: include only married-filing-separately filing units
- 4: include only head-of-household filing units
atr_measure : string
specifies which average tax rate to show on graph's y axis
- 'itax': average individual income tax rate
- 'ptax': average payroll tax rate
- 'combined': sum of average income and payroll tax rates
Returns
-------
dictionary object suitable for passing to xtr_graph_plot utility function
"""
# pylint: disable=too-many-locals,too-many-statements
# check validity of function arguments
# . . check mars value
if isinstance(mars, six.string_types):
if mars != 'ALL':
msg = 'string value of mars="{}" is not "ALL"'
raise ValueError(msg.format(mars))
elif isinstance(mars, int):
if mars < 1 or mars > 4:
msg = 'integer mars="{}" is not in [1,4] range'
raise ValueError(msg.format(mars))
else:
msg = 'mars="{}" is neither a string nor an integer'
raise ValueError(msg.format(mars))
# . . check atr_measure value
if atr_measure == 'combined':
atr_str = 'Income+Payroll-Tax'
elif atr_measure == 'itax':
atr_str = 'Income-Tax'
elif atr_measure == 'ptax':
atr_str = 'Payroll-Tax'
else:
msg = ('atr_measure="{}" is neither '
'"itax" nor "ptax" nor "combined"')
raise ValueError(msg.format(atr_measure))
# . . check vdf object
assert isinstance(vdf, pd.DataFrame)
# determine last bin that contains non-positive expanded_income values
weights = vdf['s006']
nonpos = np.array(vdf['expanded_income'] <= 0, dtype=bool)
nonpos_frac = weights[nonpos].sum() / weights.sum()
num_bins_with_nonpos = int(math.ceil(100 * nonpos_frac))
# create 'table_row' column
dfx = add_quantile_table_row_variable(vdf, 'expanded_income', 100)
# specify which 'table_row' are included
include = [0] * num_bins_with_nonpos + [1] * (100 - num_bins_with_nonpos)
included = np.array(include, dtype=bool)
# split dfx into groups specified by 'table_row' column
gdfx = dfx.groupby('table_row', as_index=False)
# apply weighted_mean function to percentile-grouped values
avginc_series = gdfx.apply(weighted_mean, 'expanded_income')
avgtax1_series = gdfx.apply(weighted_mean, 'tax1')
avgtax2_series = gdfx.apply(weighted_mean, 'tax2')
# compute average tax rates for each included income percentile
atr1_series = np.zeros_like(avginc_series)
atr1_series[included] = avgtax1_series[included] / avginc_series[included]
atr2_series = np.zeros_like(avginc_series)
atr2_series[included] = avgtax2_series[included] / avginc_series[included]
# construct DataFrame containing the two atr?_series
lines = pd.DataFrame()
lines['base'] = atr1_series
lines['reform'] = atr2_series
    # include only percentiles in bins with positive expanded_income
lines = lines[included]
# construct dictionary containing plot lines and auto-generated labels
data = dict()
data['lines'] = lines
data['ylabel'] = '{} Average Tax Rate'.format(atr_str)
xlabel_str = 'Baseline Expanded-Income Percentile'
if mars != 'ALL':
xlabel_str = '{} for MARS={}'.format(xlabel_str, mars)
data['xlabel'] = xlabel_str
title_str = 'Average Tax Rate by Income Percentile'
if mars != 'ALL':
title_str = '{} for MARS={}'.format(title_str, mars)
title_str = '{} for {}'.format(title_str, year)
data['title'] = title_str
return data
def xtr_graph_plot(data,
width=850,
height=500,
xlabel='',
ylabel='',
title='',
legendloc='bottom_right'):
"""
Plot marginal/average tax rate graph using data returned from either the
mtr_graph_data function or the atr_graph_data function.
Parameters
----------
data : dictionary object returned from ?tr_graph_data() utility function
width : integer
width of plot expressed in pixels
height : integer
height of plot expressed in pixels
xlabel : string
x-axis label; if '', then use label generated by ?tr_graph_data
ylabel : string
y-axis label; if '', then use label generated by ?tr_graph_data
title : string
graph title; if '', then use title generated by ?tr_graph_data
legendloc : string
options: 'top_right', 'top_left', 'bottom_left', 'bottom_right'
specifies location of the legend in the plot
Returns
-------
bokeh.plotting figure object containing a raster graphics plot
Notes
-----
USAGE EXAMPLE::
gdata = mtr_graph_data(...)
gplot = xtr_graph_plot(gdata)
THEN when working interactively in a Python notebook::
bp.show(gplot)
OR when executing script using Python command-line interpreter::
bio.output_file('graph-name.html', title='?TR by Income Percentile')
bio.show(gplot) [OR bio.save(gplot) WILL JUST WRITE FILE TO DISK]
WILL VISUALIZE GRAPH IN BROWSER AND WRITE GRAPH TO SPECIFIED HTML FILE
To convert the visualized graph into a PNG-formatted file, click on
the "Save" icon on the Toolbar (located in the top-right corner of
    the visualized graph) and a PNG-formatted file will be written to your
Download directory.
The ONLY output option the bokeh.plotting figure has is HTML format,
which (as described above) can be converted into a PNG-formatted
raster graphics file. There is no option to make the bokeh.plotting
figure generate a vector graphics file such as an EPS file.
"""
# pylint: disable=too-many-arguments
if title == '':
title = data['title']
fig = bp.figure(plot_width=width, plot_height=height, title=title)
fig.title.text_font_size = '12pt'
lines = data['lines']
fig.line(lines.index, lines.base,
line_color='blue', line_width=3, legend='Baseline')
fig.line(lines.index, lines.reform,
line_color='red', line_width=3, legend='Reform')
fig.circle(0, 0, visible=False) # force zero to be included on y axis
if xlabel == '':
xlabel = data['xlabel']
fig.xaxis.axis_label = xlabel
fig.xaxis.axis_label_text_font_size = '12pt'
fig.xaxis.axis_label_text_font_style = 'normal'
if ylabel == '':
ylabel = data['ylabel']
fig.yaxis.axis_label = ylabel
fig.yaxis.axis_label_text_font_size = '12pt'
fig.yaxis.axis_label_text_font_style = 'normal'
fig.legend.location = legendloc
fig.legend.label_text_font = 'times'
fig.legend.label_text_font_style = 'italic'
fig.legend.label_width = 2
fig.legend.label_height = 2
fig.legend.label_standoff = 2
fig.legend.glyph_width = 14
fig.legend.glyph_height = 14
fig.legend.spacing = 5
fig.legend.padding = 5
return fig
def pch_graph_data(vdf, year):
"""
Prepare percentage change in after-tax expanded income data needed by
pch_graph_plot utility function.
Parameters
----------
vdf : a Pandas DataFrame object containing variables
(See Calculator.pch_graph method for required elements of vdf.)
year : integer
specifies calendar year of the data in vdf
Returns
-------
dictionary object suitable for passing to pch_graph_plot utility function
"""
# pylint: disable=too-many-locals
# check validity of function arguments
assert isinstance(vdf, pd.DataFrame)
# determine last bin that contains non-positive expanded_income values
weights = vdf['s006']
nonpos = np.array(vdf['expanded_income'] <= 0, dtype=bool)
nonpos_frac = weights[nonpos].sum() / weights.sum()
num_bins_with_nonpos = int(math.ceil(100 * nonpos_frac))
# create 'table_row' column
dfx = add_quantile_table_row_variable(vdf, 'expanded_income', 100)
# specify which 'table_row' are included
include = [0] * num_bins_with_nonpos + [1] * (100 - num_bins_with_nonpos)
included = np.array(include, dtype=bool)
# split dfx into groups specified by 'table_row' column
gdfx = dfx.groupby('table_row', as_index=False)
# apply weighted_mean function to percentile-grouped values
avginc_series = gdfx.apply(weighted_mean, 'expanded_income')
change_series = gdfx.apply(weighted_mean, 'chg_aftinc')
    # compute percentage change statistic for each included income percentile
pch_series = np.zeros_like(avginc_series)
pch_series[included] = change_series[included] / avginc_series[included]
# construct DataFrame containing the pch_series expressed as percent
line = pd.DataFrame()
line['pch'] = pch_series * 100
    # include only percentiles in bins with positive expanded_income
line = line[included]
# construct dictionary containing plot line and auto-generated labels
data = dict()
data['line'] = line
data['ylabel'] = 'Change in After-Tax Expanded Income'
data['xlabel'] = 'Baseline Expanded-Income Percentile'
title_str = ('Percentage Change in After-Tax Expanded Income '
'by Income Percentile')
title_str = '{} for {}'.format(title_str, year)
data['title'] = title_str
return data
def pch_graph_plot(data,
width=850,
height=500,
xlabel='',
ylabel='',
title=''):
"""
Plot percentage change in after-tax expanded income using data returned
from the pch_graph_data function.
Parameters
----------
data : dictionary object returned from ?tr_graph_data() utility function
width : integer
width of plot expressed in pixels
height : integer
height of plot expressed in pixels
xlabel : string
x-axis label; if '', then use label generated by pch_graph_data
ylabel : string
y-axis label; if '', then use label generated by pch_graph_data
title : string
graph title; if '', then use title generated by pch_graph_data
Returns
-------
bokeh.plotting figure object containing a raster graphics plot
Notes
-----
See Notes to xtr_graph_plot function.
"""
# pylint: disable=too-many-arguments
if title == '':
title = data['title']
fig = bp.figure(plot_width=width, plot_height=height, title=title)
fig.title.text_font_size = '12pt'
fig.line(data['line'].index, data['line'].pch,
line_color='blue', line_width=3)
fig.circle(0, 0, visible=False) # force zero to be included on y axis
zero_grid_line_range = range(0, 101)
zero_grid_line_height = [0] * len(zero_grid_line_range)
fig.line(zero_grid_line_range, zero_grid_line_height,
line_color='black', line_width=1)
if xlabel == '':
xlabel = data['xlabel']
fig.xaxis.axis_label = xlabel
fig.xaxis.axis_label_text_font_size = '12pt'
fig.xaxis.axis_label_text_font_style = 'normal'
if ylabel == '':
ylabel = data['ylabel']
fig.yaxis.axis_label = ylabel
fig.yaxis.axis_label_text_font_size = '12pt'
fig.yaxis.axis_label_text_font_style = 'normal'
fig.yaxis[0].formatter = PrintfTickFormatter(format='%+.1f%%')
return fig
def write_graph_file(figure, filename, title):
"""
Write HTML file named filename containing figure.
The title is the text displayed in the browser tab.
Parameters
----------
figure : bokeh.plotting figure object
filename : string
name of HTML file to which figure is written; should end in .html
title : string
text displayed in browser tab when HTML file is displayed in browser
Returns
-------
Nothing
"""
delete_file(filename) # work around annoying 'already exists' bokeh msg
bio.output_file(filename=filename, title=title)
bio.save(figure)
def isoelastic_utility_function(consumption, crra, cmin):
"""
Calculate and return utility of consumption.
Parameters
----------
consumption : float
consumption for a filing unit
crra : non-negative float
constant relative risk aversion parameter
cmin : positive float
consumption level below which marginal utility is assumed to be constant
Returns
-------
utility of consumption
"""
if consumption >= cmin:
if crra == 1.0:
return math.log(consumption)
return math.pow(consumption, (1.0 - crra)) / (1.0 - crra)
else: # if consumption < cmin
if crra == 1.0:
tu_at_cmin = math.log(cmin)
else:
tu_at_cmin = math.pow(cmin, (1.0 - crra)) / (1.0 - crra)
mu_at_cmin = math.pow(cmin, -crra)
tu_at_c = tu_at_cmin + mu_at_cmin * (consumption - cmin)
return tu_at_c
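# Illustrative sanity check (not part of the original module): below cmin the
# utility schedule continues linearly using the marginal utility at cmin, so
# total utility is continuous at consumption == cmin.  Parameter values below
# are arbitrary examples.
def _check_isoelastic_utility_continuity(crra=2.0, cmin=1000.0):
    """Return True if utility is (nearly) continuous at the cmin kink point."""
    just_below = isoelastic_utility_function(cmin - 1e-6, crra, cmin)
    at_cmin = isoelastic_utility_function(cmin, crra, cmin)
    return abs(just_below - at_cmin) < 1e-8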
def expected_utility(consumption, probability, crra, cmin):
"""
Calculate and return expected utility of consumption.
Parameters
----------
consumption : numpy array
consumption for each filing unit
probability : numpy array
        sampling probability of each filing unit
crra : non-negative float
constant relative risk aversion parameter of isoelastic utility function
cmin : positive float
consumption level below which marginal utility is assumed to be constant
Returns
-------
expected utility of consumption array
"""
utility = consumption.apply(isoelastic_utility_function,
args=(crra, cmin,))
return np.inner(utility, probability)
def certainty_equivalent(exputil, crra, cmin):
"""
Calculate and return certainty-equivalent of exputil of consumption
assuming an isoelastic utility function with crra and cmin as parameters.
Parameters
----------
exputil : float
expected utility value
crra : non-negative float
constant relative risk aversion parameter of isoelastic utility function
cmin : positive float
consumption level below which marginal utility is assumed to be constant
Returns
-------
certainty-equivalent of specified expected utility, exputil
"""
if crra == 1.0:
tu_at_cmin = math.log(cmin)
else:
tu_at_cmin = math.pow(cmin, (1.0 - crra)) / (1.0 - crra)
if exputil >= tu_at_cmin:
if crra == 1.0:
return math.exp(exputil)
return math.pow((exputil * (1.0 - crra)), (1.0 / (1.0 - crra)))
mu_at_cmin = math.pow(cmin, -crra)
return ((exputil - tu_at_cmin) / mu_at_cmin) + cmin
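# Illustrative round-trip check (not part of the original module): for a
# degenerate "lottery" paying a single consumption level at or above cmin,
# certainty_equivalent should invert isoelastic_utility_function.  The
# numbers used here are arbitrary.
def _check_certainty_equivalent_roundtrip(consumption=5000.0,
                                          crra=3.0, cmin=1000.0):
    """Return True if certainty_equivalent(u(c)) recovers c approximately."""
    util = isoelastic_utility_function(consumption, crra, cmin)
    recovered = certainty_equivalent(util, crra, cmin)
    return abs(recovered - consumption) < 1e-6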
def ce_aftertax_expanded_income(df1, df2,
custom_params=None,
require_no_agg_tax_change=True):
"""
Return dictionary that contains certainty-equivalent of the
expected utility of after-tax expanded income computed for
several constant-relative-risk-aversion parameter values
for each of two Pandas DataFrame objects: df1, which represents
the pre-reform situation, and df2, which represents the
post-reform situation. Both DataFrame objects must contain
's006', 'combined', and 'expanded_income' columns.
IMPORTANT NOTES: These normative welfare calculations are very simple.
It is assumed that utility is a function of only consumption, and that
consumption is equal to after-tax income. This means that any assumed
behavioral responses that change work effort will not affect utility via
    the corresponding change in leisure. And any saving response to changes
    in after-tax income does not affect consumption.
The cmin value is the consumption level below which marginal utility
is considered to be constant. This allows the handling of filing units
with very low or even negative after-tax expanded income in the
expected-utility and certainty-equivalent calculations.
"""
# pylint: disable=too-many-locals
# check consistency of the two DataFrame objects
assert isinstance(df1, pd.DataFrame)
assert isinstance(df2, pd.DataFrame)
assert df1.shape == df2.shape
# specify utility function parameters
if custom_params:
crras = custom_params['crra_list']
for crra in crras:
assert crra >= 0
cmin = custom_params['cmin_value']
assert cmin > 0
else:
crras = [0, 1, 2, 3, 4]
cmin = 1000
# compute aggregate combined tax revenue and aggregate after-tax income
billion = 1.0e-9
cedict = dict()
cedict['tax1'] = weighted_sum(df1, 'combined') * billion
cedict['tax2'] = weighted_sum(df2, 'combined') * billion
if require_no_agg_tax_change:
diff = cedict['tax2'] - cedict['tax1']
if abs(diff) >= 0.0005:
msg = 'Aggregate taxes not equal when required_... arg is True:'
msg += '\n taxes1= {:9.3f}'
msg += '\n taxes2= {:9.3f}'
msg += '\n txdiff= {:9.3f}'
msg += ('\n(adjust _LST or other parameter to bracket txdiff=0 '
'and then interpolate)')
raise ValueError(msg.format(cedict['tax1'], cedict['tax2'], diff))
cedict['inc1'] = weighted_sum(df1, 'expanded_income') * billion
cedict['inc2'] = weighted_sum(df2, 'expanded_income') * billion
# calculate sample-weighted probability of each filing unit
prob_raw = np.divide(df1['s006'], # pylint: disable=no-member
df1['s006'].sum())
prob = np.divide(prob_raw, # pylint: disable=no-member
prob_raw.sum()) # handle any rounding error
# calculate after-tax income of each filing unit in df1 and df2
ati1 = df1['expanded_income'] - df1['combined']
ati2 = df2['expanded_income'] - df2['combined']
    # calculate certainty-equivalent after-tax income in df1 and df2
cedict['crra'] = crras
ce1 = list()
ce2 = list()
for crra in crras:
eu1 = expected_utility(ati1, prob, crra, cmin)
ce1.append(certainty_equivalent(eu1, crra, cmin))
eu2 = expected_utility(ati2, prob, crra, cmin)
ce2.append(certainty_equivalent(eu2, crra, cmin))
cedict['ceeu1'] = ce1
cedict['ceeu2'] = ce2
# ... return cedict
return cedict
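# Hedged usage sketch (not part of the original module): the two DataFrame
# arguments only need 's006', 'combined', and 'expanded_income' columns, so a
# tiny hand-built baseline/reform pair is enough to exercise the function.
# All numbers below are arbitrary.
def _example_ce_aftertax_expanded_income():
    """Return the cedict produced for a toy baseline/reform DataFrame pair."""
    df1 = pd.DataFrame({'s006': [100.0, 200.0],
                        'combined': [1000.0, 3000.0],
                        'expanded_income': [20000.0, 60000.0]})
    df2 = pd.DataFrame({'s006': [100.0, 200.0],
                        'combined': [1500.0, 2750.0],
                        'expanded_income': [20000.0, 60000.0]})
    return ce_aftertax_expanded_income(df1, df2,
                                       require_no_agg_tax_change=False)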
def read_egg_csv(fname, index_col=None):
"""
Read from egg the file named fname that contains CSV data and
return pandas DataFrame containing the data.
"""
try:
path_in_egg = os.path.join('taxcalc', fname)
vdf = pd.read_csv(
pkg_resources.resource_stream(
pkg_resources.Requirement.parse('taxcalc'),
path_in_egg),
index_col=index_col
)
except Exception:
raise ValueError('could not read {} data from egg'.format(fname))
# cannot call read_egg_ function in unit tests
return vdf # pragma: no cover
def read_egg_json(fname):
"""
Read from egg the file named fname that contains JSON data and
return dictionary containing the data.
"""
try:
path_in_egg = os.path.join('taxcalc', fname)
pdict = json.loads(
pkg_resources.resource_stream(
pkg_resources.Requirement.parse('taxcalc'),
path_in_egg).read().decode('utf-8'),
object_pairs_hook=collections.OrderedDict
)
except Exception:
raise ValueError('could not read {} data from egg'.format(fname))
# cannot call read_egg_ function in unit tests
return pdict # pragma: no cover
def delete_file(filename):
"""
Remove specified file if it exists.
"""
if os.path.isfile(filename):
os.remove(filename)
def bootstrap_se_ci(data, seed, num_samples, statistic, alpha):
"""
Return bootstrap estimate of standard error of statistic and
bootstrap estimate of 100*(1-2*alpha)% confidence interval for statistic
    in a dictionary along with the specified seed, num_samples (B), and alpha.
"""
assert isinstance(data, np.ndarray)
assert isinstance(seed, int)
assert isinstance(num_samples, int)
assert callable(statistic) # function that computes statistic from data
assert isinstance(alpha, float)
bsest = dict()
bsest['seed'] = seed
np.random.seed(seed) # pylint: disable=no-member
dlen = len(data)
idx = np.random.randint(low=0, high=dlen, # pylint: disable=no-member
size=(num_samples, dlen))
samples = data[idx]
stat = statistic(samples, axis=1)
bsest['B'] = num_samples
bsest['se'] = np.std(stat, ddof=1)
stat = np.sort(stat)
bsest['alpha'] = alpha
bsest['cilo'] = stat[int(round(alpha * num_samples)) - 1]
bsest['cihi'] = stat[int(round((1 - alpha) * num_samples)) - 1]
return bsest
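# Illustrative sketch (not part of the original module): any NumPy reduction
# that accepts an axis argument (np.mean, np.median, ...) can serve as the
# statistic argument.  The data, seed, and sample count below are arbitrary.
def _example_bootstrap_mean_ci():
    """Return bootstrap SE and 90% CI of the mean of a small data array."""
    rawdata = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
    return bootstrap_se_ci(rawdata, seed=123456789, num_samples=1000,
                           statistic=np.mean, alpha=0.05)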
def dec_graph_data(dist_table1, dist_table2, year,
include_zero_incomes, include_negative_incomes):
"""
Prepare data needed by dec_graph_plot utility function.
Parameters
----------
dist_table1 : a Pandas DataFrame object returned from the
Calculator class distribution_tables method for baseline
dist_table2 : a Pandas DataFrame object returned from the
Calculator class distribution_tables method for reform
year : integer
specifies calendar year of the data in the diff_table
include_zero_incomes : boolean
if True, the bottom decile does contain filing units
with zero expanded_income;
if False, the bottom decile does not contain filing units
with zero expanded_income.
include_negative_incomes : boolean
if True, the bottom decile does contain filing units
with negative expanded_income;
if False, the bottom decile does not contain filing units
with negative expanded_income.
Returns
-------
dictionary object suitable for passing to dec_graph_plot utility function
"""
# pylint: disable=too-many-locals
# check that the two distribution tables are consistent
assert len(dist_table1.index) == len(DECILE_ROW_NAMES)
assert len(dist_table2.index) == len(DECILE_ROW_NAMES)
assert np.allclose(dist_table1['s006'], dist_table2['s006'])
# compute bottom bar width and statistic value
wght = dist_table1['s006']
total_wght = wght[2] + wght[1] + wght[0]
included_wght = wght[2]
included_val1 = dist_table1['aftertax_income'][2] * wght[2]
included_val2 = dist_table2['aftertax_income'][2] * wght[2]
if include_zero_incomes:
included_wght += wght[1]
included_val1 += dist_table1['aftertax_income'][1] * wght[1]
included_val2 += dist_table2['aftertax_income'][1] * wght[1]
if include_negative_incomes:
included_wght += wght[0]
included_val1 += dist_table1['aftertax_income'][0] * wght[0]
included_val2 += dist_table2['aftertax_income'][0] * wght[0]
bottom_bar_width = included_wght / total_wght
bottom_bar_value = (included_val2 / included_val1 - 1.) * 100.
# construct dictionary containing the bar data required by dec_graph_plot
bars = dict()
# ... bottom bar
info = dict()
    if include_zero_incomes and include_negative_incomes:
        info['label'] = '0-10'
    elif include_zero_incomes and not include_negative_incomes:
        info['label'] = '0-10zp'
    elif not include_zero_incomes and include_negative_incomes:
        info['label'] = '0-10np'
    else:
        info['label'] = '0-10p'
info['value'] = bottom_bar_value
bars[0] = info
# ... other bars
offset = 2
for idx in range(offset + 1, len(DECILE_ROW_NAMES)):
info = dict()
info['label'] = DECILE_ROW_NAMES[idx]
val1 = dist_table1['aftertax_income'][idx] * wght[idx]
val2 = dist_table2['aftertax_income'][idx] * wght[idx]
info['value'] = (val2 / val1 - 1.) * 100.
if info['label'] == 'ALL':
info['label'] = '---------'
info['value'] = 0
bars[idx - offset] = info
# construct dictionary containing bar data and auto-generated labels
data = dict()
data['bottom_bar_width'] = bottom_bar_width
data['bars'] = bars
xlabel = 'Reform-Induced Percentage Change in After-Tax Expanded Income'
data['xlabel'] = xlabel
ylabel = 'Expanded Income Percentile Group'
data['ylabel'] = ylabel
title_str = 'Change in After-Tax Income by Income Percentile Group'
data['title'] = '{} for {}'.format(title_str, year)
return data
def dec_graph_plot(data,
width=850,
height=500,
xlabel='',
ylabel='',
title=''):
"""
Plot stacked decile graph using data returned from dec_graph_data function.
Parameters
----------
data : dictionary object returned from dec_graph_data() utility function
width : integer
width of plot expressed in pixels
height : integer
height of plot expressed in pixels
xlabel : string
x-axis label; if '', then use label generated by dec_graph_data
ylabel : string
y-axis label; if '', then use label generated by dec_graph_data
title : string
graph title; if '', then use title generated by dec_graph_data
Returns
-------
bokeh.plotting figure object containing a raster graphics plot
Notes
-----
USAGE EXAMPLE::
gdata = dec_graph_data(...)
gplot = dec_graph_plot(gdata)
THEN when working interactively in a Python notebook::
bp.show(gplot)
OR when executing script using Python command-line interpreter::
bio.output_file('graph-name.html', title='Change in After-Tax Income')
bio.show(gplot) [OR bio.save(gplot) WILL JUST WRITE FILE TO DISK]
WILL VISUALIZE GRAPH IN BROWSER AND WRITE GRAPH TO SPECIFIED HTML FILE
To convert the visualized graph into a PNG-formatted file, click on
the "Save" icon on the Toolbar (located in the top-right corner of
    the visualized graph) and a PNG-formatted file will be written to your
Download directory.
The ONLY output option the bokeh.plotting figure has is HTML format,
which (as described above) can be converted into a PNG-formatted
raster graphics file. There is no option to make the bokeh.plotting
figure generate a vector graphics file such as an EPS file.
"""
# pylint: disable=too-many-arguments,too-many-locals
if title == '':
title = data['title']
bar_keys = sorted(data['bars'].keys())
bar_labels = [data['bars'][key]['label'] for key in bar_keys]
fig = bp.figure(plot_width=width, plot_height=height, title=title,
y_range=bar_labels)
fig.title.text_font_size = '12pt'
fig.outline_line_color = None
fig.axis.axis_line_color = None
fig.axis.minor_tick_line_color = None
fig.axis.axis_label_text_font_size = '12pt'
fig.axis.axis_label_text_font_style = 'normal'
fig.axis.major_label_text_font_size = '12pt'
if xlabel == '':
xlabel = data['xlabel']
fig.xaxis.axis_label = xlabel
fig.xaxis[0].formatter = PrintfTickFormatter(format='%+.1f%%')
if ylabel == '':
ylabel = data['ylabel']
fig.yaxis.axis_label = ylabel
fig.ygrid.grid_line_color = None
# plot thick x-axis grid line at zero
fig.line(x=[0, 0], y=[0, 14], line_width=1, line_color='black')
# plot bars
barheight = 0.8
bcolor = 'blue'
yidx = 0
for idx in bar_keys:
bval = data['bars'][idx]['value']
blabel = data['bars'][idx]['label']
bheight = barheight
if blabel == '0-10':
bheight *= data['bottom_bar_width']
elif blabel == '90-95':
bheight *= 0.5
bcolor = 'red'
elif blabel == '95-99':
bheight *= 0.4
elif blabel == 'Top 1%':
bheight *= 0.1
fig.rect(x=(bval / 2.0), # x-coordinate of center of the rectangle
y=(yidx + 0.5), # y-coordinate of center of the rectangle
width=abs(bval), # width of the rectangle
height=bheight, # height of the rectangle
color=bcolor)
yidx += 1
return fig
def nonsmall_diffs(linelist1, linelist2, small=0.0):
"""
Return True if line lists differ significantly; otherwise return False.
Significant difference means one or more numbers differ (between
linelist1 and linelist2) by more than the small amount.
NOTE: this function is meant to be used only in the unit tests to handle
small differences in floating point values generated by Python 2.7 and 3.6,
where a nonzero small amount is used only under Python 3.6.
"""
# embedded function used only in nonsmall_diffs function
def isfloat(value):
"""
Return True if value can be cast to float; otherwise return False.
"""
try:
float(value)
return True
except ValueError:
return False
# begin nonsmall_diffs logic
assert isinstance(linelist1, list)
assert isinstance(linelist2, list)
if len(linelist1) != len(linelist2):
return True
assert small >= 0.0 and small <= 1.0
epsilon = 1e-6
smallamt = small + epsilon
for line1, line2 in zip(linelist1, linelist2):
if line1 == line2:
continue
else:
tokens1 = line1.replace(',', '').split()
tokens2 = line2.replace(',', '').split()
for tok1, tok2 in zip(tokens1, tokens2):
tok1_isfloat = isfloat(tok1)
tok2_isfloat = isfloat(tok2)
if tok1_isfloat and tok2_isfloat:
if abs(float(tok1) - float(tok2)) <= smallamt:
continue
else:
return True
elif not tok1_isfloat and not tok2_isfloat:
if tok1 == tok2:
continue
else:
return True
else:
return True
return False
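# Illustrative sketch (not part of the original module): two report lines whose
# numeric tokens differ by less than the small tolerance are not flagged,
# while a large numeric difference is.  Tokens and tolerance are arbitrary.
def _example_nonsmall_diffs():
    """Return (False, True) for a tiny and a large numeric line difference."""
    tiny = nonsmall_diffs(['tax 1.230'], ['tax 1.231'], small=0.01)
    large = nonsmall_diffs(['tax 1.23'], ['tax 9.99'], small=0.01)
    return tiny, large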
def quantity_response(quantity,
price_elasticity,
aftertax_price1,
aftertax_price2,
income_elasticity,
aftertax_income1,
aftertax_income2):
"""
Calculate dollar change in quantity using a log-log response equation,
which assumes that the proportional change in the quantity is equal to
the sum of two terms:
    (1) the proportional change in the quantity's marginal aftertax price
times an assumed price elasticity, and
(2) the proportional change in aftertax income
times an assumed income elasticity.
Parameters
----------
quantity: numpy array
pre-response quantity whose response is being calculated
price_elasticity: float
coefficient of the percentage change in aftertax price of
the quantity in the log-log response equation
aftertax_price1: numpy array
        marginal aftertax price of the quantity under baseline policy
Note that this function forces prices to be in [0.01, inf] range,
but the caller of this function may want to constrain negative
or very small prices to be somewhat larger in order to avoid extreme
proportional changes in price.
Note this is NOT an array of marginal tax rates (MTR), but rather
usually 1-MTR (or in the case of quantities, like charitable
giving, whose MTR values are non-positive, 1+MTR).
aftertax_price2: numpy array
marginal aftertax price of the quantity under reform policy
Note that this function forces prices to be in [0.01, inf] range,
but the caller of this function may want to constrain negative
or very small prices to be somewhat larger in order to avoid extreme
proportional changes in price.
Note this is NOT an array of marginal tax rates (MTR), but rather
usually 1-MTR (or in the case of quantities, like charitable
giving, whose MTR values are non-positive, 1+MTR).
income_elasticity: float
coefficient of the percentage change in aftertax income in the
log-log response equation
aftertax_income1: numpy array
aftertax income under baseline policy
Note that this function forces income to be in [1, inf] range,
but the caller of this function may want to constrain negative
or small incomes to be somewhat larger in order to avoid extreme
proportional changes in aftertax income.
aftertax_income2: numpy array
aftertax income under reform policy
Note that this function forces income to be in [1, inf] range,
but the caller of this function may want to constrain negative
or small incomes to be somewhat larger in order to avoid extreme
proportional changes in aftertax income.
Returns
-------
response: numpy array
dollar change in quantity calculated from log-log response equation
"""
# pylint: disable=too-many-arguments
# compute price term in log-log response equation
if price_elasticity == 0.:
pch_price = np.zeros(quantity.shape)
else:
atp1 = np.where(aftertax_price1 < 0.01, 0.01, aftertax_price1)
atp2 = np.where(aftertax_price2 < 0.01, 0.01, aftertax_price2)
pch_price = atp2 / atp1 - 1.
# compute income term in log-log response equation
if income_elasticity == 0.:
pch_income = np.zeros(quantity.shape)
else:
ati1 = np.where(aftertax_income1 < 1.0, 1.0, aftertax_income1)
ati2 = np.where(aftertax_income2 < 1.0, 1.0, aftertax_income2)
pch_income = ati2 / ati1 - 1.
# compute response
pch_q = price_elasticity * pch_price + income_elasticity * pch_income
response = pch_q * quantity
return response
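# Illustrative sketch (not part of the original module): a one-element example
# of the log-log response with arbitrary elasticities and price/income levels.
def _example_quantity_response():
    """Return the dollar response for a single hypothetical filing unit."""
    quantity = np.array([10000.0])
    return quantity_response(quantity,
                             price_elasticity=-0.5,
                             aftertax_price1=np.array([0.70]),
                             aftertax_price2=np.array([0.80]),
                             income_elasticity=0.1,
                             aftertax_income1=np.array([50000.0]),
                             aftertax_income2=np.array([52000.0]))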
| 39.189873 | 79 | 0.626571 |
8185a8d9f5d71cb35eecdb1605c61aa54a9f4f9f | 4,726 | py | Python | code/calcUpstreamLength.py | hishivshah/waterSourceHeatMap | 5f5e3ace8d7ede88ec543132b48c9c3f21f66593 | [
"MIT"
] | null | null | null | code/calcUpstreamLength.py | hishivshah/waterSourceHeatMap | 5f5e3ace8d7ede88ec543132b48c9c3f21f66593 | [
"MIT"
] | null | null | null | code/calcUpstreamLength.py | hishivshah/waterSourceHeatMap | 5f5e3ace8d7ede88ec543132b48c9c3f21f66593 | [
"MIT"
] | null | null | null | import sqlite3
import logging
import networkx
import shapely
import shapely.wkt
def searchUpOrDownStream(
graph, startNode, gaugedEdgeId, gaugedEdgeUpLen, searchDirection
):
if searchDirection == "upstream":
# Find upstream edges
searchNodes = graph.predecessors(startNode)
elif searchDirection == "downstream":
# Find downstream edges
searchNodes = graph.successors(startNode)
searchEdges = graph.edges(searchNodes, keys=True, data=True)
for sEdge in searchEdges:
if sEdge[3].get("nearestGaugedEdge") is None:
sEdge[3]["nearestGaugedEdge"] = gaugedEdgeId
sEdge[3]["upstreamLengthRatio"] = (
sEdge[3]["upstreamLength"] / gaugedEdgeUpLen
)
searchUpOrDownStream(
graph, sEdge[0], gaugedEdgeId, gaugedEdgeUpLen, searchDirection
)
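# Illustrative sketch (not part of the original script): how the recursive
# tagging behaves on a toy two-edge river network.  Node names, edge keys and
# 'upstreamLength' values are made up for the example.
def _example_search_upstream():
    """Tag the edge upstream of a gauged edge on a toy graph and return edges."""
    g = networkx.MultiDiGraph()
    g.add_edge("A", "B", key="e1", upstreamLength=1.0)
    g.add_edge("B", "C", key="e2", upstreamLength=3.0)
    # treat e2 as the gauged edge and propagate its id upstream of node "B"
    searchUpOrDownStream(g, "B", "e2", 3.0, "upstream")
    return list(g.edges(keys=True, data=True))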
if __name__ == "__main__":
# Logging set-up
logging.basicConfig(format="%(asctime)s|%(levelname)s|%(message)s",
level=logging.INFO)
# Database path
sqliteDb = "../results/results.sqlite"
# Create Directed Graph with multiple edges
logging.info("Creating graph object")
G = networkx.MultiDiGraph()
# Connect to database
logging.info("Connecting to database")
with sqlite3.connect(sqliteDb) as db:
db.enable_load_extension(True)
db.load_extension("mod_spatialite")
cur = db.cursor()
cur.execute("SELECT InitSpatialMetaData(1);")
# Add river nodes to graph
logging.info("Adding river nodes to graph")
cur.execute("SELECT id, ST_ASText(geometry) from riverNodes;")
for row in cur:
id = row[0]
geometry = shapely.wkt.loads(row[1])
G.add_node(id, geometry=geometry)
# Add river edges to graph
logging.info("Adding river edges to graph")
cur.execute("""SELECT id, startNodeId, endNodeId, ST_ASText(geometry)
FROM riverEdges
WHERE startNodeId IS NOT NULL
AND endNodeId IS NOT NULL;""")
for row in cur:
id = row[0]
startNodeId = row[1]
endNodeId = row[2]
geometry = shapely.wkt.loads(row[3])
G.add_edge(startNodeId, endNodeId, key=id, geometry=geometry)
# Calculate upstream river length
logging.info("Calculating upstream river lengths")
for startNode, endNode, key, attr in G.edges_iter(
data=True, keys=True
):
preNodes = networkx.ancestors(G, startNode)
preEdges = G.edges(preNodes, keys=True, data=True)
upstreamLength = (
attr["geometry"].length +
sum([e[3]["geometry"].length for e in preEdges])
)
G.edge[startNode][endNode][key]["upstreamLength"] = upstreamLength
# Find river reaches with gauging station
cur.execute("""SELECT id
FROM riverEdges e
WHERE e.id IN
(SELECT riverId FROM nrfaStations)
AND startNodeId IS NOT NULL
AND endNodeId IS NOT NULL;""")
gEdgeIds = [row[0] for row in cur.fetchall()]
gEdges = [
            e for e in G.edges(keys=True, data=True) if e[2] in gEdgeIds
]
for gEdge in gEdges:
gEdge[3]["nearestGaugedEdge"] = gEdge[2]
gEdge[3]["upstreamLengthRatio"] = 1
# Find upstream edges for each gauged edge
for gEdge in gEdges:
gEdgeStart = gEdge[0]
gEdgeId = gEdge[2]
gEdgeUpLen = gEdge[3]["upstreamLength"]
searchUpOrDownStream(
G, gEdgeStart, gEdgeId, gEdgeUpLen, "upstream"
)
# Find downstream edges for each gauged edge
for gEdge in gEdges:
gEdgeStart = gEdge[0]
gEdgeId = gEdge[2]
gEdgeUpLen = gEdge[3]["upstreamLength"]
searchUpOrDownStream(
G, gEdgeStart, gEdgeId, gEdgeUpLen, "downstream"
)
# Update riverEdges tables
for e in G.edges_iter(data=True, keys=True):
if e[3].get("nearestGaugedEdge") is not None:
cur.execute("""
UPDATE riverEdges
SET nearestGaugedEdge = '%s',
upstreamLengthRatio = %s
WHERE id = '%s';
""" % (
e[3].get("nearestGaugedEdge"),
e[3].get("upstreamLengthRatio"),
e[2]
))
# Commit changes
db.commit()
| 33.28169 | 79 | 0.557766 |
dbcd6fa121b8b855845c9a235a8511266164b058 | 53,702 | py | Python | cinder/tests/unit/test_rbd.py | scottdangelo/RemoveVolumeMangerLocks | a448e6981f00ee068e29f3daac33d2d2d3820b4d | [
"Apache-2.0"
] | 1 | 2019-02-08T05:24:58.000Z | 2019-02-08T05:24:58.000Z | cinder/tests/unit/test_rbd.py | scottdangelo/RemoveVolumeMangerLocks | a448e6981f00ee068e29f3daac33d2d2d3820b4d | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/test_rbd.py | scottdangelo/RemoveVolumeMangerLocks | a448e6981f00ee068e29f3daac33d2d2d3820b4d | [
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Josh Durgin
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import os
import tempfile
import mock
from oslo_utils import timeutils
from oslo_utils import units
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import test_volume
from cinder.tests.unit import utils
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockImageExistsException(MockException):
"""Used as mock for rbd.ImageExists."""
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
@mock.patch('cinder.volume.drivers.rbd.RADOSClient')
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd, mock_client,
mock_proxy):
inst.mock_rbd = mock_rbd
inst.mock_rados = mock_rados
inst.mock_client = mock_client
inst.mock_proxy = mock_proxy
inst.mock_rbd.RBD.Error = Exception
inst.mock_rados.Error = Exception
inst.mock_rbd.ImageBusy = MockImageBusyException
inst.mock_rbd.ImageNotFound = MockImageNotFoundException
inst.mock_rbd.ImageExists = MockImageExistsException
inst.driver.rbd = inst.mock_rbd
inst.driver.rados = inst.mock_rados
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
CEPH_MON_DUMP = """dumped monmap epoch 1
{ "epoch": 1,
"fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
"modified": "2013-05-22 17:44:56.343618",
"created": "2013-05-22 17:44:56.343618",
"mons": [
{ "rank": 0,
"name": "a",
"addr": "[::1]:6789\/0"},
{ "rank": 1,
"name": "b",
"addr": "[::1]:6790\/0"},
{ "rank": 2,
"name": "c",
"addr": "[::1]:6791\/0"},
{ "rank": 3,
"name": "d",
"addr": "127.0.0.1:6792\/0"},
{ "rank": 4,
"name": "e",
"addr": "example.com:6791\/0"}],
"quorum": [
0,
1,
2]}
"""
class RBDTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(RBDTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self.cfg.rbd_cluster_name = 'nondefault'
self.cfg.rbd_pool = 'rbd'
self.cfg.rbd_ceph_conf = None
self.cfg.rbd_secret_uuid = None
self.cfg.rbd_user = None
self.cfg.volume_dd_blocksize = '1M'
self.cfg.rbd_store_chunk_size = 4
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = driver.RBDDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.set_initialized()
self.volume_name = u'volume-00000001'
self.snapshot_name = u'snapshot-00000001'
self.volume_size = 1
self.volume = dict(name=self.volume_name, size=self.volume_size)
self.snapshot = dict(volume_name=self.volume_name,
name=self.snapshot_name)
@common_mocks
def test_create_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.driver.create_volume(self.volume)
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
self.volume_size * units.Gi, order]
kwargs = {'old_format': False,
'features': client.features}
self.mock_rbd.RBD.return_value.create.assert_called_once_with(
*args, **kwargs)
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
@common_mocks
def test_manage_existing_get_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image(), 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 2 * units.Gi
existing_ref = {'source-name': self.volume_name}
return_size = self.driver.manage_existing_get_size(
self.volume,
existing_ref)
self.assertEqual(2, return_size)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing_get_invalid_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image(), 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 'abcd'
existing_ref = {'source-name': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
self.volume, existing_ref)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \
mock_rbd_image_rename:
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
mock_rbd_image_rename.return_value = 0
self.driver.manage_existing(self.volume, existing_ref)
mock_rbd_image_rename.assert_called_with(
client.ioctx,
exist_volume,
self.volume_name)
@common_mocks
def test_manage_existing_with_exist_rbd_image(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.mock_rbd.RBD.return_value.rename.side_effect = (
MockImageExistsException)
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
self.assertRaises(self.mock_rbd.ImageExists,
self.driver.manage_existing,
self.volume, existing_ref)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageExists])
@common_mocks
def test_delete_backup_snaps(self):
self.driver.rbd.Image.remove_snap = mock.Mock()
with mock.patch.object(self.driver, '_get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = [{'name': 'snap1'}]
rbd_image = self.driver.rbd.Image()
self.driver._delete_backup_snaps(rbd_image)
mock_get_backup_snaps.assert_called_once_with(rbd_image)
self.assertTrue(
self.driver.rbd.Image.return_value.remove_snap.called)
@common_mocks
def test_delete_volume(self):
client = self.mock_client.return_value
self.driver.rbd.Image.return_value.list_snaps.return_value = []
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
mock_get_clone_info.return_value = (None, None, None)
self.driver.delete_volume(self.volume)
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_name,
None)
(self.driver.rbd.Image.return_value
.list_snaps.assert_called_once_with())
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.driver.rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.driver.rbd.RBD.return_value.remove.call_count)
@common_mocks
def delete_volume_not_found(self):
self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound
self.assertIsNone(self.driver.delete_volume(self.volume))
self.mock_rbd.Image.assert_called_once_with()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_delete_busy_volume(self):
self.mock_rbd.Image.return_value.list_snaps.return_value = []
self.mock_rbd.RBD.return_value.remove.side_effect = (
self.mock_rbd.ImageBusy)
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume, self.volume)
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_name,
None)
(self.mock_rbd.Image.return_value.list_snaps
.assert_called_once_with())
mock_rados_client.assert_called_once_with(self.driver)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.mock_rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
3, self.mock_rbd.RBD.return_value.remove.call_count)
self.assertEqual(3, len(RAISED_EXCEPTIONS))
# Make sure the exception was raised
self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS)
@common_mocks
def test_delete_volume_not_found(self):
self.mock_rbd.Image.return_value.list_snaps.return_value = []
self.mock_rbd.RBD.return_value.remove.side_effect = (
self.mock_rbd.ImageNotFound)
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertIsNone(self.driver.delete_volume(self.volume))
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_name,
None)
(self.mock_rbd.Image.return_value.list_snaps
.assert_called_once_with())
mock_rados_client.assert_called_once_with(self.driver)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.mock_rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.mock_rbd.RBD.return_value.remove.call_count)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageNotFound])
@common_mocks
def test_create_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.create_snapshot(self.snapshot)
args = [str(self.snapshot_name)]
proxy.create_snap.assert_called_with(*args)
proxy.protect_snap.assert_called_with(*args)
@common_mocks
def test_delete_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.delete_snapshot(self.snapshot)
proxy.remove_snap.assert_called_with(self.snapshot_name)
proxy.unprotect_snap.assert_called_with(self.snapshot_name)
@common_mocks
def test_delete_busy_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
proxy.unprotect_snap.side_effect = (
self.mock_rbd.ImageBusy)
with mock.patch.object(self.driver, '_get_children_info') as \
mock_get_children_info:
mock_get_children_info.return_value = [('pool', 'volume2')]
with mock.patch.object(driver, 'LOG') as \
mock_log:
self.assertRaises(exception.SnapshotIsBusy,
self.driver.delete_snapshot,
self.snapshot)
mock_get_children_info.assert_called_once_with(
proxy,
self.snapshot_name)
self.assertTrue(mock_log.info.called)
self.assertTrue(proxy.unprotect_snap.called)
self.assertFalse(proxy.remove_snap.called)
@common_mocks
def test_get_children_info(self):
volume = self.mock_proxy
volume.set_snap = mock.Mock()
volume.list_children = mock.Mock()
list_children = [('pool', 'volume2')]
volume.list_children.return_value = list_children
info = self.driver._get_children_info(volume,
self.snapshot_name)
self.assertEqual(list_children, info)
@common_mocks
def test_get_clone_info(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume, self.volume_name)
self.assertEqual(parent_info, info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_get_clone_info_w_snap(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_name,
snap=snapshot)
self.assertEqual(parent_info, info)
self.assertEqual(2, volume.set_snap.call_count)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_get_clone_info_w_exception(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
volume.parent_info.side_effect = self.mock_rbd.ImageNotFound
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_name,
snap=snapshot)
self.assertEqual((None, None, None), info)
self.assertEqual(2, volume.set_snap.call_count)
volume.parent_info.assert_called_once_with()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_get_clone_info_deleted_volume(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume,
"%s.deleted" % (self.volume_name))
self.assertEqual(parent_info, info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_create_cloned_volume_same_size(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
with mock.patch.object(self.driver, '_resize') as mock_resize:
mock_get_clone_depth.return_value = 1
self.driver.create_cloned_volume({'name': dst_name,
'size': 10},
{'name': src_name,
'size': 10})
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
self.mock_rbd.Image.return_value.close \
.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
self.assertEqual(
0, mock_resize.call_count)
@common_mocks
def test_create_cloned_volume_different_size(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
with mock.patch.object(self.driver, '_resize') as mock_resize:
mock_get_clone_depth.return_value = 1
self.driver.create_cloned_volume({'name': dst_name,
'size': 20},
{'name': src_name,
'size': 10})
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
self.mock_rbd.Image.return_value.close \
.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
self.assertEqual(
1, mock_resize.call_count)
@common_mocks
def test_create_cloned_volume_w_flatten(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 1
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (
('fake_pool', dst_name, '.'.join((dst_name, 'clone_snap'))))
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
dict(name=dst_name), dict(name=src_name))
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
(self.mock_rbd.Image.return_value.unprotect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.remove_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
# We expect the driver to close both volumes, so 2 is expected
self.assertEqual(
2, self.mock_rbd.Image.return_value.close.call_count)
self.assertTrue(mock_get_clone_depth.called)
@common_mocks
def test_create_cloned_volume_w_clone_exception(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
self.mock_rbd.RBD.return_value.clone.side_effect = (
self.mock_rbd.RBD.Error)
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
{'name': dst_name}, {'name': src_name})
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
(self.mock_rbd.Image.return_value.unprotect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.remove_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.mock_rbd.Image.return_value.close.assert_called_once_with()
@common_mocks
def test_good_locations(self):
locations = ['rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F', ]
        for loc in locations:
            self.driver._parse_location(loc)
@common_mocks
def test_bad_locations(self):
locations = ['rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
loc)
self.assertFalse(
self.driver._is_cloneable(loc, {'disk_format': 'raw'}))
@common_mocks
def test_cloneable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
info = {'disk_format': 'raw'}
self.assertTrue(self.driver._is_cloneable(location, info))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_different_fsid(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://def/pool/image/snap'
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': 'raw'}))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_unreadable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
self.driver.rbd.Error = Exception
self.mock_proxy.side_effect = Exception
args = [location, {'disk_format': 'raw'}]
self.assertFalse(self.driver._is_cloneable(*args))
self.assertEqual(1, self.mock_proxy.call_count)
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_bad_format(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
formats = ['qcow2', 'vmdk', 'vdi']
for f in formats:
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': f}))
self.assertTrue(mock_get_fsid.called)
def _copy_image(self):
with mock.patch.object(tempfile, 'NamedTemporaryFile'):
with mock.patch.object(os.path, 'exists') as mock_exists:
mock_exists.return_value = True
with mock.patch.object(image_utils, 'fetch_to_raw'):
with mock.patch.object(self.driver, 'delete_volume'):
with mock.patch.object(self.driver, '_resize'):
mock_image_service = mock.MagicMock()
args = [None, {'name': 'test', 'size': 1},
mock_image_service, None]
self.driver.copy_image_to_volume(*args)
@common_mocks
def test_copy_image_no_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self._copy_image()
@common_mocks
def test_copy_image_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
self._copy_image()
@common_mocks
def test_update_volume_stats(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
client.cluster = mock.Mock()
client.cluster.mon_command = mock.Mock()
client.cluster.mon_command.return_value = (
0, '{"stats":{"total_bytes":64385286144,'
'"total_used_bytes":3289628672,"total_avail_bytes":61095657472},'
'"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,'
'"bytes_used":1546440971,"max_avail":28987613184,"objects":412}},'
'{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,'
'"max_avail":28987613184,"objects":0}}]}\n', '')
self.driver.configuration.safe_get = mock.Mock()
self.driver.configuration.safe_get.return_value = 'RBD'
expected = dict(
volume_backend_name='RBD',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='ceph',
total_capacity_gb=27,
free_capacity_gb=26,
reserved_percentage=0)
actual = self.driver.get_volume_stats(True)
client.cluster.mon_command.assert_called_once_with(
'{"prefix":"df", "format":"json"}', '')
self.assertDictMatch(expected, actual)
@common_mocks
def test_update_volume_stats_error(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
client.cluster = mock.Mock()
client.cluster.mon_command = mock.Mock()
client.cluster.mon_command.return_value = (22, '', '')
self.driver.configuration.safe_get = mock.Mock()
self.driver.configuration.safe_get.return_value = 'RBD'
expected = dict(volume_backend_name='RBD',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='ceph',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0)
actual = self.driver.get_volume_stats(True)
client.cluster.mon_command.assert_called_once_with(
'{"prefix":"df", "format":"json"}', '')
self.assertDictMatch(expected, actual)
@common_mocks
def test_get_mon_addrs(self):
with mock.patch.object(self.driver, '_execute') as mock_execute:
mock_execute.return_value = (CEPH_MON_DUMP, '')
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
self.assertEqual((hosts, ports), self.driver._get_mon_addrs())
@common_mocks
def test_initialize_connection(self):
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
with mock.patch.object(self.driver, '_get_mon_addrs') as \
mock_get_mon_addrs:
mock_get_mon_addrs.return_value = (hosts, ports)
volume_id = '0a83f0a3-ef6e-47b6-a8aa-20436bc9ed01'
expected = {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % (self.cfg.rbd_pool,
self.volume_name),
'hosts': hosts,
'ports': ports,
'auth_enabled': False,
'auth_username': None,
'secret_type': 'ceph',
'secret_uuid': None,
'volume_id': volume_id
}
}
volume = dict(name=self.volume_name, id=volume_id)
actual = self.driver.initialize_connection(volume, None)
self.assertDictMatch(expected, actual)
self.assertTrue(mock_get_mon_addrs.called)
@common_mocks
def test_clone(self):
src_pool = u'images'
src_image = u'image-name'
src_snap = u'snapshot-name'
client_stack = []
def mock__enter__(inst):
def _inner():
client_stack.append(inst)
return inst
return _inner
client = self.mock_client.return_value
        # capture both rados clients used to perform the clone
client.__enter__.side_effect = mock__enter__(client)
self.driver._clone(self.volume, src_pool, src_image, src_snap)
args = [client_stack[0].ioctx, str(src_image), str(src_snap),
client_stack[1].ioctx, str(self.volume_name)]
kwargs = {'features': client.features}
self.mock_rbd.RBD.return_value.clone.assert_called_once_with(
*args, **kwargs)
self.assertEqual(2, client.__enter__.call_count)
@common_mocks
def test_extend_volume(self):
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': self.volume_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()
self.driver.extend_volume(fake_vol, fake_size)
self.mox.VerifyAll()
@common_mocks
def test_retype(self):
context = {}
diff = {'encryption': {},
'extra_specs': {}}
fake_volume = {'name': 'testvolume',
'host': 'currenthost'}
fake_type = 'high-IOPS'
# no support for migration
host = {'host': 'anotherhost'}
self.assertFalse(self.driver.retype(context, fake_volume,
fake_type, diff, host))
host = {'host': 'currenthost'}
# no support for changing encryption
diff['encryption'] = {'non-empty': 'non-empty'}
self.assertFalse(self.driver.retype(context, fake_volume,
fake_type, diff, host))
diff['encryption'] = {}
# no support for changing extra_specs
diff['extra_specs'] = {'non-empty': 'non-empty'}
self.assertFalse(self.driver.retype(context, fake_volume,
fake_type, diff, host))
diff['extra_specs'] = {}
self.assertTrue(self.driver.retype(context, fake_volume,
fake_type, diff, host))
@common_mocks
def test_update_migrated_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename:
context = {}
current_volume = {'id': 'curr_id',
'name': 'curr_name',
'provider_location': 'curr_provider_location'}
original_volume = {'id': 'orig_id',
'name': 'orig_name',
'provider_location': 'orig_provider_location'}
mock_rename.return_value = 0
model_update = self.driver.update_migrated_volume(context,
original_volume,
current_volume,
'available')
mock_rename.assert_called_with(client.ioctx,
'volume-%s' % current_volume['id'],
'volume-%s' % original_volume['id'])
self.assertEqual({'_name_id': None,
'provider_location': None}, model_update)
def test_rbd_volume_proxy_init(self):
mock_driver = mock.Mock(name='driver')
mock_driver._connect_to_rados.return_value = (None, None)
with driver.RBDVolumeProxy(mock_driver, self.volume_name):
self.assertEqual(1, mock_driver._connect_to_rados.call_count)
self.assertFalse(mock_driver._disconnect_from_rados.called)
self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
mock_driver.reset_mock()
snap = u'snapshot-name'
with driver.RBDVolumeProxy(mock_driver, self.volume_name,
snapshot=snap):
self.assertEqual(1, mock_driver._connect_to_rados.call_count)
self.assertFalse(mock_driver._disconnect_from_rados.called)
self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
@common_mocks
@mock.patch('time.sleep')
def test_connect_to_rados(self, sleep_mock):
# Default
self.cfg.rados_connect_timeout = -1
self.mock_rados.Rados.return_value.open_ioctx.return_value = \
self.mock_rados.Rados.return_value.ioctx
# default configured pool
ret = self.driver._connect_to_rados()
self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
# Expect no timeout if default is used
self.mock_rados.Rados.return_value.connect.assert_called_once_with()
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1])
self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
self.cfg.rbd_pool)
# different pool
ret = self.driver._connect_to_rados('alt_pool')
self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1])
self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
'alt_pool')
# With timeout
self.cfg.rados_connect_timeout = 1
self.mock_rados.Rados.return_value.connect.reset_mock()
self.driver._connect_to_rados()
self.mock_rados.Rados.return_value.connect.assert_called_once_with(
timeout=1)
# error
self.mock_rados.Rados.return_value.open_ioctx.reset_mock()
self.mock_rados.Rados.return_value.shutdown.reset_mock()
self.mock_rados.Rados.return_value.open_ioctx.side_effect = (
self.mock_rados.Error)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._connect_to_rados)
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(
3, self.mock_rados.Rados.return_value.shutdown.call_count)
class RBDImageIOWrapperTestCase(test.TestCase):
def setUp(self):
super(RBDImageIOWrapperTestCase, self).setUp()
self.meta = mock.Mock()
self.meta.user = 'mock_user'
self.meta.conf = 'mock_conf'
self.meta.pool = 'mock_pool'
self.meta.image = mock.Mock()
self.meta.image.read = mock.Mock()
self.meta.image.write = mock.Mock()
self.meta.image.size = mock.Mock()
self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta)
self.data_length = 1024
self.full_data = 'abcd' * 256
def test_init(self):
self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta)
self.assertEqual(0, self.mock_rbd_wrapper._offset)
def test_inc_offset(self):
self.mock_rbd_wrapper._inc_offset(10)
self.mock_rbd_wrapper._inc_offset(10)
self.assertEqual(20, self.mock_rbd_wrapper._offset)
def test_rbd_image(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image)
def test_rbd_user(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user)
    def test_rbd_pool(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool)
    def test_rbd_conf(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf)
def test_read(self):
def mock_read(offset, length):
return self.full_data[offset:length]
self.meta.image.read.side_effect = mock_read
self.meta.image.size.return_value = self.data_length
data = self.mock_rbd_wrapper.read()
self.assertEqual(self.full_data, data)
data = self.mock_rbd_wrapper.read()
self.assertEqual('', data)
self.mock_rbd_wrapper.seek(0)
data = self.mock_rbd_wrapper.read()
self.assertEqual(self.full_data, data)
self.mock_rbd_wrapper.seek(0)
data = self.mock_rbd_wrapper.read(10)
self.assertEqual(self.full_data[:10], data)
def test_write(self):
self.mock_rbd_wrapper.write(self.full_data)
self.assertEqual(1024, self.mock_rbd_wrapper._offset)
def test_seekable(self):
self.assertTrue(self.mock_rbd_wrapper.seekable)
def test_seek(self):
self.assertEqual(0, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10)
self.assertEqual(10, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10)
self.assertEqual(10, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10, 1)
self.assertEqual(20, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(0)
self.mock_rbd_wrapper.write(self.full_data)
self.meta.image.size.return_value = self.data_length
self.mock_rbd_wrapper.seek(0)
self.assertEqual(0, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(10, 2)
self.assertEqual(self.data_length + 10, self.mock_rbd_wrapper._offset)
self.mock_rbd_wrapper.seek(-10, 2)
self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset)
# test exceptions.
self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3)
self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1)
# offset should not have been changed by any of the previous
# operations.
self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset)
def test_tell(self):
self.assertEqual(0, self.mock_rbd_wrapper.tell())
self.mock_rbd_wrapper._inc_offset(10)
self.assertEqual(10, self.mock_rbd_wrapper.tell())
def test_flush(self):
with mock.patch.object(driver, 'LOG') as mock_logger:
self.meta.image.flush = mock.Mock()
self.mock_rbd_wrapper.flush()
self.meta.image.flush.assert_called_once_with()
self.meta.image.flush.reset_mock()
# this should be caught and logged silently.
self.meta.image.flush.side_effect = AttributeError
self.mock_rbd_wrapper.flush()
self.meta.image.flush.assert_called_once_with()
msg = _("flush() not supported in this version of librbd")
mock_logger.warning.assert_called_with(msg)
def test_fileno(self):
self.assertRaises(IOError, self.mock_rbd_wrapper.fileno)
def test_close(self):
self.mock_rbd_wrapper.close()
class ManagedRBDTestCase(test_volume.DriverTestCase):
driver_name = "cinder.volume.drivers.rbd.RBDDriver"
def setUp(self):
super(ManagedRBDTestCase, self).setUp()
# TODO(dosaboy): need to remove dependency on mox stubs here once
# image.fake has been converted to mock.
fake_image.stub_out_image_service(self.stubs)
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
self.called = []
def _create_volume_from_image(self, expected_status, raw=False,
clone_error=False):
"""Try to clone a volume from an image, and check status afterwards.
NOTE: if clone_error is True we force the image type to raw otherwise
clone_image is not called
"""
volume_id = 1
# See tests.image.fake for image types.
if raw:
image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
else:
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# creating volume testdata
db.volume_create(self.context,
{'id': volume_id,
'updated_at': timeutils.utcnow(),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
try:
if not clone_error:
self.volume.create_volume(self.context,
volume_id,
request_spec={'image_id': image_id})
else:
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_id,
request_spec={'image_id': image_id})
volume = db.volume_get(self.context, volume_id)
self.assertEqual(expected_status, volume['status'])
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_vol_from_image_status_available(self):
"""Clone raw image then verify volume is in available state."""
def _mock_clone_image(context, volume, image_location,
image_meta, image_service):
return {'provider_location': None}, True
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = _mock_clone_image
with mock.patch.object(self.volume.driver, 'create_volume') as \
mock_create:
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('available', raw=True)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertFalse(mock_create.called)
@mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
def test_create_vol_from_non_raw_image_status_available(self, mock_fetch):
"""Clone non-raw image then verify volume is in available state."""
def _mock_clone_image(context, volume, image_location,
image_meta, image_service):
return {'provider_location': None}, False
mock_fetch.return_value = mock.MagicMock(spec=utils.get_file_spec())
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = _mock_clone_image
with mock.patch.object(self.volume.driver, 'create_volume') as \
mock_create:
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('available', raw=False)
self.assertTrue(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertTrue(mock_create.called)
def test_create_vol_from_image_status_error(self):
"""Fail to clone raw image then verify volume is in error state."""
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = exception.CinderException
with mock.patch.object(self.volume.driver, 'create_volume'):
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('error', raw=True,
clone_error=True)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertFalse(self.volume.driver.create_volume.called)
def test_clone_failure(self):
driver = self.volume.driver
with mock.patch.object(driver, '_is_cloneable', lambda *args: False):
image_loc = (mock.Mock(), None)
actual = driver.clone_image(mock.Mock(),
mock.Mock(),
image_loc,
{},
mock.Mock())
self.assertEqual(({}, False), actual)
self.assertEqual(({}, False),
driver.clone_image('', object(), None, {}, ''))
def test_clone_success(self):
expected = ({'provider_location': None}, True)
driver = self.volume.driver
with mock.patch.object(self.volume.driver, '_is_cloneable') as \
mock_is_cloneable:
mock_is_cloneable.return_value = True
with mock.patch.object(self.volume.driver, '_clone') as \
mock_clone:
with mock.patch.object(self.volume.driver, '_resize') as \
mock_resize:
image_loc = ('rbd://fee/fi/fo/fum', None)
volume = {'name': 'vol1'}
actual = driver.clone_image(mock.Mock(),
volume,
image_loc,
{'disk_format': 'raw',
'id': 'id.foo'},
mock.Mock())
self.assertEqual(expected, actual)
mock_clone.assert_called_once_with(volume,
'fi', 'fo', 'fum')
mock_resize.assert_called_once_with(volume)
def test_clone_multilocation_success(self):
expected = ({'provider_location': None}, True)
driver = self.volume.driver
def cloneable_side_effect(url_location, image_meta):
return url_location == 'rbd://fee/fi/fo/fum'
with mock.patch.object(self.volume.driver, '_is_cloneable') \
as mock_is_cloneable, \
mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
mock.patch.object(self.volume.driver, '_resize') \
as mock_resize:
mock_is_cloneable.side_effect = cloneable_side_effect
image_loc = ('rbd://bee/bi/bo/bum',
[{'url': 'rbd://bee/bi/bo/bum'},
{'url': 'rbd://fee/fi/fo/fum'}])
volume = {'name': 'vol1'}
image_meta = mock.sentinel.image_meta
image_service = mock.sentinel.image_service
actual = driver.clone_image(self.context,
volume,
image_loc,
image_meta,
image_service)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_is_cloneable.call_count)
mock_clone.assert_called_once_with(volume,
'fi', 'fo', 'fum')
mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
image_meta)
mock_resize.assert_called_once_with(volume)
def test_clone_multilocation_failure(self):
expected = ({}, False)
driver = self.volume.driver
with mock.patch.object(driver, '_is_cloneable', return_value=False) \
as mock_is_cloneable, \
mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
mock.patch.object(self.volume.driver, '_resize') \
as mock_resize:
image_loc = ('rbd://bee/bi/bo/bum',
[{'url': 'rbd://bee/bi/bo/bum'},
{'url': 'rbd://fee/fi/fo/fum'}])
volume = {'name': 'vol1'}
image_meta = mock.sentinel.image_meta
image_service = mock.sentinel.image_service
actual = driver.clone_image(self.context,
volume,
image_loc,
image_meta,
image_service)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_is_cloneable.call_count)
mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
image_meta)
mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
image_meta)
self.assertFalse(mock_clone.called)
self.assertFalse(mock_resize.called)
| 41.597211 | 79 | 0.586663 |
8514a26670ff04da73c9c99ba2907300ac8a2757 | 619 | py | Python | ubb/fop/lab05-07/main.py | AlexanderChristian/private_courses | c80f3526af539e35f93b460f3909f669aaef573c | ["MIT"] | null | null | null | ubb/fop/lab05-07/main.py | AlexanderChristian/private_courses | c80f3526af539e35f93b460f3909f669aaef573c | ["MIT"] | 6 | 2020-03-04T20:52:39.000Z | 2022-03-31T00:33:07.000Z | ubb/fop/lab05-07/main.py | AlexanderChristian/private_courses | c80f3526af539e35f93b460f3909f669aaef573c | ["MIT"] | null | null | null |
import atexit
from tests.tester import Tester
from ui.LibraryApplication import LibraryApplication
from controllers.LibraryController import LibraryController
from repository.LibraryRepository import LibraryRepository
from model.sort import gnomeSort
from model.sort import testSort
from model.book import Book
from model.client import Client
__author__ = 'cosmin'
if __name__ == '__main__':
tester = Tester()
tester.testAll()
testSort()
repo = LibraryRepository()
controller = LibraryController(repo)
atexit.register(repo.saveHistory)
app = LibraryApplication(controller)
    app.run()
| 25.791667 | 59 | 0.785137 |
40b15e77b786a0dd62c833f18e16386fc67b5932 | 5,905 | py | Python | mne/parallel.py | libertyh/mne-python | bf03e17f323341a877dea62963c86cf140757896 | ["BSD-3-Clause"] | 1 | 2020-07-28T16:09:54.000Z | 2020-07-28T16:09:54.000Z | mne/parallel.py | gkmaro634/mne-python | 5409a89233b764f3f3f3136cf9bf6b8d5fb0a4fe | ["BSD-3-Clause"] | 1 | 2019-08-16T13:59:53.000Z | 2019-08-19T16:37:35.000Z | mne/parallel.py | gkmaro634/mne-python | 5409a89233b764f3f3f3136cf9bf6b8d5fb0a4fe | ["BSD-3-Clause"] | 1 | 2019-12-10T02:59:18.000Z | 2019-12-10T02:59:18.000Z |
"""Parallel util function."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
import logging
import os
from . import get_config
from .utils import logger, verbose, warn, ProgressBar
from .fixes import _get_args
if 'MNE_FORCE_SERIAL' in os.environ:
_force_serial = True
else:
_force_serial = None
@verbose
def parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',
total=None, prefer=None, verbose=None):
"""Return parallel instance with delayed function.
Util function to use joblib only if available
Parameters
----------
func: callable
A function
n_jobs: int
Number of jobs to run in parallel
max_nbytes : int, str, or None
Threshold on the minimum size of arrays passed to the workers that
triggers automated memory mapping. Can be an int in Bytes,
or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays. Use 'auto' to
use the value set using mne.set_memmap_min_size.
pre_dispatch : int, or string, optional
See :class:`joblib.Parallel`.
total : int | None
If int, use a progress bar to display the progress of dispatched
jobs. This should only be used when directly iterating, not when
using ``split_list`` or :func:`np.array_split`.
If None (default), do not add a progress bar.
prefer : str | None
If str, can be "processes" or "threads". See :class:`joblib.Parallel`.
Ignored if the joblib version is too old to support this.
.. versionadded:: 0.18
%(verbose)s INFO or DEBUG
will print parallel status, others will not.
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object
my_func: callable
func if not parallel or delayed(func)
n_jobs: int
Number of jobs >= 0
"""
should_print = (logger.level <= logging.INFO)
# for a single job, we don't need joblib
if n_jobs != 1:
try:
from joblib import Parallel, delayed
except ImportError:
try:
from sklearn.externals.joblib import Parallel, delayed
except ImportError:
warn('joblib not installed. Cannot run in parallel.')
n_jobs = 1
if n_jobs == 1:
n_jobs = 1
my_func = func
parallel = list
else:
# check if joblib is recent enough to support memmaping
p_args = _get_args(Parallel.__init__)
joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
cache_dir = get_config('MNE_CACHE_DIR', None)
if isinstance(max_nbytes, str) and max_nbytes == 'auto':
max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
if max_nbytes is not None:
if not joblib_mmap and cache_dir is not None:
warn('"MNE_CACHE_DIR" is set but a newer version of joblib is '
'needed to use the memmapping pool.')
if joblib_mmap and cache_dir is None:
logger.info(
'joblib supports memapping pool but "MNE_CACHE_DIR" '
'is not set in MNE-Python config. To enable it, use, '
'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
'store temporary files under /dev/shm and can result '
'in large memory savings.')
# create keyword arguments for Parallel
kwargs = {'verbose': 5 if should_print and total is None else 0}
kwargs['pre_dispatch'] = pre_dispatch
if 'prefer' in p_args:
kwargs['prefer'] = prefer
if joblib_mmap:
if cache_dir is None:
max_nbytes = None # disable memmaping
kwargs['temp_folder'] = cache_dir
kwargs['max_nbytes'] = max_nbytes
n_jobs = check_n_jobs(n_jobs)
parallel = Parallel(n_jobs, **kwargs)
my_func = delayed(func)
if total is not None:
def parallel_progress(op_iter):
pb = ProgressBar(total, verbose_bool=should_print)
return parallel(pb(op_iter))
parallel_out = parallel_progress
else:
parallel_out = parallel
return parallel_out, my_func, n_jobs
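# Example usage of parallel_func (illustrative sketch; ``my_function`` and
# ``items`` are placeholders, not names defined in this module):
#
#     parallel, p_func, n_jobs = parallel_func(my_function, n_jobs=2)
#     results = parallel(p_func(item) for item in items)
#
# When joblib is missing or n_jobs resolves to 1, ``parallel`` is plain
# ``list`` and ``p_func`` is the original function, so the same call pattern
# degenerates to ``list(my_function(item) for item in items)``.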
def check_n_jobs(n_jobs, allow_cuda=False):
"""Check n_jobs in particular for negative values.
Parameters
----------
n_jobs : int
The number of jobs.
allow_cuda : bool
Allow n_jobs to be 'cuda'. Default: False.
Returns
-------
n_jobs : int
The checked number of jobs. Always positive (or 'cuda' if
applicable.)
"""
if not isinstance(n_jobs, int):
if not allow_cuda:
raise ValueError('n_jobs must be an integer')
elif not isinstance(n_jobs, str) or n_jobs != 'cuda':
raise ValueError('n_jobs must be an integer, or "cuda"')
# else, we have n_jobs='cuda' and this is okay, so do nothing
elif _force_serial:
n_jobs = 1
logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
'serial mode.')
elif n_jobs <= 0:
try:
import multiprocessing
n_cores = multiprocessing.cpu_count()
n_jobs = min(n_cores + n_jobs + 1, n_cores)
if n_jobs <= 0:
raise ValueError('If n_jobs has a negative value it must not '
'be less than the number of CPUs present. '
'You\'ve got %s CPUs' % n_cores)
except ImportError:
# only warn if they tried to use something other than 1 job
if n_jobs != 1:
warn('multiprocessing not installed. Cannot run in parallel.')
n_jobs = 1
return n_jobs
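# Example (sketch): on an 8-core machine, check_n_jobs(-1) resolves to 8 and
# check_n_jobs(-2) to 7 via the min(n_cores + n_jobs + 1, n_cores) rule above,
# while check_n_jobs('cuda', allow_cuda=True) returns 'cuda' unchanged.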
| 35.359281 | 79 | 0.602371 |
7e653828f0538d63d5a0053be206b584f1b6d857 | 4,350 | py | Python | feature_engine/base_transformers.py | iahsanujunda/feature_engine | 46c6bd5a06626b0789fcc1367069d065010794a1 | ["BSD-3-Clause"] | 1 | 2020-11-15T13:15:28.000Z | 2020-11-15T13:15:28.000Z | feature_engine/base_transformers.py | myamullaciencia/feature_engine | 46c6bd5a06626b0789fcc1367069d065010794a1 | ["BSD-3-Clause"] | null | null | null | feature_engine/base_transformers.py | myamullaciencia/feature_engine | 46c6bd5a06626b0789fcc1367069d065010794a1 | ["BSD-3-Clause"] | null | null | null |
# Transformation methods are shared by most transformer groups.
# Each transformer can inherit the transform method from these base classes.
import warnings
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from feature_engine.dataframe_checks import (
_is_dataframe,
_check_input_matches_training_df,
_check_contains_na,
)
from feature_engine.variable_manipulation import _find_numerical_variables
class BaseImputer(BaseEstimator, TransformerMixin):
# Common transformation procedure for most feature imputers
def transform(self, X):
"""
Replaces missing data with the learned parameters.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The input samples.
Returns
-------
X_transformed : pandas dataframe of shape = [n_samples, n_features]
The dataframe without missing values in the selected variables.
"""
# Check method fit has been called
check_is_fitted(self)
# check that input is a dataframe
X = _is_dataframe(X)
# Check that input data contains same number of columns than
# the dataframe used to fit the imputer.
_check_input_matches_training_df(X, self.input_shape_[1])
# replaces missing data with the learned parameters
for variable in self.imputer_dict_:
X[variable].fillna(self.imputer_dict_[variable], inplace=True)
return X
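    # Usage sketch (``MedianImputer`` below is hypothetical, not part of this
    # module): a concrete imputer only has to provide a fit() that stores
    # ``input_shape_`` and ``imputer_dict_``; the shared transform() above
    # then performs the replacement.
    #
    #     class MedianImputer(BaseImputer):
    #         def __init__(self, variables):
    #             self.variables = variables
    #
    #         def fit(self, X, y=None):
    #             X = _is_dataframe(X)
    #             self.imputer_dict_ = X[self.variables].median().to_dict()
    #             self.input_shape_ = X.shape
    #             return self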
class BaseCategoricalTransformer(BaseEstimator, TransformerMixin):
# Common transformation procedure for most variable encoders
def transform(self, X):
""" Replaces categories with the learned parameters.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features].
The input samples.
Returns
-------
X_transformed : pandas dataframe of shape = [n_samples, n_features].
The dataframe containing categories replaced by numbers.
"""
# Check method fit has been called
check_is_fitted(self)
# check that input is a dataframe
X = _is_dataframe(X)
# check if dataset contains na
_check_contains_na(X, self.variables)
# Check that the dataframe contains the same number of columns
# than the dataframe
# used to fit the imputer.
_check_input_matches_training_df(X, self.input_shape_[1])
# replace categories by the learned parameters
for feature in self.encoder_dict_.keys():
X[feature] = X[feature].map(self.encoder_dict_[feature])
# check if NaN values were introduced by the encoding
if X[self.encoder_dict_.keys()].isnull().sum().sum() > 0:
warnings.warn(
"NaN values were introduced in the returned dataframe by the encoder."
"This means that some of the categories in the input dataframe were "
"not present in the training set used when the fit method was called. "
"Thus, mappings for those categories does not exist. Try using the "
"RareLabelCategoricalEncoder to remove infrequent categories before "
"calling this encoder."
)
return X
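    # Example of the warning path above (illustrative values only): with
    # encoder_dict_ = {'colour': {'red': 0, 'blue': 1}} learned during fit,
    # an incoming row with colour == 'green' is mapped to NaN and the warning
    # fires, because that category was never seen in the training data.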
class BaseNumericalTransformer(BaseEstimator, TransformerMixin):
# shared set-up procedures across numerical transformers, i.e.,
# variable transformers, discretisers, outlier handlers
def fit(self, X, y=None):
# check input dataframe
X = _is_dataframe(X)
# find or check for numerical variables
self.variables = _find_numerical_variables(X, self.variables)
# check if dataset contains na
_check_contains_na(X, self.variables)
return X
def transform(self, X):
# Check method fit has been called
check_is_fitted(self)
# check that input is a dataframe
X = _is_dataframe(X)
# check if dataset contains na
_check_contains_na(X, self.variables)
# Check that the dataframe contains the same number of columns
# than the dataframe used to fit the imputer.
_check_input_matches_training_df(X, self.input_shape_[1])
return X
| 32.954545 | 87 | 0.662299 |
b7ec5f45c56d3759b8ab7bfc00162a5371be0bb7 | 22,703 | py | Python | tests/sentry/receivers/test_featureadoption.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | ["BSD-3-Clause"] | null | null | null | tests/sentry/receivers/test_featureadoption.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | ["BSD-3-Clause"] | null | null | null | tests/sentry/receivers/test_featureadoption.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import
from django.utils import timezone
from sentry.models import FeatureAdoption, GroupAssignee, GroupTombstone, Rule
from sentry.plugins.bases import IssueTrackingPlugin2, NotificationPlugin
from sentry.signals import (
alert_rule_created,
event_processed,
first_event_received,
project_created,
member_joined,
plugin_enabled,
user_feedback_received,
issue_assigned,
issue_resolved,
advanced_search,
save_search_created,
inbound_filter_toggled,
sso_enabled,
data_scrubber_enabled,
)
from sentry.receivers.rules import DEFAULT_RULE_DATA
from sentry.testutils import SnubaTestCase, TestCase
class FeatureAdoptionTest(TestCase, SnubaTestCase):
def setUp(self):
super(FeatureAdoptionTest, self).setUp()
self.now = timezone.now()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(teams=[self.team])
def test_bad_feature_slug(self):
FeatureAdoption.objects.record(self.organization.id, "xxx")
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert feature_complete is None
def test_all_passed_feature_slugs_are_complete(self):
event1 = self.store_event(
data={"tags": {"environment": "prod"}}, project_id=self.project.id
)
event2 = self.store_event(
data={"tags": {"environment": "prod"}}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event1, sender=type(self.project))
event_processed.send(project=self.project, event=event2, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete.complete
def test_first_event(self):
event = self.store_event(
data={"platform": "javascript", "message": "javascript error message"},
project_id=self.project.id,
)
first_event_received.send(project=self.project, event=event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
def test_javascript(self):
event = self.store_event(data={"platform": "javascript"}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
def test_python(self):
event = self.store_event(
data={"platform": "python", "message": "python error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
python = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="python")
assert python.complete
def test_node(self):
event = self.store_event(
data={"platform": "node", "message": "node error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
node = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="node")
assert node.complete
def test_ruby(self):
event = self.store_event(
data={"platform": "ruby", "message": "ruby error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
ruby = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="ruby")
assert ruby.complete
def test_java(self):
event = self.store_event(
data={"platform": "java", "message": "java error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
java = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="java")
assert java.complete
def test_cocoa(self):
event = self.store_event(
data={"platform": "cocoa", "message": "cocoa error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
cocoa = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cocoa")
assert cocoa.complete
def test_objc(self):
event = self.store_event(
data={"platform": "objc", "message": "objc error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
objc = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="objc")
assert objc.complete
def test_php(self):
event = self.store_event(
data={"platform": "php", "message": "php error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
php = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="php")
assert php.complete
def test_go(self):
event = self.store_event(
data={"platform": "go", "message": "go error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
go = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="go")
assert go.complete
def test_csharp(self):
event = self.store_event(
data={"platform": "csharp", "message": "csharp error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
csharp = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="csharp")
assert csharp.complete
def test_perl(self):
event = self.store_event(
data={"platform": "perl", "message": "perl error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
perl = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="perl")
assert perl.complete
def test_elixir(self):
event = self.store_event(
data={"platform": "elixir", "message": "elixir error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
elixir = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="elixir")
assert elixir.complete
def test_cfml(self):
event = self.store_event(
data={"platform": "cfml", "message": "cfml error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
cfml = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cfml")
assert cfml.complete
def test_groovy(self):
event = self.store_event(
data={"platform": "groovy", "message": "groovy error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
groovy = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="groovy")
assert groovy.complete
def test_release_tracking(self):
event = self.store_event(data={"tags": {"sentry:release": "1"}}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
def test_environment_tracking(self):
event = self.store_event(data={"environment": "prod"}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
def test_bulk_create(self):
event = self.store_event(
data={
"platform": "javascript",
"environment": "prod",
"tags": {"sentry:release": "abc"},
"user": {"id": "123"},
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
javascript = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="javascript"
)
assert javascript
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_user_tracking(self):
event = self.store_event(data={"user": {"id": "123"}}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_no_user_tracking_for_ip_address_only(self):
"""test to see if just sending ip address doesn't check the user tracking box"""
userless_event = self.store_event(
data={"user": {"ip_address": "0.0.0.0"}}, project_id=self.project.id
)
event_processed.send(project=self.project, event=userless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete is None
def test_no_env_tracking(self):
envless_event = self.store_event(
data={"platform": "javascript"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=envless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete is None
def test_custom_tags(self):
event = self.store_event(data={}, project_id=self.project.id)
event.data["tags"].append(("foo", "bar"))
assert event.get_tag("foo") == "bar"
event_processed.send(project=self.project, event=event, sender=type(self.project))
custom_tags = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="custom_tags"
)
assert custom_tags
def test_source_maps(self):
event = self.store_event(
data={
"platform": "javascript",
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
},
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
def test_breadcrumbs(self):
event = self.store_event(
data={
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST",
},
}
]
}
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_multiple_events(self):
simple_event = self.store_event(
data={"message": "javascript error message", "platform": "javascript"},
project_id=self.project.id,
)
first_event_received.send(
project=self.project, event=simple_event, sender=type(self.project)
)
event_processed.send(project=self.project, event=simple_event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
full_event = self.store_event(
data={
"message": "javascript error message",
"platform": "javascript",
"environment": "prod",
"tags": {"sentry:release": "abc"},
"user": {"id": "123"},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
},
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST",
},
}
]
},
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=full_event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_user_feedback(self):
user_feedback_received.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_feedback"
)
assert feature_complete
def test_project_created(self):
project_created.send(project=self.project, user=self.owner, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_project"
)
assert feature_complete
def test_member_joined(self):
member = self.create_member(
organization=self.organization, teams=[self.team], user=self.create_user()
)
member_joined.send(member=member, organization=self.organization, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="invite_team"
)
assert feature_complete
def test_assignment(self):
GroupAssignee.objects.create(
group_id=self.group.id, user_id=self.user.id, project_id=self.project.id
)
issue_assigned.send(
project=self.project, group=self.group, user=self.user, sender="something"
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="assignment"
)
assert feature_complete
def test_resolved_in_release(self):
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="in_next_release",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert feature_complete
def test_resolved_manually(self):
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="now",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert not feature_complete
def test_advanced_search(self):
advanced_search.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="advanced_search"
)
assert feature_complete
def test_save_search(self):
save_search_created.send(project=self.project, user=self.user, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="saved_search"
)
assert feature_complete
def test_inbound_filters(self):
inbound_filter_toggled.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="inbound_filters"
)
assert feature_complete
def test_alert_rules(self):
rule = Rule.objects.create(
project=self.project, label="Trivially modified rule", data=DEFAULT_RULE_DATA
)
alert_rule_created.send(
user=self.owner,
project=self.project,
rule=rule,
rule_type="issue",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="alert_rules"
)
assert feature_complete
def test_issue_tracker_plugin(self):
plugin_enabled.send(
plugin=IssueTrackingPlugin2(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="issue_tracker_integration"
)
assert feature_complete
def test_notification_plugin(self):
plugin_enabled.send(
plugin=NotificationPlugin(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="notification_integration"
)
assert feature_complete
def test_sso(self):
sso_enabled.send(
organization=self.organization,
user=self.user,
provider="google",
sender=type(self.organization),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="sso"
)
assert feature_complete
def test_data_scrubber(self):
data_scrubber_enabled.send(organization=self.organization, sender=type(self.organization))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="data_scrubbers"
)
assert feature_complete
def test_delete_and_discard(self):
GroupTombstone.objects.create(previous_group_id=self.group.id, project=self.project)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="delete_and_discard"
)
assert feature_complete
| 38.544992 | 149 | 0.609963 |
5c6ad587928af26fe0259f211e65b6b49220a490 | 1,218 | py | Python | krux/object.py | SpectrumIO/python-krux-stdlib | cb99e5dbd52711a76eb4fbb68a90fc1373616c07 | ["MIT"] | 3 | 2016-02-05T22:46:11.000Z | 2017-07-15T03:23:41.000Z | krux/object.py | SpectrumIO/python-krux-stdlib | cb99e5dbd52711a76eb4fbb68a90fc1373616c07 | ["MIT"] | 45 | 2015-01-13T00:59:05.000Z | 2019-10-16T01:14:02.000Z | krux/object.py | SpectrumIO/python-krux-stdlib | cb99e5dbd52711a76eb4fbb68a90fc1373616c07 | ["MIT"] | 2 | 2015-08-08T04:17:30.000Z | 2021-03-02T18:09:47.000Z |
# © Copyright 2013-2020 Salesforce.com, inc.
from __future__ import generator_stop
from abc import ABCMeta
from krux.logging import get_logger
from krux.stats import get_stats
class Object(object, metaclass=ABCMeta):
"""
An abstract class to handle the common Krux coding pattern
.. seealso:: https://docs.python.org/2/library/abc.html
"""
def __init__(self, name=None, logger=None, stats=None):
"""
Basic init method that sets up name, logger, and stats
:param name: Name of the application
:type name: str
:param logger: Logger, recommended to be obtained using krux.cli.Application
:type logger: logging.Logger
:param stats: Stats, recommended to be obtained using krux.cli.Application
:type stats: kruxstatsd.StatsClient
"""
# Call to the superclass to bootstrap.
super(Object, self).__init__()
# Private variables, not to be used outside this module
self._name = name if name is not None else self.__class__.__name__
self._logger = logger if logger is not None else get_logger(self._name)
self._stats = stats if stats is not None else get_stats(prefix=self._name)
| 34.8 | 84 | 0.686371 |
be6528415bedcc22d19f89616ddae8848dbc562c | 4,507 | py | Python | tests/test_s3_merge_upsert.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | 1 | 2021-08-06T07:55:34.000Z | 2021-08-06T07:55:34.000Z | tests/test_s3_merge_upsert.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | 1 | 2021-03-12T20:39:41.000Z | 2021-03-15T08:21:03.000Z | tests/test_s3_merge_upsert.py | Glovo/aws-data-wrangler | ce0444ecc210d51eec1aeb2e085aabe536d51172 | [
"Apache-2.0"
] | null | null | null | import datetime
import logging
import pandas as pd
import pytest
import awswrangler as wr
from awswrangler.s3._merge_upsert_table import _is_data_quality_sufficient, merge_upsert_table
logger = logging.getLogger("awswrangler")
logger.setLevel(logging.DEBUG)
def test_is_data_quality_sufficient_check_column_names():
# Check both table have the same columns
existing_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
delta_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
primary_key = ["col_a", "col_b"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key)
def test_is_data_quality_sufficient_mismatch_column_names():
    # Check that both dataframes have the same columns.
    # In this case they are different, so the check should fail.
existing_df = pd.DataFrame({"c0": [1, 2, 1, 2], "c1": [1, 2, 1, 2], "c2": [2, 1, 2, 1]})
delta_df = pd.DataFrame({"d0": [1, 2, 1, 2], "d1": [1, 2, 1, 2], "c2": [2, 1, 2, 1]})
primary_key = ["c0", "c1"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is False
def test_is_data_quality_sufficient_same_column_names_different_row_count():
# Check both table have the same columns and
existing_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], columns=["col_a", "col_b", "col_c"])
delta_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
primary_key = ["col_a", "col_b"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is True
def test_is_data_quality_sufficient_missing_primary_key():
# Check both tables have the same primary key
existing_df = pd.DataFrame({"c0": [1, 2, 1], "c1": [1, 2, 1], "c2": [2, 1, 1]})
delta_df = pd.DataFrame({"c0": [1, 2, 1, 2]})
primary_key = ["c0", "c1"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is False
def test_is_data_quality_sufficient_fail_for_duplicate_data():
# Check for duplicate data inside the dataframe
existing_df = pd.DataFrame([[1, 2, 3], [1, 2, 3], [7, 8, 9], [10, 11, 12]], columns=["col_a", "col_b", "col_c"])
delta_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
primary_key = ["col_a", "col_b"]
assert _is_data_quality_sufficient(existing_df=existing_df, delta_df=delta_df, primary_key=primary_key) is False
def test_table_does_not_exist(glue_database, glue_table):
# Fail as table does not exist
delta_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["col_a", "col_b", "col_c"])
primary_key = ["col_a", "col_b"]
with pytest.raises(AttributeError):
merge_upsert_table(delta_df=delta_df, database=glue_database, table=glue_table, primary_key=primary_key)
def test_success_case(glue_database, glue_table, path):
df = pd.DataFrame(
{"id": [1, 2], "cchar": ["foo", "boo"], "date": [datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)]}
)
# Create the table
wr.s3.to_parquet(df=df, path=path, index=False, dataset=True, database=glue_database, table=glue_table)["paths"]
delta_df = pd.DataFrame({"id": [1], "cchar": ["foo"], "date": [datetime.date(2021, 1, 1)]})
primary_key = ["id", "cchar"]
merge_upsert_table(delta_df=delta_df, database=glue_database, table=glue_table, primary_key=primary_key)
merged_df = wr.s3.read_parquet_table(database=glue_database, table=glue_table)
# Row count should still be 2 rows
assert merged_df.shape == (2, 3)
def test_success_case2(glue_database, glue_table, path):
df = pd.DataFrame(
{"id": [1, 2], "cchar": ["foo", "boo"], "date": [datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)]}
)
# Create the table
wr.s3.to_parquet(df=df, path=path, index=False, dataset=True, database=glue_database, table=glue_table)["paths"]
delta_df = pd.DataFrame(
{"id": [1, 2], "cchar": ["foo", "boo"], "date": [datetime.date(2021, 1, 1), datetime.date(2021, 1, 2)]}
)
primary_key = ["id", "cchar"]
merge_upsert_table(delta_df=delta_df, database=glue_database, table=glue_table, primary_key=primary_key)
merged_df = wr.s3.read_parquet_table(database=glue_database, table=glue_table)
# Row count should still be 2 rows
assert merged_df.shape == (2, 3)
| 49.527473 | 116 | 0.678722 |
deb062c48dc791c67574092b3e55e7f5d40c6526 | 16,020 | py | Python | neural_network/gan.py | kostogls/Python | 81c2bcb3cbaeb4f861bc5c44df2526a89c616512 | ["MIT"] | 5 | 2020-03-04T18:50:13.000Z | 2020-05-05T11:46:13.000Z | neural_network/gan.py | Mathewsmusukuma/Python | 4866b1330bc7c77c0ed0e050e6b99efdeb026448 | ["MIT"] | 1 | 2021-12-19T23:22:00.000Z | 2021-12-19T23:22:00.000Z | neural_network/gan.py | Mathewsmusukuma/Python | 4866b1330bc7c77c0ed0e050e6b99efdeb026448 | ["MIT"] | 4 | 2020-03-06T00:53:00.000Z | 2021-01-05T13:42:35.000Z |
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import shuffle
import input_data
random_numer = 42
np.random.seed(random_numer)
def ReLu(x):
mask = (x > 0) * 1.0
return mask * x
def d_ReLu(x):
mask = (x > 0) * 1.0
return mask
def arctan(x):
return np.arctan(x)
def d_arctan(x):
return 1 / (1 + x ** 2)
def log(x):
return 1 / (1 + np.exp(-1 * x))
def d_log(x):
return log(x) * (1 - log(x))
def tanh(x):
return np.tanh(x)
def d_tanh(x):
return 1 - np.tanh(x) ** 2
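# Quick numerical sanity check for the derivatives above (sketch, not part of
# the original script): a central difference should agree closely, e.g.
#
#     x0, h = 0.3, 1e-5
#     assert abs((tanh(x0 + h) - tanh(x0 - h)) / (2 * h) - d_tanh(x0)) < 1e-8
#     assert abs((log(x0 + h) - log(x0 - h)) / (2 * h) - d_log(x0)) < 1e-8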
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis("off")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect("equal")
plt.imshow(sample.reshape(28, 28), cmap="Greys_r")
return fig
if __name__ == "__main__":
    # 1. Load data and declare hyperparameters
print("--------- Load Data ----------")
mnist = input_data.read_data_sets("MNIST_data", one_hot=False)
temp = mnist.test
images, labels = temp.images, temp.labels
images, labels = shuffle(np.asarray(images), np.asarray(labels))
num_epoch = 10
learing_rate = 0.00009
G_input = 100
hidden_input, hidden_input2, hidden_input3 = 128, 256, 346
hidden_input4, hidden_input5, hidden_input6 = 480, 560, 686
print("--------- Declare Hyper Parameters ----------")
# 2. Declare Weights
D_W1 = (
np.random.normal(size=(784, hidden_input), scale=(1.0 / np.sqrt(784 / 2.0)))
* 0.002
)
# D_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
D_b1 = np.zeros(hidden_input)
D_W2 = (
np.random.normal(
size=(hidden_input, 1), scale=(1.0 / np.sqrt(hidden_input / 2.0))
)
* 0.002
)
# D_b2 = np.random.normal(size=(1),scale=(1. / np.sqrt(1 / 2.))) *0.002
D_b2 = np.zeros(1)
G_W1 = (
np.random.normal(
size=(G_input, hidden_input), scale=(1.0 / np.sqrt(G_input / 2.0))
)
* 0.002
)
# G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
G_b1 = np.zeros(hidden_input)
G_W2 = (
np.random.normal(
size=(hidden_input, hidden_input2),
scale=(1.0 / np.sqrt(hidden_input / 2.0)),
)
* 0.002
)
# G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
G_b2 = np.zeros(hidden_input2)
G_W3 = (
np.random.normal(
size=(hidden_input2, hidden_input3),
scale=(1.0 / np.sqrt(hidden_input2 / 2.0)),
)
* 0.002
)
# G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
G_b3 = np.zeros(hidden_input3)
G_W4 = (
np.random.normal(
size=(hidden_input3, hidden_input4),
scale=(1.0 / np.sqrt(hidden_input3 / 2.0)),
)
* 0.002
)
# G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
G_b4 = np.zeros(hidden_input4)
G_W5 = (
np.random.normal(
size=(hidden_input4, hidden_input5),
scale=(1.0 / np.sqrt(hidden_input4 / 2.0)),
)
* 0.002
)
# G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
G_b5 = np.zeros(hidden_input5)
G_W6 = (
np.random.normal(
size=(hidden_input5, hidden_input6),
scale=(1.0 / np.sqrt(hidden_input5 / 2.0)),
)
* 0.002
)
# G_b1 = np.random.normal(size=(128),scale=(1. / np.sqrt(128 / 2.))) *0.002
G_b6 = np.zeros(hidden_input6)
G_W7 = (
np.random.normal(
size=(hidden_input6, 784), scale=(1.0 / np.sqrt(hidden_input6 / 2.0))
)
* 0.002
)
# G_b2 = np.random.normal(size=(784),scale=(1. / np.sqrt(784 / 2.))) *0.002
G_b7 = np.zeros(784)
    # 3. For Adam Optimizer
v1, m1 = 0, 0
v2, m2 = 0, 0
v3, m3 = 0, 0
v4, m4 = 0, 0
v5, m5 = 0, 0
v6, m6 = 0, 0
v7, m7 = 0, 0
v8, m8 = 0, 0
v9, m9 = 0, 0
v10, m10 = 0, 0
v11, m11 = 0, 0
v12, m12 = 0, 0
v13, m13 = 0, 0
v14, m14 = 0, 0
v15, m15 = 0, 0
v16, m16 = 0, 0
v17, m17 = 0, 0
v18, m18 = 0, 0
beta_1, beta_2, eps = 0.9, 0.999, 0.00000001
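    # Adam state declared above: for every parameter P with gradient g the
    # update blocks below compute
    #   m <- beta_1 * m + (1 - beta_1) * g
    #   v <- beta_2 * v + (1 - beta_2) * g ** 2
    #   P <- P - lr * (m / (1 - beta_1)) / (sqrt(v / (1 - beta_2)) + eps)
    # Note the bias correction uses constant (1 - beta) factors rather than
    # the per-step (1 - beta ** t) of textbook Adam.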
print("--------- Started Training ----------")
for iter in range(num_epoch):
random_int = np.random.randint(len(images) - 5)
current_image = np.expand_dims(images[random_int], axis=0)
# Func: Generate The first Fake Data
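        # The generator maps a 100-d uniform noise vector Z through seven
        # affine layers (arctan/ReLU/tanh nonlinearities) to a 784-d sigmoid
        # output, i.e. one flattened 28x28 fake MNIST image.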
Z = np.random.uniform(-1.0, 1.0, size=[1, G_input])
Gl1 = Z.dot(G_W1) + G_b1
Gl1A = arctan(Gl1)
Gl2 = Gl1A.dot(G_W2) + G_b2
Gl2A = ReLu(Gl2)
Gl3 = Gl2A.dot(G_W3) + G_b3
Gl3A = arctan(Gl3)
Gl4 = Gl3A.dot(G_W4) + G_b4
Gl4A = ReLu(Gl4)
Gl5 = Gl4A.dot(G_W5) + G_b5
Gl5A = tanh(Gl5)
Gl6 = Gl5A.dot(G_W6) + G_b6
Gl6A = ReLu(Gl6)
Gl7 = Gl6A.dot(G_W7) + G_b7
current_fake_data = log(Gl7)
# Func: Forward Feed for Real data
Dl1_r = current_image.dot(D_W1) + D_b1
Dl1_rA = ReLu(Dl1_r)
Dl2_r = Dl1_rA.dot(D_W2) + D_b2
Dl2_rA = log(Dl2_r)
# Func: Forward Feed for Fake Data
Dl1_f = current_fake_data.dot(D_W1) + D_b1
Dl1_fA = ReLu(Dl1_f)
Dl2_f = Dl1_fA.dot(D_W2) + D_b2
Dl2_fA = log(Dl2_f)
# Func: Cost D
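        # Standard discriminator loss -log(D(real)) - log(1 - D(G(z))); this
        # value is only reported, the weight updates use the per-branch
        # gradients computed below.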
        D_cost = -np.log(Dl2_rA) - np.log(1.0 - Dl2_fA)
# Func: Gradient
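        # The grad_f_* terms back-propagate the -log(1 - D(G(z))) branch (fake
        # image) and the grad_r_* terms the -log(D(x)) branch (real image);
        # the two are summed per parameter before the Adam update.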
grad_f_w2_part_1 = 1 / (1.0 - Dl2_fA)
grad_f_w2_part_2 = d_log(Dl2_f)
grad_f_w2_part_3 = Dl1_fA
grad_f_w2 = grad_f_w2_part_3.T.dot(grad_f_w2_part_1 * grad_f_w2_part_2)
grad_f_b2 = grad_f_w2_part_1 * grad_f_w2_part_2
grad_f_w1_part_1 = (grad_f_w2_part_1 * grad_f_w2_part_2).dot(D_W2.T)
grad_f_w1_part_2 = d_ReLu(Dl1_f)
grad_f_w1_part_3 = current_fake_data
grad_f_w1 = grad_f_w1_part_3.T.dot(grad_f_w1_part_1 * grad_f_w1_part_2)
grad_f_b1 = grad_f_w1_part_1 * grad_f_w1_part_2
grad_r_w2_part_1 = -1 / Dl2_rA
grad_r_w2_part_2 = d_log(Dl2_r)
grad_r_w2_part_3 = Dl1_rA
grad_r_w2 = grad_r_w2_part_3.T.dot(grad_r_w2_part_1 * grad_r_w2_part_2)
grad_r_b2 = grad_r_w2_part_1 * grad_r_w2_part_2
grad_r_w1_part_1 = (grad_r_w2_part_1 * grad_r_w2_part_2).dot(D_W2.T)
grad_r_w1_part_2 = d_ReLu(Dl1_r)
grad_r_w1_part_3 = current_image
grad_r_w1 = grad_r_w1_part_3.T.dot(grad_r_w1_part_1 * grad_r_w1_part_2)
grad_r_b1 = grad_r_w1_part_1 * grad_r_w1_part_2
grad_w1 = grad_f_w1 + grad_r_w1
grad_b1 = grad_f_b1 + grad_r_b1
grad_w2 = grad_f_w2 + grad_r_w2
grad_b2 = grad_f_b2 + grad_r_b2
# ---- Update Gradient ----
m1 = beta_1 * m1 + (1 - beta_1) * grad_w1
v1 = beta_2 * v1 + (1 - beta_2) * grad_w1 ** 2
m2 = beta_1 * m2 + (1 - beta_1) * grad_b1
v2 = beta_2 * v2 + (1 - beta_2) * grad_b1 ** 2
m3 = beta_1 * m3 + (1 - beta_1) * grad_w2
v3 = beta_2 * v3 + (1 - beta_2) * grad_w2 ** 2
m4 = beta_1 * m4 + (1 - beta_1) * grad_b2
v4 = beta_2 * v4 + (1 - beta_2) * grad_b2 ** 2
D_W1 = D_W1 - (learing_rate / (np.sqrt(v1 / (1 - beta_2)) + eps)) * (
m1 / (1 - beta_1)
)
D_b1 = D_b1 - (learing_rate / (np.sqrt(v2 / (1 - beta_2)) + eps)) * (
m2 / (1 - beta_1)
)
D_W2 = D_W2 - (learing_rate / (np.sqrt(v3 / (1 - beta_2)) + eps)) * (
m3 / (1 - beta_1)
)
D_b2 = D_b2 - (learing_rate / (np.sqrt(v4 / (1 - beta_2)) + eps)) * (
m4 / (1 - beta_1)
)
# Func: Forward Feed for G
Z = np.random.uniform(-1.0, 1.0, size=[1, G_input])
Gl1 = Z.dot(G_W1) + G_b1
Gl1A = arctan(Gl1)
Gl2 = Gl1A.dot(G_W2) + G_b2
Gl2A = ReLu(Gl2)
Gl3 = Gl2A.dot(G_W3) + G_b3
Gl3A = arctan(Gl3)
Gl4 = Gl3A.dot(G_W4) + G_b4
Gl4A = ReLu(Gl4)
Gl5 = Gl4A.dot(G_W5) + G_b5
Gl5A = tanh(Gl5)
Gl6 = Gl5A.dot(G_W6) + G_b6
Gl6A = ReLu(Gl6)
Gl7 = Gl6A.dot(G_W7) + G_b7
current_fake_data = log(Gl7)
Dl1 = current_fake_data.dot(D_W1) + D_b1
Dl1_A = ReLu(Dl1)
Dl2 = Dl1_A.dot(D_W2) + D_b2
Dl2_A = log(Dl2)
# Func: Cost G
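        # Non-saturating generator loss: minimize -log(D(G(z))) rather than
        # log(1 - D(G(z))), which gives stronger gradients early in training.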
G_cost = -np.log(Dl2_A)
# Func: Gradient
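        # Back-propagate -log(D(G(z))) through the discriminator (whose
        # weights are held fixed here) to the generator output, then through
        # each generator layer in turn.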
grad_G_w7_part_1 = ((-1 / Dl2_A) * d_log(Dl2).dot(D_W2.T) * (d_ReLu(Dl1))).dot(
D_W1.T
)
grad_G_w7_part_2 = d_log(Gl7)
grad_G_w7_part_3 = Gl6A
        grad_G_w7 = grad_G_w7_part_3.T.dot(grad_G_w7_part_1 * grad_G_w7_part_2)
grad_G_b7 = grad_G_w7_part_1 * grad_G_w7_part_2
grad_G_w6_part_1 = (grad_G_w7_part_1 * grad_G_w7_part_2).dot(G_W7.T)
grad_G_w6_part_2 = d_ReLu(Gl6)
grad_G_w6_part_3 = Gl5A
grad_G_w6 = grad_G_w6_part_3.T.dot(grad_G_w6_part_1 * grad_G_w6_part_2)
grad_G_b6 = grad_G_w6_part_1 * grad_G_w6_part_2
grad_G_w5_part_1 = (grad_G_w6_part_1 * grad_G_w6_part_2).dot(G_W6.T)
grad_G_w5_part_2 = d_tanh(Gl5)
grad_G_w5_part_3 = Gl4A
grad_G_w5 = grad_G_w5_part_3.T.dot(grad_G_w5_part_1 * grad_G_w5_part_2)
grad_G_b5 = grad_G_w5_part_1 * grad_G_w5_part_2
grad_G_w4_part_1 = (grad_G_w5_part_1 * grad_G_w5_part_2).dot(G_W5.T)
grad_G_w4_part_2 = d_ReLu(Gl4)
grad_G_w4_part_3 = Gl3A
grad_G_w4 = grad_G_w4_part_3.T.dot(grad_G_w4_part_1 * grad_G_w4_part_2)
grad_G_b4 = grad_G_w4_part_1 * grad_G_w4_part_2
grad_G_w3_part_1 = (grad_G_w4_part_1 * grad_G_w4_part_2).dot(G_W4.T)
grad_G_w3_part_2 = d_arctan(Gl3)
grad_G_w3_part_3 = Gl2A
grad_G_w3 = grad_G_w3_part_3.T.dot(grad_G_w3_part_1 * grad_G_w3_part_2)
grad_G_b3 = grad_G_w3_part_1 * grad_G_w3_part_2
grad_G_w2_part_1 = (grad_G_w3_part_1 * grad_G_w3_part_2).dot(G_W3.T)
grad_G_w2_part_2 = d_ReLu(Gl2)
grad_G_w2_part_3 = Gl1A
grad_G_w2 = grad_G_w2_part_3.T.dot(grad_G_w2_part_1 * grad_G_w2_part_2)
grad_G_b2 = grad_G_w2_part_1 * grad_G_w2_part_2
grad_G_w1_part_1 = (grad_G_w2_part_1 * grad_G_w2_part_2).dot(G_W2.T)
grad_G_w1_part_2 = d_arctan(Gl1)
grad_G_w1_part_3 = Z
grad_G_w1 = grad_G_w1_part_3.T.dot(grad_G_w1_part_1 * grad_G_w1_part_2)
grad_G_b1 = grad_G_w1_part_1 * grad_G_w1_part_2
# ---- Update Gradient ----
m5 = beta_1 * m5 + (1 - beta_1) * grad_G_w1
v5 = beta_2 * v5 + (1 - beta_2) * grad_G_w1 ** 2
m6 = beta_1 * m6 + (1 - beta_1) * grad_G_b1
v6 = beta_2 * v6 + (1 - beta_2) * grad_G_b1 ** 2
m7 = beta_1 * m7 + (1 - beta_1) * grad_G_w2
v7 = beta_2 * v7 + (1 - beta_2) * grad_G_w2 ** 2
m8 = beta_1 * m8 + (1 - beta_1) * grad_G_b2
v8 = beta_2 * v8 + (1 - beta_2) * grad_G_b2 ** 2
m9 = beta_1 * m9 + (1 - beta_1) * grad_G_w3
v9 = beta_2 * v9 + (1 - beta_2) * grad_G_w3 ** 2
m10 = beta_1 * m10 + (1 - beta_1) * grad_G_b3
v10 = beta_2 * v10 + (1 - beta_2) * grad_G_b3 ** 2
m11 = beta_1 * m11 + (1 - beta_1) * grad_G_w4
v11 = beta_2 * v11 + (1 - beta_2) * grad_G_w4 ** 2
m12 = beta_1 * m12 + (1 - beta_1) * grad_G_b4
v12 = beta_2 * v12 + (1 - beta_2) * grad_G_b4 ** 2
m13 = beta_1 * m13 + (1 - beta_1) * grad_G_w5
v13 = beta_2 * v13 + (1 - beta_2) * grad_G_w5 ** 2
m14 = beta_1 * m14 + (1 - beta_1) * grad_G_b5
v14 = beta_2 * v14 + (1 - beta_2) * grad_G_b5 ** 2
m15 = beta_1 * m15 + (1 - beta_1) * grad_G_w6
v15 = beta_2 * v15 + (1 - beta_2) * grad_G_w6 ** 2
m16 = beta_1 * m16 + (1 - beta_1) * grad_G_b6
v16 = beta_2 * v16 + (1 - beta_2) * grad_G_b6 ** 2
m17 = beta_1 * m17 + (1 - beta_1) * grad_G_w7
v17 = beta_2 * v17 + (1 - beta_2) * grad_G_w7 ** 2
m18 = beta_1 * m18 + (1 - beta_1) * grad_G_b7
v18 = beta_2 * v18 + (1 - beta_2) * grad_G_b7 ** 2
G_W1 = G_W1 - (learing_rate / (np.sqrt(v5 / (1 - beta_2)) + eps)) * (
m5 / (1 - beta_1)
)
G_b1 = G_b1 - (learing_rate / (np.sqrt(v6 / (1 - beta_2)) + eps)) * (
m6 / (1 - beta_1)
)
G_W2 = G_W2 - (learing_rate / (np.sqrt(v7 / (1 - beta_2)) + eps)) * (
m7 / (1 - beta_1)
)
G_b2 = G_b2 - (learing_rate / (np.sqrt(v8 / (1 - beta_2)) + eps)) * (
m8 / (1 - beta_1)
)
G_W3 = G_W3 - (learing_rate / (np.sqrt(v9 / (1 - beta_2)) + eps)) * (
m9 / (1 - beta_1)
)
G_b3 = G_b3 - (learing_rate / (np.sqrt(v10 / (1 - beta_2)) + eps)) * (
m10 / (1 - beta_1)
)
G_W4 = G_W4 - (learing_rate / (np.sqrt(v11 / (1 - beta_2)) + eps)) * (
m11 / (1 - beta_1)
)
G_b4 = G_b4 - (learing_rate / (np.sqrt(v12 / (1 - beta_2)) + eps)) * (
m12 / (1 - beta_1)
)
G_W5 = G_W5 - (learing_rate / (np.sqrt(v13 / (1 - beta_2)) + eps)) * (
m13 / (1 - beta_1)
)
G_b5 = G_b5 - (learing_rate / (np.sqrt(v14 / (1 - beta_2)) + eps)) * (
m14 / (1 - beta_1)
)
G_W6 = G_W6 - (learing_rate / (np.sqrt(v15 / (1 - beta_2)) + eps)) * (
m15 / (1 - beta_1)
)
G_b6 = G_b6 - (learing_rate / (np.sqrt(v16 / (1 - beta_2)) + eps)) * (
m16 / (1 - beta_1)
)
G_W7 = G_W7 - (learing_rate / (np.sqrt(v17 / (1 - beta_2)) + eps)) * (
m17 / (1 - beta_1)
)
G_b7 = G_b7 - (learing_rate / (np.sqrt(v18 / (1 - beta_2)) + eps)) * (
m18 / (1 - beta_1)
)
# --- Print Error ----
# print("Current Iter: ",iter, " Current D cost:",D_cost, " Current G cost: ", G_cost,end='\r')
if iter == 0:
learing_rate = learing_rate * 0.01
if iter == 40:
learing_rate = learing_rate * 0.01
# ---- Print to Out put ----
if iter % 10 == 0:
print(
"Current Iter: ",
iter,
" Current D cost:",
D_cost,
" Current G cost: ",
G_cost,
end="\r",
)
print("--------- Show Example Result See Tab Above ----------")
print("--------- Wait for the image to load ---------")
Z = np.random.uniform(-1.0, 1.0, size=[16, G_input])
Gl1 = Z.dot(G_W1) + G_b1
Gl1A = arctan(Gl1)
Gl2 = Gl1A.dot(G_W2) + G_b2
Gl2A = ReLu(Gl2)
Gl3 = Gl2A.dot(G_W3) + G_b3
Gl3A = arctan(Gl3)
Gl4 = Gl3A.dot(G_W4) + G_b4
Gl4A = ReLu(Gl4)
Gl5 = Gl4A.dot(G_W5) + G_b5
Gl5A = tanh(Gl5)
Gl6 = Gl5A.dot(G_W6) + G_b6
Gl6A = ReLu(Gl6)
Gl7 = Gl6A.dot(G_W7) + G_b7
current_fake_data = log(Gl7)
fig = plot(current_fake_data)
fig.savefig(
"Click_Me_{}.png".format(
str(iter).zfill(3)
+ "_Ginput_"
+ str(G_input)
+ "_hiddenone"
+ str(hidden_input)
+ "_hiddentwo"
+ str(hidden_input2)
+ "_LR_"
+ str(learing_rate)
),
bbox_inches="tight",
)
# for complete explanation visit https://towardsdatascience.com/only-numpy-implementing-gan-general-adversarial-networks-and-adam-optimizer-using-numpy-with-2a7e4e032021
# -- end code --
| 31.597633 | 173 | 0.527278 |
3e2bbd0b61fae259eacb5d804224ea267d56aa77 | 2,260 | py | Python | pylearn2/neuroimaging_utils/research/randomize_snps.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | 1 | 2017-10-29T06:18:35.000Z | 2017-10-29T06:18:35.000Z | pylearn2/neuroimaging_utils/research/randomize_snps.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | null | null | null | pylearn2/neuroimaging_utils/research/randomize_snps.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | null | null | null | """
.. todo::
WRITEME
"""
__authors__ = "Devon Hjelm"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Devon Hjelm"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano import tensor as T
from pylearn2.blocks import Block
from pylearn2.utils.rng import make_theano_rng
class RandomizeSNPs(Block):
"""
.. todo::
WRITEME
Parameters
----------
theano_rng : WRITEME
seed : WRITEME
input_space : WRITEME
"""
def __init__(self, theano_rng = None, seed=None,
input_space=None, corruption_prob=0.1):
super(RandomizeSNPs, self).__init__()
assert theano_rng is None or seed is None
theano_rng = make_theano_rng(theano_rng if theano_rng is not None else seed,
2012+11+22, which_method='binomial')
self.__dict__.update(locals())
del self.self
self.set_fn()
def set_fn(self):
"""
.. todo::
WRITEME
"""
inputs = T.matrix()
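        # With probability (1 - corruption_prob) the mask `a` is 1 and the
        # input is left unchanged; otherwise a shift `b` in {1, 2} is applied.
        # Inputs appear to be genotypes encoded as {0, 0.5, 1} ({0, 1, 2} / 2),
        # so ((2 * x + c) % 3) / 2 cycles a corrupted genotype to one of the
        # other two values while preserving the encoding.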
a = self.theano_rng.binomial(
size=(self.input_space.dim, ),
p=(1 - self.corruption_prob),
dtype=theano.config.floatX
)
b = self.theano_rng.binomial(
size=(self.input_space.dim, ),
p=0.5,
dtype=theano.config.floatX
) + 1
c = T.eq(a, 0) * b
self.fn = theano.function([inputs], ((2 * inputs + c) % 3 / 2.0))
def __call__(self, X):
return self.perform(X)
def set_input_space(self, space):
"""
.. todo::
WRITEME
"""
self.input_space = space
def get_input_space(self):
"""
.. todo::
WRITEME
"""
if self.input_space is not None:
return self.input_space
raise ValueError("No input space was specified for this Block (%s). "
"You can call set_input_space to correct that." % str(self))
def get_output_space(self):
"""
.. todo::
WRITEME
"""
return self.get_input_space()
| 23.789474 | 84 | 0.557522 |
9cb329d58ba682b36718b26aa33b7d052f527bfe | 267,910 | py | Python | src/config/api-server/vnc_cfg_api_server/tests/test_crud_basic.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 37 | 2020-09-21T10:42:26.000Z | 2022-01-09T10:16:40.000Z | src/config/api-server/vnc_cfg_api_server/tests/test_crud_basic.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server/tests/test_crud_basic.py | atsgen/tf-controller | 9321889cdd3d7108980cc88937b2e82956502cc5 | [
"Apache-2.0"
] | 21 | 2020-08-25T12:48:42.000Z | 2022-03-22T04:32:18.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import gevent
import os
import sys
import socket
import errno
import uuid
import logging
import random
import netaddr
import mock
import tempfile
import fixtures
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains, LessThan
from testtools import content, content_type, ExpectedException
import unittest
from flexmock import flexmock
import re
import json
import copy
from lxml import etree
import inspect
import requests
import bottle
import stevedore
import netaddr
import contextlib
from vnc_api.vnc_api import *
from cfgm_common import exceptions as vnc_exceptions
from netaddr import IPNetwork
import vnc_api.gen.vnc_api_test_gen
from vnc_api.gen.resource_test import *
import cfgm_common
from cfgm_common import vnc_plugin_base
from cfgm_common import vnc_cgitb
from cfgm_common import SGID_MIN_ALLOC
from cfgm_common import rest
from functools import reduce
vnc_cgitb.enable(format='text')
from cfgm_common.tests import cassandra_fake_impl
from cfgm_common.tests import test_common
from cfgm_common.tests.test_utils import FakeKombu
from cfgm_common.tests.test_utils import FakeExtensionManager
from cfgm_common.vnc_api_stats import log_api_stats
from . import test_case
from vnc_cfg_api_server.api_server import VncApiServer
from vnc_cfg_api_server.resources import GlobalSystemConfigServer
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TestFixtures(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestFixtures, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestFixtures, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_fixture_ref(self):
proj_fixt = self.useFixture(
ProjectTestFixtureGen(self._vnc_lib, project_name='admin'))
# 2 policies, 2 VNs associate and check
pol_1_fixt = self.useFixture(NetworkPolicyTestFixtureGen(
self._vnc_lib, network_policy_name='policy1111',
parent_fixt=proj_fixt))
pol_2_fixt = self.useFixture(NetworkPolicyTestFixtureGen(
self._vnc_lib, network_policy_name='policy2222',
parent_fixt=proj_fixt))
ref_tuple = [(pol_1_fixt._obj,
VirtualNetworkPolicyType(
sequence=SequenceType(major=0, minor=0)))]
ref_tuple2 = [(pol_2_fixt._obj,
VirtualNetworkPolicyType(
sequence=SequenceType(major=0, minor=0)))]
vn_blue = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vnblue',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple))
vn_red = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vnred',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple2))
policy_name = vn_blue.get_network_policys()[0].fixture()[0].name
self.assertThat(policy_name, Equals('policy1111'))
policy_name = vn_red.get_network_policys()[0].fixture()[0].name
self.assertThat(policy_name, Equals('policy2222'))
# ipam referring to virtual dns
vdns_data = VirtualDnsType(domain_name='abc.net', record_order='fixed',
default_ttl_seconds=360)
vdns_fixt = self.useFixture(
VirtualDnsTestFixtureGen(self._vnc_lib, virtual_DNS_name='vdns1',
virtual_DNS_data=vdns_data))
dns_method = "virtual-dns-server"
dns_server = IpamDnsAddressType(
virtual_dns_server_name=vdns_fixt.getObj().get_fq_name_str())
ipam_mgmt = IpamType(
ipam_dns_method=dns_method, ipam_dns_server=dns_server)
ipam_fixt = self.useFixture(
NetworkIpamTestFixtureGen(
self._vnc_lib, network_ipam_name='ipam1',
parent_fixt=proj_fixt,
network_ipam_mgmt=ipam_mgmt,
virtual_DNS_refs=[vdns_fixt.getObj()]))
# end test_fixture_ref
def test_fixture_reuse_policy(self):
proj_fixt = self.useFixture(
ProjectTestFixtureGen(self._vnc_lib, project_name='admin'))
pol_fixt = self.useFixture(NetworkPolicyTestFixtureGen(
self._vnc_lib, network_policy_name='policy1111',
parent_fixt=proj_fixt))
ref_tuple = [(pol_fixt._obj,
VirtualNetworkPolicyType(
sequence=SequenceType(major=0, minor=0)))]
vn1 = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vn1',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple))
vn2 = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vn2',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple))
vn3 = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vn3',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple))
vn4 = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vn4',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple))
vn5 = self.useFixture(
VirtualNetworkTestFixtureGen(
self._vnc_lib, virtual_network_name='vn5',
parent_fixt=proj_fixt, id_perms=IdPermsType(enable=True),
network_policy_ref_infos=ref_tuple))
npolicy_children = len(proj_fixt.getObj().get_network_policys())
self.assertThat(npolicy_children, Equals(1))
# end test_fixture_reuse_policy
# end class TestFixtures
class TestListUpdate(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestListUpdate, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestListUpdate, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_policy_create_w_rules(self):
proj_fixt = self.useFixture(ProjectTestFixtureGen(self._vnc_lib))
policy_obj = NetworkPolicy(
'test-policy-create-w-rules', proj_fixt.getObj())
np_rules = [
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action='pass'),
protocol='any',
src_addresses=
[AddressType(virtual_network='local')],
src_ports=[PortType(-1, -1)],
dst_addresses=[AddressType(virtual_network='any')],
dst_ports=[PortType(-1, -1)]),
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action='deny'),
protocol='any',
src_addresses=
[AddressType(virtual_network='local')],
src_ports=[PortType(-1, -1)],
dst_addresses=[AddressType(virtual_network='any')],
dst_ports=[PortType(-1, -1)]),
]
policy_obj.set_network_policy_entries(PolicyEntriesType(np_rules))
self._vnc_lib.network_policy_create(policy_obj)
# cleanup
self._vnc_lib.network_policy_delete(id=policy_obj.uuid)
# end test_policy_create_w_rules
def test_policy_create_wo_rules(self):
proj_fixt = self.useFixture(ProjectTestFixtureGen(self._vnc_lib))
policy_obj = NetworkPolicy(
'test-policy-create-wo-rules', proj_fixt.getObj())
self._vnc_lib.network_policy_create(policy_obj)
np_rules = [
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action='pass'),
protocol='any',
src_addresses=
[AddressType(virtual_network='local')],
src_ports=[PortType(1, 2)],
dst_addresses=[AddressType(virtual_network='any')],
dst_ports=[PortType(3, 4)]),
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action='deny'),
protocol='any',
src_addresses=
[AddressType(virtual_network='local')],
src_ports=[PortType(5, 6)],
dst_addresses=[AddressType(virtual_network='any')],
dst_ports=[PortType(7, 8)]),
]
policy_entries = PolicyEntriesType(np_rules)
policy_obj.set_network_policy_entries(policy_entries)
self._vnc_lib.network_policy_update(policy_obj)
policy_entries.policy_rule.append(
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action= 'deny'),
protocol='any',
src_addresses=
[AddressType(virtual_network='local')],
src_ports=[PortType(9, 10)],
dst_addresses=[AddressType(virtual_network='any')],
dst_ports=[PortType(11, 12)])
)
policy_obj.set_network_policy_entries(policy_entries)
self._vnc_lib.network_policy_update(policy_obj)
# cleanup
self._vnc_lib.network_policy_delete(id=policy_obj.uuid)
# end test_policy_create_wo_rules
def test_policy_create_w_sg_in_rules(self):
policy_obj = NetworkPolicy('test-policy-create-w-sg-in-rules')
np_rules = [
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action='pass'),
protocol='any',
src_addresses=
[AddressType(security_group='local')],
src_ports=[PortType(-1, -1)],
dst_addresses=[AddressType(security_group='any')],
dst_ports=[PortType(-1, -1)]),
PolicyRuleType(direction='<>',
action_list=ActionListType(simple_action='deny'),
protocol='any',
src_addresses=
[AddressType(virtual_network='local')],
src_ports=[PortType(-1, -1)],
dst_addresses=[AddressType(virtual_network='any')],
dst_ports=[PortType(-1, -1)]),
]
policy_obj.set_network_policy_entries(PolicyEntriesType(np_rules))
with ExpectedException(BadRequest) as e:
self._vnc_lib.network_policy_create(policy_obj)
# cleanup
self._vnc_lib.network_policy_delete(id=policy_obj.uuid)
# end test_policy_create_w_sg_in_rules
# end class TestListUpdate
class TestCrud(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestCrud, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCrud, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_create_using_lib_api(self):
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
self.assert_vnc_db_has_ident(vn_obj)
# end test_create_using_lib_api
def test_create_using_rest_api(self):
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
url = 'http://%s:%s/virtual-networks' %(listen_ip, listen_port)
vn_body = {
'virtual-network': {
'fq_name': ['default-domain',
'default-project',
'vn-%s' %(self.id())],
'parent_type': 'project',
}}
requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(vn_body))
# end test_create_using_rest_api
def test_user_defined_log_statistics_crud(self):
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc.add_user_defined_log_statistics(UserDefinedLogStat('Test01',
'.*[ab][0-9]s1.*'))
gsc.add_user_defined_log_statistics(UserDefinedLogStat('Test02',
'127.0.0.1'))
self._vnc_lib.global_system_config_update(gsc)
gsc_uuid = self._vnc_lib.global_system_configs_list()[
'global-system-configs'][0]['uuid']
gsc = self._vnc_lib.global_system_config_read(id=gsc_uuid)
tst_trgt = ('Test01', 'Test02')
self.assertTrue(reduce(lambda x, y: x and y, [p.name in tst_trgt for p in gsc.user_defined_log_statistics.statlist], True))
#end test_user_defined_log_statistics_crud
def test_user_defined_log_statistics_bad_add(self):
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc.add_user_defined_log_statistics(UserDefinedLogStat('Test01',
'.*[ab][0-9]s1.*'))
# bad regex
gsc.add_user_defined_log_statistics(UserDefinedLogStat('Test03',
'*foo'))
with ExpectedException(BadRequest) as e:
self._vnc_lib.global_system_config_update(gsc)
#end test_user_defined_log_statistics_bad_add
def test_user_defined_log_statistics_set(self):
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
sl = UserDefinedLogStatList()
sl.add_statlist(UserDefinedLogStat('Test01', '.*[ab][0-9]s1.*'))
sl.add_statlist(UserDefinedLogStat('Test02', '127.0.0.1'))
gsc.set_user_defined_log_statistics(sl)
self._vnc_lib.global_system_config_update(gsc)
gsc_uuid = self._vnc_lib.global_system_configs_list()[
'global-system-configs'][0]['uuid']
gsc = self._vnc_lib.global_system_config_read(id=gsc_uuid)
tst_trgt = ('Test01', 'Test02')
self.assertTrue(reduce(lambda x, y: x and y, [p.name in tst_trgt for p in gsc.user_defined_log_statistics.statlist], True))
#end test_user_defined_log_statistics_set
def test_user_defined_log_statistics_bad_set(self):
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
sl = UserDefinedLogStatList()
sl.add_statlist(UserDefinedLogStat('Test01', '.*[ab][0-9]s1.*'))
sl.add_statlist(UserDefinedLogStat('Test02', '127.0.0.1'))
sl.add_statlist(UserDefinedLogStat('Test03', '*127.0.0.1'))
gsc.set_user_defined_log_statistics(sl)
with ExpectedException(BadRequest) as e:
self._vnc_lib.global_system_config_update(gsc)
#end test_user_defined_log_statistics_bad_set
def test_vlan_tag_on_sub_intefaces(self):
vn = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn)
vmi_name = self.id() + '-main_port'
logger.info('Creating port %s', vmi_name)
main_port_obj = VirtualMachineInterface(vmi_name, parent_obj=Project())
main_port_obj.add_virtual_network(vn)
self._vnc_lib.virtual_machine_interface_create(main_port_obj)
id_perms = IdPermsType(enable=True)
vmi_prop = VirtualMachineInterfacePropertiesType(sub_interface_vlan_tag=256)
port_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop,
id_perms=id_perms)
port_obj.uuid = port_obj.name
port_obj.set_virtual_network(vn)
port_obj.set_virtual_machine_interface(main_port_obj)
#create port with sub_interface_vlan_tag specified
port_id = self._vnc_lib.virtual_machine_interface_create(port_obj)
vmi_prop.sub_interface_vlan_tag = 128
port_obj.set_virtual_machine_interface_properties(vmi_prop)
#updating sub_interface_vlan_tag of the port to a new value should fail
#as vrouter doesn't support it.
with ExpectedException(BadRequest) as e:
self._vnc_lib.virtual_machine_interface_update(port_obj)
# end test_vlan_tag_on_sub_interfaces
def test_service_interface_type_value(self):
vn = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn)
vmi_prop = VirtualMachineInterfacePropertiesType(service_interface_type='Left')
port_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
port_obj.uuid = port_obj.name
port_obj.set_virtual_network(vn)
#creation of port should fail as the valid values for
#service_interface_type are: management|left|right|other[0-9]*
with ExpectedException(BadRequest) as e:
port_id = self._vnc_lib.virtual_machine_interface_create(port_obj)
# end test_service_interface_type_value
def test_physical_router_credentials(self):
phy_rout_name = self.id() + '-phy-router-1'
user_cred_create = UserCredentials(username="test_user", password="test_pswd")
phy_rout = PhysicalRouter(phy_rout_name, physical_router_user_credentials=user_cred_create)
phy_rout.uuid = '123e4567-e89b-12d3-a456-426655440000'
self._vnc_lib.physical_router_create(phy_rout)
phy_rout_obj = self._vnc_lib.physical_router_read(id=phy_rout.uuid)
user_cred_read = phy_rout_obj.get_physical_router_user_credentials()
self.assertIsNotNone(user_cred_read.password)
self.assertEqual(user_cred_read.password, 'ngVv1S3pB+rM2SWMnm6XpQ==')
# Verify update of physical router does not update password
# unless physical_router_encryption_type is set to 'none'
phy_rout_obj.set_physical_router_user_credentials(user_cred_read)
self._vnc_lib.physical_router_update(phy_rout_obj)
phy_rout_obj = self._vnc_lib.physical_router_read(id=phy_rout.uuid)
user_cred_read = phy_rout_obj.get_physical_router_user_credentials()
self.assertIsNotNone(user_cred_read.password)
self.assertEqual(user_cred_read.password, 'ngVv1S3pB+rM2SWMnm6XpQ==')
# Update the user password in Physical Router with
# physical_router_encryption_type set to 'none'
user_cred_create = UserCredentials(username="test_user", password="test_new_pswd")
phy_rout_obj.set_physical_router_user_credentials(user_cred_create)
phy_rout_obj.set_physical_router_encryption_type('none')
self._vnc_lib.physical_router_update(phy_rout_obj)
phy_rout_obj = self._vnc_lib.physical_router_read(id=phy_rout.uuid)
user_cred_read = phy_rout_obj.get_physical_router_user_credentials()
self.assertIsNotNone(user_cred_read.password)
self.assertNotEqual(user_cred_read.password, 'ngVv1S3pB+rM2SWMnm6XpQ==')
# end test_physical_router_credentials
def test_physical_router_w_no_user_credentials(self):
phy_rout_name = self.id() + '-phy-router-2'
phy_router = PhysicalRouter(phy_rout_name)
self._vnc_lib.physical_router_create(phy_router)
# reading Physical Router object when user credentials
# are set to None should be successfull.
phy_rout_obj = self._vnc_lib.physical_router_read(id=phy_router.uuid)
phy_rout3_name = self.id() + '-phy-router-3'
phy_router3 = PhysicalRouter(phy_rout3_name)
self._vnc_lib.physical_router_create(phy_router3)
phy_router3.set_physical_router_user_credentials(None)
self._vnc_lib.physical_router_update(phy_router3)
# reading Physical Router object when user credentials
# are update to None should be successfull.
phy_rout_obj = self._vnc_lib.physical_router_read(id=phy_router3.uuid)
# end test_physical_router_w_no_user_credentials
def test_bridge_domain_with_multiple_bd_in_vn(self):
vn1_name = self.id() + '-vn-1'
vn1 = VirtualNetwork(vn1_name)
logger.info('Creating VN %s', vn1_name)
self._vnc_lib.virtual_network_create(vn1)
vmi_name = self.id() + '-port'
logger.info('Creating port %s', vmi_name)
vmi = VirtualMachineInterface(vmi_name, parent_obj=Project())
vmi.add_virtual_network(vn1)
self._vnc_lib.virtual_machine_interface_create(vmi)
bd1_name = self.id() + '-bd-1'
bd1 = BridgeDomain(bd1_name, parent_obj=vn1)
bd1.set_isid(200200)
logger.info('Creating Bridge Domain %s', bd1_name)
self._vnc_lib.bridge_domain_create(bd1)
bd2_name = self.id() + '-bd-2'
bd2 = BridgeDomain(bd2_name, parent_obj=vn1)
bd2.set_isid(300300)
logger.info('Creating Bridge Domain %s', bd2_name)
with ExpectedException(BadRequest) as e:
self._vnc_lib.bridge_domain_create(bd2)
# end test_bridge_domain_with_multiple_bd_in_vn
def test_bridge_domain_link_vmi_and_bd_in_different_vn(self):
vn1_name = self.id() + '-vn-1'
vn1 = VirtualNetwork(vn1_name)
logger.info('Creating VN %s', vn1_name)
self._vnc_lib.virtual_network_create(vn1)
vn2_name = self.id() + '-vn-2'
vn2 = VirtualNetwork(vn2_name)
logger.info('Creating VN %s', vn2_name)
self._vnc_lib.virtual_network_create(vn2)
vmi1_name = self.id() + '-port-1'
logger.info('Creating port %s', vmi1_name)
vmi1 = VirtualMachineInterface(vmi1_name, parent_obj=Project())
vmi1.add_virtual_network(vn1)
self._vnc_lib.virtual_machine_interface_create(vmi1)
vmi2_name = self.id() + '-port-2'
logger.info('Creating port %s', vmi2_name)
vmi2 = VirtualMachineInterface(vmi2_name, parent_obj=Project())
vmi2.add_virtual_network(vn2)
self._vnc_lib.virtual_machine_interface_create(vmi2)
bd1_name = self.id() + '-bd-1'
bd1 = BridgeDomain(bd1_name, parent_obj=vn1)
bd1.set_isid(200200)
logger.info('Creating Bridge Domain %s', bd1_name)
self._vnc_lib.bridge_domain_create(bd1)
bd_ref_data1 = BridgeDomainMembershipType(vlan_tag=0)
vmi2.add_bridge_domain(bd1, bd_ref_data1)
with ExpectedException(BadRequest) as e:
self._vnc_lib.virtual_machine_interface_update(vmi2)
bd_ref_data2 = BridgeDomainMembershipType(vlan_tag=0)
vmi1.add_bridge_domain(bd1, bd_ref_data2)
self._vnc_lib.virtual_machine_interface_update(vmi1)
# end test_bridge_domain_link_vmi_and_bd_in_different_vn
def test_bridge_domain_delete_vn_ref_with_bd_link(self):
vn1_name = self.id() + '-vn-1'
vn1 = VirtualNetwork(vn1_name)
logger.info('Creating VN %s', vn1_name)
self._vnc_lib.virtual_network_create(vn1)
vmi_name = self.id() + '-port'
logger.info('Creating port %s', vmi_name)
vmi = VirtualMachineInterface(vmi_name, parent_obj=Project())
vmi.add_virtual_network(vn1)
self._vnc_lib.virtual_machine_interface_create(vmi)
bd1_name = self.id() + '-bd-1'
bd1 = BridgeDomain(bd1_name, parent_obj=vn1)
bd1.set_isid(200200)
logger.info('Creating Bridge Domain %s', bd1_name)
self._vnc_lib.bridge_domain_create(bd1)
bd_ref_data = BridgeDomainMembershipType(vlan_tag=0)
vmi.add_bridge_domain(bd1, bd_ref_data)
self._vnc_lib.virtual_machine_interface_update(vmi)
# Try to delete the VN link with BD ref
vmi_temp = copy.deepcopy(vmi)
vmi_temp.del_virtual_network(vn1)
with ExpectedException(BadRequest) as e:
self._vnc_lib.virtual_machine_interface_update(vmi_temp)
# Delete the BD ref
vmi.del_bridge_domain(bd1)
self._vnc_lib.virtual_machine_interface_update(vmi)
vmi.del_virtual_network(vn1)
self._vnc_lib.virtual_machine_interface_update(vmi)
# end test_bridge_domain_with_multiple_bd_in_vn
def test_vmi_with_end_to_end_shc(self):
project = Project()
vn = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn)
vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=project)
vmi_obj.uuid = vmi_obj.name
vmi_obj.set_virtual_network(vn)
vmi_id = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
shc_props = ServiceHealthCheckType()
shc_props.enabled = True
shc_props.health_check_type = 'end-to-end'
shc_obj = ServiceHealthCheck(str(uuid.uuid4()), parent_obj=project,
service_health_check_properties=shc_props)
shc_id = self._vnc_lib.service_health_check_create(shc_obj)
with ExpectedException(BadRequest) as e:
self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
'service-health-check', shc_id, None, 'ADD')
def test_sub_interfaces_with_same_vlan_tags(self):
vn = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn)
vmi_prop = VirtualMachineInterfacePropertiesType(sub_interface_vlan_tag=256)
vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project())
vmi_obj.uuid = vmi_obj.name
vmi_obj.set_virtual_network(vn)
vmi_id = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
sub_vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
sub_vmi_obj.uuid = sub_vmi_obj.name
sub_vmi_obj.set_virtual_network(vn)
sub_vmi_obj.set_virtual_machine_interface(vmi_obj)
sub_vmi_id = self._vnc_lib.virtual_machine_interface_create(sub_vmi_obj)
sub_vmi_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
sub_vmi_obj2.uuid = sub_vmi_obj2.name
sub_vmi_obj2.set_virtual_network(vn)
sub_vmi_obj2.set_virtual_machine_interface(vmi_obj)
        # creating two sub interfaces with the same vlan_tag
        # under the same primary port should give an error
with ExpectedException(BadRequest) as e:
sub_vmi2_id = self._vnc_lib.virtual_machine_interface_create(sub_vmi_obj2)
# end test_sub_interfaces_with_same_vlan_tags
def test_create_sub_vmi_with_primary_vmi_as_another_sub_vmi(self):
vn = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn)
vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project())
vmi_obj.uuid = vmi_obj.name
vmi_obj.set_virtual_network(vn)
vmi_id = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmi_prop = VirtualMachineInterfacePropertiesType(sub_interface_vlan_tag=128)
sub_vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
sub_vmi_obj.uuid = sub_vmi_obj.name
sub_vmi_obj.set_virtual_network(vn)
sub_vmi_obj.set_virtual_machine_interface(vmi_obj)
sub_vmi_id = self._vnc_lib.virtual_machine_interface_create(sub_vmi_obj)
sub_vmi_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
sub_vmi_obj2.uuid = sub_vmi_obj2.name
sub_vmi_obj2.set_virtual_network(vn)
        # set its vmi ref (primary port) to another sub interface
sub_vmi_obj2.set_virtual_machine_interface(sub_vmi_obj)
        # creating a sub interface with its primary port as
# another sub interface should give an error
with ExpectedException(BadRequest) as e:
sub_vmi2_id = self._vnc_lib.virtual_machine_interface_create(sub_vmi_obj2)
# end test_create_sub_vmi_with_primary_vmi_as_another_sub_vmi
def test_sub_interfaces_on_diff_vns_with_same_vlan_tags(self):
vn1 = VirtualNetwork('vn1-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn1)
vn2 = VirtualNetwork('vn2-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn2)
vmi_prop = VirtualMachineInterfacePropertiesType(sub_interface_vlan_tag=256)
vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project())
vmi_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project())
vmi_obj.uuid = vmi_obj.name
vmi_obj.set_virtual_network(vn1)
vmi_id = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmi_obj2.uuid = vmi_obj2.name
vmi_obj2.set_virtual_network(vn2)
vmi_id2 = self._vnc_lib.virtual_machine_interface_create(vmi_obj2)
sub_vmi_obj = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
sub_vmi_obj.uuid = sub_vmi_obj.name
sub_vmi_obj.set_virtual_network(vn1)
sub_vmi_obj.set_virtual_machine_interface(vmi_obj)
sub_vmi_id = self._vnc_lib.virtual_machine_interface_create(sub_vmi_obj)
sub_vmi_obj2 = VirtualMachineInterface(
str(uuid.uuid4()), parent_obj=Project(),
virtual_machine_interface_properties=vmi_prop)
sub_vmi_obj2.uuid = sub_vmi_obj2.name
sub_vmi_obj2.set_virtual_network(vn2)
sub_vmi_obj2.set_virtual_machine_interface(vmi_obj2)
        # creating two sub interfaces with the same vlan_tag
        # on different VNs should succeed
sub_vmi2_id = self._vnc_lib.virtual_machine_interface_create(sub_vmi_obj2)
# end test_sub_interfaces_on_diff_vns_with_same_vlan_tags
def test_physical_router_credentials_list(self):
phy_rout_name = self.id() + '-phy-router-1'
phy_rout_name_2 = self.id() + '-phy-router-2'
phy_rout_name_3 = self.id() + '-phy-router-3'
phy_rout_name_4 = self.id() + '-phy-router-4'
phy_rout_name_5 = self.id() + '-phy-router-5'
user_cred_create = UserCredentials(username="test_user",
password="test_pswd")
user_cred_create_2 = UserCredentials(username="test_user_2",
password="test_pswd_2")
# Test the password that's more than 16 bytes
user_cred_create_3 = UserCredentials(username="test_user_3",
password="01234567890123456789")
# Test the password that's more than 32 bytes
user_cred_create_4 = UserCredentials(username="test_user_4",
password="ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
# Test the password that is already encrypted
user_cred_create_5 = UserCredentials(username="test_user_5",
password="waldIpPkKKud0y0Z6AN4Tg8x7q5JOktwkVCPPRuIC2w=")
phy_rout = PhysicalRouter(phy_rout_name,
physical_router_user_credentials=user_cred_create)
phy_rout.uuid = '123e4567-e89b-12d3-a456-426655440001'
self._vnc_lib.physical_router_create(phy_rout)
phy_rout_2 = PhysicalRouter(phy_rout_name_2,
physical_router_user_credentials=user_cred_create_2)
phy_rout_2.uuid = '123e4567-e89b-12d3-a456-426655440002'
self._vnc_lib.physical_router_create(phy_rout_2)
phy_rout_3 = PhysicalRouter(phy_rout_name_3,
physical_router_user_credentials=user_cred_create_3)
phy_rout_3.uuid = '123e4567-e89b-12d3-a456-426655440003'
self._vnc_lib.physical_router_create(phy_rout_3)
phy_rout_4 = PhysicalRouter(phy_rout_name_4,
physical_router_user_credentials=user_cred_create_4)
phy_rout_4.uuid = '123e4567-e89b-12d3-a456-426655440004'
self._vnc_lib.physical_router_create(phy_rout_4)
phy_rout_5 = PhysicalRouter(phy_rout_name_5,
physical_router_user_credentials=user_cred_create_5,
physical_router_encryption_type='local')
phy_rout_5.uuid = '123e4567-e89b-12d3-a456-426655440005'
self._vnc_lib.physical_router_create(phy_rout_5)
obj_uuids = []
obj_uuids.append(phy_rout.uuid)
obj_uuids.append(phy_rout_2.uuid)
obj_uuids.append(phy_rout_3.uuid)
obj_uuids.append(phy_rout_4.uuid)
obj_uuids.append(phy_rout_5.uuid)
phy_rtr_list = self._vnc_lib.physical_routers_list(obj_uuids=obj_uuids,
detail=True)
for rtr in phy_rtr_list:
user_cred_read = rtr.get_physical_router_user_credentials()
if user_cred_read.username == 'test_user':
self.assertEqual(user_cred_read.password, 'TtF53zhTfh1DQ66R2h5+Fg==')
if user_cred_read.username == 'test_user_2':
self.assertEqual(user_cred_read.password, '+sasYAEDEZd+Nn3X1ojFUw==')
if user_cred_read.username == 'test_user_3':
self.assertEqual(user_cred_read.password,
'waldIpPkKKud0y0Z6AN4Tg8x7q5JOktwkVCPPRuIC2w=')
if user_cred_read.username == 'test_user_4':
self.assertEqual(user_cred_read.password,
'd6jW0qMEBKSlUILBnetOdRIjTZGnK76OQ2R5jQgPxly0r+UNSfEqEh5DPqBL58td')
if user_cred_read.username == 'test_user_5':
self.assertEqual(user_cred_read.password,
'waldIpPkKKud0y0Z6AN4Tg8x7q5JOktwkVCPPRuIC2w=')
# end test_physical_router_credentials
def test_allowed_address_pair_prefix_len(self):
ip_addresses = {'10.10.10.1': 23,
'10.10.10.2': 24,
'10.10.10.3': 25,
'fe80:0:0:0:0:0:a0a:a0a': 119,
'fe80:0:0:0:0:0:a0a:a0b': 120,
'fe80:0:0:0:0:0:a0a:a0c': 121,
}
proj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
vn = VirtualNetwork()
for ip_address, prefix in list(ip_addresses.items()):
ip_family = netaddr.IPNetwork(ip_address).version
vmi = VirtualMachineInterface('vmi-%s-' % prefix +self.id(), parent_obj=proj)
print('Validating with ip (%s) and prefix (%s)' % (ip_address, prefix))
aap = AllowedAddressPair(ip=SubnetType(ip_address, prefix), address_mode='active-standby')
aaps = AllowedAddressPairs()
aaps.allowed_address_pair.append(aap)
vmi.set_virtual_machine_interface_allowed_address_pairs(aaps)
vmi.add_virtual_network(vn)
try:
self._vnc_lib.virtual_machine_interface_create(vmi)
if ip_family == 4 and prefix < 24:
raise RuntimeError('Prefix of length < 24 should have been rejected')
if ip_family == 6 and prefix < 120:
raise RuntimeError('Prefix of length < 120 should have been rejected')
except cfgm_common.exceptions.BadRequest:
if ip_family == 4 and prefix >= 24:
print('ERROR: Prefix >= 24 should be accepted')
raise
if ip_family == 6 and prefix >= 120:
print('ERROR: Prefix >= 120 should be accepted')
raise
finally:
if ip_family == 4 and prefix >= 24:
vmi.del_virtual_machine_interface(vmi)
if ip_family == 6 and prefix >= 120:
vmi.del_virtual_machine_interface(vmi)
# end test_allowed_address_pair_prefix_len
def test_show_allowed_address_pair_with_leading_spaces(self):
""" This test case compares AAP addresses present in DB and API-server
based on leading white spaces and throws an exception if there is
a mismatch
JIRA TICKET:CEM-14035
"""
proj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
vn = VirtualNetwork()
ip_address = '10.10.10.1'
expected_address = ' 10.10.10.1'
prefix = 24
vmi = VirtualMachineInterface('vmi-%s-' % prefix +self.id(), parent_obj=proj)
print('Validating with ip (%s) and prefix (%s)' % (ip_address, prefix))
aap = AllowedAddressPair(ip=SubnetType(ip_address, prefix), address_mode='active-standby')
aaps = AllowedAddressPairs()
aaps.allowed_address_pair.append(aap)
vmi.set_virtual_machine_interface_allowed_address_pairs(aaps)
vmi.add_virtual_network(vn)
self._vnc_lib.virtual_machine_interface_create(vmi)
# read vmi
ok, vmi_list = self._api_server._db_conn._object_db.object_read(
'virtual-machine-interface', [vmi.uuid])
vmi_dict = vmi_list[0]
# manipulate AAP of the VMI with space in the DB
vmi_aap = vmi_dict['virtual_machine_interface_allowed_address_pairs']
vmi_aap['allowed_address_pair'][0]['ip']['ip_prefix'] = (
expected_address)
self._api_server._db_conn._object_db.object_update(
'virtual-machine-interface', vmi.uuid, vmi_dict)
# reading at DB to ensure DB update was successful
ok, vmi_list2 = self._api_server._db_conn._object_db.object_read(
'virtual-machine-interface', [vmi.uuid])
vmi_dict2 = vmi_list2[0]
vmi_aap2 = vmi_dict2['virtual_machine_interface_allowed_address_pairs']
assert vmi_aap2['allowed_address_pair'][0]['ip']['ip_prefix'] == (
expected_address)
# reading at API-server to ensure read is successful
vmiobj_re = self._vnc_lib.virtual_machine_interface_read(id=vmi.uuid)
aap_read = vmiobj_re.virtual_machine_interface_allowed_address_pairs
api_aap_ip_prefix = aap_read.allowed_address_pair[0].ip.ip_prefix
assert api_aap_ip_prefix == expected_address, \
("AAP IP prefix read from Api server (%s) "
"do not match expected (%s)" % (
api_aap_ip_prefix, expected_address))
# end test_show_allowed_address_pair_with_leading_spaces
def test_allowed_address_pair_with_leading_spaces(self):
""" This test case checks for leading white spaces in the IP address
and throws an exception if present
JIRA TICKET:CEM-14035
"""
ip_addresses = {'10.10.10.1': 24,
' 10.10.10.2': 24,
'0:0:0:0:0:ffff:1414:1400': 120,
' fe80:0:0:0:0:0:a0a:a0c': 120,
}
proj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
vn = VirtualNetwork()
for ip_address, prefix in list(ip_addresses.items()):
vmi = VirtualMachineInterface('vmi-%s-' % prefix +self.id(), parent_obj=proj)
print('Validating with ip (%s) and prefix (%s)' % (ip_address, prefix))
aap = AllowedAddressPair(ip=SubnetType(ip_address, prefix), address_mode='active-standby')
aaps = AllowedAddressPairs()
aaps.allowed_address_pair.append(aap)
vmi.set_virtual_machine_interface_allowed_address_pairs(aaps)
vmi.add_virtual_network(vn)
if ip_address == ip_address.strip():
self._vnc_lib.virtual_machine_interface_create(vmi)
vmi.del_virtual_machine_interface(vmi)
else:
with ExpectedException(BadRequest) as e:
self._vnc_lib.virtual_machine_interface_create(vmi)
# end test_allowed_address_pair_with_leading_spaces
def test_bgpaas_ports_shrunk(self):
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
bgpaas_param = BGPaaServiceParametersType('2','500')
gsc.set_bgpaas_parameters(bgpaas_param)
self._vnc_lib.global_system_config_update(gsc)
gsc.set_bgpaas_parameters(BGPaaServiceParametersType('4','100'))
        # port range should be allowed to shrink
# as no bgpaas obj. is configured
self._vnc_lib.global_system_config_update(gsc)
bgpaas = BgpAsAService('bgpaas-%s' % self.id())
self._vnc_lib.bgp_as_a_service_create(bgpaas)
gsc.set_bgpaas_parameters(BGPaaServiceParametersType('10','50'))
        # port range should not be allowed to shrink
with ExpectedException(BadRequest) as e:
self._vnc_lib.global_system_config_update(gsc)
# end test_bgpaas_ports_shrunk
def test_invalid_parent_type(self):
vn = VirtualNetwork(self.id())
vn.fq_name = [vn.name]
with ExpectedException(BadRequest):
self._vnc_lib.virtual_network_create(vn)
vn = VirtualNetwork(self.id())
vn.parent_type='network_policy'
with ExpectedException(BadRequest):
self._vnc_lib.virtual_network_create(vn)
# end test_invalid_parent_type
def test_routing_policy_create_w_asn_of_cluster_asn_negative(self):
rp_name = self.id() + 'rp1'
gsc = self._vnc_lib.global_system_config_read(GlobalSystemConfig().fq_name)
asn = gsc.autonomous_system
rp_entry = PolicyStatementType(term=[PolicyTermType(
term_action_list=TermActionListType(
update=ActionUpdateType(
as_path=ActionAsPathType(
expand=AsListType(asn_list=[asn])))))])
rp = RoutingPolicy(rp_name, routing_policy_entries=rp_entry)
with ExpectedException(BadRequest):
self._vnc_lib.routing_policy_create(rp)
# end test_routing_policy_create_w_asn_of_cluster_asn_negative
# end class TestCrud
class TestVncCfgApiServer(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestVncCfgApiServer, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestVncCfgApiServer, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_fq_name_to_id_http_post(self):
test_obj = self._create_test_object()
test_uuid = self._vnc_lib.fq_name_to_id('virtual-network', test_obj.get_fq_name())
# check that format is correct
try:
uuid.UUID(test_uuid)
except ValueError:
self.assertTrue(False, 'Bad form UUID ' + test_uuid)
with ExpectedException(NoIdError) as e:
test_uuid = self._vnc_lib.fq_name_to_id('project', test_obj.get_fq_name())
def test_id_to_fq_name_http_post(self):
test_obj = self._create_test_object()
fq_name = self._vnc_lib.id_to_fq_name(test_obj.uuid)
self.assertEqual(test_obj.fq_name, fq_name)
with ExpectedException(NoIdError) as e:
self._vnc_lib.id_to_fq_name(str(uuid.uuid4()))
def test_useragent_kv_http_post(self):
# unikey store
test_body = json.dumps({'operation': 'STORE',
'key': 'fookey',
'value': 'fooval'})
self.addDetail('useragent-kv-post-store', content.json_content(test_body))
(code, msg) = self._http_post('/useragent-kv', test_body)
self.assertEqual(code, 200)
# unikey retrieve
test_body = json.dumps({'operation': 'RETRIEVE',
'key': 'fookey'})
self.addDetail('useragent-kv-post-retrieve', content.json_content(test_body))
(code, msg) = self._http_post('/useragent-kv', test_body)
self.assertEqual(code, 200)
self.assertEqual(json.loads(msg)['value'], 'fooval')
# multikey retrieve
test_body = json.dumps({'operation': 'STORE',
'key': 'barkey',
'value': 'barval'})
self.addDetail('useragent-kv-post-store', content.json_content(test_body))
(code, msg) = self._http_post('/useragent-kv', test_body)
self.assertEqual(code, 200)
test_body = json.dumps({'operation': 'RETRIEVE',
'key': ['fookey', 'barkey']})
self.addDetail('useragent-kv-post-multikey-retrieve',
content.json_content(test_body))
(code, msg) = self._http_post('/useragent-kv', test_body)
self.assertEqual(code, 200)
self.assertEqual(len(json.loads(msg)['value']), 2)
self.assertThat(json.loads(msg)['value'], Contains('fooval'))
self.assertThat(json.loads(msg)['value'], Contains('barval'))
# wrong op test
test_body = json.dumps({'operation': 'foo',
'key': 'fookey'})
self.addDetail('useragent-kv-post-wrongop', content.json_content(test_body))
(code, msg) = self._http_post('/useragent-kv', test_body)
self.assertEqual(code, 404)
def test_err_on_max_rabbit_pending(self):
self.ignore_err_in_log = True
api_server = self._server_info['api_server']
orig_max_pending_updates = api_server._args.rabbit_max_pending_updates
max_pend_upd = 10
api_server._args.rabbit_max_pending_updates = str(max_pend_upd)
orig_rabbitq_pub = api_server._db_conn._msgbus._producer.publish
orig_rabbitq_conn_drain = api_server._db_conn._msgbus._conn_drain.connect
orig_rabbitq_conn_publish = api_server._db_conn._msgbus._conn_publish.connect
try:
def err_rabbitq_pub(*args, **kwargs):
raise Exception("Faking Rabbit publish failure")
def err_rabbitq_conn(*args, **kwargs):
gevent.sleep(0.1)
raise Exception("Faking RabbitMQ connection failure")
api_server._db_conn._msgbus._producer.publish = err_rabbitq_pub
api_server._db_conn._msgbus._conn_publish.connect = err_rabbitq_conn
logger.info("Creating objects to hit max rabbit pending.")
            # every VN create also creates an RI
test_objs = self._create_test_objects(count=old_div(max_pend_upd,2)+1)
def asserts_on_max_pending():
self.assertEqual(e.status_code, 500)
self.assertIn("Too many pending updates", e.content)
logger.info("Creating one more object expecting failure.")
obj = VirtualNetwork('vn-to-fail')
self.addDetail('expecting-failed-create', content.text_content(obj.name))
try:
self._vnc_lib.virtual_network_create(obj)
except HttpError as e:
asserts_on_max_pending()
else:
self.assertTrue(False, 'Create succeeded unexpectedly')
logger.info("Update of object should fail.")
test_objs[0].display_name = 'foo'
try:
self._vnc_lib.virtual_network_update(test_objs[0])
except HttpError as e:
asserts_on_max_pending()
else:
self.assertTrue(False, 'Update succeeded unexpectedly')
logger.info("Delete of object should fail.")
test_objs[0].display_name = 'foo'
try:
self._vnc_lib.virtual_network_delete(id=test_objs[0].uuid)
except HttpError as e:
asserts_on_max_pending()
else:
self.assertTrue(False, 'Delete succeeded unexpectedly')
logger.info("Read obj object should be ok.")
self._vnc_lib.virtual_network_read(id=test_objs[0].uuid)
finally:
api_server._args.rabbit_max_pending_updates = orig_max_pending_updates
api_server._db_conn._msgbus._producer.publish = orig_rabbitq_pub
api_server._db_conn._msgbus._conn_drain.connect = orig_rabbitq_conn_drain
api_server._db_conn._msgbus._conn_publish.connect = orig_rabbitq_conn_publish
def test_reconnect_to_rabbit(self):
self.ignore_err_in_log = True
exceptions = [(FakeKombu.Connection.ConnectionException(), 'conn'),
(FakeKombu.Connection.ChannelException(), 'chan'),
(Exception(), 'generic')]
# fake problem on publish to rabbit
# restore, ensure retry and successful publish
for exc_obj, exc_type in exceptions:
obj = VirtualNetwork('%s-pub-%s' %(self.id(), exc_type))
obj.uuid = str(uuid.uuid4())
publish_captured = [False]
def err_on_publish(orig_method, *args, **kwargs):
msg = args[0]
if msg['oper'] == 'CREATE' and msg['uuid'] == obj.uuid:
publish_captured[0] = True
raise exc_obj
return orig_method(*args, **kwargs)
rabbit_producer = self._api_server._db_conn._msgbus._producer
with test_common.patch(rabbit_producer,
'publish', err_on_publish):
self._vnc_lib.virtual_network_create(obj)
self.assertTill(lambda: publish_captured[0] == True)
# unpatch err publish
self.assert_vnc_db_has_ident(obj)
# end exception types on publish
# fake problem on consume from rabbit
# restore, ensure retry and successful consume
for exc_obj, exc_type in exceptions:
obj = VirtualNetwork('%s-sub-%s' %(self.id(), exc_type))
obj.uuid = str(uuid.uuid4())
consume_captured = [False]
consume_test_payload = [None]
rabbit_consumer = self._api_server._db_conn._msgbus._consumer
def err_on_consume(orig_method, *args, **kwargs):
msg = orig_method()
payload = msg.payload
if payload['oper'] == 'UPDATE' and payload['uuid'] == obj.uuid:
if (consume_test_payload[0] == payload):
return msg
consume_captured[0] = True
consume_test_payload[0] = payload
rabbit_consumer.queue.put(payload, None)
raise exc_obj
return msg
with test_common.patch(rabbit_consumer.queue,
'get', err_on_consume):
# create the object to insert 'get' handler,
# update oper will test the error handling
self._vnc_lib.virtual_network_create(obj)
obj.display_name = 'test_update'
self._vnc_lib.virtual_network_update(obj)
self.assertTill(lambda: consume_captured[0] == True)
# unpatch err consume
self.assertTill(self.vnc_db_ident_has_prop, obj=obj,
prop_name='display_name', prop_value='test_update')
# end exception types on consume
# fake problem on consume and publish at same time
# restore, ensure retry and successful publish + consume
obj = VirtualNetwork('%s-pub-sub' %(self.id()))
obj.uuid = str(uuid.uuid4())
msgbus = self._api_server._db_conn._msgbus
pub_greenlet = msgbus._publisher_greenlet
sub_greenlet = msgbus._connection_monitor_greenlet
setattr(pub_greenlet, 'unittest', {'name': 'producer'})
setattr(sub_greenlet, 'unittest', {'name': 'consumer'})
consume_captured = [False]
consume_test_payload = [None]
publish_connect_done = [False]
publish_captured = [False]
def err_on_consume(orig_method, *args, **kwargs):
msg = orig_method()
payload = msg.payload
if payload['oper'] == 'UPDATE' and payload['uuid'] == obj.uuid:
if (consume_test_payload[0] == payload):
return msg
consume_captured[0] = True
consume_test_payload[0] = payload
rabbit_consumer = self._api_server._db_conn._msgbus._consumer
rabbit_consumer.queue.put(payload, None)
raise exc_obj
return msg
def block_on_connect(orig_method, *args, **kwargs):
# block consumer till publisher does update,
# fake consumer connect exceptions till publisher connects fine
utvars = getattr(gevent.getcurrent(), 'unittest', None)
if utvars and utvars['name'] == 'producer':
publish_connect_done[0] = True
return orig_method(*args, **kwargs)
while not publish_captured[0]:
gevent.sleep(0.1)
while not publish_connect_done[0]:
gevent.sleep(0.1)
raise Exception('Faking connection fail')
return orig_method(*args, **kwargs)
rabbit_consumer = self._api_server._db_conn._msgbus._consumer
rabbit_conn = self._api_server._db_conn._msgbus._conn_drain
with test_common.patch(rabbit_consumer.queue,
'get', err_on_consume):
with test_common.patch(rabbit_conn,
'connect', block_on_connect):
# create the object to insert 'get' handler,
# update oper will test the error handling
self._vnc_lib.virtual_network_create(obj)
obj.display_name = 'test_update_1'
self._vnc_lib.virtual_network_update(obj)
self.assertTill(lambda: consume_captured[0] == True)
def err_on_publish(orig_method, *args, **kwargs):
msg = args[0]
if msg['oper'] == 'UPDATE' and msg['uuid'] == obj.uuid:
publish_captured[0] = True
raise exc_obj
return orig_method(*args, **kwargs)
rabbit_producer = self._api_server._db_conn._msgbus._producer
with test_common.patch(rabbit_producer,
'publish', err_on_publish):
obj.display_name = 'test_update_2'
self._vnc_lib.virtual_network_update(obj)
self.assertTill(lambda: publish_captured[0] == True)
# unpatch err publish
# unpatch connect
# unpatch err consume
self.assertTill(self.vnc_db_ident_has_prop, obj=obj,
prop_name='display_name', prop_value='test_update_2')
# end test_reconnect_to_rabbit
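# Verify that adding a ref between routing-instances publishes an
# UPDATE-IMPLICIT message on rabbit for the referred routing-instance.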
def test_update_implicit(self):
self.ignore_err_in_log = True
api_server = self._server_info['api_server']
orig_rabbitq_pub = api_server._db_conn._msgbus._producer.publish
try:
update_implicit = {}
def rabbitq_pub(*args, **kwargs):
if args[0]['oper'] == 'UPDATE-IMPLICIT':
update_implicit.update(args[0])
orig_rabbitq_pub(*args, **kwargs)
logger.info("Creating VN objects")
# every VN create also creates a routing-instance (RI)
vn_objs = self._create_test_objects(count=2)
api_server._db_conn._msgbus._producer.publish = rabbitq_pub
ri_objs = [self._vnc_lib.routing_instance_read(
fq_name=vn.fq_name + [vn.name]) for vn in vn_objs]
ri_objs[0].add_routing_instance(ri_objs[1], None)
self._vnc_lib.routing_instance_update(ri_objs[0])
for i in range(0, 10):
gevent.sleep(0.1)
if update_implicit.get('uuid') == ri_objs[1].uuid:
break
else:
self.assertTrue(False, 'update-implicit was not published')
finally:
api_server._db_conn._msgbus._producer.publish = orig_rabbitq_pub
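# Verify exception handling on reads: a NoIdError for a missing object must
# not be reported through config_log, and an unexpected DB read exception
# must surface to the client as an HttpError.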
def test_handle_trap_on_exception(self):
self.ignore_err_in_log = True
api_server = self._server_info['api_server']
orig_read = api_server._db_conn._object_db.object_read
def exception_on_log_error(*args, **kwargs):
self.assertTrue(False)
def exception_on_vn_read(obj_type, *args, **kwargs):
if obj_type == 'virtual_network':
raise Exception("fake vn read exception")
orig_read(obj_type, *args, **kwargs)
try:
orig_config_log = api_server.config_log
api_server.config_log = exception_on_log_error
with ExpectedException(NoIdError):
self._vnc_lib.virtual_network_read(fq_name=['foo', 'bar', 'baz'])
finally:
api_server.config_log = orig_config_log
try:
test_obj = self._create_test_object()
api_server._db_conn._object_db.object_read = exception_on_vn_read
with ExpectedException(HttpError):
self._vnc_lib.virtual_network_read(fq_name=test_obj.get_fq_name())
finally:
api_server._db_conn._object_db.object_read = orig_read
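# Toggle api/latency stats logging at runtime through the
# Snh_ConfigApiUpdateReq introspect handler; empty parameters must leave
# the current values unchanged.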
def test_update_api_server_configs(self):
api_server = self._server_info['api_server']
introspect_port = api_server._args.http_server_port
update_url = 'http://localhost:%s/Snh_ConfigApiUpdateReq?%s'
test_dicts = [
{'enable_api_stats_log': '', 'enable_latency_stats_log': '', 'assert': (0, 0)},
{'enable_api_stats_log': 0, 'enable_latency_stats_log': '', 'assert': (0, 0)},
{'enable_api_stats_log': '', 'enable_latency_stats_log': 0, 'assert': (0, 0)},
{'enable_api_stats_log': 1, 'enable_latency_stats_log': '', 'assert': (1, 0)},
{'enable_api_stats_log': '', 'enable_latency_stats_log': 1, 'assert': (1, 1)},
{'enable_api_stats_log': '', 'enable_latency_stats_log': '', 'assert': (1, 1)},
{'enable_api_stats_log': 1, 'enable_latency_stats_log': 1, 'assert': (1, 1)},
{'enable_api_stats_log': '', 'enable_latency_stats_log': '', 'assert': (1, 1)},
{'enable_api_stats_log': 0, 'enable_latency_stats_log': 0, 'assert': (0, 0)},
{'enable_api_stats_log': '', 'enable_latency_stats_log': '', 'assert': (0, 0)},
]
for test_dict in test_dicts:
assert_vals = test_dict['assert']
params = 'enable_api_stats_log=%s' % test_dict['enable_api_stats_log']
params += '&enable_latency_stats_log=%s' % test_dict['enable_latency_stats_log']
updates = requests.get(update_url % (introspect_port, params))
self.assertEqual(updates.status_code, 200)
self.assertEqual(api_server.enable_api_stats_log, bool(assert_vals[0]))
self.assertEqual(api_server.enable_latency_stats_log, bool(assert_vals[1]))
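# Create and delete an object, then validate the REST API, DB request and
# message bus notify Sandesh trace buffers via introspect.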
def test_sandesh_trace(self):
api_server = self._server_info['api_server']
# the test
test_obj = self._create_test_object()
self.assert_vnc_db_has_ident(test_obj)
self._vnc_lib.virtual_network_delete(id=test_obj.uuid)
gevent.sleep(0.05) # wait traces published
# and validations
introspect_port = api_server._args.http_server_port
traces = requests.get('http://localhost:%s/Snh_SandeshTraceRequest?x=RestApiTraceBuf' %(introspect_port))
self.assertThat(traces.status_code, Equals(200))
top_elem = etree.fromstring(traces.text)
self.assertThat(top_elem[0][0][-2].text, Contains('POST'))
self.assertThat(top_elem[0][0][-2].text, Contains('200 OK'))
self.assertThat(top_elem[0][0][-1].text, Contains('DELETE'))
self.assertThat(top_elem[0][0][-1].text, Contains('200 OK'))
traces = requests.get('http://localhost:%s/Snh_SandeshTraceRequest?x=DBRequestTraceBuf' %(introspect_port))
self.assertThat(traces.status_code, Equals(200))
top_elem = etree.fromstring(traces.text)
self.assertThat(top_elem[0][0][-1].text, Contains('delete'))
self.assertThat(top_elem[0][0][-1].text, Contains(test_obj.name))
traces = requests.get('http://localhost:%s/Snh_SandeshTraceRequest?x=MessageBusNotifyTraceBuf' %(introspect_port))
self.assertThat(traces.status_code, Equals(200))
top_elem = etree.fromstring(traces.text)
self.assertThat(top_elem[0][0][-1].text, Contains('DELETE'))
self.assertThat(top_elem[0][0][-1].text, Contains(test_obj.name))
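# Creating a second project that reuses an existing project's UUID must
# fail with RefsExistError.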
def test_dup_create_with_same_uuid(self):
dom_name = self.id() + '-domain'
logger.info('Creating Domain %s', dom_name)
domain_obj = Domain(dom_name)
self._vnc_lib.domain_create(domain_obj)
project_name = self.id() + '-project'
logger.info('Creating Project %s', project_name)
orig_project_obj = Project(project_name, domain_obj)
self._vnc_lib.project_create(orig_project_obj)
logger.info('Creating Dup Project in default domain with same uuid')
dup_project_obj = Project(project_name)
dup_project_obj.uuid = orig_project_obj.uuid
with ExpectedException(RefsExistError) as e:
self._vnc_lib.project_create(dup_project_obj)
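# Simulate an fq_name_to_uuid miss during a duplicate port create so the
# request races past the name check and still fails with RefsExistError.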
def test_dup_create_port_timing(self):
# test for https://bugs.launchpad.net/juniperopenstack/r2.0/+bug/1382385
vn_name = self.id() + '-network'
vn_obj = VirtualNetwork(vn_name, parent_obj=Project())
self._vnc_lib.virtual_network_create(vn_obj)
vmi_name = self.id() + '-port'
logger.info('Creating port %s', vmi_name)
vmi_obj = VirtualMachineInterface(vmi_name, parent_obj=Project())
vmi_obj.add_virtual_network(vn_obj)
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmi_name = self.id() + '-port'
logger.info('Creating dup port %s', vmi_name)
vmi_obj = VirtualMachineInterface(vmi_name, parent_obj=Project())
vmi_obj.add_virtual_network(vn_obj)
orig_fq_name_to_uuid = self._api_server._db_conn.fq_name_to_uuid
def dummy_fq_name_to_uuid(obj_type, *args, **kwargs):
if obj_type == 'virtual-machine-interface':
raise NoIdError('')
return orig_fq_name_to_uuid(obj_type, *args, **kwargs)
self._api_server._db_conn.fq_name_to_uuid = dummy_fq_name_to_uuid
try:
with ExpectedException(RefsExistError) as e:
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
finally:
self._api_server._db_conn.fq_name_to_uuid= orig_fq_name_to_uuid
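# A PUT against the wrong resource-type URL must return 404, while the
# correct type URL updates the object.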
def test_put_on_wrong_type(self):
vn_name = self.id()+'-vn'
vn_obj = VirtualNetwork(vn_name)
self._add_detail('Creating network with name %s' %(vn_name))
self._vnc_lib.virtual_network_create(vn_obj)
listen_port = self._api_server._args.listen_port
uri = '/network-ipam/%s' %(vn_obj.uuid)
self._add_detail('Trying to update uuid as network-ipam, expecting 404')
code, msg = self._http_put(uri, json.dumps({'network-ipam': {'display_name': 'foobar'}}))
self.assertThat(code, Equals(404))
self._add_detail('Updating display_name as network, expecting success')
uri = '/virtual-network/%s' %(vn_obj.uuid)
code, msg = self._http_put(uri, json.dumps({'virtual-network': {'display_name': 'foobar'}}))
self.assertThat(code, Equals(200))
rb_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
self.assertThat(rb_obj.display_name, Equals('foobar'))
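# Requesting an instance-ip with an address already allocated to a
# floating-ip must fail with RefsExistError.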
def test_floatingip_as_instanceip(self):
ipam_fixt = self.useFixture(NetworkIpamTestFixtureGen(
self._vnc_lib, network_ipam_name='ipam-%s' % self.id()))
project_fixt = self.useFixture(ProjectTestFixtureGen(self._vnc_lib, 'default-project'))
subnet_vnc = IpamSubnetType(subnet=SubnetType('1.1.1.0', 24))
vnsn_data = VnSubnetsType([subnet_vnc])
logger.info("Creating a virtual network")
logger.info("Creating subnet 1.1.1.0/24")
vn_fixt = self.useFixture(VirtualNetworkTestFixtureGen(self._vnc_lib,
'vn-%s' %(self.id()),
network_ipam_ref_infos=[(ipam_fixt.getObj(), vnsn_data)]))
vn_fixt.getObj().set_router_external(True)
self._vnc_lib.virtual_network_update(vn_fixt.getObj())
logger.info("Fetching floating-ip-pool")
fip_pool_fixt = self.useFixture(
FloatingIpPoolTestFixtureGen(self._vnc_lib, 'floating-ip-pool',
parent_fixt=vn_fixt))
logger.info("Creating auto-alloc floating-ip")
fip_fixt = self.useFixture(
FloatingIpTestFixtureGen(
self._vnc_lib, 'fip1', parent_fixt=fip_pool_fixt,
project_refs=[project_fixt.getObj()]))
ip_allocated = fip_fixt.getObj().floating_ip_address
logger.info("Creating auto-alloc instance-ip, expecting an error")
with ExpectedException(RefsExistError) as e:
iip_fixt = self.useFixture(
InstanceIpTestFixtureGen(
self._vnc_lib, 'iip1', auto_prop_val=False,
instance_ip_address=ip_allocated,
virtual_network_refs=[vn_fixt.getObj()]))
# end test_floatingip_as_instanceip
def test_aliasip_as_instanceip(self):
ipam_fixt = self.useFixture(NetworkIpamTestFixtureGen(
self._vnc_lib, network_ipam_name='ipam-%s' % self.id()))
project_fixt = self.useFixture(ProjectTestFixtureGen(self._vnc_lib, 'default-project'))
subnet_vnc = IpamSubnetType(subnet=SubnetType('1.1.1.0', 24))
vnsn_data = VnSubnetsType([subnet_vnc])
logger.info("Creating a virtual network")
logger.info("Creating subnet 1.1.1.0/24")
vn_fixt = self.useFixture(VirtualNetworkTestFixtureGen(self._vnc_lib,
'vn-%s' %(self.id()),
network_ipam_ref_infos=[(ipam_fixt.getObj(), vnsn_data)]))
vn_fixt.getObj().set_router_external(True)
self._vnc_lib.virtual_network_update(vn_fixt.getObj())
logger.info("Fetching alias-ip-pool")
aip_pool_fixt = self.useFixture(
AliasIpPoolTestFixtureGen(self._vnc_lib, 'alias-ip-pool',
parent_fixt=vn_fixt))
logger.info("Creating auto-alloc alias-ip")
aip_fixt = self.useFixture(
AliasIpTestFixtureGen(
self._vnc_lib, 'aip1', parent_fixt=aip_pool_fixt,
project_refs=[project_fixt.getObj()]))
ip_allocated = aip_fixt.getObj().alias_ip_address
logger.info("Creating auto-alloc instance-ip, expecting an error")
with ExpectedException(RefsExistError) as e:
iip_fixt = self.useFixture(
InstanceIpTestFixtureGen(
self._vnc_lib, 'iip1', auto_prop_val=False,
instance_ip_address=ip_allocated,
virtual_network_refs=[vn_fixt.getObj()]))
# end test_aliasip_as_instanceip
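# Broad coverage of the list API: unanchored, parent anchored, back-ref
# anchored and id-list queries, in summary and detail form, with field
# selection and filters.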
def test_list_lib_api(self):
num_objs = 5
proj_obj = Project('%s-project' %(self.id()))
self._vnc_lib.project_create(proj_obj)
ipam_obj = NetworkIpam('%s-ipam' %(self.id()), parent_obj=proj_obj)
self._vnc_lib.network_ipam_create(ipam_obj)
def create_vns():
objs = []
for i in range(num_objs):
name = '%s-%s' %(self.id(), i)
obj = VirtualNetwork(
name, proj_obj, display_name=name, is_shared=True,
router_external=False)
obj.add_network_ipam(ipam_obj,
VnSubnetsType(
[IpamSubnetType(SubnetType('1.1.%s.0' %(i), 28))]))
self._vnc_lib.virtual_network_create(obj)
objs.append(obj)
return objs
vn_objs = create_vns()
# unanchored summary list without filters
read_vn_dicts = self._vnc_lib.virtual_networks_list()['virtual-networks']
self.assertThat(len(read_vn_dicts), Not(LessThan(num_objs)))
for obj in vn_objs:
# locate created object, should only be one, expect exact fields
obj_dict = [d for d in read_vn_dicts if d['uuid'] == obj.uuid]
self.assertThat(len(obj_dict), Equals(1))
self.assertThat(set(['fq_name', 'uuid', 'href']),
Equals(set(obj_dict[0].keys())))
# unanchored summary list with field filters, with extra fields
resp = self._vnc_lib.virtual_networks_list(
filters={'display_name':vn_objs[2].display_name},
fields=['is_shared'])
vn_dicts = resp['virtual-networks']
self.assertThat(len(vn_dicts), Equals(1))
self.assertThat(vn_dicts[0]['uuid'], Equals(vn_objs[2].uuid))
self.assertThat(set(['fq_name', 'uuid', 'href', 'is_shared']),
Equals(set(vn_dicts[0].keys())))
# unanchored detailed list without filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
detail=True)
self.assertThat(len(read_vn_objs), Not(LessThan(num_objs)))
read_display_names = [o.display_name for o in read_vn_objs]
for obj in vn_objs:
self.assertThat(read_display_names,
Contains(obj.display_name))
# unanchored detailed list with filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
detail=True,
filters={'is_shared':True})
self.assertThat(len(read_vn_objs), Not(LessThan(num_objs)))
read_display_names = [o.display_name for o in read_vn_objs]
for obj in vn_objs:
self.assertThat(read_display_names,
Contains(obj.display_name))
# unanchored detailed list with filter with multiple values
filtered_display_names = [
'%s-%d' %(self.id(), num_objs - 1),
'%s-%d' %(self.id(), num_objs - 2),
]
read_vn_objs = self._vnc_lib.virtual_networks_list(
detail=True,
filters={'display_name': filtered_display_names})
self.assertEqual(len(read_vn_objs), len(filtered_display_names))
read_display_names = [o.display_name for o in read_vn_objs]
self.assertEqual(set(read_display_names), set(filtered_display_names))
# parent anchored summary list without filters, with extra fields
read_vn_dicts = self._vnc_lib.virtual_networks_list(
parent_id=proj_obj.uuid,
fields=['router_external'])['virtual-networks']
self.assertThat(len(read_vn_dicts), Equals(num_objs))
for obj in vn_objs:
# locate created object, should only be one, expect exact fields
obj_dict = [d for d in read_vn_dicts if d['uuid'] == obj.uuid]
self.assertThat(len(obj_dict), Equals(1))
self.assertThat(set(['fq_name', 'uuid', 'href', 'router_external']),
Equals(set(obj_dict[0].keys())))
self.assertThat(obj_dict[0]['fq_name'][:-1],
Equals(proj_obj.fq_name))
self.assertEqual(obj_dict[0]['router_external'], False)
# parent anchored summary list with filters
resp = self._vnc_lib.virtual_networks_list(
parent_id=proj_obj.uuid,
filters={'is_shared': vn_objs[2].is_shared})
read_vn_dicts = resp['virtual-networks']
self.assertThat(len(read_vn_dicts), Equals(num_objs))
for obj in vn_objs:
# locate created object, should only be one, expect exact fields
obj_dict = [d for d in read_vn_dicts if d['uuid'] == obj.uuid]
self.assertThat(len(obj_dict), Equals(1))
self.assertThat(set(['fq_name', 'uuid', 'href']),
Equals(set(obj_dict[0].keys())))
self.assertThat(obj_dict[0]['fq_name'][:-1],
Equals(proj_obj.fq_name))
# parent anchored list with an unknown filter (the filter is ignored)
read_vn_objs = self._vnc_lib.virtual_networks_list(
parent_id=proj_obj.uuid,
filters={'foo': 'bar'})['virtual-networks']
self.assertEqual(len(read_vn_objs), num_objs)
# parent anchored detailed list without filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
parent_id=proj_obj.uuid, detail=True)
self.assertThat(len(read_vn_objs), Equals(num_objs))
read_display_names = [o.display_name for o in read_vn_objs]
read_fq_names = [o.fq_name for o in read_vn_objs]
for obj in vn_objs:
self.assertThat(read_display_names,
Contains(obj.display_name))
for fq_name in read_fq_names:
self.assertThat(fq_name[:-1], Equals(proj_obj.fq_name))
# parent anchored detailed list with filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
parent_id=proj_obj.uuid, detail=True,
filters={'display_name':vn_objs[2].display_name})
self.assertThat(len(read_vn_objs), Equals(1))
self.assertThat(read_vn_objs[0].fq_name[:-1],
Equals(proj_obj.fq_name))
# backref anchored summary list without filters
resp = self._vnc_lib.virtual_networks_list(
back_ref_id=ipam_obj.uuid,
filters={'is_shared':vn_objs[2].is_shared})
read_vn_dicts = resp['virtual-networks']
self.assertThat(len(read_vn_dicts), Equals(num_objs))
for obj in vn_objs:
# locate created object, should only be one, expect exact fields
obj_dict = [d for d in read_vn_dicts if d['uuid'] == obj.uuid]
self.assertThat(len(obj_dict), Equals(1))
self.assertEqual(obj_dict[0]['fq_name'], obj.get_fq_name())
self.assertThat(set(['fq_name', 'uuid', 'href']),
Equals(set(obj_dict[0].keys())))
# backref anchored summary list with filters, with extra fields
resp = self._vnc_lib.virtual_networks_list(
back_ref_id=ipam_obj.uuid,
filters={'display_name':vn_objs[2].display_name},
fields=['is_shared', 'router_external'])
read_vn_dicts = resp['virtual-networks']
self.assertEqual(len(read_vn_dicts), 1)
self.assertEqual(read_vn_dicts[0]['uuid'], vn_objs[2].uuid)
self.assertEqual(read_vn_dicts[0]['is_shared'], True)
self.assertEqual(read_vn_dicts[0]['router_external'], False)
# backref anchored detailed list without filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
back_ref_id=ipam_obj.uuid, detail=True)
self.assertThat(len(read_vn_objs), Equals(num_objs))
read_display_names = [o.display_name for o in read_vn_objs]
read_ipam_uuids = [o.network_ipam_refs[0]['uuid']
for o in read_vn_objs]
for obj in vn_objs:
self.assertThat(read_display_names,
Contains(obj.display_name))
for ipam_uuid in read_ipam_uuids:
self.assertThat(ipam_uuid, Equals(ipam_obj.uuid))
# backref anchored detailed list with filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
back_ref_id=ipam_obj.uuid, detail=True,
filters={'display_name':vn_objs[2].display_name,
'is_shared':vn_objs[2].is_shared})
self.assertThat(len(read_vn_objs), Equals(1))
read_ipam_fq_names = [o.network_ipam_refs[0]['to']
for o in read_vn_objs]
for ipam_fq_name in read_ipam_fq_names:
self.assertThat(ipam_fq_name,
Equals(ipam_obj.fq_name))
# id-list detailed without filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
obj_uuids=[o.uuid for o in vn_objs], detail=True)
self.assertThat(len(read_vn_objs), Equals(num_objs))
read_display_names = [o.display_name for o in read_vn_objs]
for obj in vn_objs:
self.assertThat(read_display_names,
Contains(obj.display_name))
# id-list detailed with filters
read_vn_objs = self._vnc_lib.virtual_networks_list(
obj_uuids=[o.uuid for o in vn_objs], detail=True,
filters={'is_shared':False})
self.assertThat(len(read_vn_objs), Equals(0))
# end test_list_lib_api
def test_list_with_id_parent_id_backref_id_and_filters(self):
# Create 2 projects: one with 4 policies (3 sharing the same display
# name), the other with one policy. A single firewall rule in the first
# project is referenced by all policies except P1's FP1
# ===========================|===========================
# P1 | P2
# ===========================|===========================
# FP1 FP2 FP3 FP4 | FP1
# \ \ \ | /
# \ \ \ | /
# \_____\__ FR_|_/
# FP1, FP2 and FP3 in P1 have the same display name
p1 = Project('%s-p1' % self.id())
self._vnc_lib.project_create(p1)
p2 = Project('%s-p2' % self.id())
self._vnc_lib.project_create(p2)
p1_fr = FirewallRule(
'%s-fr' % self.id(),
parent_obj=p1,
service=FirewallServiceType(),
)
self._vnc_lib.firewall_rule_create(p1_fr)
p1_fp1_fp2_name = '%s-p1-fp1-fp2' % self.id()
p1_fp1 = FirewallPolicy(
'%s-p1-fp1' % self.id(),
parent_obj=p1,
display_name=p1_fp1_fp2_name)
p1_fp2 = FirewallPolicy(
'%s-p1-fp2' % self.id(),
parent_obj=p1,
display_name=p1_fp1_fp2_name)
p1_fp2.add_firewall_rule(p1_fr)
p1_fp3 = FirewallPolicy(
'%s-p1-fp3' % self.id(),
parent_obj=p1,
display_name=p1_fp1_fp2_name)
p1_fp3.add_firewall_rule(p1_fr)
p1_fp4 = FirewallPolicy('%s-p1-fp4' % self.id(), parent_obj=p1)
p1_fp4.add_firewall_rule(p1_fr)
p2_fp1 = FirewallPolicy('%s-p2-fp1' % self.id(), parent_obj=p2)
p2_fp1.add_firewall_rule(p1_fr)
for fp in [p1_fp1, p1_fp2, p1_fp3, p1_fp4, p2_fp1]:
self._vnc_lib.firewall_policy_create(fp)
# list P1 and P2 policies
list_result = self._vnc_lib.firewall_policys_list(
parent_id=[p1.uuid, p2.uuid]
)['firewall-policys']
self.assertEquals(len(list_result), 5)
self.assertEquals({r['uuid'] for r in list_result},
set([p1_fp1.uuid, p1_fp2.uuid, p1_fp3.uuid,
p1_fp4.uuid, p2_fp1.uuid]))
# list P1 policies
list_result = self._vnc_lib.firewall_policys_list(
parent_id=p1.uuid,
)['firewall-policys']
self.assertEquals(len(list_result), 4)
self.assertEquals({r['uuid'] for r in list_result},
set([p1_fp1.uuid, p1_fp2.uuid, p1_fp3.uuid,
p1_fp4.uuid]))
# list P1 policies with a ref to FR
list_result = self._vnc_lib.firewall_policys_list(
parent_id=p1.uuid,
back_ref_id=p1_fr.uuid,
)['firewall-policys']
self.assertEquals(len(list_result), 3)
self.assertEquals({r['uuid'] for r in list_result},
set([p1_fp2.uuid, p1_fp3.uuid, p1_fp4.uuid]))
# list P1 policies with display name 'p1_fp1_fp2_name' and with a ref to FR
list_result = self._vnc_lib.firewall_policys_list(
parent_id=p1.uuid,
back_ref_id=p1_fr.uuid,
filters={'display_name': p1_fp1_fp2_name},
)['firewall-policys']
self.assertEquals(len(list_result), 2)
self.assertEquals({r['uuid'] for r in list_result},
set([p1_fp2.uuid, p1_fp3.uuid]))
# list P1 policies with display name 'p1_fp1_fp2_name', with a ref to FR
# and with UUID equal to FP2's UUID
list_result = self._vnc_lib.firewall_policys_list(
obj_uuids=[p1_fp2.uuid],
parent_id=p1.uuid,
back_ref_id=p1_fr.uuid,
filters={'display_name': p1_fp1_fp2_name},
)['firewall-policys']
self.assertEquals(len(list_result), 1)
self.assertEquals(list_result[0]['uuid'], p1_fp2.uuid)
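# REST list with obj_uuids and extra fields: the VN created with
# user_visible=False must not be returned to a non-admin request.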
def test_list_for_coverage(self):
name = '%s-vn1' %(self.id())
vn1_obj = VirtualNetwork(
name, display_name=name, is_shared=True,
router_external=False)
self._vnc_lib.virtual_network_create(vn1_obj)
name = '%s-vn2' %(self.id())
id_perms = IdPermsType(user_visible=False)
vn2_obj = VirtualNetwork(
name, display_name=name, id_perms=id_perms,
is_shared=True, router_external=False)
def fake_admin_request(orig_method, *args, **kwargs):
return True
with test_common.patch(self._api_server,
'is_admin_request', fake_admin_request):
self._vnc_lib.virtual_network_create(vn2_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
q_params = 'obj_uuids=%s,%s&fields=is_shared,router_external' %(
vn1_obj.uuid, vn2_obj.uuid)
url = 'http://%s:%s/virtual-networks?%s' %(
listen_ip, listen_port, q_params)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
read_vn_dicts = json.loads(resp.text)['virtual-networks']
self.assertEqual(len(read_vn_dicts), 1)
self.assertEqual(read_vn_dicts[0]['uuid'], vn1_obj.uuid)
self.assertEqual(read_vn_dicts[0]['is_shared'], True)
self.assertEqual(read_vn_dicts[0]['router_external'], False)
# end test_list_for_coverage
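# Malformed UUIDs passed in obj_uuids, parent_id or back_ref_id must be
# ignored instead of failing the whole list request.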
def test_list_with_malformed_filters(self):
vn_objs, _, _, _ = self._create_vn_ri_vmi()
vn_uuid = vn_objs[0].uuid
vn_uuids = [vn_uuid, 'bad-uuid']
try:
results = self._vnc_lib.resource_list('virtual-network',
obj_uuids=vn_uuids)
self.assertEqual(len(results['virtual-networks']), 1)
self.assertEqual(results['virtual-networks'][0]['uuid'], vn_uuid)
except HttpError:
self.fail('Malformed object UUID filter was not ignored')
try:
results = self._vnc_lib.resource_list('routing-instance',
parent_id=vn_uuids,
detail=True)
self.assertEqual(len(results), 2)
for ri_obj in results:
self.assertEqual(ri_obj.parent_uuid, vn_uuid)
except HttpError:
self.fail('Malformed parent UUID filter was not ignored')
try:
results = self._vnc_lib.resource_list('virtual-machine-interface',
back_ref_id=vn_uuids,
detail=True)
self.assertEqual(len(results), 1)
vmi_obj = results[0]
self.assertEqual(vmi_obj.get_virtual_network_refs()[0]['uuid'],
vn_uuid)
except HttpError:
self.fail('Malformed back-ref UUID filter was not ignored')
def test_list_filtering_parent_fq_name(self):
project = Project('project-%s' % self.id())
self._vnc_lib.project_create(project)
fp = FirewallPolicy('fp-%s' % self.id(), parent_obj=project)
self._vnc_lib.firewall_policy_create(fp)
fps = self._vnc_lib.firewall_policys_list(
parent_fq_name=project.fq_name)
self.assertEqual(len(fps['%ss' % FirewallPolicy.resource_type]), 1)
@mock.patch.object(GlobalSystemConfigServer, 'pre_dbe_create',
return_value=(True, ''))
def test_list_filtering_parent_fq_name_multiple_parent_types_match(
self, pre_dbe_create_mock):
identical_name = 'gsc-and-domain-name-%s' % self.id()
gsc = GlobalSystemConfig(identical_name)
self._vnc_lib.global_system_config_create(gsc)
domain = Domain(identical_name)
self._vnc_lib.domain_create(domain)
gsc_aal = ApiAccessList('gsc-aal-%s' % self.id(), parent_obj=gsc)
self._vnc_lib.api_access_list_create(gsc_aal)
domain_aal = ApiAccessList('domain-aal-%s' % self.id(), parent_obj=gsc)
self._vnc_lib.api_access_list_create(domain_aal)
aals = self._vnc_lib.api_access_lists_list(parent_fq_name=gsc.fq_name)
self.assertEqual(len(aals['%ss' % ApiAccessList.resource_type]), 2)
def test_create_with_wrong_type(self):
vn_obj = VirtualNetwork('%s-bad-prop-type' %(self.id()))
vn_obj.virtual_network_properties = 'foo' #VirtualNetworkType
with ExpectedException(BadRequest) as e:
self._vnc_lib.virtual_network_create(vn_obj)
#end test_create_with_wrong_type(self):
def test_update_with_wrong_type(self):
vn_obj = VirtualNetwork('%s-bad-prop-type' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
vn_obj.virtual_network_properties = 'foo' #VirtualNetworkType
with ExpectedException(BadRequest) as e:
self._vnc_lib.virtual_network_update(vn_obj)
#end test_update_with_wrong_type(self):
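# Cover the read REST API query parameters: exclude_children,
# exclude_back_refs and fields selection for properties, refs, children
# and back-refs (id_perms and perms2 are always returned).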
def test_read_rest_api(self):
logger.info("Creating VN, RI, VMI.")
vn_objs, ipam_objs, ri_objs, vmi_objs = self._create_vn_ri_vmi()
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
logger.info("Reading VN without filters.")
url = 'http://%s:%s/virtual-network/%s' %(
listen_ip, listen_port, vn_objs[0].uuid)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Contains('routing_instances'))
self.assertThat(list(ret_vn.keys()), Contains('virtual_machine_interface_back_refs'))
for link_key, linked_obj in [('routing_instances', ri_objs[0]),
('virtual_machine_interface_back_refs', vmi_objs[0])]:
found = False
for ret_link in ret_vn[link_key]:
self.assertThat(ret_link, Contains('to'))
self.assertThat(ret_link, Contains('uuid'))
if (ret_link['to'] == linked_obj.get_fq_name() and
ret_link['uuid'] == linked_obj.uuid):
found = True
break
self.assertTrue(found)
logger.info("Reading VN with children excluded.")
url = 'http://%s:%s/virtual-network/%s?exclude_children=true' %(
listen_ip, listen_port, vn_objs[0].uuid)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Not(Contains('routing_instances')))
self.assertThat(list(ret_vn.keys()), Contains(
'virtual_machine_interface_back_refs'))
for link_key, linked_obj in [('virtual_machine_interface_back_refs',
vmi_objs[0])]:
ret_link = ret_vn[link_key][0]
self.assertThat(ret_link, Contains('to'))
self.assertThat(ret_link, Contains('uuid'))
self.assertEqual(ret_link['to'], linked_obj.get_fq_name())
self.assertEqual(ret_link['uuid'], linked_obj.uuid)
logger.info("Reading VN with backrefs excluded.")
url = 'http://%s:%s/virtual-network/%s?exclude_back_refs=true' %(
listen_ip, listen_port, vn_objs[0].uuid)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Contains('routing_instances'))
self.assertThat(list(ret_vn.keys()), Not(Contains(
'virtual_machine_interface_back_refs')))
for link_key, linked_obj in [('routing_instances',
ri_objs[0])]:
found = False
for ret_link in ret_vn[link_key]:
self.assertThat(ret_link, Contains('to'))
self.assertThat(ret_link, Contains('uuid'))
if (ret_link['to'] == linked_obj.get_fq_name() and
ret_link['uuid'] == linked_obj.uuid):
found = True
break
self.assertTrue(found)
logger.info("Reading VN with children and backrefs excluded.")
query_param_str = 'exclude_children=True&exclude_back_refs=true'
url = 'http://%s:%s/virtual-network/%s?%s' %(
listen_ip, listen_port, vn_objs[0].uuid, query_param_str)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Not(Contains('routing_instances')))
self.assertThat(list(ret_vn.keys()), Not(Contains(
'virtual_machine_interface_back_refs')))
# id_perms and perms2 are always returned irrespective of what
# fields are requested
property = 'virtual_network_network_id'
reference = 'network_ipam_refs'
children = 'routing_instances'
back_reference = 'virtual_machine_interface_back_refs'
logger.info("Reading VN with one specific property field.")
query_param_str = 'fields=%s' % property
url = 'http://%s:%s/virtual-network/%s?%s' % (
listen_ip, listen_port, vn_objs[0].uuid, query_param_str)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Contains(property))
self.assertThat(list(ret_vn.keys()), Contains('id_perms'))
self.assertThat(list(ret_vn.keys()), Contains('perms2'))
self.assertThat(list(ret_vn.keys()), Not(Contains(reference)))
self.assertThat(list(ret_vn.keys()), Not(Contains(children)))
self.assertThat(list(ret_vn.keys()), Not(Contains(back_reference)))
logger.info("Reading VN with one specific ref field.")
query_param_str = 'fields=%s' % reference
url = 'http://%s:%s/virtual-network/%s?%s' % (
listen_ip, listen_port, vn_objs[0].uuid, query_param_str)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Not(Contains(property)))
self.assertThat(list(ret_vn.keys()), Contains('id_perms'))
self.assertThat(list(ret_vn.keys()), Contains('perms2'))
self.assertThat(list(ret_vn.keys()), Contains(reference))
self.assertThat(list(ret_vn.keys()), Not(Contains(children)))
self.assertThat(list(ret_vn.keys()), Not(Contains(back_reference)))
logger.info("Reading VN with one specific children field.")
query_param_str = 'fields=%s' % children
url = 'http://%s:%s/virtual-network/%s?%s' % (
listen_ip, listen_port, vn_objs[0].uuid, query_param_str)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Not(Contains(property)))
self.assertThat(list(ret_vn.keys()), Not(Contains(reference)))
self.assertThat(list(ret_vn.keys()), Contains('id_perms'))
self.assertThat(list(ret_vn.keys()), Contains('perms2'))
self.assertThat(list(ret_vn.keys()), Contains(children))
self.assertThat(list(ret_vn.keys()), Not(Contains(back_reference)))
logger.info("Reading VN with one specific back-reference field.")
query_param_str = 'fields=%s' % back_reference
url = 'http://%s:%s/virtual-network/%s?%s' % (
listen_ip, listen_port, vn_objs[0].uuid, query_param_str)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Not(Contains(property)))
self.assertThat(list(ret_vn.keys()), Not(Contains(reference)))
self.assertThat(list(ret_vn.keys()), Contains('id_perms'))
self.assertThat(list(ret_vn.keys()), Contains('perms2'))
self.assertThat(list(ret_vn.keys()), Not(Contains(children)))
self.assertThat(list(ret_vn.keys()), Contains(back_reference))
logger.info("Reading VN with property, reference, children and "
"back-reference fields.")
query_param_str = ('fields=%s,%s,%s,%s' % (property, reference,
children, back_reference))
url = 'http://%s:%s/virtual-network/%s?%s' % (
listen_ip, listen_port, vn_objs[0].uuid, query_param_str)
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
ret_vn = json.loads(resp.text)['virtual-network']
self.assertThat(list(ret_vn.keys()), Contains('id_perms'))
self.assertThat(list(ret_vn.keys()), Contains('perms2'))
self.assertThat(list(ret_vn.keys()), Contains(property))
self.assertThat(list(ret_vn.keys()), Contains(reference))
self.assertThat(list(ret_vn.keys()), Contains(children))
self.assertThat(list(ret_vn.keys()), Contains(back_reference))
# end test_read_rest_api
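# Bulk read via the list API using fq_names should return exactly the
# requested networks.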
def test_bulk_read_rest_api_with_fqns(self):
num_vn = 4
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
vn_objs, _, _, _ = self._create_vn_ri_vmi(num_vn)
vn_fqns = [o.fq_name for o in vn_objs]
vn_fqns_str_list = [':'.join(o.fq_name) for o in vn_objs]
self.assertEqual(len(vn_fqns_str_list), num_vn)
ret_list = self._vnc_lib.virtual_networks_list(fq_names=vn_fqns)
ret_vns = ret_list['virtual-networks']
ret_fqns_str_list = [':'.join(ret['fq_name']) for ret in ret_vns]
self.assertEqual(len(ret_fqns_str_list), num_vn)
self.assertEqual(vn_fqns_str_list.sort(), ret_fqns_str_list.sort())
#end test_bulk_read_rest_api_with_fqns
def test_bulk_read_rest_api_with_bad_fqns(self):
num_vn = 2
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
vn_objs, _, _, _ = self._create_vn_ri_vmi(num_vn)
vn_fqns = [o.fq_name for o in vn_objs]
vn_fqns.append(['default-domain', 'default-project', 'bad-vn-fqn'])
vn_fqns_str_list = [':'.join(o.fq_name) for o in vn_objs]
self.assertEqual(len(vn_fqns_str_list), num_vn)
ret_list = self._vnc_lib.resource_list('virtual-network',
fq_names=vn_fqns)
ret_vns = ret_list['virtual-networks']
ret_fqns_str_list = [':'.join(ret['fq_name']) for ret in ret_vns]
self.assertEqual(len(ret_fqns_str_list), num_vn)
self.assertEqual(vn_fqns_str_list.sort(), ret_fqns_str_list.sort())
#end test_bulk_read_rest_api_with_bad_fqns
def test_bulk_read_rest_api_with_fqns_objs(self):
num_vn = 4
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
vn_objs, _, _, _ = self._create_vn_ri_vmi(num_vn)
vn_fqns = [o.fq_name for o in vn_objs]
vn_fqns_str_list = [':'.join(o.fq_name) for o in vn_objs]
vn_uuids_list = [o.uuid for o in vn_objs]
self.assertEqual(len(vn_fqns_str_list), num_vn)
self.assertEqual(len(vn_uuids_list), num_vn)
# pass the first two via fq_names and the last two via obj_uuids
ret_list = self._vnc_lib.resource_list('virtual-network',
fq_names=vn_fqns[0:2],
obj_uuids=vn_uuids_list[2:])
ret_vns = ret_list['virtual-networks']
ret_fqns_str_list = [':'.join(ret['fq_name']) for ret in ret_vns]
ret_uuids_str_list = [ret['uuid'] for ret in ret_vns]
self.assertEqual(len(ret_fqns_str_list), num_vn)
self.assertEqual(ret_fqns_str_list.sort(), vn_fqns_str_list.sort())
self.assertEqual(ret_uuids_str_list.sort(), vn_uuids_list.sort())
#end test_bulk_read_rest_api_with_fqns_objs
def test_delete_after_unref(self):
# 2 policies, 1 VN associate to VN, dissociate, delete policies
def create_vn_and_policies():
pol1_obj = NetworkPolicy('%s-pol1' %(self.id()))
self._vnc_lib.network_policy_create(pol1_obj)
pol2_obj = NetworkPolicy('%s-pol2' %(self.id()))
self._vnc_lib.network_policy_create(pol2_obj)
vn_obj = VirtualNetwork('%s-vn' %(self.id()))
vn_obj.add_network_policy(pol1_obj,
VirtualNetworkPolicyType(sequence=SequenceType(major=0, minor=0)))
vn_obj.add_network_policy(pol2_obj,
VirtualNetworkPolicyType(sequence=SequenceType(major=1, minor=0)))
self._vnc_lib.virtual_network_create(vn_obj)
return vn_obj, pol1_obj, pol2_obj
def delete_vn_and_policies():
self._vnc_lib.network_policy_delete(id=pol1_obj.uuid)
self._vnc_lib.network_policy_delete(id=pol2_obj.uuid)
self._vnc_lib.virtual_network_delete(id=vn_obj.uuid)
# references could be removed like this...
vn_obj, pol1_obj, pol2_obj = create_vn_and_policies()
vn_obj.del_network_policy(pol1_obj)
vn_obj.del_network_policy(pol2_obj)
self._vnc_lib.virtual_network_update(vn_obj)
delete_vn_and_policies()
# ... or they could be removed like this
vn_obj, pol1_obj, pol2_obj = create_vn_and_policies()
vn_obj.set_network_policy_list([], [])
self._vnc_lib.virtual_network_update(vn_obj)
delete_vn_and_policies()
# end test_delete_after_unref
def test_vn_with_native_ri(self):
logger.info("Creating a VN, expecting auto Native RI creation...")
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
ri_obj = self._vnc_lib.routing_instance_read(
fq_name=vn_obj.fq_name+[vn_obj.name])
ri_children = vn_obj.get_routing_instances()
self.assertTrue(ri_obj.uuid in [r['uuid'] for r in ri_children])
logger.info("...VN/RI creation done.")
logger.info("Deleting a VN, expecting auto Native RI deletion.")
self._vnc_lib.virtual_network_delete(id=vn_obj.uuid)
with ExpectedException(NoIdError) as e:
self._vnc_lib.routing_instance_read(fq_name=ri_obj.fq_name)
logger.info("Testing delete RI with refs to RI...")
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
ri_obj = self._vnc_lib.routing_instance_read(
fq_name=vn_obj.fq_name+[vn_obj.name])
vmi_obj = VirtualMachineInterface(
'vmi-%s' %(self.id()), parent_obj=Project())
# the server is expected to add the link to the VN during VMI create
vmi_obj.add_virtual_network(vn_obj)
vmi_obj.add_routing_instance(ri_obj, PolicyBasedForwardingRuleType())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
logger.info("...VN/RI/VMI creation done...")
# remove link from vmi before vn delete
vmi_obj.del_virtual_network(vn_obj)
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
self._vnc_lib.virtual_network_delete(id=vn_obj.uuid)
with ExpectedException(NoIdError) as e:
self._vnc_lib.routing_instance_read(fq_name=ri_obj.fq_name)
vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid)
ri_refs = vmi_obj.get_routing_instance_refs()
self.assertIsNone(ri_refs)
logger.info("...VN/RI deletion done.")
# end test_vn_with_native_ri
def test_vmi_links_to_native_ri(self):
logger.info("Creating a VN/VMI, expecting auto Native RI linking...")
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
vmi_obj = VirtualMachineInterface(
'vmi-%s' %(self.id()), parent_obj=Project())
# the server is expected to add the link to the VN during VMI create
vmi_obj.add_virtual_network(vn_obj)
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid)
ri_refs = vmi_obj.get_routing_instance_refs()
ri_fq_name = vn_obj.fq_name[:]
ri_fq_name.append(vn_obj.fq_name[-1])
self.assertEqual(ri_refs[0]['to'], ri_fq_name)
logger.info("...link to Native RI done.")
# end test_vmi_links_to_native_ri
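# An update carrying no changes must be a no-op: the library call returns
# None and last_modified is untouched; an empty-body REST PUT returns 200.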
def test_nop_on_empty_body_update(self):
# library api test
vn_fq_name = VirtualNetwork().fq_name
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
mod_time = vn_obj.id_perms.last_modified
resp = self._vnc_lib.virtual_network_update(vn_obj)
self.assertIsNone(resp)
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
self.assertEqual(mod_time, vn_obj.id_perms.last_modified)
# rest api test
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
url = 'http://%s:%s/virtual-network/%s' %(
listen_ip, listen_port, vn_obj.uuid)
resp = requests.put(url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.text, '')
# end test_nop_on_empty_body_update
def test_id_perms_uuid_update_should_fail(self):
vn_obj = self._create_test_object()
# read in id-perms
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
orig_id_perms = copy.deepcopy(vn_obj.id_perms)
wrong_id_perms = copy.deepcopy(vn_obj.id_perms)
wrong_id_perms.uuid.uuid_mslong += 1
wrong_id_perms.uuid.uuid_lslong += 1
vn_obj.set_id_perms(wrong_id_perms)
self._vnc_lib.virtual_network_update(vn_obj)
read_id_perms = self._vnc_lib.virtual_network_read(id=vn_obj.uuid).id_perms
self.assertEqual(read_id_perms.uuid.uuid_mslong,
orig_id_perms.uuid.uuid_mslong)
self.assertEqual(read_id_perms.uuid.uuid_lslong,
orig_id_perms.uuid.uuid_lslong)
# end test_id_perms_uuid_update_should_fail
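# If the DB delete of an instance/floating/alias IP fails, the zookeeper
# address reservation and the stored address must remain intact.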
def test_ip_addr_not_released_on_delete_error(self):
ipam_obj = NetworkIpam('ipam-%s' %(self.id()))
self._vnc_lib.network_ipam_create(ipam_obj)
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
vn_obj.add_network_ipam(ipam_obj,
VnSubnetsType(
[IpamSubnetType(SubnetType('1.1.1.0', 28))]))
self._vnc_lib.virtual_network_create(vn_obj)
# instance-ip test
iip_obj = InstanceIp('iip-%s' %(self.id()))
iip_obj.add_virtual_network(vn_obj)
self._vnc_lib.instance_ip_create(iip_obj)
# read back to get allocated ip
iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
def err_on_delete(orig_method, *args, **kwargs):
if args[0] == 'instance_ip':
raise Exception("Faking db delete for instance ip")
return orig_method(*args, **kwargs)
with test_common.patch(
self._api_server._db_conn, 'dbe_delete', err_on_delete):
try:
self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
self.assertTrue(
False, 'Instance IP delete worked unexpectedly')
except Exception as e:
self.assertThat(str(e),
Contains('Faking db delete for instance ip'))
# assert reservation present in zookeeper and value in iip
zk_node = "%(#)010d" % {'#': int(netaddr.IPAddress(
iip_obj.instance_ip_address))}
zk_path = '%s/api-server/subnets/%s:1.1.1.0/28/%s' %(
self._cluster_id, vn_obj.get_fq_name_str(), zk_node)
mock_zk = self._api_server._db_conn._zk_db._zk_client._zk_client
self.assertEqual(
mock_zk._values[zk_path][0], iip_obj.uuid)
self.assertEqual(
self._vnc_lib.instance_ip_read(
id=iip_obj.uuid).instance_ip_address,
iip_obj.instance_ip_address)
# floating-ip test
fip_pool_obj = FloatingIpPool(
'fip-pool-%s' %(self.id()), parent_obj=vn_obj)
self._vnc_lib.floating_ip_pool_create(fip_pool_obj)
fip_obj = FloatingIp('fip-%s' %(self.id()), parent_obj=fip_pool_obj)
fip_obj.add_project(Project())
self._vnc_lib.floating_ip_create(fip_obj)
# read back to get allocated floating-ip
fip_obj = self._vnc_lib.floating_ip_read(id=fip_obj.uuid)
def err_on_delete(orig_method, *args, **kwargs):
if args[0] == 'floating_ip':
raise Exception("Faking db delete for floating ip")
if args[0] == 'alias_ip':
raise Exception("Faking db delete for alias ip")
return orig_method(*args, **kwargs)
with test_common.patch(
self._api_server._db_conn, 'dbe_delete', err_on_delete):
try:
self._vnc_lib.floating_ip_delete(id=fip_obj.uuid)
self.assertTrue(
False, 'Floating IP delete worked unexpectedly')
except Exception as e:
self.assertThat(str(e),
Contains('Faking db delete for floating ip'))
# assert reservation present in zookeeper and value in fip
zk_node = "%(#)010d" % {'#': int(netaddr.IPAddress(
fip_obj.floating_ip_address))}
zk_path = '%s/api-server/subnets/%s:1.1.1.0/28/%s' %(
self._cluster_id, vn_obj.get_fq_name_str(), zk_node)
mock_zk = self._api_server._db_conn._zk_db._zk_client._zk_client
self.assertEqual(
mock_zk._values[zk_path][0], fip_obj.uuid)
self.assertEqual(
self._vnc_lib.floating_ip_read(
id=fip_obj.uuid).floating_ip_address,
fip_obj.floating_ip_address)
# alias-ip test
aip_pool_obj = AliasIpPool(
'aip-pool-%s' %(self.id()), parent_obj=vn_obj)
self._vnc_lib.alias_ip_pool_create(aip_pool_obj)
aip_obj = AliasIp('aip-%s' %(self.id()), parent_obj=aip_pool_obj)
aip_obj.add_project(Project())
self._vnc_lib.alias_ip_create(aip_obj)
# read back to get allocated alias-ip
aip_obj = self._vnc_lib.alias_ip_read(id=aip_obj.uuid)
with test_common.patch(
self._api_server._db_conn, 'dbe_delete', err_on_delete):
try:
self._vnc_lib.alias_ip_delete(id=aip_obj.uuid)
self.assertTrue(
False, 'Alias IP delete worked unexpectedly')
except Exception as e:
self.assertThat(str(e),
Contains('Faking db delete for alias ip'))
# assert reservation present in zookeeper and value in aip
zk_node = "%(#)010d" % {'#': int(netaddr.IPAddress(
aip_obj.alias_ip_address))}
zk_path = '%s/api-server/subnets/%s:1.1.1.0/28/%s' %(
self._cluster_id, vn_obj.get_fq_name_str(), zk_node)
mock_zk = self._api_server._db_conn._zk_db._zk_client._zk_client
self.assertEqual(
mock_zk._values[zk_path][0], aip_obj.uuid)
self.assertEqual(
self._vnc_lib.alias_ip_read(
id=aip_obj.uuid).alias_ip_address,
aip_obj.alias_ip_address)
# end test_ip_addr_not_released_on_delete_error
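# On delete, the UVE trace must take the fq_name from the message itself
# and not trigger an extra uuid_to_fq_name lookup.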
def test_uve_trace_delete_name_from_msg(self):
test_obj = self._create_test_object()
self.assert_vnc_db_has_ident(test_obj)
db_client = self._api_server._db_conn
uve_delete_trace_invoked = []
uuid_to_fq_name_on_delete_invoked = []
def spy_uve_trace(orig_method, *args, **kwargs):
oper = kwargs['oper'].upper()
obj_uuid = kwargs['uuid']
if oper == 'DELETE' and obj_uuid == test_obj.uuid:
if not uve_delete_trace_invoked:
uve_delete_trace_invoked.append(True)
def assert_on_call(*args, **kwargs):
uuid_to_fq_name_on_delete_invoked.append(True)
with test_common.patch(db_client,
'uuid_to_fq_name', assert_on_call):
return orig_method(*args, **kwargs)
else:
return orig_method(*args, **kwargs)
with test_common.patch(db_client, 'dbe_uve_trace', spy_uve_trace):
self._delete_test_object(test_obj)
gevent.sleep(0.5)
self.assert_vnc_db_doesnt_have_ident(test_obj)
self.assertEqual(len(uve_delete_trace_invoked), 1,
'uve_trace not invoked on object delete')
self.assertEqual(len(uuid_to_fq_name_on_delete_invoked), 0,
'uuid_to_fq_name invoked in delete at dbe_uve_trace')
# end test_uve_trace_delete_name_from_msg
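# A ref-update ADD on an already existing ref must replace the ref
# attribute rather than add a duplicate ref.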
def test_ref_update_with_existing_ref(self):
ipam_obj = NetworkIpam('ipam-%s' % self.id())
self._vnc_lib.network_ipam_create(ipam_obj)
vn_obj = VirtualNetwork('vn-%s' % self.id())
vn_obj.add_network_ipam(ipam_obj,
VnSubnetsType(
[IpamSubnetType(SubnetType('1.1.1.0', 28))]))
self._vnc_lib.virtual_network_create(vn_obj)
self._vnc_lib.ref_update('virtual-network',
vn_obj.uuid,
'network-ipam',
ipam_obj.uuid,
ipam_obj.fq_name,
'ADD',
VnSubnetsType([
IpamSubnetType(SubnetType('1.1.1.0', 28)),
IpamSubnetType(SubnetType('2.2.2.0', 28)),
]))
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
self.assertEqual(len(vn_obj.network_ipam_refs), 1)
ipam_subnets = vn_obj.network_ipam_refs[0]['attr'].ipam_subnets
self.assertEqual(len(ipam_subnets), 2)
self.assertEqual(ipam_subnets[0].subnet.ip_prefix, '1.1.1.0')
self.assertEqual(ipam_subnets[1].subnet.ip_prefix, '2.2.2.0')
# end test_ref_update_with_existing_ref
def test_ref_update_with_resource_type_underscored(self):
vn_obj = VirtualNetwork('%s-vn' % self.id())
ipam_obj = NetworkIpam('%s-vmi' % self.id())
self._vnc_lib.network_ipam_create(ipam_obj)
self._vnc_lib.virtual_network_create(vn_obj)
subnet_type = IpamSubnetType(subnet=SubnetType('1.1.1.0', 2))
self._vnc_lib.ref_update(vn_obj.get_type().replace('-', '_'),
vn_obj.uuid,
ipam_obj.get_type().replace('-', '_'),
ipam_obj.uuid,
ipam_obj.fq_name,
'ADD',
VnSubnetsType([subnet_type]))
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
fq_name = vn_obj.get_network_ipam_refs()[0]['to']
ipam_name = self._vnc_lib.network_ipam_read(fq_name=fq_name).name
self.assertEqual(ipam_obj.name, ipam_name)
def test_fq_name_to_id_with_resource_type_underscored(self):
test_obj = self._create_test_object()
test_uuid = self._vnc_lib.fq_name_to_id(
test_obj.get_type().replace('-', '_'), test_obj.get_fq_name())
# check that format is correct
try:
uuid.UUID(test_uuid)
except ValueError:
self.assertTrue(False, 'Bad form UUID ' + test_uuid)
def test_resource_list_with_resource_type_underscored(self):
test_obj = self._create_test_object()
resources = self._vnc_lib.resource_list(
test_obj.get_type().replace('-', '_'),
obj_uuids=[test_obj.uuid])
resource_ids = [resource['uuid'] for resource in
resources['%ss' % test_obj.get_type()]]
self.assertEqual([test_obj.uuid], resource_ids)
def test_qos_config(self):
qc = QosConfig('qos-config-%s' %(self.id()), Project())
self._vnc_lib.qos_config_create(qc)
qc = self._vnc_lib.qos_config_read(fq_name=qc.get_fq_name())
self.assertEqual(len(qc.get_global_system_config_refs()), 1)
def test_annotations(self):
vn_obj = vnc_api.VirtualNetwork('vn-set-%s' %(self.id()))
vn_obj.set_annotations(
KeyValuePairs([KeyValuePair(key='k1', value='v1'),
KeyValuePair(key=' k2 prime ',
value=json.dumps('v2'))]))
self._vnc_lib.virtual_network_create(vn_obj)
ret_vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
self.assertEqual(len(ret_vn_obj.annotations.key_value_pair), 2)
annotation_check = [a for a in ret_vn_obj.annotations.key_value_pair
if a.key == ' k2 prime ']
self.assertEqual(len(annotation_check), 1)
self.assertEqual(annotation_check[0].value,
json.dumps('v2'))
vn_obj = vnc_api.VirtualNetwork('vn-add-del-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
vn_obj.add_annotations(KeyValuePair(key='k1', value=None))
vn_obj.add_annotations(KeyValuePair(key='k2', value='v2'))
vn_obj.add_annotations(KeyValuePair(key='k3', value=str(300)))
self._vnc_lib.virtual_network_update(vn_obj)
ret_vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
self.assertEqual(len(ret_vn_obj.annotations.key_value_pair), 3)
self.assertEqual(set(['k1', 'k2', 'k3']),
set([a.key for a in ret_vn_obj.annotations.key_value_pair]))
vn_obj.del_annotations(elem_position='k1')
self._vnc_lib.virtual_network_update(vn_obj)
ret_vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
self.assertEqual(len(ret_vn_obj.annotations.key_value_pair), 2)
self.assertEqual(set(['k2', 'k3']),
set([a.key for a in ret_vn_obj.annotations.key_value_pair]))
# end test_annotations
def test_cert_bundle_refresh(self):
bundle_dir = tempfile.mkdtemp(self.id())
try:
with open(bundle_dir+'cert', 'w') as f:
f.write("CERT")
with open(bundle_dir+'ca', 'w') as f:
f.write("CA")
with open(bundle_dir+'key', 'w') as f:
f.write("KEY")
cfgm_common.utils.getCertKeyCaBundle(bundle_dir+'pem',
[bundle_dir+x for x in ['cert', 'ca', 'key']])
with open(bundle_dir+'pem', 'r') as f:
self.assertEqual(f.readlines()[0], 'CERTCAKEY')
# sleep so modifications to cert/ca/key land in a different mtime epoch
gevent.sleep(0.1)
with open(bundle_dir+'cert', 'w') as f:
f.write("CERTNEW")
with open(bundle_dir+'ca', 'w') as f:
f.write("CANEW")
with open(bundle_dir+'key', 'w') as f:
f.write("KEYNEW")
cfgm_common.utils.getCertKeyCaBundle(bundle_dir+'pem',
[bundle_dir+x for x in ['cert', 'ca', 'key']])
with open(bundle_dir+'pem', 'r') as f:
self.assertEqual(f.readlines()[0], 'CERTNEWCANEWKEYNEW')
finally:
os.removedirs(bundle_dir)
# end test_cert_bundle_refresh
def test_name_attribute_in_detail_list_resource(self):
vn_obj = vnc_api.VirtualNetwork('%s-vn' % self.id())
self._vnc_lib.virtual_network_create(vn_obj)
query_params = {
'obj_uuids': vn_obj.uuid,
'detail': True,
}
results = self._vnc_lib._request_server(
rest.OP_GET,
'/virtual-networks',
data=query_params)['virtual-networks']
self.assertEqual(len(results), 1)
vn_dict = results[0]['virtual-network']
self.assertIn('name', vn_dict)
self.assertEqual(vn_dict['name'], vn_obj.fq_name[-1])
def test_bgpvpn_type_assoc_with_network_l2_l3_forwarding_mode(self):
# Create virtual network with forwarding mode set to 'l2' and 'l3'
vn_l2_l3 = self.create_virtual_network('vn-l2-l3-%s' % self.id())
# Create l2 bgpvpn
bgpvpn_l2 = Bgpvpn('bgpvpn-l2-%s' % self.id(), bgpvpn_type='l2')
self._vnc_lib.bgpvpn_create(bgpvpn_l2)
# Create l3 bgpvpn
bgpvpn_l3 = Bgpvpn('bgpvpn-l3-%s' % self.id())
self._vnc_lib.bgpvpn_create(bgpvpn_l3)
# Try to associate an 'l2' bgpvpn with the virtual network
vn_l2_l3.add_bgpvpn(bgpvpn_l2)
self._vnc_lib.virtual_network_update(vn_l2_l3)
vn_l2_l3 = self._vnc_lib.virtual_network_read(id=vn_l2_l3.uuid)
# Try to associate an 'l3' bgpvpn with the virtual network
vn_l2_l3.add_bgpvpn(bgpvpn_l3)
self._vnc_lib.virtual_network_update(vn_l2_l3)
vn_l2_l3 = self._vnc_lib.virtual_network_read(id=vn_l2_l3.uuid)
# Try to change the virtual network forwarding mode to 'l2' only
with ExpectedException(BadRequest):
vn_l2_l3.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l2'))
self._vnc_lib.virtual_network_update(vn_l2_l3)
vn_l2_l3 = self._vnc_lib.virtual_network_read(id=vn_l2_l3.uuid)
# Try to change the virtual network forwarding mode to 'l3' only
with ExpectedException(BadRequest):
vn_l2_l3.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l3'))
self._vnc_lib.virtual_network_update(vn_l2_l3)
def test_bgpvpn_type_assoc_with_network_l2_forwarding_mode(self):
# Create virtual network with forwarding mode set to 'l2' only
vn_l2 = self.create_virtual_network('vn-l2-%s' % self.id())
vn_l2.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l2'))
self._vnc_lib.virtual_network_update(vn_l2)
vn_l2 = self._vnc_lib.virtual_network_read(id=vn_l2.uuid)
# Create l2 bgpvpn
bgpvpn_l2 = Bgpvpn('bgpvpn-l2-%s' % self.id(), bgpvpn_type='l2')
self._vnc_lib.bgpvpn_create(bgpvpn_l2)
# Create l3 bgpvpn
bgpvpn_l3 = Bgpvpn('bgpvpn-l3-%s' % self.id())
self._vnc_lib.bgpvpn_create(bgpvpn_l3)
# Try to associate an 'l2' bgpvpn with the virtual network
vn_l2.add_bgpvpn(bgpvpn_l2)
self._vnc_lib.virtual_network_update(vn_l2)
vn_l2 = self._vnc_lib.virtual_network_read(id=vn_l2.uuid)
# Try to associate an 'l3' bgpvpn with the virtual network
with ExpectedException(BadRequest):
vn_l2.add_bgpvpn(bgpvpn_l3)
self._vnc_lib.virtual_network_update(vn_l2)
vn_l2 = self._vnc_lib.virtual_network_read(id=vn_l2.uuid)
# Try to change the virtual network forwarding mode to 'l3' only
with ExpectedException(BadRequest):
vn_l2.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l3'))
self._vnc_lib.virtual_network_update(vn_l2)
vn_l2 = self._vnc_lib.virtual_network_read(id=vn_l2.uuid)
# Try to change the virtual network forwarding mode to 'l2' and 'l3'
vn_l2.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l2_l3'))
self._vnc_lib.virtual_network_update(vn_l2)
def test_bgpvpn_type_assoc_with_network_l3_forwarding_mode(self):
# Create virtual network with forwarding mode set to 'l3' only
vn_l3 = self.create_virtual_network('vn-l3-%s' % self.id())
vn_l3.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l3'))
self._vnc_lib.virtual_network_update(vn_l3)
vn_l3 = self._vnc_lib.virtual_network_read(id=vn_l3.uuid)
# Create l2 bgpvpn
bgpvpn_l2 = Bgpvpn('bgpvpn-l2-%s' % self.id(), bgpvpn_type='l2')
self._vnc_lib.bgpvpn_create(bgpvpn_l2)
# Create l3 bgpvpn
bgpvpn_l3 = Bgpvpn('bgpvpn-l3-%s' % self.id())
self._vnc_lib.bgpvpn_create(bgpvpn_l3)
# Try to associate an 'l3' bgpvpn with the virtual network
vn_l3.add_bgpvpn(bgpvpn_l3)
self._vnc_lib.virtual_network_update(vn_l3)
vn_l3 = self._vnc_lib.virtual_network_read(id=vn_l3.uuid)
# Try to associate an 'l2' bgpvpn with the virtual network
with ExpectedException(BadRequest):
vn_l3.add_bgpvpn(bgpvpn_l2)
self._vnc_lib.virtual_network_update(vn_l3)
vn_l3 = self._vnc_lib.virtual_network_read(id=vn_l3.uuid)
# Try to change the virtual network forwarding mode to 'l2' only
with ExpectedException(BadRequest):
vn_l3.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l2'))
self._vnc_lib.virtual_network_update(vn_l3)
vn_l3 = self._vnc_lib.virtual_network_read(id=vn_l3.uuid)
# Try to change the virtual network forwarding mode to 'l2' and 'l3'
vn_l3.set_virtual_network_properties(
VirtualNetworkType(forwarding_mode='l2_l3'))
self._vnc_lib.virtual_network_update(vn_l3)
def test_bgpvpn_type_limited_to_l3_for_router_assoc(self):
# Create logical router
lr, _, _, _ = self.create_logical_router(
'lr-%s' % self.id(), nb_of_attached_networks=0)
# Create l2 bgpvpn
bgpvpn_l2 = Bgpvpn('bgpvpn-l2-%s' % self.id(), bgpvpn_type='l2')
self._vnc_lib.bgpvpn_create(bgpvpn_l2)
# Try to associate an 'l2' bgpvpn with the logical router
with ExpectedException(BadRequest):
lr.add_bgpvpn(bgpvpn_l2)
self._vnc_lib.logical_router_update(lr)
def test_bgpvpn_fail_assoc_network_with_gw_router_assoc_to_bgpvpn(self):
# Create one bgpvpn
bgpvpn = Bgpvpn('bgpvpn-%s' % self.id())
self._vnc_lib.bgpvpn_create(bgpvpn)
# Create one virtual network with one logical router as gateway
lr, vns, _, _ = self.create_logical_router('lr-%s' % self.id())
# We attached only one virtual network to the logical router
vn = vns[0]
# Associate the bgpvpn with the logical router
lr.add_bgpvpn(bgpvpn)
self._vnc_lib.logical_router_update(lr)
lr = self._vnc_lib.logical_router_read(id=lr.uuid)
# Then try to set that same bgpvpn on the virtual network
with ExpectedException(BadRequest):
vn.add_bgpvpn(bgpvpn)
self._vnc_lib.virtual_network_update(vn)
def test_bgpvpn_fail_assoc_router_with_network_assoc_to_bgpvpn(self):
# Create one bgpvpn
bgpvpn = Bgpvpn('bgpvpn-%s' % self.id())
self._vnc_lib.bgpvpn_create(bgpvpn)
# Create one virtual network with one logical router as gateway
lr, vns, vmis, _ = self.create_logical_router('lr-%s' % self.id())
# We attached only one virtual network to the logical router
vn = vns[0]
vmi = vmis[0]
# Associate the bgpvpn to the virtual network
vn.add_bgpvpn(bgpvpn)
self._vnc_lib.virtual_network_update(vn)
lr = self._vnc_lib.logical_router_read(id=lr.uuid)
# Then try to set that same bgpvpn on the logical router
with ExpectedException(BadRequest):
lr.add_bgpvpn(bgpvpn)
self._vnc_lib.logical_router_update(lr)
lr = self._vnc_lib.logical_router_read(id=lr.uuid)
# Detach the logical router from the virtual network
lr.del_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_update(lr)
lr = self._vnc_lib.logical_router_read(id=lr.uuid)
# Associate the bgpvpn to the logical router
lr.add_bgpvpn(bgpvpn)
self._vnc_lib.logical_router_update(lr)
lr = self._vnc_lib.logical_router_read(id=lr.uuid)
# Try to reattach the virtual network to the logical router
with ExpectedException(BadRequest):
lr.add_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_update(lr)
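    # A minimal sketch (illustration only, not api-server code; the helper
    # name is hypothetical) of the bgpvpn association rules the tests above
    # exercise. The mutual exclusion between a network and its gateway
    # router, checked just above, is a separate rule.
    @staticmethod
    def _bgpvpn_assoc_allowed_sketch(bgpvpn_type, forwarding_mode,
                                     target_is_router=False):
        # Logical routers only accept 'l3' bgpvpns; a virtual network accepts
        # a bgpvpn only if its forwarding mode covers the bgpvpn type.
        if target_is_router:
            return bgpvpn_type == 'l3'
        allowed_modes = {'l2': ('l2', 'l2_l3'),
                         'l3': ('l3', 'l2_l3')}
        return forwarding_mode in allowed_modes.get(bgpvpn_type, ())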
def test_create_singleton_entry_with_zk_alloc_exist(self):
api_server = self._server_info['api_server']
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
orig_dbe_alloc = api_server._db_conn.dbe_alloc
try:
def err_dbe_alloc(*args, **kwargs):
return (False, (409, 'Faking zk ResourceExistsError'))
api_server._db_conn.dbe_alloc = err_dbe_alloc
with ExpectedException(HttpError):
api_server.create_singleton_entry(vn_obj)
finally:
api_server._db_conn.dbe_alloc = orig_dbe_alloc
# end test_create_singleton_entry_with_zk_alloc_exist
def test_tcp_keepalive_options(self):
api_server = self._server_info['api_server']
# Check if the TCP keepalive has been set in the api server args
self.assertThat(api_server._args.tcp_keepalive_enable, Equals(True))
# Check if other TCP keepalive options are present in args.
self.assertIn('tcp_keepalive_idle_time', api_server._args)
self.assertIn('tcp_keepalive_interval', api_server._args)
self.assertIn('tcp_keepalive_probes', api_server._args)
# end test_tcp_keepalive_options
# end class TestVncCfgApiServer
class TestStaleLockRemoval(test_case.ApiServerTestCase):
STALE_LOCK_SECS = '0.2'
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestStaleLockRemoval, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'stale_lock_seconds',
cls.STALE_LOCK_SECS)])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestStaleLockRemoval, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_stale_fq_name_lock_removed_on_partial_create(self):
        # 1. partially create an object, i.e. zk lock taken but the cassandra
        #    write silently skipped (simulating a process restart).
        # 2. create the same object again, expect RefsExistError.
        # 3. wait for stale_lock_seconds and attempt to create the same
        #    object; it should now succeed.
def stub(*args, **kwargs):
return (True, '')
with test_common.flexmocks([
(self._api_server._db_conn, 'dbe_create', stub),
(self._api_server.get_resource_class('virtual-network'),
'post_dbe_create', stub)]):
self._create_test_object()
with ExpectedException(RefsExistError), \
mock.patch('vnc_cfg_api_server.api_server'\
'.VncApiServer.get_args') as get_args_patch:
get_args_patch.return_value.stale_lock_seconds = sys.maxsize
self._create_test_object()
gevent.sleep(float(self.STALE_LOCK_SECS))
self._create_test_object()
# end test_stale_fq_name_lock_removed_on_partial_create
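    # A minimal sketch (illustration only; names are hypothetical) of the
    # decision these tests rely on: a leftover zookeeper fq-name lock is
    # treated as stale, and may be reclaimed, once it is older than the
    # configured stale_lock_seconds.
    @staticmethod
    def _lock_is_stale_sketch(lock_created_at, now, stale_lock_seconds):
        # Both timestamps are seconds since the epoch.
        return (now - lock_created_at) > float(stale_lock_seconds)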
def test_stale_fq_name_lock_removed_on_partial_delete(self):
        # 1. partially delete an object, i.e. removed from cassandra but
        #    silently left in zk (simulating a process restart).
        # 2. create the same object again, expect RefsExistError.
        # 3. wait for stale_lock_seconds and attempt to create the same
        #    object; it should now succeed.
def stub(*args, **kwargs):
return (True, '')
vn_obj = self._create_test_object()
with test_common.flexmocks([
(self._api_server._db_conn, 'dbe_release', stub)]):
self._vnc_lib.virtual_network_delete(id=vn_obj.uuid)
with ExpectedException(RefsExistError), \
mock.patch('vnc_cfg_api_server.api_server'\
'.VncApiServer.get_args') as get_args_patch:
get_args_patch.return_value.stale_lock_seconds = sys.maxsize
self._create_test_object()
gevent.sleep(float(self.STALE_LOCK_SECS))
self._create_test_object()
# end test_stale_fq_name_lock_removed_on_partial_delete
def test_stale_fq_name_lock_removed_coverage(self):
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
vn_obj.__dict__['id_perms'] = {}
vn_UUID = uuid.uuid4()
# create zk-node
self._api_server._db_conn.set_uuid(
obj_type=vn_obj._type,
obj_dict=vn_obj.__dict__,
id=vn_UUID,
do_lock=True)
# assert we hit the zk-node on re-create
with ExpectedException(ResourceExistsError, ".*at zookeeper.*"):
self._api_server._db_conn.set_uuid(
obj_type=vn_obj._type,
obj_dict=vn_obj.__dict__,
id=vn_UUID,
do_lock=True)
# create entry in cassandra too and assert
# not a stale lock on re-create
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
with uuid_cf.patch_row(str(vn_UUID),
new_columns={'fq_name':json.dumps(vn_obj.fq_name),
'type':json.dumps(vn_obj._type)}):
with ExpectedException(ResourceExistsError, ".*at cassandra.*"):
self._api_server._db_conn.set_uuid(
obj_type=vn_obj._type,
obj_dict=vn_obj.__dict__,
id=vn_UUID,
do_lock=True)
self._api_server._db_conn._object_db.cache_uuid_to_fq_name_del(
str(vn_UUID))
# sleep and re-create and now it should be fine
gevent.sleep(float(self.STALE_LOCK_SECS))
self._api_server._db_conn.set_uuid(
obj_type=vn_obj._type,
obj_dict=vn_obj.__dict__,
id=vn_UUID,
do_lock=True)
# end test_stale_fq_name_lock_removed_coverage
# end TestStaleLockRemoval
class TestVncCfgApiServerRequests(test_case.ApiServerTestCase):
""" Tests to verify the max_requests config parameter of api-server."""
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestVncCfgApiServerRequests, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'max_requests', 10)])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestVncCfgApiServerRequests, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def api_requests(self, orig_vn_read, count, vn_name):
self.blocked = False
api_server = self._server_info['api_server']
def slow_response_on_vn_read(obj_type, *args, **kwargs):
if obj_type == 'virtual_network':
while self.blocked:
gevent.sleep(1)
return orig_vn_read(obj_type, *args, **kwargs)
api_server._db_conn._object_db.object_read = slow_response_on_vn_read
logger.info("Creating a test VN object.")
test_obj = self.create_virtual_network(vn_name, '1.1.1.0/24')
logger.info("Making max_requests(%s) to api server" % (count - 1))
def vn_read():
self._vnc_lib.virtual_network_read(id=test_obj.uuid)
gevent.sleep(0)
self.blocked = True
for i in range(count):
gevent.spawn(vn_read)
gevent.sleep(1)
def test_max_api_requests(self):
# Test to make sure api-server accepts requests within max_api_requests
self.wait_till_api_server_idle()
        # When requests are pipelined, bottle calculates the response
        # Content-Length only once (see _cast() in bottle.py for 'out' as
        # bytes). Without resetting it as below, the read of the default
        # network-ipam during create_vn would reuse the length computed for
        # the vn read and deserialization would fail.
api_server = self._server_info['api_server']
def reset_response_content_length():
if 'Content-Length' in bottle.response:
del bottle.response['Content-Length']
api_server.api_bottle.add_hook('after_request', reset_response_content_length)
orig_vn_read = api_server._db_conn._object_db.object_read
try:
vn_name = self.id() + '5testvn1'
self.api_requests(orig_vn_read, 5, vn_name)
logger.info("Making one more requests well within the max_requests to api server")
vn_name = self.id() + 'testvn1'
try:
greenlet = gevent.spawn(self.create_virtual_network, vn_name, '10.1.1.0/24')
gevent.sleep(0)
vn_obj = greenlet.get(timeout=3)
except gevent.timeout.Timeout as e:
self.assertFalse(greenlet.successful(), 'Request failed unexpectedly')
else:
self.assertEqual(vn_obj.name, vn_name)
finally:
api_server._db_conn._object_db.object_read = orig_vn_read
self.blocked = False
# Test to make sure api-server rejects requests over max_api_requests
self.wait_till_api_server_idle()
api_server = self._server_info['api_server']
orig_vn_read = api_server._db_conn._object_db.object_read
try:
vn_name = self.id() + '11testvn2'
self.api_requests(orig_vn_read, 11, vn_name)
logger.info("Making one more requests (max_requests + 1) to api server")
try:
vn_name = self.id() + 'testvn2'
greenlet = gevent.spawn(self.create_virtual_network, vn_name, '10.1.1.0/24')
gevent.sleep(0)
greenlet.get(timeout=3)
except gevent.timeout.Timeout as e:
logger.info("max_requests + 1 failed as expected.")
                self.assertFalse(greenlet.successful(), 'Request should not have succeeded')
else:
self.assertTrue(False, 'Request succeeded unexpectedly')
finally:
api_server._db_conn._object_db.object_read = orig_vn_read
self.blocked = False
# end class TestVncCfgApiServerRequests
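# A minimal sketch (illustration only, not the api-server implementation) of
# the admission behaviour exercised by TestVncCfgApiServerRequests: a request
# is served while fewer than max_requests requests are in flight, and request
# number max_requests + 1 is rejected until a slot frees up.
def _admit_request_sketch(in_flight, max_requests):
    # Returns True when the request can be handled, False when the server is
    # saturated and the request should be rejected.
    return in_flight < max_requests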
class TestLocalAuth(test_case.ApiServerTestCase):
_rbac_role = 'admin'
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
from keystonemiddleware import auth_token
class FakeAuthProtocol(object):
_test_case = cls
def __init__(self, app, *args, **kwargs):
self._app = app
# end __init__
def __call__(self, env, start_response):
                # in multi-tenancy mode, only the admin role is admitted
                # by the api-server until full RBAC support
env['HTTP_X_ROLE'] = self._test_case._rbac_role
return self._app(env, start_response)
# end __call__
def get_admin_token(self):
return None
# end get_admin_token
# end class FakeAuthProtocol
super(TestLocalAuth, cls).setUpClass(
extra_config_knobs=[
('DEFAULTS', 'auth', 'keystone'),
('DEFAULTS', 'multi_tenancy', True),
('DEFAULTS', 'listen_ip_addr', '0.0.0.0'),
('KEYSTONE', 'admin_user', 'foo'),
('KEYSTONE', 'admin_password', 'bar'),],
extra_mocks=[
(auth_token, 'AuthProtocol', FakeAuthProtocol),
])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestLocalAuth, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_local_auth_on_8095(self):
from requests.auth import HTTPBasicAuth
admin_port = self._api_server._args.admin_port
# equivalent to curl -u foo:bar http://localhost:8095/virtual-networks
logger.info("Positive case")
url = 'http://localhost:%s/virtual-networks' %(admin_port)
resp = requests.get(url, auth=HTTPBasicAuth('foo', 'bar'))
self.assertThat(resp.status_code, Equals(200))
logger.info("Negative case without header")
resp = requests.get(url)
self.assertThat(resp.status_code, Equals(401))
self.assertThat(resp.text, Contains('HTTP_AUTHORIZATION header missing'))
logger.info("Negative case with wrong creds")
resp = requests.get(url, auth=HTTPBasicAuth('bar', 'foo'))
self.assertThat(resp.status_code, Equals(401))
def test_doc_auth(self):
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
        # equivalent to curl http://<listen_ip>:<listen_port>/documentation/index.html
logger.info("Positive case")
def fake_static_file(*args, **kwargs):
return
with test_common.patch(
bottle, 'static_file', fake_static_file):
url = 'http://%s:%s/documentation/index.html' %(listen_ip, listen_port)
resp = requests.get(url)
self.assertThat(resp.status_code, Equals(200))
logger.info("Negative case without Documentation")
url = 'http://%s:%s/virtual-networks' %(listen_ip, listen_port)
orig_rbac_role = TestLocalAuth._rbac_role
try:
TestLocalAuth._rbac_role = 'foobar'
resp = requests.get(url)
self.assertThat(resp.status_code, Equals(403))
finally:
TestLocalAuth._rbac_role = orig_rbac_role
# end class TestLocalAuth
class TestExtensionApi(test_case.ApiServerTestCase):
test_case = None
class ResourceApiDriver(vnc_plugin_base.ResourceApi):
def __init__(self, *args, **kwargs):
pass
# end __init__
def transform_request(self, request):
# add/del/mod envvar
request.environ['X_TEST_DUMMY'] = 'foo'
request.environ['HTTP_X_CONTRAIL_USERAGENT'] = 'bar'
del request.environ['SERVER_SOFTWARE']
# /virtual-networks -> virtual-network
obj_type = request.path[1:-1]
if request.method == 'POST' and obj_type == 'virtual-network':
obj_name = request.json[obj_type]['fq_name'][-1]
if 'transform-create' in obj_name:
# add/del/mod body
request.json[obj_type]['dummy_field'] = 'foo'
request.json[obj_type]['fq_name'][-1] = obj_name + '-foo'
del request.json[obj_type]['uuid']
elif request.method == 'GET':
request.environ['QUERY_STRING'] = \
request.environ['QUERY_STRING'].replace('replace-me','')
# end transform_request
def validate_request(self, request):
# /virtual-networks -> virtual-network
obj_type = request.path[1:-1]
if request.method == 'POST' and obj_type == 'virtual-network':
obj_name = request.json[obj_type]['fq_name'][-1]
if 'validate-create' in obj_name:
raise bottle.abort(456, 'invalidating create request')
elif request.method == 'GET':
mch = re.match('/virtual-network/.*', request.path)
if (mch and
'fail-me' in request.environ['QUERY_STRING']):
raise bottle.abort(456, 'invalidating read request')
elif request.method == 'PUT':
mch = re.match('/virtual-network/.*', request.path)
if (mch and
request.json['virtual-network'].get('is_shared')):
raise bottle.abort(456, 'invalidating update request')
elif request.method == 'DELETE':
mch = re.match('/virtual-network/.*', request.path)
if mch:
raise bottle.abort(456, 'invalidating delete request')
# end validate_request
def transform_response(self, request, response):
if request.method == 'POST':
obj_type = request.path[1:-1]
if obj_type != 'virtual-network':
return
obj_name = request.json[obj_type]['fq_name'][-1]
if 'transform-create' in obj_name:
TestExtensionApi.test_case.assertIn('X_TEST_DUMMY', list(request.environ.keys()))
TestExtensionApi.test_case.assertNotIn('SERVER_SOFTWARE', list(request.environ.keys()))
TestExtensionApi.test_case.assertThat(request.environ['HTTP_X_CONTRAIL_USERAGENT'],
Equals('bar'))
bottle.response.status = '234 Transformed Response'
response[obj_type]['extra_field'] = 'foo'
# end transform_response
# end class ResourceApiDriver
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
test_common.setup_extra_flexmock(
[(stevedore.extension.ExtensionManager, '__new__',
FakeExtensionManager)])
FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.resourceApi'] = \
[TestExtensionApi.ResourceApiDriver]
super(TestExtensionApi, cls).setUpClass(extra_mocks=[
(stevedore.extension.ExtensionManager, '__new__',
FakeExtensionManager)])
# end setUpClass
@classmethod
def tearDownClass(cls):
FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.resourceApi'] = \
None
FakeExtensionManager._ext_objs = []
logger.removeHandler(cls.console_handler)
super(TestExtensionApi, cls).tearDownClass()
# end tearDownClass
def setUp(self):
TestExtensionApi.test_case = self
super(TestExtensionApi, self).setUp()
# end setUp
def test_transform_request(self):
# create
obj = VirtualNetwork('transform-create')
obj_request_uuid = str(uuid.uuid4())
body_dict = {'virtual-network':
{'fq_name': obj.fq_name,
'parent_type': 'project',
'uuid': obj_request_uuid}}
status, content = self._http_post('/virtual-networks',
body=json.dumps(body_dict))
self.assertThat(status, Equals(234))
obj_dict = json.loads(content)['virtual-network']
obj_allocd_uuid = obj_dict['uuid']
self.assertThat(obj_allocd_uuid, Not(Equals(obj_request_uuid)))
self.assertThat(obj_dict['fq_name'][-1], Equals('transform-create-foo'))
self.assertThat(obj_dict['extra_field'], Equals('foo'))
# read
status, content = self._http_get('/virtual-networks',
query_params={'obj_uuids':'replace-me'+obj_dict['uuid']})
self.assertThat(status, Equals(200))
objs_dict = json.loads(content)['virtual-networks']
self.assertThat(len(objs_dict), Equals(1))
self.assertThat(objs_dict[0]['fq_name'][-1],
Equals('transform-create-foo'))
# update
body_dict = {'virtual-network':
{'display_name': 'foo'}}
status, content = self._http_put('/virtual-network/'+obj_allocd_uuid,
body=json.dumps(body_dict))
obj = self._vnc_lib.virtual_network_read(id=obj_allocd_uuid)
self.assertThat(obj.display_name, Equals('foo'))
# end test_transform_request
def test_validate_request(self):
self.ignore_err_in_log = True
# create
obj = VirtualNetwork('validate-create')
body_dict = {'virtual-network':
{'fq_name': obj.fq_name,
'parent_type': 'project'}}
status, content = self._http_post('/virtual-networks',
body=json.dumps(body_dict))
self.assertThat(status, Equals(456))
self.assertThat(content, Contains('invalidating create request'))
with ExpectedException(NoIdError) as e:
self._vnc_lib.virtual_network_read(fq_name=obj.fq_name)
# read
obj = self._create_test_object()
status, content = self._http_get('/virtual-network/'+obj.uuid,
query_params={'fail-me': 1})
self.assertThat(status, Equals(456))
self.assertThat(content, Contains('invalidating read request'))
# update
obj.is_shared = True
body_dict = {'virtual-network':
{'is_shared': True}}
status, content = self._http_put('/virtual-network/'+obj.uuid,
body=json.dumps(body_dict))
self.assertThat(status, Equals(456))
self.assertThat(content, Contains('invalidating update request'))
obj = self._vnc_lib.virtual_network_read(id=obj.uuid)
self.assertThat(obj.is_shared, Equals(False))
# delete
status, content = self._http_delete('/virtual-network/'+obj.uuid,
body=None)
self.assertThat(status, Equals(456))
self.assertThat(content, Contains('invalidating delete request'))
obj = self._vnc_lib.virtual_network_read(id=obj.uuid)
# end test_validate_request
# end class TestExtensionApi
class TestPropertyWithList(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestPropertyWithList, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestPropertyWithList, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def assert_kvpos(self, rd_ff_proto, idx, k, v, pos):
self.assertEqual(rd_ff_proto[idx][0]['protocol'], k)
self.assertEqual(rd_ff_proto[idx][0]['port'], v)
self.assertEqual(rd_ff_proto[idx][1], pos)
def test_set_in_object(self):
vmi_obj = VirtualMachineInterface('vmi-%s' %(self.id()),
parent_obj=Project())
vmi_obj.set_virtual_machine_interface_fat_flow_protocols(
FatFlowProtocols([ProtocolType(protocol='p1', port=1),
ProtocolType(protocol='p2', port=2)]))
# needed for backend type-specific handling
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        # ensure elements are stored in list order
rd_vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid)
rd_ff_proto = rd_vmi_obj.virtual_machine_interface_fat_flow_protocols
self.assertThat(
rd_ff_proto.fat_flow_protocol[0].protocol, Equals('p1'))
self.assertThat(
rd_ff_proto.fat_flow_protocol[1].protocol, Equals('p2'))
# verify db storage format (wrapper/container type stripped in storage)
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
cols = uuid_cf.get(vmi_obj.uuid,
column_start='propl:virtual_machine_interface_fat_flow_protocols:',
column_finish='propl:virtual_machine_interface_fat_flow_protocols;')
col_name_0, col_val_0 = cols.popitem(last=False)
col_name_1, col_val_1 = cols.popitem(last=False)
self.assertThat(col_name_0.split(':')[-1], Equals('0'))
self.assertThat(json.loads(col_val_0)['protocol'], Equals('p1'))
self.assertThat(col_name_1.split(':')[-1], Equals('1'))
self.assertThat(json.loads(col_val_1)['protocol'], Equals('p2'))
# update and clobber old entries
#vmi_obj.set_virtual_machine_interface_bindings([])
vmi_obj.set_virtual_machine_interface_fat_flow_protocols(
FatFlowProtocols())
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
rd_vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid)
rd_ff_proto = rd_vmi_obj.virtual_machine_interface_fat_flow_protocols
self.assertIsNone(rd_ff_proto)
with ExpectedException(cassandra_fake_impl.NotFoundException) as e:
cols = uuid_cf.get(vmi_obj.uuid,
column_start='propl:virtual_machine_interface_fat_flow_protocols:',
column_finish='propl:virtual_machine_interface_fat_flow_protocols;')
# end test_set_in_object
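    # A minimal sketch (illustration only) of the storage layout asserted
    # above: each element of a property list appears to be stored in its own
    # cassandra column named 'propl:<field>:<position>', with the bare
    # element JSON as the value and the wrapper/container type stripped.
    @staticmethod
    def _propl_column_name_sketch(field, position):
        return 'propl:%s:%s' % (field, position)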
def test_add_del_in_object(self):
vmi_obj = VirtualMachineInterface('vmi-%s' %(self.id()),
parent_obj=Project())
for proto,port,pos in [('proto2', 2, 'pos1'), ('proto1', 1, 'pos2'),
('proto3', 3, 'pos3'), ('proto4', 4, None)]:
vmi_obj.add_virtual_machine_interface_fat_flow_protocols(
ProtocolType(protocol=proto, port=port), pos)
# needed for backend type-specific handling
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
rd_ff_proto = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid).virtual_machine_interface_fat_flow_protocols
self.assertEqual(len(rd_ff_proto.fat_flow_protocol), 4)
self.assertEqual(rd_ff_proto.fat_flow_protocol[0].protocol, 'proto4')
self.assertEqual(rd_ff_proto.fat_flow_protocol[0].port, 4)
self.assertEqual(rd_ff_proto.fat_flow_protocol[1].protocol, 'proto2')
self.assertEqual(rd_ff_proto.fat_flow_protocol[1].port, 2)
self.assertEqual(rd_ff_proto.fat_flow_protocol[2].protocol, 'proto1')
self.assertEqual(rd_ff_proto.fat_flow_protocol[2].port, 1)
self.assertEqual(rd_ff_proto.fat_flow_protocol[3].protocol, 'proto3')
self.assertEqual(rd_ff_proto.fat_flow_protocol[3].port, 3)
for pos in ['pos1', 'pos3']:
vmi_obj.del_virtual_machine_interface_fat_flow_protocols(
elem_position=pos)
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
rd_ff_proto = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid).virtual_machine_interface_fat_flow_protocols
self.assertEqual(len(rd_ff_proto.fat_flow_protocol), 2)
self.assertEqual(rd_ff_proto.fat_flow_protocol[0].protocol, 'proto4')
self.assertEqual(rd_ff_proto.fat_flow_protocol[0].port, 4)
self.assertEqual(rd_ff_proto.fat_flow_protocol[1].protocol, 'proto1')
self.assertEqual(rd_ff_proto.fat_flow_protocol[1].port, 1)
# end test_add_del_in_object
def test_prop_list_add_delete_get_element(self):
vmi_obj = VirtualMachineInterface('vmi-%s' %(self.id()),
parent_obj=Project())
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
# 1. Add tests
# add with element as type
self._vnc_lib.prop_list_add_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols',
ProtocolType('proto1', 1))
# add with element as dict
self._vnc_lib.prop_list_add_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols',
{'protocol':'proto2', 'port':2})
        # verify the adds above, made without a position, were given uuid-based positions
rd_ff_proto = self._vnc_lib.prop_list_get(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols')
self.assertEqual(len(rd_ff_proto), 2)
# add with position specified
self._vnc_lib.prop_list_add_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols',
{'protocol':'proto3', 'port':3}, '0.1')
self._vnc_lib.prop_list_add_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols',
{'protocol':'proto4', 'port':4}, '0.0')
self._vnc_lib.prop_list_add_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols',
{'protocol':'proto5', 'port':5}, '.00')
# 2. Get tests (specific and all elements)
# get specific element
rd_ff_proto = self._vnc_lib.prop_list_get(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols', '0.0')
self.assertEqual(len(rd_ff_proto), 1)
self.assert_kvpos(rd_ff_proto, 0, 'proto4', 4, '0.0')
# get all elements
rd_ff_proto = self._vnc_lib.prop_list_get(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols')
self.assertEqual(len(rd_ff_proto), 5)
self.assert_kvpos(rd_ff_proto, 0, 'proto5', 5, '.00')
self.assert_kvpos(rd_ff_proto, 1, 'proto4', 4, '0.0')
self.assert_kvpos(rd_ff_proto, 2, 'proto3', 3, '0.1')
self.assertTrue(
isinstance(uuid.UUID(rd_ff_proto[-1][1]), uuid.UUID),
'Auto-generated position not of uuid form')
# 3. Delete tests - middle and edges
self._vnc_lib.prop_list_delete_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols', '0.1')
self._vnc_lib.prop_list_delete_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols', '.00')
self._vnc_lib.prop_list_delete_element(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols', rd_ff_proto[-1][1])
rd_ff_proto = self._vnc_lib.prop_list_get(vmi_obj.uuid,
'virtual_machine_interface_fat_flow_protocols')
self.assertEqual(len(rd_ff_proto), 2)
self.assert_kvpos(rd_ff_proto, 0, 'proto4', 4, '0.0')
self.assertTrue(
isinstance(uuid.UUID(rd_ff_proto[-1][1]), uuid.UUID),
'Deleted incorrect element')
# end test_prop_list_add_delete_get_element
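    # A minimal sketch (illustration only): elements come back ordered by the
    # byte-wise (lexicographic) sort of their position strings, which is why
    # '.00' sorts before '0.0' and '0.1' above and why elements added without
    # an explicit position (uuid positions) sort after them, e.g.
    # sorted(['0.1', '.00', '0.0']) == ['.00', '0.0', '0.1'].
    @staticmethod
    def _prop_list_order_sketch(positions):
        return sorted(positions)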
def test_set_in_resource_body_rest_api(self):
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
url = 'http://%s:%s/virtual-machine-interfaces' %(
listen_ip, listen_port)
vmi_body = {
'virtual-machine-interface': {
'fq_name': ['default-domain',
'default-project',
'vmi-%s' %(self.id())],
'parent_type': 'project',
'virtual_machine_interface_fat_flow_protocols': {
'fat_flow_protocol': [
{'protocol': 'proto1', 'port': 1},
{'protocol': 'proto1', 'port': 2},
{'protocol': 'proto2', 'port': 1},
{'protocol': 'proto2', 'port': 2},
]
},
'virtual_network_refs': [
{'to': ['default-domain',
'default-project',
'default-virtual-network']}
]
}
}
vmi_resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(vmi_body))
vmi_uuid = json.loads(
vmi_resp.content)['virtual-machine-interface']['uuid']
vmi_url = 'http://%s:%s/virtual-machine-interface/%s' %(
listen_ip, listen_port, vmi_uuid)
vmi_read = json.loads(
requests.get(vmi_url).content)['virtual-machine-interface']
rd_ff_proto = vmi_read['virtual_machine_interface_fat_flow_protocols']
self.assertEqual(len(rd_ff_proto['fat_flow_protocol']), 4)
self.assertEqual(rd_ff_proto['fat_flow_protocol'][0]['protocol'], 'proto1')
self.assertEqual(rd_ff_proto['fat_flow_protocol'][1]['protocol'], 'proto1')
self.assertEqual(rd_ff_proto['fat_flow_protocol'][2]['protocol'], 'proto2')
self.assertEqual(rd_ff_proto['fat_flow_protocol'][3]['protocol'], 'proto2')
vmi_body = {
'virtual-machine-interface': {
'virtual_machine_interface_fat_flow_protocols': {
'fat_flow_protocol': [
{'protocol': 'proto3', 'port': 3}
]
}
}
}
vmi_resp = requests.put(vmi_url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(vmi_body))
vmi_read = json.loads(
requests.get(vmi_url).content)['virtual-machine-interface']
rd_ff_proto = vmi_read['virtual_machine_interface_fat_flow_protocols']
self.assertEqual(len(rd_ff_proto['fat_flow_protocol']), 1)
self.assertEqual(rd_ff_proto['fat_flow_protocol'][0]['protocol'], 'proto3')
# end test_set_in_resource_body_rest_api
def _rest_vmi_create(self):
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
url = 'http://%s:%s/virtual-machine-interfaces' %(
listen_ip, listen_port)
vmi_body = {
'virtual-machine-interface': {
'fq_name': ['default-domain',
'default-project',
'vmi-%s' %(self.id())],
'parent_type': 'project',
'virtual_network_refs': [
{'to': ['default-domain',
'default-project',
'default-virtual-network']}
]
}
}
vmi_resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(vmi_body))
vmi_uuid = json.loads(
vmi_resp.content)['virtual-machine-interface']['uuid']
return vmi_uuid
# end _rest_vmi_create
def test_prop_list_add_delete_get_rest_api(self):
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
vmi_uuid = self._rest_vmi_create()
prop_coll_update_url = 'http://%s:%s/prop-collection-update' %(
listen_ip, listen_port)
prop_coll_get_url = 'http://%s:%s/prop-collection-get' %(
listen_ip, listen_port)
# 1. Add elements
requests.post(prop_coll_update_url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(
{'uuid': vmi_uuid,
'updates': [
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'add',
'value': {'protocol': 'proto1', 'port': 1} },
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'add',
'value': {'protocol': 'proto2', 'port': 2},
'position': '0.0'},
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'add',
'value': {'protocol': 'proto3', 'port': 3},
'position': '.01'} ] }))
# 2. Get elements (all and specific)
# get all elements
query_params = {'uuid': vmi_uuid,
'fields': ','.join(
['virtual_machine_interface_fat_flow_protocols'])}
rd_ff_proto = json.loads(requests.get(prop_coll_get_url,
params=query_params).content)['virtual_machine_interface_fat_flow_protocols']
self.assertEqual(len(rd_ff_proto), 3)
self.assertEqual(rd_ff_proto[0][0]['protocol'], 'proto3')
self.assertEqual(rd_ff_proto[0][0]['port'], 3)
self.assertEqual(rd_ff_proto[0][1], '.01')
self.assertEqual(rd_ff_proto[2][0]['protocol'], 'proto1')
self.assertEqual(rd_ff_proto[2][0]['port'], 1)
self.assertTrue(
isinstance(uuid.UUID(rd_ff_proto[2][1]), uuid.UUID),
'Autogenerated position not of uuid form')
# get specific element
query_params = {'uuid': vmi_uuid,
'fields': ','.join(
['virtual_machine_interface_fat_flow_protocols']),
'position': '.01'}
rd_ff_proto = json.loads(requests.get(prop_coll_get_url,
params=query_params).content)['virtual_machine_interface_fat_flow_protocols']
self.assertEqual(len(rd_ff_proto), 1)
self.assertEqual(rd_ff_proto[0][0]['protocol'], 'proto3')
self.assertEqual(rd_ff_proto[0][0]['port'], 3)
self.assertEqual(rd_ff_proto[0][1], '.01')
# 3. Modify specific elements
requests.post(prop_coll_update_url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(
{'uuid': vmi_uuid,
'updates': [
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'modify',
'value': {'protocol': 'proto2', 'port': 21},
'position': '0.0'},
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'modify',
'value': {'protocol': 'proto3', 'port': 31},
'position': '.01'} ] }))
query_params = {'uuid': vmi_uuid,
'fields': ','.join(
['virtual_machine_interface_fat_flow_protocols'])}
rd_ff_proto = json.loads(requests.get(prop_coll_get_url,
params=query_params).content)['virtual_machine_interface_fat_flow_protocols']
self.assertEqual(len(rd_ff_proto), 3)
self.assertEqual(rd_ff_proto[0][0]['protocol'], 'proto3')
self.assertEqual(rd_ff_proto[0][0]['port'], 31)
self.assertEqual(rd_ff_proto[0][1], '.01')
self.assertEqual(rd_ff_proto[1][0]['protocol'], 'proto2')
self.assertEqual(rd_ff_proto[1][0]['port'], 21)
# 4. Delete (and add) elements
requests.post(prop_coll_update_url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(
{'uuid': vmi_uuid,
'updates': [
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'delete',
'position': '.01'},
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'delete',
'position': '0.0'},
{'field': 'virtual_machine_interface_fat_flow_protocols',
'operation': 'add',
'value': {'protocol': 'proto4', 'port': 4},
'position': '.01'} ] }))
query_params = {'uuid': vmi_uuid,
'fields': ','.join(
['virtual_machine_interface_fat_flow_protocols'])}
rd_ff_proto = json.loads(requests.get(prop_coll_get_url,
params=query_params).content)['virtual_machine_interface_fat_flow_protocols']
self.assertEqual(len(rd_ff_proto), 2)
self.assertEqual(rd_ff_proto[0][0]['protocol'], 'proto4')
self.assertEqual(rd_ff_proto[0][0]['port'], 4)
self.assertEqual(rd_ff_proto[0][1], '.01')
self.assertEqual(rd_ff_proto[1][0]['protocol'], 'proto1')
self.assertEqual(rd_ff_proto[1][0]['port'], 1)
# end test_prop_list_add_delete_get_rest_api
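    # A minimal sketch (illustration only; the helper name is hypothetical)
    # of the prop-collection-update request body used above: a target uuid
    # plus a list of per-field operations ('add', 'modify' or 'delete'),
    # each with an optional value and position.
    @staticmethod
    def _prop_coll_update_body_sketch(obj_uuid, field, operation,
                                      value=None, position=None):
        update = {'field': field, 'operation': operation}
        if value is not None:
            update['value'] = value
        if position is not None:
            update['position'] = position
        return {'uuid': obj_uuid, 'updates': [update]}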
def test_prop_list_wrong_type_should_fail(self):
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
vmi_uuid = self._rest_vmi_create()
prop_coll_update_url = 'http://%s:%s/prop-collection-update' %(
listen_ip, listen_port)
prop_coll_get_url = 'http://%s:%s/prop-collection-get' %(
listen_ip, listen_port)
# 1. Try adding elements to non-prop-list field
response = requests.post(prop_coll_update_url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(
{'uuid': vmi_uuid,
'updates': [
{'field': 'display_name',
'operation': 'add',
'value': {'key': 'k3', 'value': 'v3'},
'position': '.01'} ] }))
self.assertEqual(response.status_code, 400)
# 2. Try getting elements from non-prop-list field
query_params = {'uuid': vmi_uuid,
'fields': ','.join(
['display_name'])}
response = requests.get(prop_coll_get_url,
params=query_params)
self.assertEqual(response.status_code, 400)
# end test_prop_list_wrong_type_should_fail
def test_resource_list_with_field_prop_list(self):
vmi_obj = VirtualMachineInterface('vmi-%s' % (self.id()),
parent_obj=Project())
fname = 'virtual_machine_interface_fat_flow_protocols'
# needed for backend type-specific handling
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmis = self._vnc_lib.virtual_machine_interfaces_list(
obj_uuids=[vmi_obj.uuid], fields=[fname])
vmi_ids = [vmi['uuid'] for vmi in vmis['virtual-machine-interfaces']]
self.assertEqual([vmi_obj.uuid], vmi_ids)
self.assertNotIn(fname, vmis['virtual-machine-interfaces'][0])
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_obj.uuid)
proto_type = ProtocolType(protocol='proto', port=1)
vmi_obj.add_virtual_machine_interface_fat_flow_protocols(proto_type,
'pos')
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
vmis = self._vnc_lib.virtual_machine_interfaces_list(
obj_uuids=[vmi_obj.uuid], fields=[fname])
vmi_ids = [vmi['uuid'] for vmi in vmis['virtual-machine-interfaces']]
self.assertEqual([vmi_obj.uuid], vmi_ids)
self.assertIn(fname, vmis['virtual-machine-interfaces'][0])
self.assertDictEqual({'fat_flow_protocol': [vars(proto_type)]},
vmis['virtual-machine-interfaces'][0][fname])
# end class TestPropertyWithList
class TestPropertyWithMap(test_case.ApiServerTestCase):
_excluded_vmi_bindings = ['vif_type', 'vnic_type']
def assert_kvpos(self, rd_bindings, idx, k, v, pos):
self.assertEqual(rd_bindings[idx][0]['key'], k)
self.assertEqual(rd_bindings[idx][0]['value'], v)
self.assertEqual(rd_bindings[idx][1], pos)
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestPropertyWithMap, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestPropertyWithMap, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_set_in_object(self):
vmi_obj = VirtualMachineInterface('vmi-%s' %(self.id()),
parent_obj=Project())
vmi_obj.set_virtual_machine_interface_bindings(
KeyValuePairs([KeyValuePair(key='k1', value='v1'),
KeyValuePair(key='k2', value='v2')]))
# needed for backend type-specific handling
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        # ensure the pairs are stored and read back correctly
rd_bindings = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid).virtual_machine_interface_bindings
bindings_dict = {binding.key: binding.value for binding in
rd_bindings.key_value_pair
if binding.key not in self._excluded_vmi_bindings}
self.assertDictEqual(bindings_dict, {'k1': 'v1', 'k2': 'v2'})
# verify db storage format (wrapper/container type stripped in storage)
uuid_cf = self.get_cf('config_db_uuid','obj_uuid_table')
cols = uuid_cf.get(vmi_obj.uuid,
column_start='propm:virtual_machine_interface_bindings:',
column_finish='propm:virtual_machine_interface_bindings;')
col_name_0, col_val_0 = cols.popitem(last=False)
col_name_1, col_val_1 = cols.popitem(last=False)
self.assertThat(col_name_0.split(':')[-1], Equals('k1'))
self.assertThat(json.loads(col_val_0)['key'], Equals('k1'))
self.assertThat(col_name_1.split(':')[-1], Equals('k2'))
self.assertThat(json.loads(col_val_1)['key'], Equals('k2'))
# update and clobber old entries
#vmi_obj.set_virtual_machine_interface_bindings([])
vmi_obj.set_virtual_machine_interface_bindings(KeyValuePairs())
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
rd_vmi_obj = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid)
rd_bindings = rd_vmi_obj.virtual_machine_interface_bindings
self.assertIsNone(rd_bindings)
with ExpectedException(cassandra_fake_impl.NotFoundException) as e:
cols = uuid_cf.get(vmi_obj.uuid,
column_start='propm:virtual_machine_interface_bindings:',
column_finish='propm:virtual_machine_interface_bindings;')
# end test_set_in_object
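    # A minimal sketch (illustration only), mirroring the propl layout
    # checked in TestPropertyWithList: property-map elements appear to be
    # stored one per cassandra column, named 'propm:<field>:<key>', with the
    # element JSON as the value.
    @staticmethod
    def _propm_column_name_sketch(field, key):
        return 'propm:%s:%s' % (field, key)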
def test_element_add_del_in_object(self):
vmi_obj = VirtualMachineInterface('vmi-%s' %(self.id()),
parent_obj=Project())
fake_bindings_dict = {'k1': 'v1',
'k2': 'v2',
'k3': 'v3',
'k4': 'v4'}
for key, val in fake_bindings_dict.items():
vmi_obj.add_virtual_machine_interface_bindings(
KeyValuePair(key=key, value=val))
# needed for backend type-specific handling
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
rd_bindings = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid).virtual_machine_interface_bindings
self.assertEqual(len(rd_bindings.key_value_pair), 4)
bindings_dict = {binding.key: binding.value for binding in
rd_bindings.key_value_pair
if binding.key not in self._excluded_vmi_bindings}
self.assertDictEqual(bindings_dict, fake_bindings_dict)
for pos in ['k1', 'k4']:
vmi_obj.del_virtual_machine_interface_bindings(elem_position=pos)
fake_bindings_dict.pop(pos)
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
rd_bindings = self._vnc_lib.virtual_machine_interface_read(
id=vmi_obj.uuid).virtual_machine_interface_bindings
self.assertEqual(len(rd_bindings.key_value_pair), 2)
bindings_dict = {binding.key: binding.value for binding in
rd_bindings.key_value_pair
if binding.key not in self._excluded_vmi_bindings}
self.assertDictEqual(bindings_dict, fake_bindings_dict)
    # end test_element_add_del_in_object
def test_resource_list_with_field_prop_map(self):
vmi_obj = VirtualMachineInterface('vmi-%s' % (self.id()),
parent_obj=Project())
fname = 'virtual_machine_interface_bindings'
# needed for backend type-specific handling
vmi_obj.add_virtual_network(VirtualNetwork())
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmis = self._vnc_lib.virtual_machine_interfaces_list(
obj_uuids=[vmi_obj.uuid], fields=[fname])
vmi_ids = [vmi['uuid'] for vmi in vmis['virtual-machine-interfaces']]
self.assertEqual([vmi_obj.uuid], vmi_ids)
self.assertNotIn(fname, vmis['virtual-machine-interfaces'][0])
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_obj.uuid)
kv_pairs = KeyValuePairs([KeyValuePair(key='k', value='v')])
vmi_obj.set_virtual_machine_interface_bindings(kv_pairs)
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
vmis = self._vnc_lib.virtual_machine_interfaces_list(
obj_uuids=[vmi_obj.uuid], fields=[fname])
vmi_ids = [vmi['uuid'] for vmi in vmis['virtual-machine-interfaces']]
self.assertEqual([vmi_obj.uuid], vmi_ids)
self.assertIn(fname, vmis['virtual-machine-interfaces'][0])
self.assertDictEqual(kv_pairs.exportDict()['KeyValuePairs'],
vmis['virtual-machine-interfaces'][0][fname])
# end class TestPropertyWithMap
class TestDBAudit(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestDBAudit, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestDBAudit, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
@contextlib.contextmanager
def audit_mocks(self):
with test_common.patch_imports(
[('schema_transformer.db',
flexmock(db=flexmock(
SchemaTransformerDB=flexmock(get_db_info=lambda: [('to_bgp_keyspace', ['route_target_table'])]))))]):
yield
# end audit_mocks
def _create_vn_subnet_ipam_iip(self, name):
ipam_obj = vnc_api.NetworkIpam('vn-%s' % name)
self._vnc_lib.network_ipam_create(ipam_obj)
vn_obj = vnc_api.VirtualNetwork(name)
vn_obj.add_network_ipam(ipam_obj,
VnSubnetsType(
[IpamSubnetType(SubnetType('1.1.1.0', 28))]))
self._vnc_lib.virtual_network_create(vn_obj)
iip_obj = vnc_api.InstanceIp('iip-%s' % name)
iip_obj.add_virtual_network(vn_obj)
self._vnc_lib.instance_ip_create(iip_obj)
return vn_obj, ipam_obj, iip_obj
# end _create_vn_subnet_ipam_iip
def _create_security_group(self, name):
sg_obj = vnc_api.SecurityGroup(name)
self._vnc_lib.security_group_create(sg_obj)
return sg_obj
def test_checker(self):
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
test_obj = self._create_test_object()
self.assertTill(self.vnc_db_has_ident, obj=test_obj)
db_manage.db_check(*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
# end test_checker
def test_checker_missing_mandatory_fields(self):
# detect OBJ_UUID_TABLE entry missing required fields
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
test_obj = self._create_test_object()
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
orig_col_val_ts = uuid_cf.get(test_obj.uuid,
include_timestamp=True)
omit_col_names = random.sample(set(
['type', 'fq_name', 'prop:id_perms']), 1)
wrong_col_val_ts = dict((k,v) for k,v in list(orig_col_val_ts.items())
if k not in omit_col_names)
with uuid_cf.patch_row(
test_obj.uuid, wrong_col_val_ts):
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
errors = db_checker.check_obj_mandatory_fields()
self.assertIn(db_manage.MandatoryFieldsMissingError,
[type(x) for x in errors])
# end test_checker_missing_mandatory_fields
def test_checker_fq_name_mismatch_index_to_object(self):
# detect OBJ_UUID_TABLE and OBJ_FQ_NAME_TABLE inconsistency
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
test_obj = self._create_test_object()
self.assert_vnc_db_has_ident(test_obj)
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
orig_col_val_ts = uuid_cf.get(test_obj.uuid,
include_timestamp=True)
wrong_col_val_ts = copy.deepcopy(orig_col_val_ts)
wrong_col_val_ts['fq_name'] = (json.dumps(['wrong-fq-name']),
wrong_col_val_ts['fq_name'][1])
with uuid_cf.patch_row(
test_obj.uuid, wrong_col_val_ts):
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
errors = db_checker.check_fq_name_uuid_match()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNMismatchError, error_types)
self.assertIn(db_manage.FQNStaleIndexError, error_types)
self.assertIn(db_manage.FQNIndexMissingError, error_types)
# end test_checker_fq_name_mismatch_index_to_object
def test_checker_fq_name_index_stale(self):
# fq_name table in cassandra has entry but obj_uuid table doesn't
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
test_obj = self._create_test_object()
uuid_cf = self.get_cf('config_db_uuid','obj_uuid_table')
with uuid_cf.patch_row(test_obj.uuid, new_columns=None):
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
errors = db_checker.check_fq_name_uuid_match()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNStaleIndexError, error_types)
    # test_checker_fq_name_index_stale
def test_checker_fq_name_index_missing(self):
# obj_uuid table has entry but fq_name table in cassandra doesn't
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
test_obj = self._create_test_object()
self.assert_vnc_db_has_ident(test_obj)
uuid_cf = self.get_cf('config_db_uuid','obj_uuid_table')
fq_name_cf = self.get_cf('config_db_uuid','obj_fq_name_table')
test_obj_type = test_obj.get_type().replace('-', '_')
orig_col_val_ts = fq_name_cf.get(test_obj_type,
include_timestamp=True)
# remove test obj in fq-name table
wrong_col_val_ts = dict((k,v) for k,v in list(orig_col_val_ts.items())
if ':'.join(test_obj.fq_name) not in k)
with fq_name_cf.patch_row(test_obj_type, new_columns=wrong_col_val_ts):
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
errors = db_checker.check_fq_name_uuid_match()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNIndexMissingError, error_types)
    # test_checker_fq_name_index_missing
def test_checker_ifmap_identifier_extra(self):
# ifmap has identifier but obj_uuid table in cassandra doesn't
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
test_obj = self._create_test_object()
self.assert_vnc_db_has_ident(test_obj)
uuid_cf = self.get_cf('config_db_uuid','obj_uuid_table')
with uuid_cf.patch_row(test_obj.uuid, new_columns=None):
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
errors = db_checker.check_fq_name_uuid_match()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNStaleIndexError, error_types)
# test_checker_ifmap_identifier_extra
def test_checker_ifmap_identifier_missing(self):
# ifmap doesn't have an identifier but obj_uuid table
# in cassandra does
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
uuid_cf = self.get_cf('config_db_uuid','obj_uuid_table')
with uuid_cf.patch_row(str(uuid.uuid4()),
new_columns={'type': json.dumps(''),
'fq_name':json.dumps(''),
'prop:id_perms':json.dumps('')}):
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
errors = db_checker.check_fq_name_uuid_match()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNIndexMissingError, error_types)
# test_checker_ifmap_identifier_missing
def test_checker_useragent_subnet_key_missing(self):
pass # move to vnc_openstack test
# test_checker_useragent_subnet_key_missing
def test_checker_useragent_subnet_id_missing(self):
pass # move to vnc_openstack test
# test_checker_useragent_subnet_id_missing
def test_checker_ipam_subnet_uuid_missing(self):
pass # move to vnc_openstack test
# test_checker_ipam_subnet_uuid_missing
def test_checker_subnet_count_mismatch(self):
pass # move to vnc_openstack test
# test_checker_subnet_count_mismatch
def test_checker_useragent_subnet_missing(self):
pass # move to vnc_openstack test
# test_checker_useragent_subnet_missing
def test_checker_useragent_subnet_extra(self):
pass # move to vnc_openstack test
# test_checker_useragent_subnet_extra
def test_checker_zk_vn_extra(self):
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
fq_name_cf = self.get_cf('config_db_uuid','obj_fq_name_table')
orig_col_val_ts = fq_name_cf.get('virtual_network',
include_timestamp=True)
# remove test obj in fq-name table
wrong_col_val_ts = dict((k,v) for k,v in list(orig_col_val_ts.items())
if ':'.join(vn_obj.fq_name) not in k)
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
            # verify the extra zk VN is caught when its fq-name index entry is mocked away
with fq_name_cf.patch_row('virtual_network',
new_columns=wrong_col_val_ts):
errors = db_checker.check_subnet_addr_alloc()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNIndexMissingError, error_types)
# test_checker_zk_vn_extra
def test_checker_zk_vn_missing(self):
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
with db_checker._zk_client.patch_path(
'%s%s/%s' %(self._cluster_id,
db_checker.BASE_SUBNET_ZK_PATH,
vn_obj.get_fq_name_str())):
errors = db_checker.check_subnet_addr_alloc()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.ZkVNMissingError, error_types)
self.assertIn(db_manage.ZkSubnetMissingError, error_types)
# test_checker_zk_vn_missing
def test_checker_zk_ip_extra(self):
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
            # verify the extra zk IP is caught when the iip row is mocked away
iip_obj = vnc_api.InstanceIp(self.id())
iip_obj.add_virtual_network(vn_obj)
self._vnc_lib.instance_ip_create(iip_obj)
uuid_cf = self.get_cf('config_db_uuid','obj_uuid_table')
with uuid_cf.patch_row(iip_obj.uuid, None):
errors = db_checker.check_subnet_addr_alloc()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.FQNStaleIndexError, error_types)
self.assertIn(db_manage.ZkIpExtraError, error_types)
# test_checker_zk_ip_extra
def test_checker_zk_ip_missing(self):
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
iip_obj = vnc_api.InstanceIp(self.id())
iip_obj.add_virtual_network(vn_obj)
self._vnc_lib.instance_ip_create(iip_obj)
ip_addr = self._vnc_lib.instance_ip_read(
id=iip_obj.uuid).instance_ip_address
ip_str = "%(#)010d" % {'#': int(netaddr.IPAddress(ip_addr))}
with db_checker._zk_client.patch_path(
'%s%s/%s:1.1.1.0/28/%s' %(
self._cluster_id, db_checker.BASE_SUBNET_ZK_PATH,
vn_obj.get_fq_name_str(), ip_str)):
errors = db_checker.check_subnet_addr_alloc()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.ZkIpMissingError, error_types)
# test_checker_zk_ip_missing
def test_checker_zk_route_target_extra(self):
pass # move to schema transformer test
# test_checker_zk_route_target_extra
def test_checker_zk_route_target_range_wrong(self):
pass # move to schema transformer test
# test_checker_zk_route_target_range_wrong
def test_checker_cass_route_target_range_wrong(self):
pass # move to schema transformer test
# test_checker_cass_route_target_range_wrong
def test_checker_route_target_count_mismatch(self):
# include user assigned route-targets here
pass # move to schema transformer test
# test_checker_route_target_count_mismatch
def test_checker_zk_virtual_network_id_extra_and_missing(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
with uuid_cf.patch_column(
vn_obj.uuid,
'prop:virtual_network_network_id',
json.dumps(42)):
errors = db_checker.check_virtual_networks_id()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.ZkVNIdExtraError, error_types)
self.assertIn(db_manage.ZkVNIdMissingError, error_types)
# test_checker_zk_virtual_network_id_extra_and_missing
def test_checker_zk_virtual_network_id_duplicate(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
vn1_obj, _, _ = self._create_vn_subnet_ipam_iip('vn1-%s' % self.id())
vn1_obj = self._vnc_lib.virtual_network_read(id=vn1_obj.uuid)
vn2_obj, _, _ = self._create_vn_subnet_ipam_iip('vn2-%s' % self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
with uuid_cf.patch_column(
vn2_obj.uuid,
'prop:virtual_network_network_id',
json.dumps(vn1_obj.virtual_network_network_id)):
errors = db_checker.check_virtual_networks_id()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.VNDuplicateIdError, error_types)
self.assertIn(db_manage.ZkVNIdExtraError, error_types)
# test_checker_zk_virtual_network_id_duplicate
def test_checker_zk_security_group_id_extra_and_missing(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
sg_obj = self._create_security_group(self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
with uuid_cf.patch_column(
sg_obj.uuid,
'prop:security_group_id',
json.dumps(8000042)):
errors = db_checker.check_security_groups_id()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.ZkSGIdExtraError, error_types)
self.assertIn(db_manage.ZkSGIdMissingError, error_types)
# test_checker_zk_security_group_id_extra_and_missing
def test_checker_zk_security_group_id_duplicate(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
sg1_obj = self._create_security_group('sg1-%s' % self.id())
sg1_obj = self._vnc_lib.security_group_read(id=sg1_obj.uuid)
sg2_obj = self._create_security_group('sg2-%s' % self.id())
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_checker = db_manage.DatabaseChecker(
*db_manage._parse_args('check --cluster_id %s' %(self._cluster_id)))
with uuid_cf.patch_column(
sg2_obj.uuid,
'prop:security_group_id',
json.dumps(sg1_obj.security_group_id)):
errors = db_checker.check_security_groups_id()
error_types = [type(x) for x in errors]
self.assertIn(db_manage.SGDuplicateIdError, error_types)
self.assertIn(db_manage.ZkSGIdExtraError, error_types)
# test_checker_zk_security_group_id_duplicate
def test_checker_security_group_0_missing(self):
pass # move to schema transformer test
# test_checker_security_group_0_missing
def test_checker_route_targets_id_with_vn_rt_list_set_to_none(self):
project = Project('project-%s' % self.id())
self._vnc_lib.project_create(project)
vn = VirtualNetwork('vn-%s' % self.id(), parent_obj=project)
self._vnc_lib.virtual_network_create(vn)
vn.set_route_target_list(None)
self._vnc_lib.virtual_network_update(vn)
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
args = db_manage._parse_args(
'check --cluster_id %s' % self._cluster_id)
db_checker = db_manage.DatabaseChecker(*args)
db_checker.audit_route_targets_id()
def test_cleaner(self):
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_manage.db_clean(*db_manage._parse_args('clean --cluster_id %s' %(self._cluster_id)))
# end test_cleaner
def test_cleaner_zk_virtual_network_id(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_cleaner = db_manage.DatabaseCleaner(
*db_manage._parse_args('--execute clean --cluster_id %s' %(self._cluster_id)))
fake_id = 42
with uuid_cf.patch_column(
vn_obj.uuid,
'prop:virtual_network_network_id',
json.dumps(fake_id)):
db_cleaner.clean_stale_virtual_network_id()
zk_id_str = "%(#)010d" %\
{'#': vn_obj.virtual_network_network_id - 1}
self.assertIsNone(
db_cleaner._zk_client.exists(
'%s%s/%s' % (
self._cluster_id, db_cleaner.BASE_VN_ID_ZK_PATH,
zk_id_str))
)
def test_healer_zk_virtual_network_id(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
vn_obj, _, _ = self._create_vn_subnet_ipam_iip(self.id())
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj.uuid)
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_cleaner = db_manage.DatabaseHealer(
*db_manage._parse_args('--execute heal --cluster_id %s' % (
self._cluster_id)))
fake_id = 42
with uuid_cf.patch_column(
vn_obj.uuid,
'prop:virtual_network_network_id',
json.dumps(fake_id)):
db_cleaner.heal_virtual_networks_id()
zk_id_str = "%(#)010d" % {'#': fake_id - 1}
self.assertEqual(
db_cleaner._zk_client.exists(
'%s%s/%s' % (
self._cluster_id, db_cleaner.BASE_VN_ID_ZK_PATH,
zk_id_str))[0],
vn_obj.get_fq_name_str())
def test_cleaner_zk_security_group_id(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
sg_obj = self._create_security_group(self.id())
sg_obj = self._vnc_lib.security_group_read(id=sg_obj.uuid)
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_cleaner = db_manage.DatabaseCleaner(
*db_manage._parse_args('--execute clean --cluster_id %s' %(self._cluster_id)))
with uuid_cf.patch_column(
sg_obj.uuid,
'prop:security_group_id',
json.dumps(8000042)):
db_cleaner.clean_stale_security_group_id()
zk_id_str = "%(#)010d" % {'#': sg_obj.security_group_id}
self.assertIsNone(
db_cleaner._zk_client.exists(
'%s%s/%s' % (
self._cluster_id, db_cleaner.BASE_VN_ID_ZK_PATH,
zk_id_str))
)
def test_healer_zk_security_group_id(self):
uuid_cf = self.get_cf('config_db_uuid', 'obj_uuid_table')
sg_obj = self._create_security_group(self.id())
sg_obj = self._vnc_lib.security_group_read(id=sg_obj.uuid)
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_cleaner = db_manage.DatabaseHealer(
*db_manage._parse_args('--execute heal --cluster_id %s' %(self._cluster_id)))
with uuid_cf.patch_column(
sg_obj.uuid,
'prop:security_group_id',
json.dumps(8000042)):
db_cleaner.heal_security_groups_id()
zk_id_str = "%(#)010d" % {'#': 42}
self.assertEqual(
db_cleaner._zk_client.exists(
'%s%s/%s' %
(self._cluster_id, db_cleaner.BASE_SG_ID_ZK_PATH,
zk_id_str))[0],
sg_obj.get_fq_name_str())
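    # A minimal sketch (illustration only; the helper name is hypothetical)
    # of the zookeeper node naming used by the id checks above: allocated ids
    # are stored as 10-digit zero-padded strings, with virtual-network ids
    # written at (id - 1) and, in the healer test above, security-group ids
    # at (id - 8000000).
    @staticmethod
    def _zk_id_str_sketch(id_value, offset=0):
        return "%(#)010d" % {'#': id_value - offset}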
def test_clean_obj_missing_mandatory_fields(self):
pass
# end test_clean_obj_missing_mandatory_fields
def test_clean_dangling_fq_names(self):
pass
# end test_clean_dangling_fq_names()
def test_clean_dangling_back_refs(self):
pass
# end test_clean_dangling_back_refs()
def test_clean_dangling_children(self):
pass
# end test_clean_dangling_children
def test_healer(self):
with self.audit_mocks():
from vnc_cfg_api_server import db_manage
db_manage.db_heal(*db_manage._parse_args('heal --cluster_id %s' %(self._cluster_id)))
# end test_healer
def test_heal_fq_name_index(self):
pass
# end test_heal_fq_name_index
def test_heal_back_ref_index(self):
pass
# end test_heal_back_ref_index
def test_heal_children_index(self):
pass
# end test_heal_children_index
def test_heal_useragent_subnet_uuid(self):
pass
# end test_heal_useragent_subnet_uuid
# end class TestDBAudit
class TestBulk(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestBulk, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestBulk, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_list_bulk_collection(self):
obj_count = self._vnc_lib.POST_FOR_LIST_THRESHOLD + 1
vn_uuids = []
ri_uuids = []
vmi_uuids = []
logger.info("Creating %s VNs, RIs, VMIs.", obj_count)
vn_objs, _, ri_objs, vmi_objs = self._create_vn_ri_vmi(obj_count)
vn_uuids = [o.uuid for o in vn_objs]
ri_uuids = [o.uuid for o in ri_objs]
vmi_uuids = [o.uuid for o in vmi_objs]
bulk_route = [r for r in self._api_server.api_bottle.routes
if r.rule == '/list-bulk-collection'][0]
invoked_bulk = []
def spy_list_bulk(orig_method, *args, **kwargs):
invoked_bulk.append(True)
return orig_method(*args, **kwargs)
logger.info("Querying VNs by obj_uuids.")
with test_common.patch(bulk_route, 'callback', spy_list_bulk):
ret_list = self._vnc_lib.resource_list('virtual-network',
obj_uuids=vn_uuids)
ret_uuids = [ret['uuid'] for ret in ret_list['virtual-networks']]
self.assertThat(set(vn_uuids), Equals(set(ret_uuids)))
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
logger.info("Querying RIs by parent_id.")
ret_list = self._vnc_lib.resource_list('routing-instance',
parent_id=vn_uuids)
ret_uuids = [ret['uuid']
for ret in ret_list['routing-instances']]
self.assertThat(set(ri_uuids),
Equals(set(ret_uuids) & set(ri_uuids)))
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
logger.info("Querying VMIs by back_ref_id.")
ret_list = self._vnc_lib.resource_list('virtual-machine-interface',
back_ref_id=vn_uuids)
ret_uuids = [ret['uuid']
for ret in ret_list['virtual-machine-interfaces']]
self.assertThat(set(vmi_uuids), Equals(set(ret_uuids)))
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
logger.info("Querying VMIs by back_ref_id and extra fields.")
ret_list = self._vnc_lib.resource_list('virtual-machine-interface',
back_ref_id=vn_uuids,
fields=['virtual_network_refs'])
ret_uuids = [ret['uuid']
for ret in ret_list['virtual-machine-interfaces']]
self.assertThat(set(vmi_uuids), Equals(set(ret_uuids)))
self.assertEqual(set(vmi['virtual_network_refs'][0]['uuid']
for vmi in ret_list['virtual-machine-interfaces']),
set(vn_uuids))
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
logger.info("Querying RIs by parent_id and filter.")
ret_list = self._vnc_lib.resource_list('routing-instance',
parent_id=vn_uuids,
filters={'display_name':'%s-ri-5' %(self.id())})
self.assertThat(len(ret_list['routing-instances']), Equals(1))
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
logger.info("Querying VNs by obj_uuids for children+backref fields.")
ret_objs = self._vnc_lib.resource_list('virtual-network',
detail=True, obj_uuids=vn_uuids, fields=['routing_instances',
'virtual_machine_interface_back_refs'])
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
ret_ri_uuids = []
ret_vmi_uuids = []
for vn_obj in ret_objs:
ri_children = getattr(vn_obj, 'routing_instances',
'RI children absent')
self.assertNotEqual(ri_children, 'RI children absent')
ret_ri_uuids.extend([ri['uuid'] for ri in ri_children])
vmi_back_refs = getattr(vn_obj,
'virtual_machine_interface_back_refs',
'VMI backrefs absent')
            self.assertNotEqual(vmi_back_refs, 'VMI backrefs absent')
ret_vmi_uuids.extend([vmi['uuid'] for vmi in vmi_back_refs])
self.assertThat(set(ri_uuids),
Equals(set(ret_ri_uuids) & set(ri_uuids)))
self.assertThat(set(vmi_uuids), Equals(set(ret_vmi_uuids)))
# end test_list_bulk_collection
def test_list_bulk_collection_with_malformed_filters(self):
obj_count = self._vnc_lib.POST_FOR_LIST_THRESHOLD + 1
vn_objs, _, _, _ = self._create_vn_ri_vmi()
vn_uuid = vn_objs[0].uuid
vn_uuids = [vn_uuid] +\
['bad-uuid'] * self._vnc_lib.POST_FOR_LIST_THRESHOLD
try:
results = self._vnc_lib.resource_list('virtual-network',
obj_uuids=vn_uuids)
self.assertEqual(len(results['virtual-networks']), 1)
self.assertEqual(results['virtual-networks'][0]['uuid'], vn_uuid)
except HttpError:
self.fail('Malformed object UUID filter was not ignored')
try:
results = self._vnc_lib.resource_list('routing-instance',
parent_id=vn_uuids,
detail=True)
self.assertEqual(len(results), 2)
for ri_obj in results:
self.assertEqual(ri_obj.parent_uuid, vn_uuid)
except HttpError:
self.fail('Malformed parent UUID filter was not ignored')
try:
results = self._vnc_lib.resource_list('virtual-machine-interface',
back_ref_id=vn_uuids,
detail=True)
self.assertEqual(len(results), 1)
vmi_obj = results[0]
self.assertEqual(vmi_obj.get_virtual_network_refs()[0]['uuid'],
vn_uuid)
except HttpError:
self.fail('Malformed back-ref UUID filter was not ignored')
# end class TestBulk
class TestCacheWithMetadata(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestCacheWithMetadata, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCacheWithMetadata, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def setUp(self):
self.uuid_cf = self.get_cf( 'config_db_uuid', 'obj_uuid_table')
self.cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
return super(TestCacheWithMetadata, self).setUp()
# end setUp
def create_test_object(self, name=None):
vn_name = name or 'vn-%s' %(self.id())
vn_obj = vnc_api.VirtualNetwork(vn_name)
vn_obj.display_name = 'test-cache-obj'
self._vnc_lib.virtual_network_create(vn_obj)
return vn_obj
    # end create_test_object
def prime_test_object(self, vn_obj):
self._vnc_lib.virtual_networks_list(obj_uuids=[vn_obj.uuid])
return vn_obj
# end prime_test_object
def create_and_prime_test_object(self, name=None):
vn_name = name or 'vn-%s' %(self.id())
return self.prime_test_object(self.create_test_object(vn_name))
# end create_and_prime_test_object
def test_hit_and_fresh(self):
vn_obj = self.create_and_prime_test_object()
uuid_cf = self.uuid_cf
vn_row = uuid_cf.get(vn_obj.uuid, include_timestamp=True)
with uuid_cf.patch_row(vn_obj.uuid,
new_columns={'fq_name': vn_row['fq_name'],
'prop:id_perms': vn_row['prop:id_perms'],
'type': vn_row['type']}):
ret_vn_objs = self._vnc_lib.virtual_networks_list(
obj_uuids=[vn_obj.uuid], detail=True)
self.assertEqual(ret_vn_objs[0].display_name, vn_obj.display_name)
# end test_hit_and_fresh
def test_hit_and_stale(self):
vn_obj = self.create_and_prime_test_object()
cache_mgr = self.cache_mgr
self.assertIn(vn_obj.uuid, list(cache_mgr._cache.keys()))
uuid_cf = self.uuid_cf
vn_row = uuid_cf.get(vn_obj.uuid)
with uuid_cf.patches([
('column', (vn_obj.uuid, 'prop:display_name', 'stale-check-name')),
('column', (vn_obj.uuid, 'prop:id_perms', vn_row['prop:id_perms'])),
]):
ret_vn_objs = self._vnc_lib.virtual_networks_list(
obj_uuids=[vn_obj.uuid], detail=True)
self.assertEqual(
ret_vn_objs[0].display_name, 'stale-check-name')
# end test_hit_and_stale
def test_miss(self):
vn_obj = self.create_test_object()
cache_mgr = self.cache_mgr
self.assertNotIn(vn_obj.uuid, list(cache_mgr._cache.keys()))
ret_vn_dicts = self._vnc_lib.virtual_networks_list(
obj_uuids=[vn_obj.uuid],
fields=['display_name'])['virtual-networks']
self.assertEqual(ret_vn_dicts[0]['display_name'],
vn_obj.display_name)
# end test_miss
def test_hits_stales_misses(self):
uuid_cf = self.uuid_cf
cache_mgr = self.cache_mgr
vn_hit_fresh_obj = self.create_and_prime_test_object(
'vn-hit-fresh-%s' %(self.id()))
vn_hit_stale_obj = self.create_and_prime_test_object(
'vn-hit-stale-%s' %(self.id()))
vn_miss_obj = self.create_test_object('vn-miss-%s' %(self.id()))
self.assertNotIn(vn_miss_obj.uuid, list(cache_mgr._cache.keys()))
vn_hit_stale_row = uuid_cf.get(vn_hit_stale_obj.uuid)
with uuid_cf.patches([
('column', (vn_hit_fresh_obj.uuid,
'prop:display_name', 'fresh-check-name')),
('column', (vn_hit_stale_obj.uuid,
'prop:display_name', 'stale-check-name')),
('column', (vn_hit_stale_obj.uuid,
'prop:id_perms', vn_hit_stale_row['prop:id_perms'])),
]):
vn_uuids = [vn_hit_fresh_obj.uuid, vn_hit_stale_obj.uuid,
vn_miss_obj.uuid]
ret_vn_dicts = self._vnc_lib.virtual_networks_list(
obj_uuids=vn_uuids,
fields=['display_name'])['virtual-networks']
self.assertEqual(len(ret_vn_dicts), 3)
id_name_tuples = [(vn['uuid'], vn['display_name'])
for vn in ret_vn_dicts]
self.assertIn(
(vn_hit_fresh_obj.uuid, vn_hit_fresh_obj.display_name),
id_name_tuples)
self.assertIn((vn_hit_stale_obj.uuid, 'stale-check-name'),
id_name_tuples)
self.assertIn((vn_miss_obj.uuid, vn_miss_obj.display_name),
id_name_tuples)
# end test_hits_stales_misses
def test_evict_on_ref_type_same(self):
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
vn1_name = 'vn-1-%s' %(self.id())
vn2_name = 'vn-2-%s' %(self.id())
vn1_obj = self.create_test_object(vn1_name)
vn2_obj = self.create_test_object(vn2_name)
# prime RIs to cache
ri1_obj = self._vnc_lib.routing_instance_read(
fq_name=vn1_obj.fq_name+[vn1_name])
ri2_obj = self._vnc_lib.routing_instance_read(
fq_name=vn2_obj.fq_name+[vn2_name])
self.assertIn(ri1_obj.uuid, list(cache_mgr._cache.keys()))
self.assertIn(ri2_obj.uuid, list(cache_mgr._cache.keys()))
ri1_obj.add_routing_instance(ri2_obj, None)
self._vnc_lib.routing_instance_update(ri1_obj)
self.assertNotIn(ri2_obj.uuid, list(cache_mgr._cache.keys()))
# end test_evict_on_ref_type_same
def test_stale_for_backref_on_ref_update(self):
uuid_cf = self.uuid_cf
cache_mgr = self.cache_mgr
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
ipam_obj = NetworkIpam('ipam-%s' %(self.id()),
display_name='ipam-name')
self._vnc_lib.network_ipam_create(ipam_obj)
self._vnc_lib.virtual_network_create(vn_obj)
# prime ipam in cache
self._vnc_lib.network_ipam_read(fq_name=ipam_obj.fq_name)
self.assertIn(ipam_obj.uuid, list(cache_mgr._cache.keys()))
vn_obj.add_network_ipam(ipam_obj,
VnSubnetsType(
[IpamSubnetType(SubnetType('1.1.1.0', 28))]))
self._vnc_lib.virtual_network_update(vn_obj)
with uuid_cf.patches([
('column',
(ipam_obj.uuid, 'prop:display_name', 'stale-check-name'))]):
# access for ipam without children/backref should hit cache
ret_ipam_obj = self._vnc_lib.network_ipam_read(
fq_name=ipam_obj.fq_name)
self.assertEqual(ret_ipam_obj.display_name, ipam_obj.display_name)
# access for ipam with backref should hit cache but stale
ret_ipam_obj = self._vnc_lib.network_ipam_read(
fq_name=ipam_obj.fq_name, fields=['display_name',
'virtual_network_back_refs'])
self.assertEqual(ret_ipam_obj.display_name, 'stale-check-name')
# end test_stale_for_backref_on_ref_update
def test_read_for_delete_not_from_cache(self):
uuid_cf = self.uuid_cf
cache_mgr = self.cache_mgr
ipam_obj = NetworkIpam('ipam-%s' %(self.id()),
display_name='ipam-name')
self._vnc_lib.network_ipam_create(ipam_obj)
# prime ipam in cache
self._vnc_lib.network_ipam_read(fq_name=ipam_obj.fq_name)
self.assertIn(ipam_obj.uuid, list(cache_mgr._cache.keys()))
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
with uuid_cf.patches([
('column', (ipam_obj.uuid,
'backref:virtual_network:%s' %(vn_obj.uuid),
json.dumps(None)))
]):
with ExpectedException(RefsExistError,
".*Delete when resource still referred.*"):
self._vnc_lib.network_ipam_delete(id=ipam_obj.uuid)
# end test_read_for_delete_not_from_cache
# end class TestCacheWithMetadata
class TestCacheWithMetadataEviction(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
return super(TestCacheWithMetadataEviction, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'object_cache_entries',
'2')])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCacheWithMetadataEviction, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_evict_on_full(self):
vn1_obj = vnc_api.VirtualNetwork('vn-1-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn1_obj)
vn2_obj = vnc_api.VirtualNetwork('vn-2-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn2_obj)
vn3_obj = vnc_api.VirtualNetwork('vn-3-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn3_obj)
# prime with vn-1 and vn-2
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self._vnc_lib.virtual_network_read(id=vn1_obj.uuid)
self._vnc_lib.virtual_network_read(id=vn2_obj.uuid)
cache_keys = list(cache_mgr._cache.keys())
self.assertIn(vn1_obj.uuid, cache_keys)
self.assertIn(vn2_obj.uuid, cache_keys)
self.assertNotIn(vn3_obj.uuid, cache_keys)
# prime vn-3 and test eviction
self._vnc_lib.virtual_network_read(id=vn3_obj.uuid)
cache_keys = list(cache_mgr._cache.keys())
self.assertIn(vn3_obj.uuid, cache_keys)
if vn1_obj.uuid in cache_keys:
self.assertNotIn(vn2_obj.uuid, cache_keys)
elif vn2_obj.uuid in cache_keys:
self.assertNotIn(vn1_obj.uuid, cache_keys)
else:
self.assertTrue(
False, 'Eviction failed, all VNs present in cache')
# end test_evict_on_full
# end class TestCacheWithMetadataEviction
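# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the API server code): the behaviour
# checked by test_evict_on_full above is an LRU-style eviction once the
# configured 'object_cache_entries' limit (2 in that test class) is reached.
# The real object cache manager is more involved; this minimal example shows
# just the eviction idea, and all names below are hypothetical.
def _example_lru_cache_eviction():
    from collections import OrderedDict
    max_entries = 2
    cache = OrderedDict()
    for uuid_key in ['vn-1-uuid', 'vn-2-uuid', 'vn-3-uuid']:
        cache[uuid_key] = 'obj-%s' % uuid_key
        if len(cache) > max_entries:
            # drop the oldest entry once the cache is full
            cache.popitem(last=False)
    # only two of the three UUIDs survive, mirroring the assertions above
    assert len(cache) == 2 and 'vn-3-uuid' in cache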
class TestCacheWithMetadataExcludeTypes(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
return super(TestCacheWithMetadataExcludeTypes, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'object_cache_exclude_types',
'project, network-ipam')])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCacheWithMetadataExcludeTypes, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_exclude_types_not_cached(self):
# verify not cached for configured types
obj = vnc_api.Project('proj-%s' %(self.id()))
self._vnc_lib.project_create(obj)
self._vnc_lib.project_read(id=obj.uuid)
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self.assertNotIn(obj.uuid, list(cache_mgr._cache.keys()))
obj = vnc_api.NetworkIpam('ipam-%s' %(self.id()))
self._vnc_lib.network_ipam_create(obj)
self._vnc_lib.network_ipam_read(id=obj.uuid)
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self.assertNotIn(obj.uuid, list(cache_mgr._cache.keys()))
# verify cached for others
obj = vnc_api.VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(obj)
self._vnc_lib.virtual_network_read(id=obj.uuid)
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self.assertIn(obj.uuid, list(cache_mgr._cache.keys()))
# end test_exclude_types_not_cached
# end class TestCacheWithMetadataExcludeTypes
class TestRefValidation(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestRefValidation, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestRefValidation, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_refs_validation_with_expected_error(self):
obj = VirtualNetwork('validate-create-error')
body_dict = {'virtual-network':
{'fq_name': obj.fq_name,
'parent_type': 'project',
'network_ipam_refs': [
{'attr':
{'host_routes': None,
'ipam_subnets': [{'addr_from_start': None,
'alloc_unit': 1,
'allocation_pools': [],
'default_gateway': None,
'dhcp_option_list': None,
'dns_nameservers': [],
'dns_server_address': None,
'enable_dhcp': True,
'host_routes': None,
'subnet': {'ip_prefix': '11.1.1.0',
'ip_prefix_len': 24},
'subnet_name': None,
'subnet_uuid': 12}]},
'to': ['default-domain',
'default-project']}]}}
status, content = self._http_post('/virtual-networks',
body=json.dumps(body_dict))
self.assertThat(status, Equals(400))
self.assertThat(content, Contains('Bad reference'))
#end test_refs_validation_with_expected_error
def test_refs_validation_with_expected_success(self):
obj = VirtualNetwork('validate-create')
body_dict = {'virtual-network':
{'fq_name': obj.fq_name,
'parent_type': 'project',
'network_ipam_refs': [
{'attr':
{'host_routes': None,
'ipam_subnets': [{'addr_from_start': None,
'alloc_unit': 1,
'allocation_pools': [],
'default_gateway': None,
'dhcp_option_list': None,
'dns_nameservers': [],
'dns_server_address': None,
'enable_dhcp': True,
'host_routes': None,
'subnet': {'ip_prefix': '10.1.1.0',
'ip_prefix_len': 24},
'subnet_name': None,
'subnet_uuid': None}]},
'to': ['default-domain',
'default-project',
'default-network-ipam']}]}}
status, content = self._http_post('/virtual-networks',
body=json.dumps(body_dict))
self.assertThat(status, Equals(200))
#end test_refs_validation_with_expected_success
#end class TestRefValidation
class TestVncApiStats(test_case.ApiServerTestCase):
_sandesh = None
logs = []
def _check_sendwith(self, sandesh, stats, *args):
self.assertEqual(stats.response_code, 404)
self.assertEqual(stats.obj_type, 'virtual_network')
def _mock_sendwith(self, sandesh, stats, *args):
self.logs.append("TestVncApiStatsLog")
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestVncApiStats, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestVncApiStats, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_response_code_on_exception(self):
from cfgm_common.vnc_api_stats import VncApiStatistics
self._api_server.enable_api_stats_log = True
try:
with test_common.patch(VncApiStatistics,
'sendwith', self._check_sendwith):
self._vnc_lib.virtual_network_read(
id='5a4f39e3-9fb5-4832-9095-764bd19ffc90')
except cfgm_common.exceptions.NoIdError:
pass
else:
            self.fail('Expected NoIdError to be raised, but it was not raised')
# end test_response_code_on_exception
def test_disabled_vnc_api_stats(self):
from cfgm_common.vnc_api_stats import VncApiStatistics
def _crud_exec(factor):
with test_common.patch(
VncApiStatistics,
'sendwith', self._mock_sendwith):
obj = VirtualNetwork('%s-vn' % (self.id()))
self._vnc_lib.virtual_network_create(obj)
self.assertEquals(len(self.logs), 2 * factor)
self._vnc_lib.virtual_network_read(id=obj.uuid)
self.assertEquals(len(self.logs), 3 * factor)
obj.display_name = 'foo'
self._vnc_lib.virtual_network_update(obj)
self.assertEquals(len(self.logs), 5 * factor)
self._vnc_lib.virtual_network_delete(id=obj.uuid)
self.assertEquals(len(self.logs), 7 * factor)
self._api_server.enable_api_stats_log = False
# try all crud operations
_crud_exec(factor=0)
# Now enable api server logging and logs will be sent
self._api_server.enable_api_stats_log = True
_crud_exec(factor=1)
# end TestVncApiStats
class TestVncLatencyStats(test_case.ApiServerTestCase):
_sandesh = None
logs = []
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestVncLatencyStats, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestVncLatencyStats, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def mock_send(self, *args, **kwargs):
self.logs.append("VncLatencyLog")
def test_latency_stats(self):
from cfgm_common.uve.vnc_api.ttypes import VncApiLatencyStatsLog
def _crud_exec(logs_enabled):
with test_common.patch(
VncApiLatencyStatsLog,
'send', self.mock_send):
obj = VirtualNetwork('%s-vn' % (self.id()))
self._vnc_lib.virtual_network_create(obj)
if logs_enabled is True:
                    self.assertNotEqual(len(self.logs), 0)
else:
self.assertEquals(len(self.logs), 0)
self.logs = []
self._vnc_lib.virtual_network_read(id=obj.uuid)
if logs_enabled is True:
                    self.assertNotEqual(len(self.logs), 0)
else:
self.assertEquals(len(self.logs), 0)
self.logs = []
obj.display_name = 'foo'
self._vnc_lib.virtual_network_update(obj)
if logs_enabled is True:
                    self.assertNotEqual(len(self.logs), 0)
else:
self.assertEquals(len(self.logs), 0)
self.logs = []
self._vnc_lib.virtual_network_delete(id=obj.uuid)
if logs_enabled is True:
                    self.assertNotEqual(len(self.logs), 0)
else:
self.assertEquals(len(self.logs), 0)
self._api_server.enable_latency_stats_log = False
# try all crud operations
_crud_exec(False)
# Now enable api server logging and logs will be sent
self._api_server.enable_latency_stats_log = True
_crud_exec(True)
    # end test_latency_stats
class TestDbJsonExim(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestDbJsonExim, cls).setUpClass(*args, **kwargs)
cls.to_bgp_ks = '%s_to_bgp_keyspace' %(cls._cluster_id)
cls.svc_mon_ks = '%s_svc_monitor_keyspace' %(cls._cluster_id)
cls.dev_mgr_ks = '%s_dm_keyspace' %(cls._cluster_id)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestDbJsonExim, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_db_exim_args(self):
from cfgm_common import db_json_exim
with ExpectedException(db_json_exim.InvalidArguments,
'Both --import-from and --export-to cannot be specified'):
db_json_exim.DatabaseExim("--import-from foo --export-to bar")
# end test_db_exim_args
def test_db_export(self):
from cfgm_common import db_json_exim
with tempfile.NamedTemporaryFile() as export_dump:
patch_ks = cassandra_fake_impl.CassandraFakeServer.patch_keyspace
with patch_ks(self.to_bgp_ks, {}), \
patch_ks(self.svc_mon_ks, {}), \
patch_ks(self.dev_mgr_ks, {}):
vn_obj = self._create_test_object()
db_json_exim.DatabaseExim('--export-to %s --cluster_id %s' %(
export_dump.name, self._cluster_id)).db_export()
dump = json.loads(export_dump.readlines()[0])
dump_cassandra = dump['cassandra']
dump_zk = json.loads(dump['zookeeper'])
uuid_table = dump_cassandra['config_db_uuid']['obj_uuid_table']
self.assertEqual(uuid_table[vn_obj.uuid]['fq_name'][0],
json.dumps(vn_obj.get_fq_name()))
zk_node = [node for node in dump_zk
if node[0] == '%s/fq-name-to-uuid/virtual_network:%s/' %(
self._cluster_id, vn_obj.get_fq_name_str())]
self.assertEqual(len(zk_node), 1)
self.assertEqual(zk_node[0][1][0], vn_obj.uuid)
# end test_db_export
def test_db_export_with_omit_keyspaces(self):
from cfgm_common import db_json_exim
with tempfile.NamedTemporaryFile() as export_dump:
vn_obj = self._create_test_object()
omit_ks = set(db_json_exim.KEYSPACES) - set(['config_db_uuid'])
args = '--export-to %s --omit-keyspaces ' %(export_dump.name)
for ks in list(omit_ks):
args += '%s ' %(ks)
args += '--cluster_id %s' %(self._cluster_id)
db_json_exim.DatabaseExim(args).db_export()
dump = json.loads(export_dump.readlines()[0])
dump_cassandra = dump['cassandra']
dump_zk = json.loads(dump['zookeeper'])
uuid_table = dump_cassandra['config_db_uuid']['obj_uuid_table']
self.assertEqual(uuid_table[vn_obj.uuid]['fq_name'][0],
json.dumps(vn_obj.get_fq_name()))
zk_node = [node for node in dump_zk
if node[0] == '%s/fq-name-to-uuid/virtual_network:%s/' %(
self._cluster_id, vn_obj.get_fq_name_str())]
self.assertEqual(len(zk_node), 1)
self.assertEqual(zk_node[0][1][0], vn_obj.uuid)
# end test_db_export_with_omit_keyspaces
def test_db_export_and_import(self):
from cfgm_common import db_json_exim
with tempfile.NamedTemporaryFile() as dump_f:
patch_ks = cassandra_fake_impl.CassandraFakeServer.patch_keyspace
with patch_ks(self.to_bgp_ks, {}), \
patch_ks(self.svc_mon_ks, {}), \
patch_ks(self.dev_mgr_ks, {}):
vn_obj = self._create_test_object()
db_json_exim.DatabaseExim('--export-to %s --cluster_id %s' %(
dump_f.name, self._cluster_id)).db_export()
with ExpectedException(db_json_exim.CassandraNotEmptyError):
db_json_exim.DatabaseExim(
'--import-from %s --cluster_id %s' %(
dump_f.name, self._cluster_id)).db_import()
uuid_cf = self.get_cf(
'config_db_uuid', 'obj_uuid_table')
fq_name_cf = self.get_cf(
'config_db_uuid', 'obj_fq_name_table')
shared_cf = self.get_cf(
'config_db_uuid', 'obj_shared_table')
with uuid_cf.patch_cf({}), fq_name_cf.patch_cf({}), \
shared_cf.patch_cf({}):
with ExpectedException(
db_json_exim.ZookeeperNotEmptyError):
db_json_exim.DatabaseExim(
'--import-from %s --cluster_id %s' %(
dump_f.name, self._cluster_id)).db_import()
exim_obj = db_json_exim.DatabaseExim(
'--import-from %s --cluster_id %s' %(
dump_f.name, self._cluster_id))
with uuid_cf.patch_cf({}), fq_name_cf.patch_cf({}), \
shared_cf.patch_cf({}), exim_obj._zookeeper.patch_path(
'%s/' %(self._cluster_id), recursive=True):
exim_obj.db_import()
dump = json.loads(dump_f.readlines()[0])
dump_cassandra = dump['cassandra']
dump_zk = json.loads(dump['zookeeper'])
uuid_table = dump_cassandra['config_db_uuid']['obj_uuid_table']
self.assertEqual(uuid_table[vn_obj.uuid]['fq_name'][0],
json.dumps(vn_obj.get_fq_name()))
zk_node = [node for node in dump_zk
if node[0] == '%s/fq-name-to-uuid/virtual_network:%s/' %(
self._cluster_id, vn_obj.get_fq_name_str())]
self.assertEqual(len(zk_node), 1)
self.assertEqual(zk_node[0][1][0], vn_obj.uuid)
# end test_db_export_and_import
# end class TestDbJsonExim
class TestPagination(test_case.ApiServerTestCase):
default_paginate_count = 5
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
return super(TestPagination, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'paginate_count',
TestPagination.default_paginate_count)])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestPagination, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
class FetchExpect(object):
def __init__(self, num_objs, marker):
self.num_objs = num_objs
self.marker = marker
# end FetchExpect
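    # FetchExpect.num_objs is the number of objects expected in one paginated
    # fetch and FetchExpect.marker is the expected 'marker' value returned by
    # the server for that fetch (None means the final page); the
    # verify_collection_walk helpers below compare each response against a
    # list of these expectations.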
def _create_vn_collection(self, count, proj_obj=None):
return self._create_test_objects(count=count, proj_obj=proj_obj)
# end _create_vn_collection
def _create_vmi_collection(self, count, vn_obj):
proj_obj = self._vnc_lib.project_read(id=vn_obj.parent_uuid)
vmi_objs = []
for i in range(count):
vmi_obj = VirtualMachineInterface(
'vmi-%s-%s-%s' %(self.id(), vn_obj.name, i),
parent_obj=proj_obj)
vmi_obj.add_virtual_network(vn_obj)
self._vnc_lib.virtual_machine_interface_create(vmi_obj)
vmi_objs.append(vmi_obj)
return vmi_objs
# end _create_vmi_collection
def test_validate_input(self):
# * fail 400 if last part of non-None page_marker is not alphanumeric
# (non-None marker is uuid in anchored walks and fq_name_str_uuid
# in unanchored walks)
# * fail 400 if page_limit is not number(None, string, array, dict)
pass
# end test_validate_input
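    # Illustrative sketch only (not one of the original tests): how the 400
    # responses described in test_validate_input could be exercised over HTTP.
    # The query parameter names follow the tests below; the exact error text
    # returned by the server is not assumed here, and the method name is
    # hypothetical (leading underscore keeps it out of the test runner).
    def _example_validate_input_sketch(self):
        listen_ip = self._api_server_ip
        listen_port = self._api_server._args.listen_port
        # a non-numeric page_limit should be rejected with 400
        url = 'http://%s:%s/virtual-networks?page_limit=%s' % (
            listen_ip, listen_port, 'not-a-number')
        resp = requests.get(
            url,
            headers={'Content-type': 'application/json; charset="UTF-8"'})
        self.assertEqual(resp.status_code, 400)
    # end _example_validate_input_sketch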
def test_unanchored(self):
# 1. create a collection of n
# * cover with marker=None, no limit specified, run should be
# n/(default limit)
# * cover with marker=None, limit=n, run should be 1
# * cover with marker=None, limit=n/2, run should be 2
# * cover with marker=None, limit=1, run should be n
# * cover with marker=None, limit<=0, run should be 1
# * cover with marker=1, limit=n, run should be 1
# * cover with marker=n, limit=n, run should be 1 and empty
# * cover with marker=1, limit<=0, run should be 1
# * test with unicode/non-ascii char in fqn
vn_objs = self._create_vn_collection(self.default_paginate_count*2)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
def verify_collection_walk(page_limit=None):
marker = None
all_vn_ids = []
all_vn_count = self._vnc_lib.virtual_networks_list(
count=True)['virtual-networks']['count']
max_fetches = (old_div(all_vn_count,
(page_limit or self.default_paginate_count))) + 1
fetches = 0
while True:
if ((max_fetches > 0) and (fetches > max_fetches)):
break
fetches += 1
url = 'http://%s:%s/virtual-networks?page_marker=%s' %(
listen_ip, listen_port, marker)
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"'})
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
return
self.assertEqual(resp.status_code, 200)
read_vn_ids = [vn['uuid']
for vn in json.loads(resp.text)['virtual-networks']]
all_vn_ids.extend(read_vn_ids)
marker = json.loads(resp.text)['marker']
if marker is not None:
self.assertEqual(len(read_vn_ids),
page_limit or self.default_paginate_count)
else:
# all fetched
break
self.assertLessEqual(fetches, max_fetches)
self.assertEqual(set([o.uuid for o in vn_objs]) - set(all_vn_ids),
set([]))
# end verify_collection_walk
verify_collection_walk()
verify_collection_walk(page_limit=-1)
verify_collection_walk(page_limit=0)
verify_collection_walk(page_limit=10000)
verify_collection_walk(page_limit=1)
verify_collection_walk(page_limit=2)
logger.info("Verified unanchored pagination fetch.")
# end test_unanchored
def test_anchored_by_one_parent(self):
proj_obj = Project('%s-project' %(self.id()))
self._vnc_lib.project_create(proj_obj)
vn_objs = self._create_vn_collection(
self.default_paginate_count*2, proj_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
def verify_collection_walk(page_limit=None, fetch_expects=None):
marker = None
all_vn_ids = []
for fe_obj in fetch_expects or []:
url = 'http://%s:%s/virtual-networks?page_marker=%s&parent_id=%s' %(
listen_ip, listen_port, marker, proj_obj.uuid)
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"'})
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
return
self.assertEqual(resp.status_code, 200)
read_vn_ids = [vn['uuid']
for vn in json.loads(resp.text)['virtual-networks']]
self.assertEqual(len(read_vn_ids), fe_obj.num_objs)
marker = json.loads(resp.text)['marker']
self.assertEqual(marker, fe_obj.marker)
all_vn_ids.extend(read_vn_ids)
self.assertEqual(set([o.uuid for o in vn_objs]) - set(all_vn_ids),
set([]))
# end verify_collection_walk
sorted_vn_uuid = sorted([o.uuid for o in vn_objs])
FetchExpect = self.FetchExpect
verify_collection_walk(fetch_expects=[
FetchExpect(self.default_paginate_count,
sorted_vn_uuid[self.default_paginate_count-1]),
FetchExpect(self.default_paginate_count,
sorted_vn_uuid[(self.default_paginate_count*2)-1]),
FetchExpect(0, None)])
verify_collection_walk(page_limit=-1, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=0, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=1, fetch_expects=[
FetchExpect(1, val) for idx,val in enumerate(sorted_vn_uuid)] +
[FetchExpect(0, None)])
verify_collection_walk(page_limit=2, fetch_expects=[
FetchExpect(2, sorted_vn_uuid[(i*2)+1])
for i in range(old_div(len(vn_objs),2))] +
[FetchExpect(0, None)])
logger.info("Verified anchored pagination fetch with one parent.")
# end test_anchored_by_one_parent
def test_anchored_by_one_backref(self):
proj_obj = Project('%s-project' %(self.id()))
self._vnc_lib.project_create(proj_obj)
vn_obj = VirtualNetwork('vn1', parent_obj=proj_obj)
self._vnc_lib.virtual_network_create(vn_obj)
vmi_objs = self._create_vmi_collection(
(self.default_paginate_count*2)-1, vn_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
def verify_collection_walk(page_limit=None, fetch_expects=None):
marker = None
all_vmi_ids = []
for fe_obj in fetch_expects or []:
url = 'http://%s:%s/virtual-machine-interfaces?page_marker=%s&back_ref_id=%s' %(
listen_ip, listen_port, marker, vn_obj.uuid)
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"'})
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
return
self.assertEqual(resp.status_code, 200)
read_vmi_ids = [vmi['uuid']
for vmi in json.loads(resp.text)['virtual-machine-interfaces']]
self.assertEqual(len(read_vmi_ids), fe_obj.num_objs)
marker = json.loads(resp.text)['marker']
self.assertEqual(marker, fe_obj.marker)
all_vmi_ids.extend(read_vmi_ids)
self.assertEqual(set([o.uuid for o in vmi_objs]) - set(all_vmi_ids),
set([]))
# end verify_collection_walk
sorted_vmi_uuid = sorted([o.uuid for o in vmi_objs])
FetchExpect = self.FetchExpect
verify_collection_walk(fetch_expects=[
FetchExpect(self.default_paginate_count,
sorted_vmi_uuid[self.default_paginate_count-1]),
FetchExpect(self.default_paginate_count-1,
None)])
verify_collection_walk(page_limit=-1, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=0, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=1, fetch_expects=[
FetchExpect(1, val) for idx,val in enumerate(sorted_vmi_uuid)] +
[FetchExpect(0, None)])
verify_collection_walk(page_limit=2, fetch_expects=[
FetchExpect(2, sorted_vmi_uuid[1]),
FetchExpect(2, sorted_vmi_uuid[3]),
FetchExpect(2, sorted_vmi_uuid[5]),
FetchExpect(2, sorted_vmi_uuid[7]),
FetchExpect(1, None)])
logger.info("Verified anchored pagination fetch with one backref.")
# end test_anchored_by_one_backref
def test_anchored_by_parent_list(self):
proj1_obj = Project('%s-project1' %(self.id()))
self._vnc_lib.project_create(proj1_obj)
proj2_obj = Project('%s-project2' %(self.id()))
self._vnc_lib.project_create(proj2_obj)
vn_p1_objs = self._create_vn_collection(
self.default_paginate_count+1, proj1_obj)
vn_p2_objs = self._create_vn_collection(2, proj2_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
def verify_collection_walk(page_limit=None, fetch_expects=None):
all_vn_ids = []
def request_with_query_params(marker):
url = 'http://%s:%s/virtual-networks?page_marker=%s&parent_id=%s,%s' %(
listen_ip, listen_port, marker, proj1_obj.uuid, proj2_obj.uuid)
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"'})
return resp
def request_with_bulk_post(marker):
url = 'http://%s:%s/list-bulk-collection' %(listen_ip, listen_port)
body = {'type': 'virtual-network',
'parent_id': '%s,%s' %(proj1_obj.uuid, proj2_obj.uuid),
'page_marker': marker}
if page_limit is not None:
body['page_limit'] = page_limit
resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(body))
return resp
for req_method in [request_with_query_params,
request_with_bulk_post]:
marker = None
for fe_obj in fetch_expects or []:
resp = req_method(marker)
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
break
self.assertEqual(resp.status_code, 200)
read_vn_ids = [vn['uuid']
for vn in json.loads(resp.text)['virtual-networks']]
self.assertEqual(len(read_vn_ids), fe_obj.num_objs)
marker = json.loads(resp.text)['marker']
self.assertEqual(marker, fe_obj.marker)
all_vn_ids.extend(read_vn_ids)
if page_limit is not None and page_limit <= 0:
continue
self.assertEqual(
set([vn.uuid for vn in vn_p1_objs+vn_p2_objs]) - set(all_vn_ids),
set([]))
# end for req_method
# end verify_collection_walk
sorted_vn_uuid = sorted([o.uuid for o in (vn_p1_objs+vn_p2_objs)])
FetchExpect = self.FetchExpect
verify_collection_walk(fetch_expects=[
FetchExpect(self.default_paginate_count,
sorted_vn_uuid[self.default_paginate_count-1]),
FetchExpect(3, None)])
verify_collection_walk(page_limit=-1, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=0, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=1, fetch_expects=[
FetchExpect(1, val) for idx, val in enumerate(sorted_vn_uuid)] +
[FetchExpect(0, None)])
verify_collection_walk(page_limit=2, fetch_expects=[
FetchExpect(2, sorted_vn_uuid[1]),
FetchExpect(2, sorted_vn_uuid[3]),
FetchExpect(2, sorted_vn_uuid[5]),
FetchExpect(2, sorted_vn_uuid[7]),
FetchExpect(0, None)])
# end test_anchored_by_parent_list
def test_anchored_by_backref_list(self):
proj_obj = Project('%s-project' %(self.id()))
self._vnc_lib.project_create(proj_obj)
vn1_obj = VirtualNetwork('vn1', parent_obj=proj_obj)
self._vnc_lib.virtual_network_create(vn1_obj)
vn2_obj = VirtualNetwork('vn2', parent_obj=proj_obj)
self._vnc_lib.virtual_network_create(vn2_obj)
vmi_vn1_objs = self._create_vmi_collection(
self.default_paginate_count-1, vn1_obj)
vmi_vn2_objs = self._create_vmi_collection(
self.default_paginate_count-1, vn2_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
def verify_collection_walk(page_limit=None, fetch_expects=None):
all_vmi_ids = []
def request_with_query_params(marker):
url = 'http://%s:%s/virtual-machine-interfaces?page_marker=%s&back_ref_id=%s,%s' %(
listen_ip, listen_port, marker, vn1_obj.uuid, vn2_obj.uuid)
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"'})
return resp
def request_with_bulk_post(marker):
url = 'http://%s:%s/list-bulk-collection' %(listen_ip, listen_port)
body = {'type': 'virtual-machine-interface',
'back_ref_id': '%s,%s' %(vn1_obj.uuid, vn2_obj.uuid),
'page_marker': marker}
if page_limit is not None:
body['page_limit'] = page_limit
resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(body))
return resp
for req_method in [request_with_query_params,
request_with_bulk_post]:
marker = None
for fe_obj in fetch_expects or []:
resp = req_method(marker)
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
break
self.assertEqual(resp.status_code, 200)
read_vmi_ids = [vmi['uuid']
for vmi in json.loads(resp.text)['virtual-machine-interfaces']]
self.assertEqual(len(read_vmi_ids), fe_obj.num_objs)
marker = json.loads(resp.text)['marker']
self.assertEqual(marker, fe_obj.marker)
all_vmi_ids.extend(read_vmi_ids)
if page_limit is not None and page_limit <= 0:
continue
self.assertEqual(
set([vmi.uuid for vmi in vmi_vn1_objs+vmi_vn2_objs]) - set(all_vmi_ids),
set([]))
# end for req_method
# end verify_collection_walk
sorted_vmi_uuid = sorted([o.uuid for o in (vmi_vn1_objs+vmi_vn2_objs)])
FetchExpect = self.FetchExpect
verify_collection_walk(fetch_expects=[
FetchExpect(self.default_paginate_count,
sorted_vmi_uuid[self.default_paginate_count-1]),
FetchExpect(3, None)])
verify_collection_walk(page_limit=-1, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=0, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=1, fetch_expects=[
FetchExpect(1, val) for idx, val in enumerate(sorted_vmi_uuid)] +
[FetchExpect(0, None)])
verify_collection_walk(page_limit=2, fetch_expects=[
FetchExpect(2, sorted_vmi_uuid[1]),
FetchExpect(2, sorted_vmi_uuid[3]),
FetchExpect(2, sorted_vmi_uuid[5]),
FetchExpect(2, sorted_vmi_uuid[7]),
FetchExpect(0, None)])
# end test_anchored_by_backref_list
def test_by_obj_list(self):
proj_objs = [Project('%s-proj%s' %(self.id(), i))
for i in range(self.default_paginate_count+2)]
for proj_obj in proj_objs:
self._vnc_lib.project_create(proj_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
def verify_collection_walk(page_limit=None, fetch_expects=None):
all_proj_ids = []
def request_with_query_params(marker):
url = 'http://%s:%s/projects?page_marker=%s&obj_uuids=%s' %(
listen_ip, listen_port, marker,
','.join([o.uuid for o in proj_objs]))
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"'})
return resp
def request_with_bulk_post(marker):
url = 'http://%s:%s/list-bulk-collection' %(listen_ip, listen_port)
body = {'type': 'project',
'obj_uuids': '%s' %(','.join([o.uuid for o in proj_objs])),
'page_marker': marker}
if page_limit is not None:
body['page_limit'] = page_limit
resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(body))
return resp
for req_method in [request_with_query_params,
request_with_bulk_post]:
marker = None
for fe_obj in fetch_expects or []:
resp = req_method(marker)
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
break
self.assertEqual(resp.status_code, 200)
read_proj_ids = [proj['uuid']
for proj in json.loads(resp.text)['projects']]
self.assertEqual(len(read_proj_ids), fe_obj.num_objs)
marker = json.loads(resp.text)['marker']
self.assertEqual(marker, fe_obj.marker)
all_proj_ids.extend(read_proj_ids)
if page_limit is not None and page_limit <= 0:
continue
self.assertEqual(
set([proj.uuid for proj in proj_objs]) - set(all_proj_ids),
set([]))
# end for req_method
# end verify_collection_walk
proj_uuids = [o.uuid for o in proj_objs]
FetchExpect = self.FetchExpect
verify_collection_walk(fetch_expects=[
FetchExpect(self.default_paginate_count,
proj_uuids[self.default_paginate_count-1]),
FetchExpect(2, None)])
verify_collection_walk(page_limit=-1, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=0, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=1, fetch_expects=[
FetchExpect(1, val) for idx, val in enumerate(proj_uuids)] +
[FetchExpect(0, None)])
verify_collection_walk(page_limit=2, fetch_expects=[
FetchExpect(2, proj_uuids[1]),
FetchExpect(2, proj_uuids[3]),
FetchExpect(2, proj_uuids[5]),
FetchExpect(1, None)])
# end test_by_obj_list
def test_anchored_by_parent_list_shared(self):
proj1_obj = Project('%s-project1' %(self.id()))
self._vnc_lib.project_create(proj1_obj)
proj2_obj = Project('%s-project2' %(self.id()))
self._vnc_lib.project_create(proj2_obj)
vn_p1_objs = self._create_vn_collection(
self.default_paginate_count+1, proj1_obj)
vn_p2_objs = self._create_vn_collection(2, proj2_obj)
listen_ip = self._api_server_ip
listen_port = self._api_server._args.listen_port
        # create a couple of globally shared objects and verify they appear at
        # the end of pagination
proj3_obj = Project('%s-project3' %(self.id()))
self._vnc_lib.project_create(proj3_obj)
vn_p3_objs = self._create_vn_collection(
2, proj3_obj)
url = 'http://%s:%s/chmod' %(listen_ip, listen_port)
for vn_obj in vn_p3_objs:
body = {'uuid': vn_obj.uuid,
'global_access': cfgm_common.PERMS_R}
resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"'},
data=json.dumps(body))
def verify_collection_walk(page_limit=None, fetch_expects=None):
all_vn_ids = []
def request_with_query_params(marker):
url = 'http://%s:%s/virtual-networks?page_marker=%s&parent_id=%s,%s&shared=True' %(
listen_ip, listen_port, marker, proj1_obj.uuid, proj2_obj.uuid)
if page_limit is not None:
url += '&page_limit=%s' %(page_limit)
resp = requests.get(url,
headers={'Content-type': 'application/json; charset="UTF-8"',
'X_USER_DOMAIN_ID': str(uuid.uuid4())})
return resp
def request_with_bulk_post(marker):
url = 'http://%s:%s/list-bulk-collection' %(listen_ip, listen_port)
body = {'type': 'virtual-network',
'parent_id': '%s,%s' %(proj1_obj.uuid, proj2_obj.uuid),
'page_marker': marker,
'shared': True}
if page_limit is not None:
body['page_limit'] = page_limit
resp = requests.post(url,
headers={'Content-type': 'application/json; charset="UTF-8"',
'X_USER_DOMAIN_ID': str(uuid.uuid4())},
data=json.dumps(body))
return resp
for req_method in [request_with_query_params,
request_with_bulk_post]:
marker = None
for fe_obj in fetch_expects or []:
resp = req_method(marker)
if page_limit is not None and page_limit <= 0:
self.assertEqual(resp.status_code, 400)
break
self.assertEqual(resp.status_code, 200)
read_vn_ids = [vn['uuid']
for vn in json.loads(resp.text)['virtual-networks']]
self.assertEqual(len(read_vn_ids), fe_obj.num_objs)
marker = json.loads(resp.text)['marker']
self.assertEqual(marker, fe_obj.marker)
all_vn_ids.extend(read_vn_ids)
if page_limit is not None and page_limit <= 0:
continue
self.assertEqual(
set([vn.uuid for vn in vn_p1_objs+vn_p2_objs+vn_p3_objs]) -
set(all_vn_ids),
set([]))
# end for req_method
# end verify_collection_walk
sorted_vn_uuid = sorted([o.uuid for o in (vn_p1_objs+vn_p2_objs)])
sorted_shared_vn_uuid = sorted([o.uuid for o in vn_p3_objs])
FetchExpect = self.FetchExpect
verify_collection_walk(fetch_expects=[
FetchExpect(self.default_paginate_count,
sorted_vn_uuid[self.default_paginate_count-1]),
FetchExpect(self.default_paginate_count,
'shared:%s' %(sorted_shared_vn_uuid[-1])),
FetchExpect(0, None)])
verify_collection_walk(page_limit=-1, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=0, fetch_expects=[
FetchExpect(0, None)])
verify_collection_walk(page_limit=1, fetch_expects=[
FetchExpect(1, val) for idx, val in enumerate(sorted_vn_uuid)] +
[FetchExpect(1, 'shared:%s' %(val))
for idx, val in enumerate(sorted_shared_vn_uuid)] +
[FetchExpect(0, None)])
verify_collection_walk(page_limit=2, fetch_expects=[
FetchExpect(2, sorted_vn_uuid[1]),
FetchExpect(2, sorted_vn_uuid[3]),
FetchExpect(2, sorted_vn_uuid[5]),
FetchExpect(2, sorted_vn_uuid[7]),
FetchExpect(2, 'shared:%s' %(sorted_shared_vn_uuid[-1])),
FetchExpect(0, None)])
# end test_anchored_by_parent_list_shared
# end class TestPagination
class TestSubCluster(test_case.ApiServerTestCase):
default_subcluster_count = 5
def _get_rt_inst_obj(self):
vnc_lib = self._vnc_lib
rt_inst_obj = vnc_lib.routing_instance_read(
fq_name=['default-domain', 'default-project',
'ip-fabric', '__default__'])
return rt_inst_obj
# end _get_rt_inst_obj
def _get_ip(self, ip_w_pfx):
return str(IPNetwork(ip_w_pfx).ip)
# end _get_ip
def test_subcluster(self):
sub_cluster_obj = SubCluster(
'test-host',
sub_cluster_asn=64514)
self._vnc_lib.sub_cluster_create(sub_cluster_obj)
sub_cluster_obj = self._vnc_lib.sub_cluster_read(
fq_name=sub_cluster_obj.get_fq_name())
sub_cluster_obj.set_sub_cluster_asn(64515)
cant_modify = False
try:
self._vnc_lib.sub_cluster_update(sub_cluster_obj)
except Exception as e:
cant_modify = True
finally:
self.assertTrue(cant_modify,'subcluster asn cannot be modified')
sub_cluster_obj.set_sub_cluster_asn(64514)
# Now that subcluster is created add a bgp router
# with different ASN
rt_inst_obj = self._get_rt_inst_obj()
address_families = ['route-target', 'inet-vpn', 'e-vpn', 'erm-vpn',
'inet6-vpn']
bgp_addr_fams = AddressFamilies(address_families)
bgp_sess_attrs = [
BgpSessionAttributes(address_families=bgp_addr_fams)]
bgp_sessions = [BgpSession(attributes=bgp_sess_attrs)]
bgp_peering_attrs = BgpPeeringAttributes(session=bgp_sessions)
router_params = BgpRouterParams(router_type='external-control-node',
vendor='unknown', autonomous_system=64515,
identifier=self._get_ip('1.1.1.1'),
address=self._get_ip('1.1.1.1'),
port=179, address_families=bgp_addr_fams)
bgp_router_obj = BgpRouter('bgp-router', rt_inst_obj,
bgp_router_parameters=router_params)
bgp_router_obj.add_sub_cluster(sub_cluster_obj)
create_exception = False
try:
cur_id = self._vnc_lib.bgp_router_create(bgp_router_obj)
except Exception as e:
create_exception = True
finally:
            self.assertTrue(create_exception, 'bgp router asn must match the sub cluster asn')
# Now create the bgp with the same asn
bgp_router_obj.bgp_router_parameters.autonomous_system = 64514
try:
cur_id = self._vnc_lib.bgp_router_create(bgp_router_obj)
except Exception as e:
create_exception = False
finally:
            self.assertTrue(create_exception, 'bgp router create should succeed when its asn matches the sub cluster asn')
# Now that bgp object is created, modify asn
bgp_router_obj = self._vnc_lib.bgp_router_read(id=cur_id)
bgp_router_parameters = bgp_router_obj.get_bgp_router_parameters()
bgp_router_parameters.autonomous_system = 64515
bgp_router_obj.set_bgp_router_parameters(bgp_router_parameters)
modify_exception = False
try:
self._vnc_lib.bgp_router_update(bgp_router_obj)
except Exception as e:
modify_exception = True
finally:
self.assertTrue(modify_exception,'subcluster asn bgp asn should be same')
# Now create a new sub cluster with different asn and move bgp object
# to that sub cluster
sub_cluster_obj1 = SubCluster(
'test-host1',
sub_cluster_asn=64515)
self._vnc_lib.sub_cluster_create(sub_cluster_obj1)
sub_cluster_obj1 = self._vnc_lib.sub_cluster_read(
fq_name=sub_cluster_obj1.get_fq_name())
bgp_router_obj = self._vnc_lib.bgp_router_read(id=cur_id)
bgp_router_parameters = bgp_router_obj.get_bgp_router_parameters()
bgp_router_parameters.autonomous_system = 64515
bgp_router_obj.set_bgp_router_parameters(bgp_router_parameters)
bgp_router_obj.set_sub_cluster(sub_cluster_obj1)
try:
self._vnc_lib.bgp_router_update(bgp_router_obj)
except Exception as e:
modify_exception = False
finally:
            self.assertTrue(modify_exception, 'bgp router update should succeed when its asn matches the new sub cluster asn')
# Detach subcluster from the bgp object
bgp_router_obj = self._vnc_lib.bgp_router_read(id=cur_id)
bgp_router_obj.del_sub_cluster(sub_cluster_obj1)
no_delete_exception = True
try:
self._vnc_lib.bgp_router_update(bgp_router_obj)
except Exception as e:
no_delete_exception = False
finally:
            self.assertTrue(no_delete_exception, 'sub cluster could not be detached')
# end test_subcluster
# end class TestSubCluster
class TestApiServer(test_case.ApiServerTestCase):
def test_validate_communityattribute_type(self):
test_cases = [
'*:*',
'*:.*',
'.*:*',
'123:*',
'123:.*',
'*:123',
'.*:123',
'.*:.*',
]
        for value in test_cases:
            try:
                VncApiServer._validate_communityattribute_type(value)
            except ValueError as exc:
                self.fail('CommunityAttribute validation failed for %r: %s' % (value, exc))
# end class TestApiServer
if __name__ == '__main__':
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
# unittest.main(failfast=True)
unittest.main()
| 45.937929 | 132 | 0.618249 |
74b93d4be0ab8f90cc3467660ab4b39d02b01114 | 277 | py | Python | backend/api/migrations/0013_merge_20210528_1016.py | alairice/doccano | 27eff5caec1ec6ad31f1e74bd1b73b1dd43228dc | [
"MIT"
] | 2,082 | 2018-05-09T07:16:21.000Z | 2019-12-01T16:41:50.000Z | backend/api/migrations/0013_merge_20210528_1016.py | alairice/doccano | 27eff5caec1ec6ad31f1e74bd1b73b1dd43228dc | [
"MIT"
] | 365 | 2018-07-31T13:49:05.000Z | 2019-11-29T11:25:17.000Z | backend/api/migrations/0013_merge_20210528_1016.py | alairice/doccano | 27eff5caec1ec6ad31f1e74bd1b73b1dd43228dc | [
"MIT"
] | 476 | 2018-08-17T06:43:57.000Z | 2019-12-01T09:47:08.000Z | # Generated by Django 3.2.3 on 2021-05-28 10:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("api", "0009_annotations_relations_20210421_1445"),
("api", "0012_auto_20210514_0654"),
]
operations = []
| 19.785714 | 60 | 0.67148 |
d371c53546a0ab1f6473844da9f5e3582e18d4ca | 7,144 | py | Python | lib/psd_count_contigs/psd_count_contigsClient.py | nlharris/psd_count_contigs | 2b1d1e534dad3c4181258ab97942c7adbc19a406 | [
"MIT"
] | null | null | null | lib/psd_count_contigs/psd_count_contigsClient.py | nlharris/psd_count_contigs | 2b1d1e534dad3c4181258ab97942c7adbc19a406 | [
"MIT"
] | null | null | null | lib/psd_count_contigs/psd_count_contigsClient.py | nlharris/psd_count_contigs | 2b1d1e534dad3c4181258ab97942c7adbc19a406 | [
"MIT"
] | null | null | null | ############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
try:
import json as _json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as _json
import requests as _requests
import urlparse as _urlparse
import random as _random
import base64 as _base64
from ConfigParser import ConfigParser as _ConfigParser
import os as _os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
# This is bandaid helper function until we get a full
# KBase python auth client released
auth = _base64.encodestring(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
status = ret.status_code
if status >= 200 and status <= 299:
tok = _json.loads(ret.text)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination for user %s' % (user_id))
else:
raise Exception(ret.text)
return tok['access_token']
def _read_rcfile(file=_os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if _os.path.exists(file):
try:
with open(file) as authrc:
rawdata = _json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading authrc file %s: %s" % (file, e)
return authdata
def _read_inifile(file=_os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if _os.path.exists(file):
try:
config = _ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in ('user_id', 'token',
'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading INI file %s: %s" % (file, e)
return authdata
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class psd_count_contigs(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False,
trust_all_ssl_certificates=False):
if url is None:
raise ValueError('A url is required')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in _os.environ:
self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_context:
arg_hash['context'] = json_rpc_context
body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
json_header = None
if _CT in ret.headers:
json_header = ret.headers[_CT]
if _CT in ret.headers and ret.headers[_CT] == _AJ:
err = _json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
ret.encoding = 'utf-8'
resp = _json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
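    # Example of the JSON-RPC 1.1 request body that _call() posts for the
    # count_contigs method below (illustrative values; 'id' is random at
    # runtime):
    #   {"method": "psd_count_contigs.count_contigs",
    #    "params": ["my_workspace", "my_contigset"],
    #    "version": "1.1",
    #    "id": "0123456789"}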
def count_contigs(self, workspace_name, contigset_id, json_rpc_context = None):
if json_rpc_context and type(json_rpc_context) is not dict:
raise ValueError('Method count_contigs: argument json_rpc_context is not type dict as required.')
resp = self._call('psd_count_contigs.count_contigs',
[workspace_name, contigset_id], json_rpc_context)
return resp[0]
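# Illustrative usage sketch (not part of the generated client): the service URL,
# token and object names below are made up for demonstration only.
def _example_count_contigs_usage():
    client = psd_count_contigs('https://kbase.example.org/services/psd_count_contigs',
                               token='FAKE-TOKEN-FOR-ILLUSTRATION')
    return client.count_contigs('my_workspace', 'my_contigset')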
| 39.688889 | 109 | 0.575168 |
4e874f83909e989a87b196065b3b273d1697620f | 5,088 | py | Python | 3DBeam/inputs/eigenvectors/compare_eigenvector.py | JoZimmer/Beam-Models | e701c0bae6e3035e7a07cc590da4a132b133dcff | [
"BSD-3-Clause"
] | null | null | null | 3DBeam/inputs/eigenvectors/compare_eigenvector.py | JoZimmer/Beam-Models | e701c0bae6e3035e7a07cc590da4a132b133dcff | [
"BSD-3-Clause"
] | null | null | null | 3DBeam/inputs/eigenvectors/compare_eigenvector.py | JoZimmer/Beam-Models | e701c0bae6e3035e7a07cc590da4a132b133dcff | [
"BSD-3-Clause"
] | 1 | 2022-01-05T17:32:32.000Z | 2022-01-05T17:32:32.000Z | import sys
from matplotlib.pylab import *
import numpy as np
# pgf_with_rc_fonts = {"pgf.texsystem": "pdflatex"}
# matplotlib.rcParams.update(pgf_with_rc_fonts)
# measurements from padova
z_measured = [0, 0.115, 0.23, 0.345, 0.460]
z_adapted = [0 * 180/.46, 0.115 * 180/.46, 0.23 * 180/.46, 0.345 * 180/.46, 0.460 * 180/.46]
phi_1_measured = [0, 0.228, 0.498, 0.782, 1.0] # weak axis
phi_2_measured = [0, 0.22, 0.535, 0.732, 1.0] # strong axis
#results from eigenvalue analysis
eigenvector_matrix = 'inputs\\EigenvectorMatrix_mod_jz.dat'#'EigenvectorMatrix_conditioned.dat'
eigenvalues = 'inputs\\Eigenvalues.dat'#'Eigenvalues_conditioned.dat'
z = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = '),(', usecols = 0)
mode1_raw = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = '),(', usecols = 1, dtype=str)
mode2_raw = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = '),(', usecols = 2, dtype=str)
mode3_raw = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = '),(', usecols = 3, dtype=str)
modi_raw = [mode1_raw, mode2_raw, mode3_raw]
#mode1, mode2, mode3 = np.zeros(len(z),6), np.zeros(len(z),6), np.zeros(len(z),6)
z = np.insert(z, 0, 0.0)
modi = [np.zeros((len(z),6)),np.zeros((len(z),6)),np.zeros((len(z),6))]
for i in range(3):
for z_i in range(len(z)):
if z_i == 0:
continue
cur = modi_raw[i][z_i-1].split(',')
modi[i][z_i] = np.asarray([float(val) for val in cur])
np.save('inputs\\z_coords_gid_45.npy',z)
np.save('inputs\\EigenvectorsGid.npy', np.asarray(modi))
dof_direction_map = ['rotX', 'rotY','rotZ', 'x', 'y','z']
for i in range(3):
fig, ax = plt.subplots(ncols=6, num='modes')
plt.title('mode '+str(i+1))
for dof in range(6):
dof_z = modi[i][:,dof]
ax[dof].plot(dof_z, z, label = 'dof ' + dof_direction_map[dof])
ax[dof].grid()
ax[dof].legend()
plt.show()
# phi_1 = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = ',', usecols = 5)
# phi_2 = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = ',', usecols = 10)
# phi_3 = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = ',', usecols = 15)
# phi_4 = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = ',', usecols = 22)
# phi_5 = np.loadtxt(eigenvector_matrix, skiprows = 1, delimiter = ',', usecols = 29)
# freq_1 = round_((np.sqrt(np.loadtxt(eigenvalues, delimiter = ',', usecols = 0)) / (2*math.pi)), decimals = 3) # doesn't work with default Eigenvalues.dat file
# freq_2 = round_((np.sqrt(np.loadtxt(eigenvalues, delimiter = ',', usecols = 1)) / (2*math.pi)), decimals = 3)
# freq_3 = round_((np.sqrt(np.loadtxt(eigenvalues, delimiter = ',', usecols = 2)) / (2*math.pi)), decimals = 3)
# freq_4 = round_((np.sqrt(np.loadtxt(eigenvalues, delimiter = ',', usecols = 3)) / (2*math.pi)), decimals = 3)
# freq_5 = round_((np.sqrt(np.loadtxt(eigenvalues, delimiter = ',', usecols = 4)) / (2*math.pi)), decimals = 3)
# for i in range(0,46):
# print(phi_1[i] / phi_1[-1])
# for i in range(0,46):
# print(phi_2[i] / phi_2[-1])
# for i in range(0,46):
# print(phi_3[i] / phi_3[-1])
# for i in range(0,46):
# print(phi_4[i] / phi_4[-1])
# for i in range(0,46):
# print(phi_5[i] / phi_5[-1])
#plot
# fig = plt.figure('eigenvector generic highrise', figsize=(5.85,3.5), frameon=True)
# plt.subplots_adjust(wspace=0.5)
# plt.subplot(1,8,(1,3))
# plt.plot(phi_1 / phi_1[-1], z, 'k', linewidth=1, label=r'$\Phi_1$')
# plt.plot(phi_4 / phi_4[-1], z, '--k', linewidth=1, label=r'$\Phi_4$')
# plt.plot(phi_1_measured, z_adapted, ':k', linewidth=1, label=r'$\Phi_{ref}$')
# plt.title('y-sway')
# plt.xlim(-1.1, 1.1)
# plt.grid(True)
# plt.yticks(np.arange(0, 200, 20))
# plt.xticks(ticks = [-1, -0.5, 0, 0.5, 1])
# plt.legend(loc="upper left")
# plt.subplot(1,8,(4,6))
# plt.plot(phi_2 / phi_2[-1], z, 'k', linewidth=1, label=r'$\Phi_2$')
# plt.plot(phi_5 / phi_5[-1], z, '--k', linewidth=1, label=r'$\Phi_5$')
# plt.plot(phi_2_measured, z_adapted, ':k', linewidth=1, label=r'$\Phi_{ref}$')
# plt.title('x-sway')
# plt.grid(True)
# plt.xticks(ticks = [-1, -0.5, 0, 0.5, 1])
# plt.yticks(np.arange(0, 200, 20))
# plt.xlim(-1.1, 1.1)
# plt.gca().axes.get_yaxis().set_ticklabels([])
# plt.legend(loc="upper left")
# plt.subplot(1,8,(7,8))
# plt.plot(phi_3 / phi_3[-1], z, 'k', linewidth=1, label=r'$\Phi_3$')
# plt.plot(np.array([0,0.5,1]), np.array([0,90,180]), '-.k', linewidth=1, label=r'$\Phi_{lin}$')
# plt.title('torsion')
# plt.grid(True)
# plt.xlim(-0.1, 1.1)
# plt.xticks(ticks = [0, 0.5, 1])
# plt.yticks(np.arange(0, 200, 20))
# plt.gca().axes.get_yaxis().set_ticklabels([])
# plt.legend(loc="upper left")
# fig.text(0.5, 0.01, r'normalized eigenform $\Phi_{normalized}$', ha='center')
# fig.text(0.04, 0.5, r'height $z$ [m]', va='center', rotation='vertical')
# plt.show()
# fig.savefig('/home/koenig/Desktop/Graphs/eigenvector_generic_highrise.pdf')
# fig.savefig('/home/koenig/Desktop/Graphs/eigenvector_generic_highrise.pgf')
# fig.savefig('/home/koenig/Desktop/Graphs/eigenvector_generic_highrise.svg') | 42.4 | 167 | 0.636989 |
c3f4cf959ad953c0dc67efd176ff8c4ec187ed12 | 97 | py | Python | monitor/__init__.py | GdoongMathew/Monitor | 1affeea0ca4f61d84fd8f0b8838a847da16854c2 | [
"MIT"
] | null | null | null | monitor/__init__.py | GdoongMathew/Monitor | 1affeea0ca4f61d84fd8f0b8838a847da16854c2 | [
"MIT"
] | null | null | null | monitor/__init__.py | GdoongMathew/Monitor | 1affeea0ca4f61d84fd8f0b8838a847da16854c2 | [
"MIT"
] | null | null | null | from .reader import NVGPUReader
from .reader import CPUReader
from .monitor import BasicMonitor
| 19.4 | 33 | 0.835052 |
930a9bde08163fe4edf860e4527a4c2097c28915 | 5,029 | py | Python | floppymusic-web.py | MisterX2000/floppymusic-web | 6fc86757211203fe203c27d64760310766fa7af0 | [
"MIT"
] | null | null | null | floppymusic-web.py | MisterX2000/floppymusic-web | 6fc86757211203fe203c27d64760310766fa7af0 | [
"MIT"
] | 4 | 2017-05-24T05:15:36.000Z | 2017-05-24T21:45:59.000Z | floppymusic-web.py | MisterX2000/floppymusic-web | 6fc86757211203fe203c27d64760310766fa7af0 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, url_for, flash, g
from flask_uploads import UploadSet, configure_uploads, UploadNotAllowed
import argparse
import subprocess
import sqlite3
import os
parser = argparse.ArgumentParser(description="IMPORTANT! To ensure floppymusics functionality this script must be run as root.")
parser.add_argument("--port", help="set Flask web port", default=5000, type=int)
parser.add_argument("--host", help="set Flask ip binding (host)", default="0.0.0.0")
parser.add_argument("-debug", help="enable Flask debug option", action='store_true', default=False)
args = parser.parse_args()
app = Flask(__name__)
app.secret_key = "super secret key"
DATABASE = "database.sqlite"
db = sqlite3.connect(DATABASE)
c = db.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS songs(id INTEGER PRIMARY KEY, name TEXT, dropfac REAL)""")
db.commit()
db.close()
midis = UploadSet("MIDIS", "mid")
app.config["UPLOADED_MIDIS_DEST"] = "uploads/"
configure_uploads(app, midis)
playing = None
proc = None
def get_db():
# Opens a new database connection if there is none yet for the current application context.
if not hasattr(g, "sqlite_db"):
g.sqlite_db = sqlite3.connect(DATABASE)
return g.sqlite_db
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.teardown_appcontext
def close_db(error):
    # Closes the database again at the end of the request.
if hasattr(g, "sqlite_db"):
g.sqlite_db.close()
@app.route("/")
def index():
return render_template("index.html", playing=playing, songs=query_db("""SELECT * FROM songs"""))
@app.route("/add", methods=["GET", "POST"])
def add():
if request.method == "POST" and "midi" in request.files:
try:
if request.files['midi'].filename == '':
flash("No file selected", "alert-warning")
return render_template("add.html")
filename = midis.save(request.files["midi"])
except UploadNotAllowed:
flash("Upload not allowed (MIDI Files only)", "alert-danger")
return render_template("add.html")
dropfactor = request.form["drop-factor"]
get_db().execute("""INSERT INTO songs (name, dropfac) VALUES(?, ?)""", [str(filename), float(dropfactor)])
get_db().commit()
flash(str(filename) + " uploaded", "alert-success")
return render_template("add.html")
@app.route("/stop")
def stop():
global playing
global proc
playing = None
if proc is None:
flash("Process not started", "alert-danger")
else:
if proc.poll() is None:
proc.terminate()
flash("Process stopped", "alert-success")
else:
flash("Process already stopped", "alert-success")
return redirect(url_for("index"))
@app.route("/play/<song_id>")
def play(song_id):
global playing
global proc
if os.path.isfile(app.config["UPLOADED_MIDIS_DEST"] + query_db("""SELECT name FROM songs WHERE id=?""", (song_id,))[0][0]):
playing = query_db("""SELECT * FROM songs WHERE id=?""", (song_id,))[0]
else:
flash("File not found", "alert-danger")
return redirect(url_for("index"))
try:
proc = subprocess.Popen(["./floppymusic", "-d " + str(playing[2]), app.config["UPLOADED_MIDIS_DEST"] + str(playing[1])])
except FileNotFoundError:
        flash("Floppymusic file not found", "alert-danger")
return redirect(url_for("index"))
return redirect(url_for("index"))
@app.route("/edit/<song_id>", methods=["GET", "POST"])
def edit(song_id):
if request.method == "POST":
name = request.form["file-name"]
dropfac = request.form["drop-factor"]
try:
os.rename(app.config["UPLOADED_MIDIS_DEST"] + query_db("""SELECT name FROM songs WHERE id=?""", (song_id,))[0][0],
app.config["UPLOADED_MIDIS_DEST"] + name + ".mid")
except FileNotFoundError:
flash("File not found", "alert-danger")
return redirect(url_for('index'))
get_db().execute("""UPDATE songs SET name=?,dropfac=? WHERE id=?""", [str(name) + ".mid", float(dropfac), int(song_id)])
get_db().commit()
flash("Edited {}. {}.mid ({})".format(song_id, name, dropfac), "alert-success")
return redirect(url_for('index'))
return render_template('edit.html', song=query_db("""SELECT * FROM songs WHERE id=?""", (song_id,)))
@app.route("/delete/<song_id>")
def delete(song_id):
try:
os.remove("uploads/" + query_db("""SELECT name FROM songs WHERE id=?""", (song_id,))[0][0])
except FileNotFoundError:
flash("File not found", "alert-danger")
get_db().execute("""DELETE FROM songs WHERE id=? """, (song_id,))
get_db().commit()
return redirect(url_for("index"))
if __name__ == "__main__":
app.run(host=args.host, port=args.port, debug=args.debug)
| 34.682759 | 128 | 0.641479 |
1fb9c9b268ba8250c3a2c02829f29af9031513dc | 19,987 | py | Python | src/cogent3/util/unit_test.py | Lmaster20/cogent3 | 1d5ff1ba2b3d42736f8f04de8507b5cd585b4fe9 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/util/unit_test.py | Lmaster20/cogent3 | 1d5ff1ba2b3d42736f8f04de8507b5cd585b4fe9 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/util/unit_test.py | Lmaster20/cogent3 | 1d5ff1ba2b3d42736f8f04de8507b5cd585b4fe9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Extension of the built-in unittest framework for floating-point comparisons.
Specific Extensions:
assertFloatEqual, assertFloatEqualAbs, and assertFloatEqualRel give fine-
grained control over how floating point numbers (or lists thereof) are tested
for equality.
assertContains and assertNotContains give more helpful error
messages when testing whether an observed item is present or absent in a set
of possibilities. Ditto assertGreaterThan, assertLessThan, and assertIsProb.
assertSameItems and assertEqualItems test the items in a list
for pairwise identity and equality respectively (i.e. the observed and
expected values must have the same number of each item, though the order can
differ).
assertSimilarMeans and assertSimilarFreqs allow you to test stochastic results
by setting an explicit P-value and checking that the result is not improbable
given the expected P-value. Please use these instead of guessing confidence
intervals! The major advantage is that you can reset the P-value globally over
the whole test suite, so that rare failures don't occur every time.
"""
from unittest import TestCase as orig_TestCase
from unittest import TestSuite, findTestCases, main
import numpy
from numpy import (
array,
asarray,
isfinite,
logical_and,
logical_or,
ravel,
testing,
zeros,
)
from cogent3.maths.stats.test import G_ind, t_two_sample
from cogent3.util.misc import recursive_flatten
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = [
"Rob Knight",
"Peter Maxwell",
"Sandra Smit",
"Zongzhi Liu",
"Micah Hamady",
"Daniel McDonald",
]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
# SUPPORT2425
class FakeRandom(object):
"""Drop-in substitute for random.random that provides items from list."""
def __init__(self, data, circular=False):
"""Returns new FakeRandom object, using list of items in data.
circular: if True (default is False), wraps the list around. Otherwise,
raises IndexError when we run off the end of the list.
WARNING: data must always be iterable, even if it's a single item.
"""
self._data = data
self._ptr = -1
self._circular = circular
def __call__(self, *args, **kwargs):
"""Returns next item from the list in self._data.
Raises IndexError when we run out of data.
"""
self._ptr += 1
# wrap around if circular
if self._circular:
if self._ptr >= len(self._data):
self._ptr = 0
return self._data[self._ptr]
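# Illustrative sketch (not part of the original module): FakeRandom can stand in
# for random.random so tests consume a known sequence of "random" draws. The
# values below are arbitrary.
def _example_fake_random_usage():
    fake = FakeRandom([0.1, 0.5, 0.9], circular=True)
    return [fake() for _ in range(5)]  # [0.1, 0.5, 0.9, 0.1, 0.5]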
class TestCase(orig_TestCase):
"""Adds some additional utility methods to unittest.TestCase.
Notably, adds facilities for dealing with floating point numbers,
and some common templates for replicated tests.
BEWARE: Do not start any method with 'test' unless you want it to actually
run as a test suite in every instance!
"""
_suite_pvalue = None # see TestCase._set_suite_pvalue()
def _get_values_from_matching_dicts(self, d1, d2):
"""Gets corresponding values from matching dicts"""
if set(d1) != set(d2):
return None
# might not be in same order
return list(d1.values()), [d2[k] for k in d1]
def errorCheck(self, call, known_errors):
"""Applies function to (data, error) tuples, checking for error"""
for (data, error) in known_errors:
self.assertRaises(error, call, data)
def valueCheck(self, call, known_values, arg_prefix="", eps=None):
"""Applies function to (data, expected) tuples, treating data as args"""
for (data, expected) in known_values:
observed = eval("call(" + arg_prefix + "data)")
try:
allowed_diff = float(eps)
except TypeError:
self.assertEqual(observed, expected)
else:
self.assertFloatEqual(observed, expected, allowed_diff)
def assertFloatEqualRel(self, obs, exp, eps=1e-6):
"""Tests whether two floating point numbers/arrays are approx. equal.
Checks whether the distance is within epsilon relative to the value
of the sum of observed and expected. Use this method when you expect
the difference to be small relative to the magnitudes of the observed
and expected values.
Note: for arbitrary objects, need to compare the specific attribute
that's numeric, not the whole object, using this method.
"""
# do array check first
# note that we can't use array ops to combine, because we need to check
# at each element whether the expected is zero to do the test to avoid
# floating point error.
# WARNING: numpy iterates over objects that are not regular Python
# floats/ints, so need to explicitly catch scalar values and prevent
# cast to array if we want the exact object to print out correctly.
is_array = False
if hasattr(obs, "keys") and hasattr(exp, "keys"): # both dicts?
result = self._get_values_from_matching_dicts(obs, exp)
if result:
obs, exp = result
else:
try:
iter(obs)
iter(exp)
except TypeError:
obs = [obs]
exp = [exp]
else:
try:
arr_obs = array(obs)
arr_exp = array(exp)
arr_diff = arr_obs - arr_exp
if arr_obs.shape != arr_exp.shape:
self.fail(
"Wrong shape: Got %s, but expected %s"
% (repr(obs), repr(exp))
)
obs = arr_obs.ravel()
exp = arr_exp.ravel()
is_array = True
except (TypeError, ValueError):
pass
# shape mismatch can still get by...
# explict cast is to work around bug in certain versions of numpy
# installed version on osx 10.5
if asarray(obs, object).shape != asarray(exp, object).shape:
self.fail("Wrong shape: Got %s, but expected %s" % (obs, exp))
for observed, expected in zip(obs, exp):
# try the cheap comparison first
if observed == expected:
continue
try:
sum = float(observed + expected)
diff = float(observed - expected)
if sum == 0:
if is_array:
self.assertFalse(
abs(diff) > abs(eps),
"Got %s, but expected %s (diff was %s)"
% (repr(arr_obs), repr(arr_exp), repr(arr_diff)),
)
else:
self.assertFalse(
abs(diff) > abs(eps),
"Got %s, but expected %s (diff was %s)"
% (repr(observed), repr(expected), repr(diff)),
)
else:
if is_array:
self.assertFalse(
abs(diff / sum) > abs(eps),
"Got %s, but expected %s (diff was %s)"
% (repr(arr_obs), repr(arr_exp), repr(arr_diff)),
)
else:
self.assertFalse(
abs(diff / sum) > abs(eps),
"Got %s, but expected %s (diff was %s)"
% (repr(observed), repr(expected), repr(diff)),
)
except (TypeError, ValueError, AttributeError, NotImplementedError):
self.fail("Got %s, but expected %s" % (repr(observed), repr(expected)))
def assertFloatEqualAbs(self, obs, exp, eps=1e-6):
"""
Tests whether two floating point numbers are approximately equal.
Checks whether the absolute value of (a - b) is within epsilon. Use
this method when you expect that one of the values should be very
small, and the other should be zero.
"""
# do array check first
# note that we can't use array ops to combine, because we need to check
# at each element whether the expected is zero to do the test to avoid
# floating point error.
if hasattr(obs, "keys") and hasattr(exp, "keys"): # both dicts?
result = self._get_values_from_matching_dicts(obs, exp)
if result:
obs, exp = result
else:
try:
iter(obs)
iter(exp)
except TypeError:
obs = [obs]
exp = [exp]
else:
try:
arr_obs = array(obs)
arr_exp = array(exp)
if arr_obs.shape != arr_exp.shape:
self.fail(
"Wrong shape: Got %s, but expected %s"
% (repr(obs), repr(exp))
)
diff = arr_obs - arr_exp
self.assertFalse(
abs(diff).max() > eps,
"Got %s, but expected %s (diff was %s)"
% (repr(obs), repr(exp), repr(diff)),
)
return
except (TypeError, ValueError):
pass
# only get here if array comparison failed
for observed, expected in zip(obs, exp):
# cheap comparison first
if observed == expected:
continue
try:
diff = observed - expected
self.assertFalse(
abs(diff) > abs(eps),
"Got %s, but expected %s (diff was %s)"
% (repr(observed), repr(expected), repr(diff)),
)
except (TypeError, ValueError, AttributeError, NotImplementedError):
self.fail("Got %s, but expected %s" % (repr(observed), repr(expected)))
def assertFloatEqual(self, obs, exp, eps=1e-6, rel_eps=None, abs_eps=None):
"""Tests whether two floating point numbers are approximately equal.
If one of the arguments is zero, tests the absolute magnitude of the
difference; otherwise, tests the relative magnitude.
Use this method as a reasonable default.
"""
obs = numpy.asarray(obs, dtype="O")
exp = numpy.asarray(exp, dtype="O")
obs = numpy.ravel(obs)
exp = numpy.ravel(exp)
if obs.shape != exp.shape:
self.fail("Shape mismatch. Got, %s but expected %s" % (obs, exp))
for observed, expected in zip(obs, exp):
if self._is_equal(observed, expected):
continue
try:
rel_eps = rel_eps or eps
abs_eps = abs_eps or eps
if (observed == 0) or (expected == 0):
self.assertFloatEqualAbs(observed, expected, abs_eps)
else:
self.assertFloatEqualRel(observed, expected, rel_eps)
except (TypeError, ValueError, AttributeError, NotImplementedError):
self.fail("Got %s, but expected %s" % (repr(observed), repr(expected)))
def _is_equal(self, observed, expected):
"""Returns True if observed and expected are equal, False otherwise."""
# errors to catch: TypeError when obs is None
tolist_errors = (AttributeError, ValueError, TypeError)
try:
obs = observed.tolist()
except tolist_errors:
obs = observed
try:
exp = expected.tolist()
except tolist_errors:
exp = expected
return obs == exp
def failUnlessEqual(self, observed, expected, msg=None):
"""Fail if the two objects are unequal as determined by !=
Overridden to make error message enforce order of observed, expected.
Use numpy.testing.assert_equal if ValueError, TypeError raised.
"""
try:
if not self._is_equal(observed, expected):
raise self.failureException(
msg or "Got %s, but expected %s" % (repr(observed), repr(expected))
)
except (ValueError, TypeError) as e:
# The truth value of an array with more than one element is
# ambiguous. Use a.any() or a.all()
# descriptor 'tolist' of 'numpy.generic' object needs an argument
testing.assert_equal(observed, expected)
def failIfEqual(self, observed, expected, msg=None):
"""Fail if the two objects are equal as determined by =="""
try:
self.assertEqual(observed, expected)
except self.failureException:
pass
else:
raise self.failureException(
msg
or "Observed %s and expected %s: shouldn't test equal"
% (repr(observed), repr(expected))
)
# following needed to get our version instead of unittest's
assertEqual = assertEquals = failUnlessEqual
assertNotEqual = assertNotEquals = failIfEqual
def assertEqualItems(self, observed, expected, msg=None):
"""Fail if the two items contain unequal elements"""
obs_items = list(observed)
exp_items = list(expected)
if len(obs_items) != len(exp_items):
raise self.failureException(
msg
or "Observed and expected are different lengths: %s and %s"
% (len(obs_items), len(exp_items))
)
obs_items.sort()
exp_items.sort()
for index, (obs, exp) in enumerate(zip(obs_items, exp_items)):
if obs != exp:
raise self.failureException(
msg
or "Observed %s and expected %s at sorted index %s"
% (obs, exp, index)
)
def assertSameItems(self, observed, expected, msg=None):
"""Fail if the two items contain non-identical elements"""
obs_items = list(observed)
exp_items = list(expected)
if len(obs_items) != len(exp_items):
raise self.failureException(
msg
or "Observed and expected are different lengths: %s and %s"
% (len(obs_items), len(exp_items))
)
obs_ids = [(id(i), i) for i in obs_items]
exp_ids = [(id(i), i) for i in exp_items]
obs_ids.sort()
exp_ids.sort()
for index, (obs, exp) in enumerate(zip(obs_ids, exp_ids)):
o_id, o = obs
e_id, e = exp
if o_id != e_id: # i.e. the ids are different
raise self.failureException(
msg
or "Observed %s <%s> and expected %s <%s> at sorted index %s"
% (o, o_id, e, e_id, index)
)
def assertContains(self, observed, item, msg=None):
"""Fail if item not in observed"""
try:
if item in observed:
return
except (TypeError, ValueError):
pass
raise self.failureException(
msg or "Item %s not found in %s" % (repr(item), repr(observed))
)
def assertNotContains(self, observed, item, msg=None):
"""Fail if item in observed"""
try:
if item not in observed:
return
except (TypeError, ValueError):
return
raise self.failureException(
msg or "Item %s should not have been in %s" % (repr(item), repr(observed))
)
def assertGreaterThan(self, observed, value, msg=None):
"""Fail if observed is <= value"""
try:
if value is None or observed is None:
raise ValueError
if (asarray(observed) > value).all():
return
except:
pass
raise self.failureException(
msg or "Observed %s has elements <= %s" % (repr(observed), repr(value))
)
def assertLessThan(self, observed, value, msg=None):
"""Fail if observed is >= value"""
try:
if value is None or observed is None:
raise ValueError
if (asarray(observed) < value).all():
return
except:
pass
raise self.failureException(
msg or "Observed %s has elements >= %s" % (repr(observed), repr(value))
)
def assertIsProb(self, observed, msg=None):
"""Fail is observed is not between 0.0 and 1.0"""
try:
if observed is None:
raise ValueError
if (asarray(observed) >= 0.0).all() and (asarray(observed) <= 1.0).all():
return
except:
pass
raise self.failureException(
msg or "Observed %s has elements that are not probs" % (repr(observed))
)
def _set_suite_pvalue(self, pvalue):
"""Sets the test suite pvalue to be used in similarity tests
This value is by default None. The pvalue used in this case is
specified in the test module itself. The purpose of this method is to
set the pvalue to be used when running a massive test suite
"""
self._suite_pvalue = pvalue
def assertSimilarMeans(self, observed, expected, pvalue=0.01, msg=None):
"""Fail if observed p is lower than pvalue"""
if self._suite_pvalue:
pvalue = self._suite_pvalue
observed, expected = asarray(observed), asarray(expected)
t, p = t_two_sample(observed, expected)
# handle case where all elements were the same
if p is None or not isfinite(p):
if not observed.shape:
observed = observed.reshape((1,))
if not expected.shape:
expected = expected.reshape((1,))
if observed[0] == expected[0]:
return
elif p > pvalue:
return
else:
raise self.failureException(
msg or "p-value %s, t-test p %s" % (repr(pvalue), repr(p))
)
def assertSimilarFreqs(self, observed, expected, pvalue=0.01, msg=None):
"""Fail if observed p is lower than pvalue"""
if self._suite_pvalue:
pvalue = self._suite_pvalue
obs_ravel = ravel(asarray(observed))
exp_ravel = ravel(asarray(expected))
m = zeros((2, len(obs_ravel)))
m[0, :] = obs_ravel
m[1, :] = exp_ravel
G, p = G_ind(m)
if p > pvalue:
return
else:
raise self.failureException(
msg or "p-value %s, G-test p %s" % (repr(pvalue), repr(p))
)
def assertSameObj(self, observed, expected, msg=None):
"""Fail if 'observed is not expected'"""
try:
if observed is expected:
return
except:
pass
raise self.failureException(
msg
or "Observed %s is not the same as expected %s"
% (repr(observed), repr(expected))
)
def assertNotSameObj(self, observed, expected, msg=None):
"""Fail if 'observed is expected'"""
try:
if observed is not expected:
return
except:
pass
raise self.failureException(
msg
or "Observed %s is the same as expected %s"
% (repr(observed), repr(expected))
)
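# Hypothetical usage sketch (not part of the original module): a TestCase
# subclass exercising the comparison helpers described in the module docstring.
# The method is deliberately not named test_* so it is never auto-collected.
class _ExampleComparisonUsage(TestCase):
    def demo_comparison_helpers(self):
        self.assertFloatEqual(0.1 + 0.2, 0.3)          # relative tolerance
        self.assertFloatEqualAbs(1e-9, 0.0, eps=1e-6)  # absolute tolerance
        self.assertEqualItems([1, 2, 2], [2, 1, 2])    # order-insensitive
        self.assertIsProb(0.25)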
| 37.081633 | 87 | 0.553009 |
de3719bd3024869b259b9109ca2c04b0a330dcf9 | 180 | py | Python | config/configuration.py | JobQiu/bi-att-flow | ef5058ca5b7e08bdae930340295786a9c047a664 | [
"Apache-2.0"
] | null | null | null | config/configuration.py | JobQiu/bi-att-flow | ef5058ca5b7e08bdae930340295786a9c047a664 | [
"Apache-2.0"
] | null | null | null | config/configuration.py | JobQiu/bi-att-flow | ef5058ca5b7e08bdae930340295786a9c047a664 | [
"Apache-2.0"
] | null | null | null | class Config():
def __init__(self):
self.squadLocation = "/content/bi-att-flow/data/squad"
self.gloveLocation = "/content/bi-att-flow/data/glove"
pass
| 25.714286 | 62 | 0.633333 |
7aaf03e55aa5ee812d4b7b122436fc8ee5950860 | 1,788 | py | Python | banti/conncomp.py | aimsravi/TELUGU-OCR | 6b15e37fd0ad93e5e2ac90b8822b06e64230f05f | [
"Apache-2.0"
] | 41 | 2015-12-19T15:55:35.000Z | 2021-08-12T22:29:44.000Z | banti/conncomp.py | aimsravi/TELUGU-OCR | 6b15e37fd0ad93e5e2ac90b8822b06e64230f05f | [
"Apache-2.0"
] | 9 | 2015-12-05T04:20:55.000Z | 2022-01-19T21:46:53.000Z | banti/conncomp.py | aimsravi/TELUGU-OCR | 6b15e37fd0ad93e5e2ac90b8822b06e64230f05f | [
"Apache-2.0"
] | 16 | 2016-01-25T11:45:49.000Z | 2021-11-10T06:53:32.000Z | import scipy.ndimage.measurements as meas
from .helpers import arr_to_ascii_art
class Component():
def __init__(self, big_img, slice, index):
self.index = index
self.pix = big_img[slice] == index
self.slice = slice
self.y, self.y2 = slice[0].start, slice[0].stop
self.ht = self.y2 - self.y
self.x, self.x2 = slice[1].start, slice[1].stop
self.wd = self.x2 - self.x
def __lt__(self, other):
overlap = max(0, min(self.x2 - other.x, other.x2 - self.x) - 1)
if overlap / min(self.wd, other.wd) < .5:
return self.x < other.x
else:
return self.y + self.ht/2 < other.y + other.ht/2
def __contains__(self, item):
if isinstance(item, Component):
return item.x >= self.x and item.x2 <= self.x2 and \
item.y >= self.y and item.y2 <= self.y2
else:
raise NotImplementedError("Type of item is unknown: " + type(item))
def has_center_of(self, other):
return self.x <= (other.x + other.x2)/2 <= self.x2 and \
self.y <= (other.y + other.y2)/2 <= self.y2
def small_str(self):
return "Index:{} Range x: {}-{}({}) y:{}-{}({})\n".format(self.index,
self.x, self.x2, self.wd,
self.y, self.y2, self.ht)
def __str__(self):
return self.small_str() + "\n" + arr_to_ascii_art(self.pix)
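# Illustrative sketch (not part of the original module): get_conn_comp (defined
# below) labels a binary image and wraps each blob in a Component. numpy is an
# extra import used only by this made-up example.
def _example_components():
    import numpy as np
    img = np.array([[1, 1, 0, 0],
                    [0, 0, 0, 1],
                    [0, 0, 0, 1]])
    components, labelled = get_conn_comp(img)
    # sorted left-to-right via Component.__lt__, e.g. [(0, 0, 2, 1), (3, 1, 1, 2)]
    return [(c.x, c.y, c.wd, c.ht) for c in components]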
def get_conn_comp(imgarr, sort=True):
labelled_image, n_components = meas.label(imgarr)
slices = meas.find_objects(labelled_image)
components = []
for islice, slaiss in enumerate(slices):
components.append(Component(labelled_image, slaiss, islice+1))
if sort:
components = sorted(components)
return components, labelled_image | 32.509091 | 79 | 0.587808 |
921d0f26f502b0d59c0ba44e40c95ef2b273c492 | 1,960 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateFlowProjectRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateFlowProjectRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CreateFlowProjectRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class CreateFlowProjectRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'CreateFlowProject')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_ProductType(self):
return self.get_query_params().get('ProductType')
def set_ProductType(self,ProductType):
self.add_query_param('ProductType',ProductType)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name) | 35 | 74 | 0.766837 |
aa5f3e6e9ad5a9b303ad9726622917e8ac2ecbe4 | 354 | py | Python | app/__init__.py | tonyin/fluffmo | 335b0c072f536b9cd4ed4b75d521ef2403f2000c | [
"Apache-2.0"
] | null | null | null | app/__init__.py | tonyin/fluffmo | 335b0c072f536b9cd4ed4b75d521ef2403f2000c | [
"Apache-2.0"
] | null | null | null | app/__init__.py | tonyin/fluffmo | 335b0c072f536b9cd4ed4b75d521ef2403f2000c | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from werkzeug.contrib.fixers import ProxyFix
app = Flask(
__name__,
instance_relative_config=True,
static_url_path='/fluffymomo/static'
)
# Load default config and then instance config
app.config.from_object('config')
app.config.from_pyfile('config.py')
app.wsgi_app = ProxyFix(app.wsgi_app)
from app import views
| 20.823529 | 46 | 0.774011 |
80251871562022e2440b4cdb3e56f83c037d5175 | 1,379 | py | Python | neofaker/util.py | spacelis/neofaker | 72e2f687280d431e864c55531f29ef3cf2edcdde | [
"MIT"
] | null | null | null | neofaker/util.py | spacelis/neofaker | 72e2f687280d431e864c55531f29ef3cf2edcdde | [
"MIT"
] | null | null | null | neofaker/util.py | spacelis/neofaker | 72e2f687280d431e864c55531f29ef3cf2edcdde | [
"MIT"
] | null | null | null | """
File: util.py
Author: Wen Li Email: spacelis@gmail.com
Github: http://github.com/spacelis
Description: A set of utility functions
"""
from functools import reduce
from csv import DictWriter
def rekey(dct, kname, vname, extra=None):
""" Generate dicts by keying the key-value pair
:dct: a dict
:kname: the name for the key
:vname: the name for the value
    :returns: a generator of dicts, one per key-value pair in dct
"""
for k, v in dct.items():
yield mk_dict({kname: k, vname: v}, extra if extra else {})
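# Illustrative sketch (not part of the original module): rekey() yields one dict
# per key-value pair, optionally merged with extra fields. Values are made up.
def _example_rekey():
    rows = list(rekey({'alice': 3, 'bob': 5}, 'name', 'count', extra={'source': 'demo'}))
    # e.g. [{'name': 'alice', 'count': 3, 'source': 'demo'},
    #       {'name': 'bob', 'count': 5, 'source': 'demo'}]
    return rows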
def to_csv(fobj, items):
""" Return random items in CSV
:fobj: A file object
:items: A generator of dict items
:returns: None
"""
first = next(items)
wr = DictWriter(fobj, list(first.keys()))
wr.writeheader()
wr.writerow(first)
for item in items:
wr.writerow(item)
def mk_dict(*args):
"""Make a new dict from a series of dict
:*args: dicts
:returns: a combined dicts
"""
return dict(reduce(lambda x, y: x + y, [list(d.items()) for d in args], []))
def number(lst, prefix, start=0):
""" Number the items in the lst
:lst: contains items to number
:returns: a dict with item as the key and its number as the value
"""
return {item: '{0}{1}'.format(prefix, itemId)
for itemId, item in enumerate(sorted(lst), start=start)}
| 23.372881 | 80 | 0.632342 |
d3a61627281349b383baf30802ae2b8810e5fa4f | 11,297 | py | Python | official/benchmark/models/shakespeare/shakespeare_main.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 4 | 2020-03-13T14:01:32.000Z | 2021-05-31T17:17:32.000Z | official/benchmark/models/shakespeare/shakespeare_main.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 7 | 2020-09-26T01:03:33.000Z | 2022-02-10T01:30:14.000Z | official/benchmark/models/shakespeare/shakespeare_main.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 3 | 2020-08-23T21:15:41.000Z | 2021-11-08T10:02:17.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a character LSTM model trained on Shakespeare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# pylint: disable=wrong-import-order
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
# pylint: enable=wrong-import-order
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
EMBEDDING_DIM = 256
RNN_UNITS = 1024
SEQ_LENGTH = 100
# Calculated by running batch_size=1
BATCHES_PER_EPOCH = 11043
def define_flags():
"""Define the flags for the Shakespeare character LSTM."""
flags_core.define_base(data_dir=False,
clean=False,
train_epochs=True,
epochs_between_evals=False,
stop_threshold=False,
num_gpu=True,
export_dir=False,
run_eagerly=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=False,
max_train_steps=False,
dtype=True,
loss_scale=True,
enable_xla=True)
flags_core.set_defaults(train_epochs=43,
batch_size=64)
flags.DEFINE_boolean(name='enable_eager', default=True, help='Enable eager?')
flags.DEFINE_boolean(
name='train', default=True,
help='If true trains the model.')
flags.DEFINE_string(
name='predict_context', default=None,
help='If set, makes a prediction with the given context.')
flags.DEFINE_integer(
name='predict_length', default=1000,
help='Length of the predicted text including the context.')
flags.DEFINE_integer(name='train_steps', default=None,
help='Overrides train_steps per epoch if not None.')
flags.DEFINE_integer(
name='log_steps', default=100,
help='For every log_steps, we log the timing information such as '
'examples per second.')
flags.DEFINE_string(
name='training_data', default=None,
help='Path to file containing the training data.')
flags.DEFINE_boolean(name='cudnn', default=True, help='Use CuDNN LSTM.')
def get_dataset(path_to_file, batch_size=None, seq_length=SEQ_LENGTH):
"""Creates a dataset from a given text file.
Args:
path_to_file: The path to the training data.
batch_size: Batch size to use.
seq_length: The length of the LSTM sequence.
Returns:
A tuple, consisting of the Dataset and the class to character mapping
and character to class mapping.
"""
with tf.io.gfile.GFile(path_to_file, 'rb') as train_data:
text = train_data.read().decode(encoding='utf-8')
# Create vocab
vocab = sorted(set(text))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
  # Split text into sequence length + 1 chunks to create examples
text_as_int = np.array([char2idx[c] for c in text])
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, tf.one_hot(target_text, len(vocab))
dataset = sequences.map(split_input_target)
dataset = dataset.shuffle(10000).repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset, idx2char, char2idx
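# Illustrative sketch (not part of the original script): how split_input_target
# carves one (seq_length + 1)-token chunk into model input and one-hot targets.
# The token ids and vocabulary size below are made up.
def _example_split_input_target():
  chunk = tf.constant([3, 1, 4, 1, 5])       # a 5-token chunk
  input_text = chunk[:-1]                    # [3, 1, 4, 1]
  target = tf.one_hot(chunk[1:], depth=6)    # one-hot rows for [1, 4, 1, 5]
  return input_text, target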
def build_model(vocab_size,
embedding_dim=EMBEDDING_DIM,
rnn_units=RNN_UNITS,
batch_size=None,
stateful=False,
use_cudnn=True):
"""Builds the Shakespeare model.
Args:
vocab_size: The number of character classes in the input.
embedding_dim: The dimension of the embedding space for each class.
rnn_units: The number of RNN units in the layer.
batch_size: When predicting, the batch size of the predictions.
    stateful: If true, the LSTM is stateful.
    use_cudnn: Whether to build the LSTM with CuDNN-compatible settings.
Returns:
A Keras Model.
"""
assert keras_utils.is_v2_0()
LSTM = functools.partial(tf.keras.layers.LSTM, implementation=2)
# By indirecting the activation through a lambda layer, the logic to dispatch
# to CuDNN in V2 doesn't trigger and we force the LSTM to run in non-CuDNN
# mode.
lstm_activation = ('tanh' if use_cudnn else
lambda x: tf.math.tanh(x))
batch_shape = [batch_size if stateful else None, None]
return tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=batch_shape),
LSTM(rnn_units,
activation=lstm_activation,
return_sequences=True,
stateful=stateful,
recurrent_initializer='glorot_uniform'),
tf.keras.layers.Dense(vocab_size),
tf.keras.layers.Softmax(dtype=tf.float32)])
def train_model(flags_obj, dataset, vocab_size, strategy, checkpoint_dir=None):
"""Trains a Shakespeare model.
Args:
    flags_obj: An object containing parsed flag values.
dataset: the training data set.
vocab_size: the number of unique character classes.
strategy: distribution strategy to use.
checkpoint_dir: if not None, the directory in which to make checkpoints.
Returns:
The training history and callbacks.
"""
if flags_obj.train_steps:
train_steps = flags_obj.train_steps
else:
train_steps = BATCHES_PER_EPOCH // flags_obj.batch_size
strategy_scope = distribution_utils.get_strategy_scope(strategy)
with strategy_scope:
model = build_model(vocab_size=vocab_size, batch_size=flags_obj.batch_size,
use_cudnn=flags_obj.cudnn)
# When keras_use_ctl is False, Model.fit() automatically applies
# loss scaling so we don't need to create a LossScaleOptimizer.
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=[tf.keras.metrics.Recall(top_k=1, name='RecallAt1'),
tf.keras.metrics.Recall(top_k=5, name='RecallAt5')],
run_eagerly=flags_obj.run_eagerly)
callbacks = []
if checkpoint_dir:
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
callbacks.append(checkpoint_callback)
time_callback = keras_utils.TimeHistory(flags_obj.batch_size,
flags_obj.log_steps)
callbacks.append(time_callback)
history = model.fit(dataset,
epochs=flags_obj.train_epochs,
steps_per_epoch=train_steps,
callbacks=callbacks,
verbose=2)
return history, callbacks
def make_prediction(checkpoint_dir, length, context, idx2char, char2idx):
"""Make predictions from a Shakespeare model.
Args:
checkpoint_dir: the directory from which to load checkpoints
length: the total length of the generated text (including the context).
context: the initial text with which the LSTM is primed.
idx2char: the character class to character mapping.
char2idx: the character to character class mapping.
Returns:
A generated string of text of the given length.
"""
prediction_model = build_model(
vocab_size=len(idx2char), batch_size=1, stateful=True)
prediction_model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
prediction_model.build(tf.TensorShape([1, None]))
input_eval = [char2idx[s] for s in context]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = []
prediction_model.reset_states()
for _ in range(length - len(context)):
predictions = prediction_model(input_eval)
predictions = tf.squeeze(predictions, 0)
# We applied a softmax to the output of the model so that
# tf.keras.metrics.Recall would work. We need logits for
# tf.random.categorical, so we convert the probabilities back to log odds
predictions = tf.math.log(predictions / (1 - predictions))
random_output = tf.random.categorical(predictions, num_samples=1)
selected_id = random_output[-1, 0].numpy()
input_eval = tf.expand_dims([selected_id], 0)
text_generated.append(idx2char[selected_id])
return context + ''.join(text_generated)
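# Illustrative sketch (not part of the original script): the probability-to-log-odds
# conversion used above before sampling, with made-up class probabilities.
def _example_probs_to_log_odds():
  probs = tf.constant([[0.1, 0.2, 0.7]])
  logits = tf.math.log(probs / (1 - probs))  # log odds per class
  return tf.random.categorical(logits, num_samples=1)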
def run(flags_obj):
"""Run Shakespeare training and predict.
Args:
flags_obj: An object containing parsed flag values.
Returns:
Dictionary with status from the run.
"""
if not flags_obj.training_data:
raise ValueError(
'Must set the path to a training data file. e.g download the following '
'https://storage.googleapis.com/download.tensorflow.org/data/'
'shakespeare.txt')
if flags_obj.dtype == 'fp16':
policy = tf.keras.mixed_precision.experimental.Policy(
'mixed_float16',
loss_scale=flags_core.get_loss_scale(flags_obj,
default_for_fp16='dynamic'))
tf.keras.mixed_precision.experimental.set_policy(policy)
keras_utils.set_session_config(
enable_eager=flags_obj.enable_eager,
enable_xla=flags_obj.enable_xla)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_obj.num_gpus)
dataset, idx2char, char2idx = get_dataset(flags_obj.training_data,
batch_size=flags_obj.batch_size)
stats = {}
if flags_obj.train:
history, callbacks = train_model(flags_obj, dataset,
len(idx2char), strategy,
checkpoint_dir=flags_obj.model_dir)
stats['history'] = history.history
stats['callbacks'] = callbacks
if flags_obj.predict_context:
if not flags_obj.model_dir:
raise ValueError('Must set model_dir to get predictions.')
print(make_prediction(flags_obj.model_dir,
flags_obj.predict_length,
flags_obj.predict_context,
idx2char,
char2idx))
return stats
def main(_):
flags_obj = flags.FLAGS
run(flags_obj)
if __name__ == '__main__':
define_flags()
app.run(main)
| 35.75 | 80 | 0.672922 |
db0922253dae1838663571fc9dff15a3f0189855 | 1,446 | py | Python | joyful/mappings/dualshock4.py | cauebs/joyful | b501827fe6ee7da21e79c5dd8e1e9f5fef7fc674 | [
"MIT"
] | null | null | null | joyful/mappings/dualshock4.py | cauebs/joyful | b501827fe6ee7da21e79c5dd8e1e9f5fef7fc674 | [
"MIT"
] | null | null | null | joyful/mappings/dualshock4.py | cauebs/joyful | b501827fe6ee7da21e79c5dd8e1e9f5fef7fc674 | [
"MIT"
] | null | null | null | from enum import Enum, auto
class Labels(Enum):
DPAD_X = auto()
DPAD_Y = auto()
L1_BUTTON = auto()
L2_BUTTON = auto()
L2_TRIGGER = auto()
L3_X_AXIS = auto()
L3_Y_AXIS = auto()
L3_BUTTON = auto()
SHARE = auto()
OPTIONS = auto()
START = auto()
SELECT = auto()
PLAYSTATION = auto()
CROSS = auto()
CIRCLE = auto()
SQUARE = auto()
TRIANGLE = auto()
R1_BUTTON = auto()
R2_BUTTON = auto()
R2_TRIGGER = auto()
R3_X_AXIS = auto()
R3_Y_AXIS = auto()
R3_BUTTON = auto()
ACCEL_X = auto()
ACCEL_Y = auto()
ACCEL_Z = auto()
GYRO_X = auto()
GYRO_Y = auto()
GYRO_Z = auto()
MAPPING = {
0x00: Labels.L3_X_AXIS,
0x01: Labels.L3_Y_AXIS,
0x02: Labels.L2_TRIGGER,
0x03: Labels.R3_X_AXIS,
0x04: Labels.R3_Y_AXIS,
0x05: Labels.R2_TRIGGER,
0x10: Labels.DPAD_X,
0x11: Labels.DPAD_Y,
0x130: Labels.CROSS,
0x131: Labels.CIRCLE,
0x133: Labels.TRIANGLE,
0x134: Labels.SQUARE,
0x136: Labels.L1_BUTTON,
0x137: Labels.R1_BUTTON,
0x138: Labels.L2_BUTTON,
0x139: Labels.R2_BUTTON,
0x13a: Labels.SHARE,
0x13b: Labels.OPTIONS,
0x13c: Labels.PLAYSTATION,
0x13d: Labels.L3_BUTTON,
0x13e: Labels.R3_BUTTON,
}
MOTION = {
0x00: Labels.ACCEL_X,
0x01: Labels.ACCEL_Y,
0x02: Labels.ACCEL_Z,
0x03: Labels.GYRO_X,
0x04: Labels.GYRO_Y,
0x05: Labels.GYRO_Z,
}
| 19.808219 | 30 | 0.607192 |
affd3c43f7b3cfa1966e92e5bca6a749e47121fa | 9,125 | py | Python | neutron/db/api.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | null | null | null | neutron/db/api.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | null | null | null | neutron/db/api.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
from debtcollector import removals
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import excutils
from osprofiler import opts as profiler_opts
import osprofiler.sqlalchemy
from pecan import util as p_util
import six
import sqlalchemy
from sqlalchemy import event # noqa
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc
import traceback
from neutron._i18n import _LE
from neutron.objects import exceptions as obj_exc
def set_hook(engine):
if (profiler_opts.is_trace_enabled() and
profiler_opts.is_db_trace_enabled()):
osprofiler.sqlalchemy.add_tracing(sqlalchemy, engine, 'neutron.db')
context_manager = enginefacade.transaction_context()
context_manager.configure(sqlite_fk=True)
# TODO(ihrachys) the hook assumes options defined by osprofiler, and the only
# public function that is provided by osprofiler that will register them is
# set_defaults, that's why we call it here even though we don't need to change
# defaults
profiler_opts.set_defaults(cfg.CONF)
context_manager.append_on_engine_create(set_hook)
MAX_RETRIES = 10
LOG = logging.getLogger(__name__)
def is_retriable(e):
if getattr(e, '_RETRY_EXCEEDED', False):
return False
if _is_nested_instance(e, (db_exc.DBDeadlock, exc.StaleDataError,
db_exc.DBConnectionError,
db_exc.DBDuplicateEntry, db_exc.RetryRequest,
obj_exc.NeutronDbObjectDuplicateEntry)):
return True
    # look for savepoints mangled by deadlocks. see bug/1590298 for details.
return _is_nested_instance(e, db_exc.DBError) and '1305' in str(e)
_retry_db_errors = oslo_db_api.wrap_db_retry(
max_retries=MAX_RETRIES,
retry_interval=0.1,
inc_retry_interval=True,
exception_checker=is_retriable
)
def _tag_retriables_as_unretriable(f):
"""Puts a flag on retriable exceptions so is_retriable returns False.
This decorator can be used outside of a retry decorator to prevent
decorators higher up from retrying again.
"""
@six.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
if is_retriable(e):
setattr(e, '_RETRY_EXCEEDED', True)
return wrapped
def _copy_if_lds(item):
"""Deepcopy lists/dicts/sets, leave everything else alone."""
return copy.deepcopy(item) if isinstance(item, (list, dict, set)) else item
def retry_db_errors(f):
"""Nesting-safe retry decorator with auto-arg-copy and logging.
Retry decorator for all functions which do not accept a context as an
argument. If the function accepts a context, use
'retry_if_session_inactive' below.
If retriable errors are retried and exceed the count, they will be tagged
with a flag so is_retriable will no longer recognize them as retriable.
This prevents multiple applications of this decorator (and/or the one
below) from retrying the same exception.
"""
@_tag_retriables_as_unretriable
@_retry_db_errors
@six.wraps(f)
def wrapped(*args, **kwargs):
try:
# copy mutable args and kwargs to make retries safe. this doesn't
# prevent mutations of complex objects like the context or 'self'
dup_args = [_copy_if_lds(a) for a in args]
dup_kwargs = {k: _copy_if_lds(v) for k, v in kwargs.items()}
return f(*dup_args, **dup_kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
if is_retriable(e):
LOG.debug("Retry wrapper got retriable exception: %s",
traceback.format_exc())
return wrapped
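# Hypothetical usage sketch (not part of the original module): retry_db_errors
# suits functions that open their own session and take no context argument; the
# function below is illustrative only.
@retry_db_errors
def _example_delete_rows(model, key):
    session = get_writer_session()
    # retried from scratch on deadlock / duplicate-entry / connection errors
    return session.query(model).filter_by(id=key).delete()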
def retry_if_session_inactive(context_var_name='context'):
"""Retries only if the session in the context is inactive.
Calls a retry_db_errors wrapped version of the function if the context's
session passed in is inactive, otherwise it just calls the function
directly. This is useful to avoid retrying things inside of a transaction
which is ineffective for DB races/errors.
This should be used in all cases where retries are desired and the method
accepts a context.
"""
def decorator(f):
try:
# NOTE(kevinbenton): we use pecan's util function here because it
# deals with the horrors of finding args of already decorated
# functions
ctx_arg_index = p_util.getargspec(f).args.index(context_var_name)
except ValueError:
raise RuntimeError(_LE("Could not find position of var %s")
% context_var_name)
f_with_retry = retry_db_errors(f)
@six.wraps(f)
def wrapped(*args, **kwargs):
# only use retry wrapper if we aren't nested in an active
# transaction
if context_var_name in kwargs:
context = kwargs[context_var_name]
else:
context = args[ctx_arg_index]
method = f if context.session.is_active else f_with_retry
return method(*args, **kwargs)
return wrapped
return decorator
def reraise_as_retryrequest(f):
"""Packs retriable exceptions into a RetryRequest."""
@six.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception() as ctx:
if is_retriable(e):
ctx.reraise = False
raise db_exc.RetryRequest(e)
return wrapped
def _is_nested_instance(e, etypes):
"""Check if exception or its inner excepts are an instance of etypes."""
if isinstance(e, etypes):
return True
if isinstance(e, exceptions.MultipleExceptions):
return any(_is_nested_instance(i, etypes) for i in e.inner_exceptions)
if isinstance(e, db_exc.DBError):
return _is_nested_instance(e.inner_exception, etypes)
return False
@contextlib.contextmanager
def exc_to_retry(etypes):
try:
yield
except Exception as e:
with excutils.save_and_reraise_exception() as ctx:
if _is_nested_instance(e, etypes):
ctx.reraise = False
raise db_exc.RetryRequest(e)
#TODO(akamyshnikova): when all places in the code, which use sessions/
# connections will be updated, this won't be needed
@removals.remove(version='Ocata', removal_version='Pike',
message="Usage of legacy facade is deprecated. Use "
"get_reader_session or get_writer_session instead.")
def get_session(autocommit=True, expire_on_commit=False, use_slave=False):
"""Helper method to grab session."""
return context_manager.get_legacy_facade().get_session(
autocommit=autocommit, expire_on_commit=expire_on_commit,
use_slave=use_slave)
def get_reader_session():
"""Helper to get reader session"""
return context_manager.reader.get_sessionmaker()()
def get_writer_session():
"""Helper to get writer session"""
return context_manager.writer.get_sessionmaker()()
@contextlib.contextmanager
def autonested_transaction(sess):
"""This is a convenience method to not bother with 'nested' parameter."""
if sess.is_active:
session_context = sess.begin(nested=True)
else:
session_context = sess.begin(subtransactions=True)
with session_context as tx:
yield tx
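# Hypothetical usage sketch (not part of the original module): the helper opens
# a nested transaction when the caller already holds one and a subtransaction
# otherwise, so callers need not track nesting themselves.
def _example_update_row(context, model, key, values):
    with autonested_transaction(context.session):
        context.session.query(model).filter_by(id=key).update(values)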
_REGISTERED_SQLA_EVENTS = []
def sqla_listen(*args):
"""Wrapper to track subscribers for test teardowns.
SQLAlchemy has no "unsubscribe all" option for its event listener
framework so we need to keep track of the subscribers by having
them call through here for test teardowns.
"""
event.listen(*args)
_REGISTERED_SQLA_EVENTS.append(args)
def sqla_remove(*args):
event.remove(*args)
_REGISTERED_SQLA_EVENTS.remove(args)
def sqla_remove_all():
for args in _REGISTERED_SQLA_EVENTS:
try:
event.remove(*args)
except sql_exc.InvalidRequestError:
# already removed
pass
del _REGISTERED_SQLA_EVENTS[:]
| 34.048507 | 79 | 0.688219 |
4b5b81c3390197b9849bea00bd9aedb3207c0229 | 3,164 | py | Python | src/407. Trapping Rain Water II_learned.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/407. Trapping Rain Water II_learned.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/407. Trapping Rain Water II_learned.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | from typing import List
import heapq
class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
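        # Approach: flood in from the border with a min-heap. Pop the lowest
        # boundary cell; h_max is the highest value popped so far and acts as
        # the current water level, so any popped cell below h_max traps
        # (h_max - height) units before its neighbours join the boundary.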
if len(heightMap) == 0:
return 0
h = len(heightMap)
w = len(heightMap[0])
visited = {}
pq = []
for i in range(h):
heapq.heappush(pq, (heightMap[i][0], (i, 0)))
heapq.heappush(pq, (heightMap[i][w-1], (i, w-1)))
visited[i, 0] = True
visited[i, w-1] = True
for j in range(1,w-1):
heapq.heappush(pq, (heightMap[0][j], (0,j)))
heapq.heappush(pq, (heightMap[h-1][j], (h-1,j)))
visited[0, j] = True
visited[h-1, j] = True
h_max = 0
res = 0
while len(pq) > 0:
value , (x, y) = heapq.heappop(pq)
if value < h_max:
res += h_max - value
else:
h_max = value
up = (x-1, y) if x > 0 else None
down = (x+1, y) if x < h-1 else None
left = (x, y-1) if y > 0 else None
right = (x, y+1) if y < w-1 else None
# if up != None and not visited.get(up, False):
# heapq.heappush(pq, (heightMap[up[0]][up[1]], up))
# visited[up] = True
# if down!= None and not visited.get(down, False):
# heapq.heappush(pq, (heightMap[down[0]][down[1]], down))
# visited[down] = True
# if left != None and not visited.get(left, False):
# heapq.heappush(pq, (heightMap[left[0]][left[1]], left))
# visited[left] = True
# if right != None and not visited.get(right, False):
# heapq.heappush(pq, (heightMap[right[0]][right[1]], right))
# visited[right] = True
# optimization
for rd in [up, down, left, right]:
if rd != None and not visited.get(rd, False):
heapq.heappush(pq, (heightMap[rd[0]][rd[1]], rd))
visited[rd] = True
return res
if __name__ == "__main__":
so = Solution()
heightMap = [
[1,4,3,1,3,2],
[3,2,1,3,2,4],
[2,3,3,2,3,1]
]
# 14
heightMap = [
[12,13,1,12],
[13,4,13,12],
[13,8,10,12],
[12,13,12,12],
[13,13,13,13]
]
# 3
heightMap = [
[5,5,5,1],
[5,1,1,5],
[5,1,5,5],
[5,2,5,8]
]
# 44
heightMap = [
[78,16,94,36],
[87,93,50,22],
[63,28,91,60],
[64,27,41,27],
[73,37,12,69],
[68,30,83,31],
[63,24,68,36]
]
# 25
heightMap = [
[14,17,18,16,14,16],
[17,3,10,2,3,8],
[11,10,4,7,1,7],
[13,7,2,9,8,10],
[13,1,3,4,8,6],
[20,3,3,9,10,8]
]
# 11
# heightMap = [
# [14,20,11,19,19,16],
# [11,10,7,4,9,6],
# [17,2,2,6,10,9],
# [15,9,2,1,4,1],
# [15,5,5,5,8,7],
# [14,2,8,6,10,7]
# ]
result = so.trapRainWater(heightMap)
print(result)
| 26.14876 | 76 | 0.418458 |
d5609e6275c3b63d4fa48b8dd47fb85876df1fe2 | 7,484 | py | Python | api/client/src/pcluster_client/model/ec2_ami_info.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 279 | 2015-01-02T12:03:58.000Z | 2018-11-05T07:58:55.000Z | api/client/src/pcluster_client/model/ec2_ami_info.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 383 | 2015-01-04T18:52:06.000Z | 2018-11-12T16:23:44.000Z | api/client/src/pcluster_client/model/ec2_ami_info.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 127 | 2015-01-25T23:51:28.000Z | 2018-11-04T04:50:29.000Z | """
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pcluster_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from pcluster_client.model.ec2_ami_state import Ec2AmiState
from pcluster_client.model.tag import Tag
globals()['Ec2AmiState'] = Ec2AmiState
globals()['Tag'] = Tag
class Ec2AmiInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'ami_id': (str,), # noqa: E501
'tags': ([Tag],), # noqa: E501
'ami_name': (str,), # noqa: E501
'architecture': (str,), # noqa: E501
'state': (Ec2AmiState,), # noqa: E501
'description': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'ami_id': 'amiId', # noqa: E501
'tags': 'tags', # noqa: E501
'ami_name': 'amiName', # noqa: E501
'architecture': 'architecture', # noqa: E501
'state': 'state', # noqa: E501
'description': 'description', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, ami_id, *args, **kwargs): # noqa: E501
"""Ec2AmiInfo - a model defined in OpenAPI
Args:
ami_id (str): EC2 AMI id
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
tags ([Tag]): EC2 AMI Tags. [optional] # noqa: E501
ami_name (str): EC2 AMI name. [optional] # noqa: E501
architecture (str): EC2 AMI architecture. [optional] # noqa: E501
state (Ec2AmiState): [optional] # noqa: E501
description (str): EC2 AMI description. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.ami_id = ami_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
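

# Illustrative construction (values below are placeholders, not part of the
# generated model):
#
#     ami = Ec2AmiInfo(ami_id="ami-0123456789abcdef0",
#                      ami_name="parallelcluster-example",
#                      architecture="x86_64")
#     ami.ami_id  # -> "ami-0123456789abcdef0"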
| 38.979167 | 110 | 0.576563 |
e3dccdec7ecd277cf4d201c33d6480a350f7635d | 4,960 | py | Python | test/functional/p2p_time_offset.py | nyerium-core/nyerium | 0bc3b4da2c8cf1c96ab3910cff4c743ff09fcd1e | [
"MIT"
] | 2 | 2019-08-06T13:37:38.000Z | 2022-01-07T11:57:49.000Z | test/functional/p2p_time_offset.py | nyerium-core/nyerium | 0bc3b4da2c8cf1c96ab3910cff4c743ff09fcd1e | [
"MIT"
] | 5 | 2018-05-31T20:01:32.000Z | 2018-09-21T22:55:04.000Z | test/functional/p2p_time_offset.py | nyerium-core/nyerium | 0bc3b4da2c8cf1c96ab3910cff4c743ff09fcd1e | [
"MIT"
] | 4 | 2018-06-03T07:09:44.000Z | 2020-08-17T12:43:24.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Nyerium developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.test_framework import NyeriumTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
set_node_times,
)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
class TimeOffsetTest(NyeriumTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 8
self.enable_mocktime()
def setup_network(self):
# don't connect nodes yet
self.setup_nodes()
def check_connected_nodes(self):
ni = [node.getnetworkinfo() for node in self.connected_nodes]
assert_equal([x['connections'] for x in ni], [2] * len(ni))
assert_equal([x['timeoffset'] for x in ni], [0] * len(ni))
def run_test(self):
# Nodes synced but not connected
self.mocktime = int(time.time())
set_node_times(self.nodes, self.mocktime)
ni = [node.getnetworkinfo() for node in self.nodes]
assert_equal([x['connections'] for x in ni], [0] * self.num_nodes)
self.log.info("Nodes disconnected from each other. Time: %d" % self.mocktime)
assert_equal([x['timeoffset'] for x in ni], [0] * self.num_nodes)
self.log.info("Nodes have nTimeOffset 0")
# Set node times.
# nodes [1, 5]: set times to +10, +15, ..., +30 secs
for i in range(1, 6):
self.nodes[i].setmocktime(self.mocktime + 5 * (i + 1))
# nodes [6, 7]: set time to -5, -10 secs
for i in range(6, 8):
self.nodes[i].setmocktime(self.mocktime - 5 * (i - 5))
# connect nodes 1 and 2
self.log.info("Connecting with node-1 (+10 s) and node-2 (+15 s)...")
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
self.log.info("--> samples = [+0, +10, (+10), +15, +15]")
ni = self.nodes[0].getnetworkinfo()
assert_equal(ni['connections'], 4)
assert_equal(ni['timeoffset'], 10)
self.connected_nodes = [self.nodes[1], self.nodes[2]]
self.check_connected_nodes()
self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
# connect node 3
self.log.info("Connecting with node-3 (+20 s). This will print the warning...")
connect_nodes_bi(self.nodes, 0, 3)
self.log.info("--> samples = [+0, +10, +10, (+15), +15, +20, +20]")
ni = self.nodes[0].getnetworkinfo()
assert_equal(ni['connections'], 6)
assert_equal(ni['timeoffset'], 15)
self.connected_nodes.append(self.nodes[3])
self.check_connected_nodes()
self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
# connect node 6
self.log.info("Connecting with node-6 (-5 s)...")
connect_nodes_bi(self.nodes, 0, 6)
self.log.info("--> samples = [-5, -5, +0, +10, (+10), +15, +15, +20, +20]")
ni = self.nodes[0].getnetworkinfo()
assert_equal(ni['connections'], 8)
assert_equal(ni['timeoffset'], 10)
self.connected_nodes.append(self.nodes[6])
self.check_connected_nodes()
self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
# connect node 4
self.log.info("Connecting with node-4 (+25 s). This will print the warning...")
connect_nodes_bi(self.nodes, 0, 4)
self.log.info("--> samples = [-5, -5, +0, +10, +10, (+15), +15, +20, +20, +25, +25]")
ni = self.nodes[0].getnetworkinfo()
assert_equal(ni['connections'], 10)
assert_equal(ni['timeoffset'], 15)
self.connected_nodes.append(self.nodes[4])
self.check_connected_nodes()
self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
# try to connect node 5 and check that it can't
self.log.info("Trying to connect with node-5 (+30 s)...")
connect_nodes_bi(self.nodes, 0, 5)
ni = self.nodes[0].getnetworkinfo()
assert_equal(ni['connections'], 10)
assert_equal(ni['timeoffset'], 15)
self.log.info("Not connected.")
self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
# connect node 7
self.log.info("Connecting with node-7 (-10 s)...")
connect_nodes_bi(self.nodes, 0, 7)
self.log.info("--> samples = [-10, -10, -5, -5, +0, +10, (+10), +15, +15, +20, +20, +25, +25]")
ni = self.nodes[0].getnetworkinfo()
assert_equal(ni['connections'], 12)
assert_equal(ni['timeoffset'], 10)
        self.connected_nodes.append(self.nodes[7])
self.check_connected_nodes()
self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
if __name__ == '__main__':
TimeOffsetTest().main()
| 40.991736 | 103 | 0.604637 |
2fb331b17734ead24561bf6eee1a4920680295c7 | 2,560 | py | Python | tools/rm_subj.py | roshchupkin/hase | c7aa36459c53ccb5bd1f884bbc38df0cfebdf208 | [
"Apache-2.0"
] | 13 | 2016-03-25T12:22:03.000Z | 2021-04-14T11:14:00.000Z | tools/rm_subj.py | roshchupkin/hase | c7aa36459c53ccb5bd1f884bbc38df0cfebdf208 | [
"Apache-2.0"
] | 8 | 2016-08-02T22:06:07.000Z | 2019-12-10T08:42:22.000Z | tools/rm_subj.py | roshchupkin/hase | c7aa36459c53ccb5bd1f884bbc38df0cfebdf208 | [
"Apache-2.0"
] | 3 | 2019-12-03T12:49:46.000Z | 2021-08-13T15:11:27.000Z | import os
import h5py
import pandas as pd
import numpy as np
import argparse
import tables
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to remove subjects from HASE hdf5 files')
parser.add_argument("-g", required=True, type=str, help="path/paths to genotype data folder")
parser.add_argument('-study_name', type=str, required=True, default=None, help=' Study names')
parser.add_argument('-exclude_ids', type=str, default=None,
help='Table with IDs to exclude from data. Should have ID column')
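    # The -exclude_ids file is read with pandas, so it should be a CSV-like table
    # whose header contains an 'ID' column; IDs are compared as strings against
    # the study's individual IDs.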
args = parser.parse_args()
print args
if args.exclude_ids is not None:
        df = pd.DataFrame.from_csv(args.exclude_ids, index_col=None)
print df.head()
if 'ID' not in df.columns:
            raise ValueError('{} table does not have an ID column'.format(args.exclude_ids))
df['ID'] = df.ID.astype(str)
df_ids = pd.read_hdf(os.path.join(args.g, 'individuals', args.study_name + '.h5'), 'individuals')
df_ids['individual'] = df_ids.individual.astype(str)
info_index = df_ids.individual.isin(df.ID)
remove_index = np.where(info_index == True)[0]
keep_index = np.where(info_index == False)[0]
if len(remove_index) == 0:
print 'There is no ids to remove!'
exit(0)
if len(keep_index) == len(df_ids.individual):
print "Need to remove everybody!!! "
exit(0)
        individuals = df_ids.individual[keep_index]
chunk = pd.DataFrame.from_dict({"individual": individuals})
chunk.to_hdf(os.path.join(args.g, 'individuals', args.study_name + '.h5'), key='individuals', format='table',
min_itemsize=25, complib='zlib', complevel=9)
for g_file in os.listdir(os.path.join(args.g, 'genotype')):
print g_file
data = h5py.File(os.path.join(args.g, 'genotype', g_file), 'r')['genotype'][...]
data = data[:, keep_index]
h5_gen_file = tables.open_file(
os.path.join(args.g, 'genotype', g_file), 'w', title=args.study_name)
atom = tables.Float16Atom()
genotype = h5_gen_file.create_carray(h5_gen_file.root, 'genotype', atom,
(data.shape),
title='Genotype',
filters=tables.Filters(complevel=9, complib='zlib'))
genotype[:] = data
h5_gen_file.close()
| 40 | 117 | 0.591406 |
1dd113147cf4af844657ef03d56202202fd4db5e | 15,265 | py | Python | src/cmds/show.py | Flofie/chia-blockchain | d3013f1a392fc1761d975581a7b1d0770f92cb14 | [
"Apache-2.0"
] | null | null | null | src/cmds/show.py | Flofie/chia-blockchain | d3013f1a392fc1761d975581a7b1d0770f92cb14 | [
"Apache-2.0"
] | null | null | null | src/cmds/show.py | Flofie/chia-blockchain | d3013f1a392fc1761d975581a7b1d0770f92cb14 | [
"Apache-2.0"
] | null | null | null | import click
async def show_async(
rpc_port: int,
state: bool,
show_connections: bool,
exit_node: bool,
add_connection: str,
remove_connection: str,
block_header_hash_by_height: str,
block_by_header_hash: str,
) -> None:
import aiohttp
import time
import traceback
from time import localtime, struct_time
from typing import List, Optional
from src.consensus.block_record import BlockRecord
from src.rpc.full_node_rpc_client import FullNodeRpcClient
from src.server.outbound_message import NodeType
from src.types.full_block import FullBlock
from src.util.bech32m import encode_puzzle_hash
from src.util.byte_types import hexstr_to_bytes
from src.util.config import load_config
from src.util.default_root import DEFAULT_ROOT_PATH
from src.util.ints import uint16
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
if state:
blockchain_state = await client.get_blockchain_state()
if blockchain_state is None:
print("There is no blockchain found yet. Try again shortly")
return
peak: Optional[BlockRecord] = blockchain_state["peak"]
difficulty = blockchain_state["difficulty"]
sub_slot_iters = blockchain_state["sub_slot_iters"]
synced = blockchain_state["sync"]["synced"]
sync_mode = blockchain_state["sync"]["sync_mode"]
total_iters = peak.total_iters if peak is not None else 0
num_blocks: int = 10
if sync_mode:
sync_max_block = blockchain_state["sync"]["sync_tip_height"]
sync_current_block = blockchain_state["sync"]["sync_progress_height"]
print(
"Current Blockchain Status: Full Node syncing to block",
sync_max_block,
"\nCurrently synced to block:",
sync_current_block,
)
if synced:
print("Current Blockchain Status: Full Node Synced")
print("\nPeak: Hash:", peak.header_hash if peak is not None else "")
elif peak is not None:
print(f"Current Blockchain Status: Not Synced. Peak height: {peak.height}")
else:
print("\nSearching for an initial chain\n")
print("You may be able to expedite with 'chia show -a host:port' using a known node.\n")
if peak is not None:
if peak.is_transaction_block:
peak_time = peak.timestamp
else:
peak_hash = peak.header_hash
curr = await client.get_block_record(peak_hash)
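                    # Non-transaction blocks carry no timestamp, so walk back through
                    # prev_hash links until the most recent transaction block is found.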
while curr is not None and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
peak_time = curr.timestamp
peak_time_struct = struct_time(localtime(peak_time))
print(
" Time:",
f"{time.strftime('%a %b %d %Y %T %Z', peak_time_struct)}",
f" Height: {peak.height:>10}\n",
)
print("Estimated network space: ", end="")
network_space_human_readable = blockchain_state["space"] / 1024 ** 4
if network_space_human_readable >= 1024:
network_space_human_readable = network_space_human_readable / 1024
print(f"{network_space_human_readable:.3f} PiB")
else:
print(f"{network_space_human_readable:.3f} TiB")
print(f"Current difficulty: {difficulty}")
print(f"Current VDF sub_slot_iters: {sub_slot_iters}")
print("Total iterations since the start of the blockchain:", total_iters)
print("")
print(" Height: | Hash:")
added_blocks: List[BlockRecord] = []
curr = await client.get_block_record(peak.header_hash)
while curr is not None and len(added_blocks) < num_blocks and curr.height > 0:
added_blocks.append(curr)
curr = await client.get_block_record(curr.prev_hash)
for b in added_blocks:
print(f"{b.height:>9} | {b.header_hash}")
else:
print("Blockchain has no blocks yet")
# if called together with show_connections, leave a blank line
if show_connections:
print("")
if show_connections:
connections = await client.get_connections()
print("Connections:")
print(
"Type IP Ports NodeID Last Connect"
+ " MiB Up|Dwn"
)
for con in connections:
last_connect_tuple = struct_time(localtime(con["last_message_time"]))
last_connect = time.strftime("%b %d %T", last_connect_tuple)
mb_down = con["bytes_read"] / (1024 * 1024)
mb_up = con["bytes_written"] / (1024 * 1024)
host = con["peer_host"]
# Strip IPv6 brackets
if host[0] == "[":
host = host[1:39]
# Nodetype length is 9 because INTRODUCER will be deprecated
if NodeType(con["type"]) is NodeType.FULL_NODE:
peak_height = con["peak_height"]
peak_hash = con["peak_hash"]
if peak_hash is None:
peak_hash = "No Info"
if peak_height is None:
peak_height = 0
con_str = (
f"{NodeType(con['type']).name:9} {host:38} "
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
f"{mb_up:7.1f}|{mb_down:<7.1f}"
f"\n "
f"-SB Height: {peak_height:8.0f} -Hash: {peak_hash[2:10]}..."
)
else:
con_str = (
f"{NodeType(con['type']).name:9} {host:38} "
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
f"{mb_up:7.1f}|{mb_down:<7.1f}"
)
print(con_str)
# if called together with state, leave a blank line
if state:
print("")
if exit_node:
node_stop = await client.stop_node()
print(node_stop, "Node stopped")
if add_connection:
if ":" not in add_connection:
print("Enter a valid IP and port in the following format: 10.5.4.3:8000")
else:
ip, port = (
":".join(add_connection.split(":")[:-1]),
add_connection.split(":")[-1],
)
print(f"Connecting to {ip}, {port}")
try:
await client.open_connection(ip, int(port))
except Exception:
print(f"Failed to connect to {ip}:{port}")
if remove_connection:
result_txt = ""
if len(remove_connection) != 8:
result_txt = "Invalid NodeID. Do not include '.'"
else:
connections = await client.get_connections()
for con in connections:
if remove_connection == con["node_id"].hex()[:8]:
print("Attempting to disconnect", "NodeID", remove_connection)
try:
await client.close_connection(con["node_id"])
except Exception:
result_txt = f"Failed to disconnect NodeID {remove_connection}"
else:
result_txt = f"NodeID {remove_connection}... {NodeType(con['type']).name} "
f"{con['peer_host']} disconnected"
elif result_txt == "":
result_txt = f"NodeID {remove_connection}... not found"
print(result_txt)
if block_header_hash_by_height != "":
block_header = await client.get_block_record_by_height(block_header_hash_by_height)
if block_header is not None:
print(f"Header hash of block {block_header_hash_by_height}: " f"{block_header.header_hash.hex()}")
else:
print("Block height", block_header_hash_by_height, "not found")
if block_by_header_hash != "":
block: Optional[BlockRecord] = await client.get_block_record(hexstr_to_bytes(block_by_header_hash))
full_block: Optional[FullBlock] = await client.get_block(hexstr_to_bytes(block_by_header_hash))
# Would like to have a verbose flag for this
if block is not None:
assert full_block is not None
prev_b = await client.get_block_record(block.prev_hash)
if prev_b is not None:
difficulty = block.weight - prev_b.weight
else:
difficulty = block.weight
if block.is_transaction_block:
assert full_block.transactions_info is not None
block_time = struct_time(
localtime(
full_block.foliage_transaction_block.timestamp
if full_block.foliage_transaction_block
else None
)
)
block_time_string = time.strftime("%a %b %d %Y %T %Z", block_time)
cost = str(full_block.transactions_info.cost)
tx_filter_hash = "Not a transaction block"
if full_block.foliage_transaction_block:
tx_filter_hash = full_block.foliage_transaction_block.filter_hash
else:
block_time_string = "Not a transaction block"
cost = "Not a transaction block"
tx_filter_hash = "Not a transaction block"
print("Block at height", block.height, ":")
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
farmer_address = encode_puzzle_hash(block.farmer_puzzle_hash, address_prefix)
pool_address = encode_puzzle_hash(block.pool_puzzle_hash, address_prefix)
print(
f"Header Hash 0x{block.header_hash.hex()}\n"
f"Timestamp {block_time_string}\n"
f"Block Height {block.height}\n"
f"Weight {block.weight}\n"
f"Previous Block 0x{block.prev_hash.hex()}\n"
f"Difficulty {difficulty}\n"
f"Sub-slot iters {block.sub_slot_iters}\n"
f"Cost {cost}\n"
f"Total VDF Iterations {block.total_iters}\n"
f"Is a Transaction Block?{block.is_transaction_block}\n"
f"Deficit {block.deficit}\n"
f"PoSpace 'k' Size {full_block.reward_chain_block.proof_of_space.size}\n"
f"Plot Public Key 0x{full_block.reward_chain_block.proof_of_space.plot_public_key}\n"
f"Pool Public Key 0x{full_block.reward_chain_block.proof_of_space.pool_public_key}\n"
f"Pool Public Key "
f"0x{full_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash}\n"
f"{full_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash}\n"
f"Tx Filter Hash {tx_filter_hash}\n"
f"Farmer Address {farmer_address}\n"
f"Pool Address {pool_address}\n"
f"Fees Amount {block.fees}\n"
)
else:
print("Block with header hash", block_header_hash_by_height, "not found")
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if full node rpc is running at {rpc_port}")
print("This is normal if full node is still starting up")
else:
tb = traceback.format_exc()
print(f"Exception from 'show' {tb}")
client.close()
await client.await_closed()
@click.command("show", short_help="Show node information")
@click.option(
"-p",
"--rpc-port",
help=(
"Set the port where the Full Node is hosting the RPC interface. "
"See the rpc_port under full_node in config.yaml"
),
type=int,
default=8555,
show_default=True,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=9256,
show_default=True,
)
@click.option("-s", "--state", help="Show the current state of the blockchain", is_flag=True, type=bool, default=False)
@click.option(
"-c", "--connections", help="List nodes connected to this Full Node", is_flag=True, type=bool, default=False
)
@click.option("-e", "--exit-node", help="Shut down the running Full Node", is_flag=True, default=False)
@click.option("-a", "--add-connection", help="Connect to another Full Node by ip:port", type=str, default="")
@click.option(
"-r", "--remove-connection", help="Remove a Node by the first 8 characters of NodeID", type=str, default=""
)
@click.option(
"-bh", "--block-header-hash-by-height", help="Look up a block header hash by block height", type=str, default=""
)
@click.option("-b", "--block-by-header-hash", help="Look up a block by block header hash", type=str, default="")
def show_cmd(
rpc_port: int,
wallet_rpc_port: int,
state: bool,
connections: bool,
exit_node: bool,
add_connection: str,
remove_connection: str,
block_header_hash_by_height: str,
block_by_header_hash: str,
) -> None:
import asyncio
asyncio.run(
show_async(
rpc_port,
state,
connections,
exit_node,
add_connection,
remove_connection,
block_header_hash_by_height,
block_by_header_hash,
)
)
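

# Illustrative invocations (examples only, not part of the original file):
#
#     chia show -s -c              # blockchain state plus peer connections
#     chia show -a <ip>:<port>     # connect to a known full node
#     chia show -bh <height>       # header hash for a given block height
#     chia show -b <header_hash>   # full details for a block by header hash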
| 46.539634 | 119 | 0.541042 |
34cf699feaf63c14ab14f757bec38f2e6ec45490 | 970 | py | Python | src/storage-preview/azext_storage_preview/vendored_sdks/azure_mgmt_preview_storage/v2018_03_01_preview/models/list_container_items.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2019-05-10T19:58:09.000Z | 2019-05-10T19:58:09.000Z | src/storage-preview/azext_storage_preview/vendored_sdks/azure_mgmt_preview_storage/v2018_03_01_preview/models/list_container_items.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | src/storage-preview/azext_storage_preview/vendored_sdks/azure_mgmt_preview_storage/v2018_03_01_preview/models/list_container_items.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2021-07-28T14:50:54.000Z | 2021-07-28T14:50:54.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ListContainerItems(Model):
"""The list of blob containers.
:param value: The list of blob containers.
:type value:
list[~azure.mgmt.storage.v2018_03_01_preview.models.ListContainerItem]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ListContainerItem]'},
}
def __init__(self, **kwargs):
super(ListContainerItems, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
| 32.333333 | 76 | 0.589691 |
db280fea135ddb681a718266a153452220814710 | 1,924 | py | Python | rest_framework_elasticsearch/es_serializer.py | xaviermathew/django-rest-elasticsearch | d8778165e2e42601b8c15c8901c5f58b3061a47c | [
"Apache-2.0"
] | 219 | 2017-04-22T11:32:06.000Z | 2021-06-04T10:33:28.000Z | rest_framework_elasticsearch/es_serializer.py | xaviermathew/django-rest-elasticsearch | d8778165e2e42601b8c15c8901c5f58b3061a47c | [
"Apache-2.0"
] | 37 | 2017-06-27T15:43:31.000Z | 2020-09-02T03:05:17.000Z | rest_framework_elasticsearch/es_serializer.py | xaviermathew/django-rest-elasticsearch | d8778165e2e42601b8c15c8901c5f58b3061a47c | [
"Apache-2.0"
] | 47 | 2017-05-02T13:18:38.000Z | 2022-02-07T08:54:43.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework import serializers
class BaseElasticSerializer(object):
def es_instance(self):
raise NotImplementedError
def get_es_model(self):
if not hasattr(self.Meta, 'es_model'):
raise ValueError(
'Can not find es_model value'
)
return self.Meta.es_model
def save(self, using=None, index=None, validate=True, **kwargs):
instance = self.es_instance()
instance.save(using=using, index=index, validate=validate, **kwargs)
def delete(self, using=None, index=None, **kwargs):
instance = self.es_instance()
instance.delete(using=using, index=index, **kwargs)
class ElasticSerializer(BaseElasticSerializer,
serializers.Serializer):
def get_es_instace_pk(self, data):
try:
return data['id']
except KeyError:
raise ValueError(
'Can not save object without id'
)
def es_repr(self, data):
data['meta'] = dict(id=self.get_es_instace_pk(data))
model = self.get_es_model()
return model(**data)
def es_instance(self):
if not self.is_valid():
raise serializers.ValidationError(self.errors)
return self.es_repr(self.data)
class ElasticModelSerializer(BaseElasticSerializer,
serializers.ModelSerializer):
def get_es_instace_pk(self, instance):
return instance.pk
def es_repr(self, instance):
data = self.to_representation(instance)
data['meta'] = dict(id=self.get_es_instace_pk(instance))
model = self.get_es_model()
return model(**data)
def es_instance(self):
if not self.instance:
raise ValueError("Can't reproduce object")
return self.es_repr(self.instance)
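

# Illustrative sketch (assumptions: `BlogPost` is a Django model and `BlogPostIndex`
# an elasticsearch-dsl document class; neither is defined in this module):
#
#     class BlogPostElasticSerializer(ElasticModelSerializer):
#         class Meta:
#             model = BlogPost
#             es_model = BlogPostIndex
#             fields = ('id', 'title', 'body')
#
#     serializer = BlogPostElasticSerializer(instance=post)
#     serializer.save()    # indexes the document produced by es_repr()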
| 30.539683 | 76 | 0.628898 |
2f4c3b4af8211c237d8e989fcc6ae3a4839c9031 | 792 | py | Python | content/pkg/deps/python/dassana/common/timing.py | gauravphoenix/dassana | d663e89353f439b1ff67193635f4c9c06babd104 | [
"Apache-2.0"
] | 1 | 2021-11-17T00:45:07.000Z | 2021-11-17T00:45:07.000Z | content/pkg/deps/python/dassana/common/timing.py | MartinLiu2/dassana | 23c5e6d4e380630945621e2979820a6d61335535 | [
"Apache-2.0"
] | null | null | null | content/pkg/deps/python/dassana/common/timing.py | MartinLiu2/dassana | 23c5e6d4e380630945621e2979820a6d61335535 | [
"Apache-2.0"
] | null | null | null | import logging
from functools import wraps
from time import time
from typing import Dict, List
def timing(f, measurements: Dict[frozenset, List], args_measure_func=lambda x: (), kw_measure_func=lambda y: ()):
@wraps(f)
def wrap(*args, **kw):
nonlocal measurements
ts = time()
result = f(*args, **kw)
te = time()
measurement_time = te - ts
logging.log(1, 'func:%r args:%r; kw: %r] took: %2.4f sec', args, kw, te - ts)
freeze = frozenset([f.__name__, *args_measure_func(args), *kw_measure_func(kw)])
if freeze in measurements:
msur = measurements[freeze]
msur.append(measurement_time)
else:
measurements[freeze] = [measurement_time]
return result
return wrap
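

# Illustrative usage (hypothetical function and measurement store, not part of
# the original module):
#
#     measurements = {}
#
#     def fetch(resource_id):
#         ...
#
#     timed_fetch = timing(fetch, measurements)
#     timed_fetch("abc")  # appends the elapsed seconds under frozenset({"fetch"})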
| 31.68 | 117 | 0.621212 |
7ba3ee75d440262bbb39ada016ada6d90a10bbf2 | 434 | py | Python | scratch/scratch3.py | farooq-teqniqly/pakt-complete-python-course | 01717bbe97181f70c38166b3dc82ba7b00098430 | [
"MIT"
] | null | null | null | scratch/scratch3.py | farooq-teqniqly/pakt-complete-python-course | 01717bbe97181f70c38166b3dc82ba7b00098430 | [
"MIT"
] | null | null | null | scratch/scratch3.py | farooq-teqniqly/pakt-complete-python-course | 01717bbe97181f70c38166b3dc82ba7b00098430 | [
"MIT"
] | null | null | null | class MyTypeError(TypeError):
def __init__(self, message: str, code: int):
super().__init__(message)
self._code = code
@property
def code(self) -> int:
return self._code
if __name__ == "__main__":
error = MyTypeError("Waht?", 12345)
print(error)
print(error.code)
"""
Raises error because there is no setter.
https://www.freecodecamp.org/news/python-property-decorator/
"""
    error.code = 234
| 21.7 | 64 | 0.665899 |