from __future__ import division
import errno
import os
import shutil
import time
import uuid
from collections import namedtuple
from itertools import izip as zip
from itertools import repeat
from cassandra import WriteFailure
from cassandra.concurrent import (execute_concurrent,
execute_concurrent_with_args)
from ccmlib.node import Node
from nose.tools import assert_equal, assert_less_equal
from dtest import Tester, create_ks, debug
from tools.data import rows_to_list
from tools.decorators import since
from tools.files import size_of_files_in_dir
from tools.funcutils import get_rate_limited_function
from tools.hacks import advance_to_next_cl_segment
_16_uuid_column_spec = (
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, e uuid, f uuid, g uuid, '
'h uuid, i uuid, j uuid, k uuid, l uuid, m uuid, n uuid, o uuid, '
'p uuid'
)
def _insert_rows(session, table_name, insert_stmt, values):
prepared_insert = session.prepare(insert_stmt)
values = list(values) # in case values is a generator
execute_concurrent(session, ((prepared_insert, x) for x in values),
concurrency=500, raise_on_first_error=True)
data_loaded = rows_to_list(session.execute('SELECT * FROM ' + table_name))
debug('{n} rows inserted into {table_name}'.format(n=len(data_loaded), table_name=table_name))
# use assert_equal over assert_length_equal to avoid printing out
# potentially large lists
assert_equal(len(values), len(data_loaded))
return data_loaded
def _move_contents(source_dir, dest_dir, verbose=True):
for source_filename in os.listdir(source_dir):
source_path, dest_path = (os.path.join(source_dir, source_filename),
os.path.join(dest_dir, source_filename))
if verbose:
debug('moving {} to {}'.format(source_path, dest_path))
shutil.move(source_path, dest_path)
def _get_16_uuid_insert_stmt(ks_name, table_name):
return (
'INSERT INTO {ks_name}.{table_name} '
'(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) '
'VALUES (uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid(), uuid())'
).format(ks_name=ks_name, table_name=table_name)
def _get_create_table_statement(ks_name, table_name, column_spec, options=None):
if options:
options_pairs = ('{k}={v}'.format(k=k, v=v) for (k, v) in options.iteritems())
options_string = 'WITH ' + ' AND '.join(options_pairs)
else:
options_string = ''
return (
'CREATE TABLE ' + ks_name + '.' + table_name + ' '
'(' + column_spec + ') ' + options_string
)
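# Illustrative sketch of what the helper above produces (the keyspace, table and
# column names here are hypothetical):
#   _get_create_table_statement('ks', 'tab', 'a int PRIMARY KEY, b int',
#                               options={'cdc': 'true'})
# returns
#   CREATE TABLE ks.tab (a int PRIMARY KEY, b int) WITH cdc=true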
def _write_to_cdc_WriteFailure(session, insert_stmt):
prepared = session.prepare(insert_stmt)
start, rows_loaded, error_found = time.time(), 0, False
rate_limited_debug = get_rate_limited_function(debug, 5)
while not error_found:
# We want to fail if inserting data takes too long. Locally this
# takes about 10s, but let's be generous.
assert_less_equal(
(time.time() - start), 600,
"It's taken more than 10 minutes to reach a WriteFailure trying "
'to overrun the space designated for CDC commitlogs. This could '
"be because data isn't being written quickly enough in this "
'environment, or because C* is failing to reject writes when '
'it should.'
)
# If we haven't logged from here in the last 5s, do so.
rate_limited_debug(
' data load step has lasted {s:.2f}s, '
'loaded {r} rows'.format(s=(time.time() - start), r=rows_loaded))
batch_results = list(execute_concurrent(
session,
((prepared, ()) for _ in range(1000)),
concurrency=500,
# Don't propagate errors to the main thread. We expect at least
# one WriteFailure, so we handle it below as part of the
            # results received from this call.
raise_on_first_error=False
))
# Here, we track the number of inserted values by getting the
# number of successfully completed statements...
rows_loaded += len([br for br in batch_results if br[0]])
# then, we make sure that the only failures are the expected
# WriteFailures.
assert_equal([],
[result for (success, result) in batch_results
if not success and not isinstance(result, WriteFailure)])
# Finally, if we find a WriteFailure, that means we've inserted all
# the CDC data we can and so we flip error_found to exit the loop.
if any(isinstance(result, WriteFailure) for (_, result) in batch_results):
debug("write failed (presumably because we've overrun "
'designated CDC commitlog space) after '
'loading {r} rows in {s:.2f}s'.format(
r=rows_loaded,
s=time.time() - start))
error_found = True
return rows_loaded
_TableInfoNamedtuple = namedtuple('TableInfoNamedtuple', [
# required
'ks_name', 'table_name', 'column_spec',
# optional
'options', 'insert_stmt',
# derived
'name', 'create_stmt'
])
class TableInfo(_TableInfoNamedtuple):
__slots__ = ()
def __new__(cls, ks_name, table_name, column_spec, options=None, insert_stmt=None):
name = ks_name + '.' + table_name
create_stmt = _get_create_table_statement(ks_name, table_name, column_spec, options)
self = super(TableInfo, cls).__new__(
cls,
# required
ks_name=ks_name, table_name=table_name, column_spec=column_spec,
# optional
options=options, insert_stmt=insert_stmt,
# derived
name=name, create_stmt=create_stmt
)
return self
def _set_cdc_on_table(session, table_name, value, ks_name=None):
"""
Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
"""
table_string = ks_name + '.' + table_name if ks_name else table_name
value_string = 'true' if value else 'false'
stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
debug(stmt)
session.execute(stmt)
def _get_set_cdc_func(session, ks_name, table_name):
"""
Close over a session, keyspace name, and table name and return a function
    that enables CDC on that table if its argument is truthy and otherwise
    disables it.
"""
def set_cdc(value):
return _set_cdc_on_table(
session=session,
ks_name=ks_name, table_name=table_name,
value=value
)
return set_cdc
def _get_commitlog_files(node_path):
commitlog_dir = os.path.join(node_path, 'commitlogs')
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
def _get_cdc_raw_files(node_path, cdc_raw_dir_name='cdc_raw'):
commitlog_dir = os.path.join(node_path, cdc_raw_dir_name)
return {
os.path.join(commitlog_dir, name)
for name in os.listdir(commitlog_dir)
}
@since('3.8')
class TestCDC(Tester):
"""
@jira_ticket CASSANDRA-8844
Test the correctness of some features of CDC, Change Data Capture, which
provides a view of the commitlog on tables for which it is enabled.
"""
def _create_temp_dir(self, dir_name, verbose=True):
"""
Create a directory that will be deleted when this test class is torn
down.
"""
if verbose:
debug('creating ' + dir_name)
try:
os.mkdir(dir_name)
except OSError as e:
            if e.errno == errno.EEXIST:
                debug(dir_name + ' already exists. removing and recreating.')
                shutil.rmtree(dir_name)
                os.mkdir(dir_name)
            else:
                raise
def debug_and_rmtree():
shutil.rmtree(dir_name)
debug(dir_name + ' removed')
self.addCleanup(debug_and_rmtree)
def prepare(self, ks_name,
table_name=None, cdc_enabled_table=None,
gc_grace_seconds=None,
column_spec=None,
configuration_overrides=None,
table_id=None):
"""
Create a 1-node cluster, start it, create a keyspace, and if
<table_name>, create a table in that keyspace. If <cdc_enabled_table>,
that table is created with CDC enabled. If <column_spec>, use that
string to specify the schema of the table -- for example, a valid value
is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
treated as a dict-like object and passed to
self.cluster.set_configuration_options.
"""
config_defaults = {
'cdc_enabled': True,
# we want to be able to generate new segments quickly
'commitlog_segment_size_in_mb': 2,
}
if configuration_overrides is None:
configuration_overrides = {}
self.cluster.populate(1)
self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
self.cluster.start(wait_for_binary_proto=True)
node = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node)
create_ks(session, ks_name, rf=1)
if table_name is not None:
self.assertIsNotNone(cdc_enabled_table, 'if creating a table in prepare, must specify whether or not CDC is enabled on it')
self.assertIsNotNone(column_spec, 'if creating a table in prepare, must specify its schema')
options = {}
if gc_grace_seconds is not None:
options['gc_grace_seconds'] = gc_grace_seconds
if table_id is not None:
options['id'] = table_id
if cdc_enabled_table:
options['cdc'] = 'true'
stmt = _get_create_table_statement(
ks_name, table_name, column_spec,
options=options
)
debug(stmt)
session.execute(stmt)
return node, session
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
"""
Parameterized test asserting that data written to a table is still
readable after flipping the CDC flag on that table, then flipping it
again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
starts with it disabled.
"""
ks_name, table_name = 'ks', 'tab'
sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
start_enabled, alter_path = sequence[0], list(sequence[1:])
node, session = self.prepare(ks_name=ks_name, table_name=table_name,
cdc_enabled_table=start_enabled,
column_spec='a int PRIMARY KEY, b int')
set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
data = tuple(zip(list(range(1000)), list(range(1000))))
execute_concurrent_with_args(session, insert_stmt, data)
# We need data to be in commitlogs, not sstables.
self.assertEqual([], list(node.get_sstables(ks_name, table_name)))
for enable in alter_path:
set_cdc(enable)
self.assertItemsEqual(session.execute('SELECT * FROM ' + table_name), data)
def test_cdc_enabled_data_readable_on_round_trip(self):
"""
Test that data is readable after an enabled->disabled->enabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
def test_cdc_disabled_data_readable_on_round_trip(self):
"""
        Test that data is readable after a disabled->enabled->disabled round
trip.
"""
self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
"""
Test that C* behaves correctly when CDC tables have consumed all the
space available to them. In particular: after writing
cdc_total_space_in_mb MB into CDC commitlogs:
- CDC writes are rejected
- non-CDC writes are accepted
- on flush, CDC commitlogs are copied to cdc_raw
- on flush, non-CDC commitlogs are not copied to cdc_raw
This is a lot of behavior to validate in one test, but we do so to
        avoid running multiple tests that each have to write enough data to fill
cdc_total_space_in_mb.
"""
ks_name = 'ks'
full_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='full_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
options={'cdc': 'true'}
)
configuration_overrides = {
# Make CDC space as small as possible so we can fill it quickly.
'cdc_total_space_in_mb': 4,
}
node, session = self.prepare(
ks_name=ks_name,
configuration_overrides=configuration_overrides
)
session.execute(full_cdc_table_info.create_stmt)
# Later, we'll also make assertions about the behavior of non-CDC
# tables, so we create one here.
non_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='non_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab')
)
session.execute(non_cdc_table_info.create_stmt)
# We'll also make assertions about the behavior of CDC tables when
# other CDC tables have already filled the designated space for CDC
# commitlogs, so we create the second CDC table here.
empty_cdc_table_info = TableInfo(
ks_name=ks_name, table_name='empty_cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
options={'cdc': 'true'}
)
session.execute(empty_cdc_table_info.create_stmt)
        # Here, we insert values into the first CDC table until we get a
        # WriteFailure. This should happen once the CDC commitlogs take up
        # cdc_total_space_in_mb (4MB here) or more.
debug('flushing non-CDC commitlogs')
node.flush()
# Then, we insert rows into the CDC table until we can't anymore.
debug('beginning data insert to fill CDC commitlogs')
rows_loaded = _write_to_cdc_WriteFailure(session, full_cdc_table_info.insert_stmt)
self.assertLess(0, rows_loaded,
'No CDC rows inserted. This may happen when '
                        'cdc_total_space_in_mb < commitlog_segment_size_in_mb')
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
commitlogs_size = size_of_files_in_dir(commitlog_dir)
debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
# We should get a WriteFailure when trying to write to the CDC table
# that's filled the designated CDC space...
with self.assertRaises(WriteFailure):
session.execute(full_cdc_table_info.insert_stmt)
# or any CDC table.
with self.assertRaises(WriteFailure):
session.execute(empty_cdc_table_info.insert_stmt)
# Now we test for behaviors of non-CDC tables when we've exceeded
# cdc_total_space_in_mb.
#
# First, we drain and save the names of all the new discarded CDC
# segments
node.drain()
session.cluster.shutdown()
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node)
pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
# save the names of all the commitlog segments written up to this
# point:
pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
# Check that writing to non-CDC tables succeeds even when writes to CDC
# tables are rejected:
non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
session.execute(non_cdc_prepared_insert, ()) # should not raise an exception
        # Check the following property: any commitlog segment written to after
        # cdc_raw has reached its maximum configured size should not be moved
        # to cdc_raw on commitlog discard, because CDC writes are now rejected
        # and such segments therefore contain only non-CDC mutations.
#
# First, write to non-cdc tables.
start, time_limit = time.time(), 600
rate_limited_debug = get_rate_limited_function(debug, 5)
debug('writing to non-cdc table')
# We write until we get a new commitlog segment.
while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
elapsed = time.time() - start
rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
self.assertLessEqual(
elapsed, time_limit,
"It's been over a {s}s and we haven't written a new "
"commitlog segment. Something is wrong.".format(s=time_limit)
)
execute_concurrent(
session,
((non_cdc_prepared_insert, ()) for _ in range(1000)),
concurrency=500,
raise_on_first_error=True,
)
# Finally, we check that draining doesn't move any new segments to cdc_raw:
node.drain()
session.cluster.shutdown()
self.assertEqual(pre_non_cdc_write_cdc_raw_segments, _get_cdc_raw_files(node.get_path()))
def _init_new_loading_node(self, ks_name, create_stmt, use_thrift=False):
loading_node = Node(
name='node2',
cluster=self.cluster,
auto_bootstrap=False,
thrift_interface=('127.0.0.2', 9160) if use_thrift else None,
storage_interface=('127.0.0.2', 7000),
jmx_port='7400',
remote_debug_port='0',
initial_token=None,
binary_interface=('127.0.0.2', 9042)
)
debug('adding node')
self.cluster.add(loading_node, is_seed=True)
debug('starting new node')
loading_node.start(wait_for_binary_proto=True)
debug('recreating ks and table')
loading_session = self.patient_exclusive_cql_connection(loading_node)
create_ks(loading_session, ks_name, rf=1)
debug('creating new table')
loading_session.execute(create_stmt)
debug('stopping new node')
loading_node.stop()
loading_session.cluster.shutdown()
return loading_node
def test_cdc_data_available_in_cdc_raw(self):
ks_name = 'ks'
# First, create a new node just for data generation.
generation_node, generation_session = self.prepare(ks_name=ks_name)
cdc_table_info = TableInfo(
ks_name=ks_name, table_name='cdc_tab',
column_spec=_16_uuid_column_spec,
insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'cdc_tab'),
options={
'cdc': 'true',
# give table an explicit id so when we create it again it's the
# same table and we can replay into it
'id': uuid.uuid4()
}
)
# Write until we get a new CL segment to avoid replaying initialization
# mutations from this node's startup into system tables in the other
# node. See CASSANDRA-11811.
advance_to_next_cl_segment(
session=generation_session,
commitlog_dir=os.path.join(generation_node.get_path(), 'commitlogs')
)
generation_session.execute(cdc_table_info.create_stmt)
# insert 10000 rows
inserted_rows = _insert_rows(generation_session, cdc_table_info.name, cdc_table_info.insert_stmt, repeat((), 10000))
        # drain the node to guarantee all cl segments will be recycled
debug('draining')
generation_node.drain()
debug('stopping')
# stop the node and clean up all sessions attached to it
generation_node.stop()
generation_session.cluster.shutdown()
# create a new node to use for cdc_raw cl segment replay
loading_node = self._init_new_loading_node(ks_name, cdc_table_info.create_stmt, self.cluster.version() < '4')
# move cdc_raw contents to commitlog directories, then start the
# node again to trigger commitlog replay, which should replay the
# cdc_raw files we moved to commitlogs into memtables.
debug('moving cdc_raw and restarting node')
_move_contents(
os.path.join(generation_node.get_path(), 'cdc_raw'),
os.path.join(loading_node.get_path(), 'commitlogs')
)
loading_node.start(wait_for_binary_proto=True)
debug('node successfully started; waiting on log replay')
        loading_node.watch_log_for('Log replay complete')
debug('log replay complete')
# final assertions
validation_session = self.patient_exclusive_cql_connection(loading_node)
data_in_cdc_table_after_restart = rows_to_list(
validation_session.execute('SELECT * FROM ' + cdc_table_info.name)
)
debug('found {cdc} values in CDC table'.format(
cdc=len(data_in_cdc_table_after_restart)
))
# Then we assert that the CDC data that we expect to be there is there.
# All data that was in CDC tables should have been copied to cdc_raw,
# then used in commitlog replay, so it should be back in the cluster.
self.assertEqual(
inserted_rows,
data_in_cdc_table_after_restart,
            # The default failure message would be too long, since the row
            # lists contain thousands of items, so we supply a short one here.
msg='not all expected data selected'
)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google Cloud Transfer operators."""
import warnings
from copy import deepcopy
from datetime import date, time
from typing import Dict, List, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
ACCESS_KEY_ID,
AWS_ACCESS_KEY,
AWS_S3_DATA_SOURCE,
BUCKET_NAME,
DAY,
DESCRIPTION,
GCS_DATA_SINK,
GCS_DATA_SOURCE,
HOURS,
HTTP_DATA_SOURCE,
MINUTES,
MONTH,
NAME,
OBJECT_CONDITIONS,
PROJECT_ID,
SCHEDULE,
SCHEDULE_END_DATE,
SCHEDULE_START_DATE,
SECONDS,
SECRET_ACCESS_KEY,
START_TIME_OF_DAY,
STATUS,
TRANSFER_OPTIONS,
TRANSFER_SPEC,
YEAR,
CloudDataTransferServiceHook,
GcpTransferJobsStatus,
)
class TransferJobPreprocessor:
"""Helper class for preprocess of transfer job body."""
def __init__(self, body: dict, aws_conn_id: str = 'aws_default', default_schedule: bool = False) -> None:
self.body = body
self.aws_conn_id = aws_conn_id
self.default_schedule = default_schedule
def _inject_aws_credentials(self) -> None:
if TRANSFER_SPEC in self.body and AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]:
aws_hook = AwsBaseHook(self.aws_conn_id, resource_type="s3")
aws_credentials = aws_hook.get_credentials()
aws_access_key_id = aws_credentials.access_key # type: ignore[attr-defined]
aws_secret_access_key = aws_credentials.secret_key # type: ignore[attr-defined]
self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE][AWS_ACCESS_KEY] = {
ACCESS_KEY_ID: aws_access_key_id,
SECRET_ACCESS_KEY: aws_secret_access_key,
}
def _reformat_date(self, field_key: str) -> None:
schedule = self.body[SCHEDULE]
if field_key not in schedule:
return
if isinstance(schedule[field_key], date):
schedule[field_key] = self._convert_date_to_dict(schedule[field_key])
def _reformat_time(self, field_key: str) -> None:
schedule = self.body[SCHEDULE]
if field_key not in schedule:
return
if isinstance(schedule[field_key], time):
schedule[field_key] = self._convert_time_to_dict(schedule[field_key])
def _reformat_schedule(self) -> None:
if SCHEDULE not in self.body:
if self.default_schedule:
self.body[SCHEDULE] = {SCHEDULE_START_DATE: date.today(), SCHEDULE_END_DATE: date.today()}
else:
return
self._reformat_date(SCHEDULE_START_DATE)
self._reformat_date(SCHEDULE_END_DATE)
self._reformat_time(START_TIME_OF_DAY)
def process_body(self) -> dict:
"""
Injects AWS credentials into body if needed and
reformats schedule information.
:return: Preprocessed body
:rtype: dict
"""
self._inject_aws_credentials()
self._reformat_schedule()
return self.body
@staticmethod
def _convert_date_to_dict(field_date: date) -> dict:
"""Convert native python ``datetime.date`` object to a format supported by the API"""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
@staticmethod
def _convert_time_to_dict(time_object: time) -> dict:
"""Convert native python ``datetime.time`` object to a format supported by the API"""
return {HOURS: time_object.hour, MINUTES: time_object.minute, SECONDS: time_object.second}
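# Rough sketch of what TransferJobPreprocessor.process_body() does to a schedule.
# The lowercase field names below are what the imported constants are assumed to
# map to in the Storage Transfer API; the dates and times are made up:
#
#   body = {'schedule': {'scheduleStartDate': date(2021, 1, 1),
#                        'startTimeOfDay': time(12, 30)}}
#   TransferJobPreprocessor(body=body).process_body()
#   # body['schedule'] is now
#   #   {'scheduleStartDate': {'day': 1, 'month': 1, 'year': 2021},
#   #    'startTimeOfDay': {'hours': 12, 'minutes': 30, 'seconds': 0}}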
class TransferJobValidator:
"""Helper class for validating transfer job body."""
def __init__(self, body: dict) -> None:
if not body:
raise AirflowException("The required parameter 'body' is empty or None")
self.body = body
def _verify_data_source(self) -> None:
is_gcs = GCS_DATA_SOURCE in self.body[TRANSFER_SPEC]
is_aws_s3 = AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]
is_http = HTTP_DATA_SOURCE in self.body[TRANSFER_SPEC]
sources_count = sum([is_gcs, is_aws_s3, is_http])
if sources_count > 1:
raise AirflowException(
"More than one data source detected. Please choose exactly one data source from: "
"gcsDataSource, awsS3DataSource and httpDataSource."
)
def _restrict_aws_credentials(self) -> None:
aws_transfer = AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]
if aws_transfer and AWS_ACCESS_KEY in self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE]:
raise AirflowException(
"AWS credentials detected inside the body parameter (awsAccessKey). This is not allowed, "
"please use Airflow connections to store credentials."
)
def validate_body(self) -> None:
"""
        Validates the body. Checks whether the body specifies `transferSpec`;
        if so, verifies that AWS credentials are not embedded in the body and
        that no more than one data source is selected.
:raises: AirflowException
"""
if TRANSFER_SPEC in self.body:
self._restrict_aws_credentials()
self._verify_data_source()
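# Sketch of bodies TransferJobValidator rejects (field names follow the constants
# imported above; the bucket names are hypothetical):
#   {'transferSpec': {'awsS3DataSource': {'bucketName': 'b1',
#                                         'awsAccessKey': {...}}}}   # credentials in body
#   {'transferSpec': {'gcsDataSource': {'bucketName': 'b1'},
#                     'awsS3DataSource': {'bucketName': 'b2'}}}      # two data sources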
class CloudDataTransferServiceCreateJobOperator(BaseOperator):
"""
Creates a transfer job that runs periodically.
.. warning::
This operator is NOT idempotent in the following cases:
* `name` is not passed in body param
* transfer job `name` has been soft deleted. In this case,
each new task will receive a unique suffix
If you run it many times, many transfer jobs will be created in the Google Cloud.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceCreateJobOperator`
:param body: (Required) The request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
With three additional improvements:
* dates can be given in the form :class:`datetime.date`
* times can be given in the form :class:`datetime.time`
* credentials to Amazon Web Service should be stored in the connection and indicated by the
aws_conn_id parameter
:type body: dict
:param aws_conn_id: The connection ID used to retrieve credentials to
Amazon Web Service.
:type aws_conn_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
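    **Example** (an illustrative sketch; the task id, bucket names and date are
    assumptions, not values this operator requires):
    .. code-block:: python
        create_transfer_job = CloudDataTransferServiceCreateJobOperator(
            task_id="create_transfer_job",
            body={
                "description": "example nightly transfer",
                "status": "ENABLED",
                "transferSpec": {
                    "gcsDataSource": {"bucketName": "my-source-bucket"},
                    "gcsDataSink": {"bucketName": "my-sink-bucket"},
                },
                "schedule": {"scheduleStartDate": date(2021, 1, 1)},
            },
        )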
"""
# [START gcp_transfer_job_create_template_fields]
template_fields = (
'body',
'gcp_conn_id',
'aws_conn_id',
'google_impersonation_chain',
)
# [END gcp_transfer_job_create_template_fields]
def __init__(
self,
*,
body: dict,
aws_conn_id: str = 'aws_default',
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = deepcopy(body)
self.aws_conn_id = aws_conn_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
TransferJobValidator(body=self.body).validate_body()
def execute(self, context) -> dict:
TransferJobPreprocessor(body=self.body, aws_conn_id=self.aws_conn_id).process_body()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
return hook.create_transfer_job(body=self.body)
class CloudDataTransferServiceUpdateJobOperator(BaseOperator):
"""
Updates a transfer job that runs periodically.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceUpdateJobOperator`
:param job_name: (Required) Name of the job to be updated
:type job_name: str
:param body: (Required) The request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
With three additional improvements:
* dates can be given in the form :class:`datetime.date`
* times can be given in the form :class:`datetime.time`
* credentials to Amazon Web Service should be stored in the connection and indicated by the
aws_conn_id parameter
:type body: dict
:param aws_conn_id: The connection ID used to retrieve credentials to
Amazon Web Service.
:type aws_conn_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcp_transfer_job_update_template_fields]
template_fields = (
'job_name',
'body',
'gcp_conn_id',
'aws_conn_id',
'google_impersonation_chain',
)
# [END gcp_transfer_job_update_template_fields]
def __init__(
self,
*,
job_name: str,
body: dict,
aws_conn_id: str = 'aws_default',
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.body = body
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.aws_conn_id = aws_conn_id
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
TransferJobValidator(body=self.body).validate_body()
if not self.job_name:
raise AirflowException("The required parameter 'job_name' is empty or None")
def execute(self, context) -> dict:
TransferJobPreprocessor(body=self.body, aws_conn_id=self.aws_conn_id).process_body()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
return hook.update_transfer_job(job_name=self.job_name, body=self.body)
class CloudDataTransferServiceDeleteJobOperator(BaseOperator):
"""
Delete a transfer job. This is a soft delete. After a transfer job is
deleted, the job and all the transfer executions are subject to garbage
collection. Transfer jobs become eligible for garbage collection
30 days after soft delete.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceDeleteJobOperator`
:param job_name: (Required) Name of the TRANSFER operation
:type job_name: str
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcp_transfer_job_delete_template_fields]
template_fields = (
'job_name',
'project_id',
'gcp_conn_id',
'api_version',
'google_impersonation_chain',
)
# [END gcp_transfer_job_delete_template_fields]
def __init__(
self,
*,
job_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
project_id: Optional[str] = None,
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.job_name:
raise AirflowException("The required parameter 'job_name' is empty or None")
def execute(self, context) -> None:
self._validate_inputs()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.delete_transfer_job(job_name=self.job_name, project_id=self.project_id)
class CloudDataTransferServiceGetOperationOperator(BaseOperator):
"""
Gets the latest state of a long-running operation in Google Storage Transfer
Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceGetOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
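    **Example** (illustrative; the operation name shown is an assumption):
    .. code-block:: python
        get_operation = CloudDataTransferServiceGetOperationOperator(
            task_id="get_operation",
            operation_name="transferOperations/123456789",
        )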
"""
# [START gcp_transfer_operation_get_template_fields]
template_fields = (
'operation_name',
'gcp_conn_id',
'google_impersonation_chain',
)
# [END gcp_transfer_operation_get_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context) -> dict:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
operation = hook.get_transfer_operation(operation_name=self.operation_name)
return operation
class CloudDataTransferServiceListOperationsOperator(BaseOperator):
"""
Lists long-running operations in Google Storage Transfer
Service that match the specified filter.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceListOperationsOperator`
:param request_filter: (Required) A request filter, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
:type request_filter: dict
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
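    **Example** (illustrative sketch; the filter keys are assumed to follow the
    hook's request-filter format and the values are made up):
    .. code-block:: python
        list_operations = CloudDataTransferServiceListOperationsOperator(
            task_id="list_operations",
            request_filter={"project_id": "my-gcp-project",
                            "job_names": ["transferJobs/123456789"]},
        )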
"""
# [START gcp_transfer_operations_list_template_fields]
template_fields = (
'filter',
'gcp_conn_id',
'google_impersonation_chain',
)
# [END gcp_transfer_operations_list_template_fields]
def __init__(
self,
request_filter: Optional[Dict] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
        if request_filter is None:
            if 'filter' in kwargs:
                request_filter = kwargs.pop('filter')
                warnings.warn(
                    "Use 'request_filter' instead of 'filter' to pass the argument.",
                    DeprecationWarning,
                    stacklevel=2,
                )
            else:
                raise TypeError("__init__() missing 1 required positional argument: 'request_filter'")
super().__init__(**kwargs)
self.filter = request_filter
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.filter:
raise AirflowException("The required parameter 'filter' is empty or None")
def execute(self, context) -> List[dict]:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
operations_list = hook.list_transfer_operations(request_filter=self.filter)
self.log.info(operations_list)
return operations_list
class CloudDataTransferServicePauseOperationOperator(BaseOperator):
"""
Pauses a transfer operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServicePauseOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcp_transfer_operation_pause_template_fields]
template_fields = (
'operation_name',
'gcp_conn_id',
'api_version',
'google_impersonation_chain',
)
# [END gcp_transfer_operation_pause_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context) -> None:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.pause_transfer_operation(operation_name=self.operation_name)
class CloudDataTransferServiceResumeOperationOperator(BaseOperator):
"""
Resumes a transfer operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceResumeOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param api_version: API version used (e.g. v1).
    :type api_version: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcp_transfer_operation_resume_template_fields]
template_fields = (
'operation_name',
'gcp_conn_id',
'api_version',
'google_impersonation_chain',
)
# [END gcp_transfer_operation_resume_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
        super().__init__(**kwargs)
        self.operation_name = operation_name
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self.google_impersonation_chain = google_impersonation_chain
        self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context) -> None:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.resume_transfer_operation(operation_name=self.operation_name)
class CloudDataTransferServiceCancelOperationOperator(BaseOperator):
"""
Cancels a transfer operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceCancelOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:type operation_name: str
:param api_version: API version used (e.g. v1).
:type api_version: str
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:type gcp_conn_id: str
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcp_transfer_operation_cancel_template_fields]
template_fields = (
'operation_name',
'gcp_conn_id',
'api_version',
'google_impersonation_chain',
)
# [END gcp_transfer_operation_cancel_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context) -> None:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.cancel_transfer_operation(operation_name=self.operation_name)
class CloudDataTransferServiceS3ToGCSOperator(BaseOperator):
"""
Synchronizes an S3 bucket with a Google Cloud Storage bucket using the
Google Cloud Storage Transfer Service.
.. warning::
This operator is NOT idempotent. If you run it many times, many transfer
jobs will be created in the Google Cloud.
**Example**:
.. code-block:: python
        s3_to_gcs_transfer_op = CloudDataTransferServiceS3ToGCSOperator(
task_id="s3_to_gcs_transfer_example",
s3_bucket="my-s3-bucket",
project_id="my-gcp-project",
gcs_bucket="my-gcs-bucket",
dag=my_dag,
)
:param s3_bucket: The S3 bucket where to find the objects. (templated)
:type s3_bucket: str
:param gcs_bucket: The destination Google Cloud Storage bucket
where you want to store the files. (templated)
:type gcs_bucket: str
:param project_id: Optional ID of the Google Cloud Console project that
owns the job
:type project_id: str
:param aws_conn_id: The source S3 connection
:type aws_conn_id: str
:param gcp_conn_id: The destination connection ID to use
when connecting to Google Cloud Storage.
:type gcp_conn_id: str
:param delegate_to: Google account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param description: Optional transfer service job description
:type description: str
:param schedule: Optional transfer service schedule;
        If not set, run the transfer job once as soon as the operator runs.
        The format is described at
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs.
With two additional improvements:
        * dates can be passed as :class:`datetime.date`
        * times can be passed as :class:`datetime.time`
:type schedule: dict
:param object_conditions: Optional transfer service object conditions; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec
:type object_conditions: dict
:param transfer_options: Optional transfer service transfer options; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec
:type transfer_options: dict
:param wait: Wait for transfer to finish. It must be set to True, if
'delete_job_after_completion' is set to True.
:type wait: bool
:param timeout: Time to wait for the operation to end in seconds. Defaults to 60 seconds if not specified.
:type timeout: Optional[Union[float, timedelta]]
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
:param delete_job_after_completion: If True, delete the job after complete.
If set to True, 'wait' must be set to True.
:type delete_job_after_completion: bool
"""
template_fields = (
'gcp_conn_id',
's3_bucket',
'gcs_bucket',
'description',
'object_conditions',
'google_impersonation_chain',
)
ui_color = '#e09411'
def __init__(
self,
*,
s3_bucket: str,
gcs_bucket: str,
project_id: Optional[str] = None,
aws_conn_id: str = 'aws_default',
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
description: Optional[str] = None,
schedule: Optional[Dict] = None,
object_conditions: Optional[Dict] = None,
transfer_options: Optional[Dict] = None,
wait: bool = True,
timeout: Optional[float] = None,
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
delete_job_after_completion: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.gcs_bucket = gcs_bucket
self.project_id = project_id
self.aws_conn_id = aws_conn_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.description = description
self.schedule = schedule
self.object_conditions = object_conditions
self.transfer_options = transfer_options
self.wait = wait
self.timeout = timeout
self.google_impersonation_chain = google_impersonation_chain
self.delete_job_after_completion = delete_job_after_completion
self._validate_inputs()
def _validate_inputs(self) -> None:
if self.delete_job_after_completion and not self.wait:
raise AirflowException("If 'delete_job_after_completion' is True, then 'wait' must also be True.")
def execute(self, context) -> None:
hook = CloudDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.google_impersonation_chain,
)
body = self._create_body()
TransferJobPreprocessor(body=body, aws_conn_id=self.aws_conn_id, default_schedule=True).process_body()
job = hook.create_transfer_job(body=body)
if self.wait:
hook.wait_for_transfer_job(job, timeout=self.timeout)
if self.delete_job_after_completion:
hook.delete_transfer_job(job_name=job[NAME], project_id=self.project_id)
def _create_body(self) -> dict:
body = {
DESCRIPTION: self.description,
STATUS: GcpTransferJobsStatus.ENABLED,
TRANSFER_SPEC: {
AWS_S3_DATA_SOURCE: {BUCKET_NAME: self.s3_bucket},
GCS_DATA_SINK: {BUCKET_NAME: self.gcs_bucket},
},
}
if self.project_id is not None:
body[PROJECT_ID] = self.project_id
if self.schedule is not None:
body[SCHEDULE] = self.schedule
if self.object_conditions is not None:
body[TRANSFER_SPEC][OBJECT_CONDITIONS] = self.object_conditions # type: ignore[index]
if self.transfer_options is not None:
body[TRANSFER_SPEC][TRANSFER_OPTIONS] = self.transfer_options # type: ignore[index]
return body
class CloudDataTransferServiceGCSToGCSOperator(BaseOperator):
"""
Copies objects from a bucket to another using the Google Cloud Storage Transfer Service.
.. warning::
This operator is NOT idempotent. If you run it many times, many transfer
jobs will be created in the Google Cloud.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGCSOperator`
**Example**:
.. code-block:: python
        gcs_to_gcs_transfer_op = CloudDataTransferServiceGCSToGCSOperator(
task_id="gcs_to_gcs_transfer_example",
source_bucket="my-source-bucket",
destination_bucket="my-destination-bucket",
project_id="my-gcp-project",
dag=my_dag,
)
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:type source_bucket: str
:param destination_bucket: The destination Google Cloud Storage bucket
where the object should be. (templated)
:type destination_bucket: str
:param project_id: The ID of the Google Cloud Console project that
owns the job
:type project_id: str
:param gcp_conn_id: Optional connection ID to use when connecting to Google Cloud
Storage.
:type gcp_conn_id: str
:param delegate_to: Google account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param description: Optional transfer service job description
:type description: str
:param schedule: Optional transfer service schedule;
If not set, run transfer job once as soon as the operator runs
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs.
With two additional improvements:
        * dates can be passed as :class:`datetime.date`
        * times can be passed as :class:`datetime.time`
:type schedule: dict
:param object_conditions: Optional transfer service object conditions; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions
:type object_conditions: dict
:param transfer_options: Optional transfer service transfer options; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#TransferOptions
:type transfer_options: dict
:param wait: Wait for transfer to finish. It must be set to True, if
'delete_job_after_completion' is set to True.
:type wait: bool
:param timeout: Time to wait for the operation to end in seconds. Defaults to 60 seconds if not specified.
:type timeout: Optional[Union[float, timedelta]]
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
:param delete_job_after_completion: If True, delete the job after complete.
If set to True, 'wait' must be set to True.
:type delete_job_after_completion: bool
"""
template_fields = (
'gcp_conn_id',
'source_bucket',
'destination_bucket',
'description',
'object_conditions',
'google_impersonation_chain',
)
ui_color = '#e09411'
def __init__(
self,
*,
source_bucket: str,
destination_bucket: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
description: Optional[str] = None,
schedule: Optional[Dict] = None,
object_conditions: Optional[Dict] = None,
transfer_options: Optional[Dict] = None,
wait: bool = True,
timeout: Optional[float] = None,
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
delete_job_after_completion: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.destination_bucket = destination_bucket
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.description = description
self.schedule = schedule
self.object_conditions = object_conditions
self.transfer_options = transfer_options
self.wait = wait
self.timeout = timeout
self.google_impersonation_chain = google_impersonation_chain
self.delete_job_after_completion = delete_job_after_completion
self._validate_inputs()
def _validate_inputs(self) -> None:
if self.delete_job_after_completion and not self.wait:
raise AirflowException("If 'delete_job_after_completion' is True, then 'wait' must also be True.")
def execute(self, context) -> None:
hook = CloudDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.google_impersonation_chain,
)
body = self._create_body()
TransferJobPreprocessor(body=body, default_schedule=True).process_body()
job = hook.create_transfer_job(body=body)
if self.wait:
hook.wait_for_transfer_job(job, timeout=self.timeout)
if self.delete_job_after_completion:
hook.delete_transfer_job(job_name=job[NAME], project_id=self.project_id)
def _create_body(self) -> dict:
body = {
DESCRIPTION: self.description,
STATUS: GcpTransferJobsStatus.ENABLED,
TRANSFER_SPEC: {
GCS_DATA_SOURCE: {BUCKET_NAME: self.source_bucket},
GCS_DATA_SINK: {BUCKET_NAME: self.destination_bucket},
},
}
if self.project_id is not None:
body[PROJECT_ID] = self.project_id
if self.schedule is not None:
body[SCHEDULE] = self.schedule
if self.object_conditions is not None:
body[TRANSFER_SPEC][OBJECT_CONDITIONS] = self.object_conditions # type: ignore[index]
if self.transfer_options is not None:
body[TRANSFER_SPEC][TRANSFER_OPTIONS] = self.transfer_options # type: ignore[index]
return body
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field removed_collaborators on 'Makey'
m2m_table_name = db.shorten_name(u'catalog_makey_removed_collaborators')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('makey', models.ForeignKey(orm['catalog.makey'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['makey_id', 'user_id'])
def backwards(self, orm):
# Removing M2M table for field removed_collaborators on 'Makey'
db.delete_table(db.shorten_name(u'catalog_makey_removed_collaborators'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'made_in': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'makeys_made_in'", 'null': 'True', 'to': "orm['catalog.Space']"}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'removed_collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makey_removed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'space_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tools_in_space'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Space']"}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.space': {
'Meta': {'object_name': 'Space'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_of_founding': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'last_updated_external': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'logo': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_members'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'membership_fee': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_members'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'no_of_members': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Bangalore, India'", 'max_length': '255'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog']
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computation.
The main interface of this module is Recurrent().
A recurrent computation describes an auto-regressive process, where the outputs
of one time step are fed to the input of the next time step.
This module uses:
theta: the "weights" each RNN uses.
state0: the initial state of each RNN.
cell_fn: A python function describing the RNN cell. It must have the following
signature:
cell_fn: (theta, state0, inputs) -> (state1, extras)
state1 is the next RNN state, extras are computed by cell_fn
and the library forwards extras to cell_fn's gradient function.
cell_grad: A python function describing the backprop gradient function
for the RNN cell. It must have the following signature:
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
dstate1 is what the backprop algorithm provides representing
gradients of state1 w.r.t. the final loss.
In this module, we handle structures of tensors for theta, state0, inputs,
and extras. The structure is an arbitrarily nested python structure, such
as a dictionary of named tuples.
Because the computation is a left-to-right chain, a single in-place accumulator
can be used rather than a stack. Thus a special gradient was written to reduce
unnecessary memory usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.inplace_ops import alias_inplace_update
from tensorflow.python.util import nest
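# Illustrative sketch (not part of the original module): a minimal
# cell_fn/cell_grad pair with the signatures described in the module docstring
# above. The names _ExampleCellFn/_ExampleCellGrad and the single-weight theta
# are hypothetical; real cells typically carry richer structures and use
# `extras` to stash values needed by the backward step.
def _ExampleCellFn(theta, state0, inputs):
  """Computes state1 = tanh(state0.h * theta.w + inputs.x); no extras."""
  state1 = {'h': math_ops.tanh(state0['h'] * theta['w'] + inputs['x'])}
  return state1, {}


def _ExampleCellGrad(theta, state0, inputs, extras, dstate1):
  """Hand-written backprop through _ExampleCellFn."""
  del extras  # _ExampleCellFn stashes nothing.
  pre_act = state0['h'] * theta['w'] + inputs['x']
  dpre = dstate1['h'] * (1. - math_ops.square(math_ops.tanh(pre_act)))
  dtheta = {'w': dpre * state0['h']}
  dstate0 = {'h': dpre * theta['w']}
  dinputs = {'x': dpre}
  return dtheta, dstate0, dinputs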
def _AssertIsCompatible(a, b):
"""Checks that `a` and `b` are nested structures of the same type."""
# TODO(drpng): implement.
del a
del b
def _Index(struct, index):
"""Returns a structure with `x[index]` for each tensor `x` in the structure.
Args:
struct: A structure of tensors.
index: A scalar integer tensor. Performance is better if `index` is
on the host memory.
Returns:
A structure of tensors congruent to `struct`.
For each key in `ret`, `ret[key] = struct[key][index]`.
"""
index = ops.convert_to_tensor(index)
index.get_shape().assert_has_rank(0)
return nest.map_structure(lambda x: array_ops.gather(x, index), struct)
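# For example (illustrative): _Index({'x': x}, 2) returns {'x': x[2]}, i.e. the
# slice of every tensor in the structure at position 2 along the 0-th dimension.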
def _Update(struct_acc, struct_x, t):
"""Updates t-th row in accumulators.
Args:
struct_acc: The accumulators. A structure of tensors.
struct_x: The new values. A structure of tensors congruent to `struct_acc`.
t: A scalar integer. Performance is better if `t` is on the device
memory.
Returns:
A structure of tensors. Say, ret is a returned dictionary. Then, for
each key, we have:
ret[key] = struct_acc[key];
ret[key][t, :] = struct_x[key]
"""
to_skip_update = set()
acc_lst = nest.flatten(struct_acc)
x_lst = nest.flatten(struct_x)
t = math_ops.to_int32([t]) # tf.to_int32 casts on-device tensors.
lst = []
for acc, x in zip(acc_lst, x_lst):
if acc in to_skip_update:
# Until b/62105730 is fixed, we need to avoid inplace update for tensors
# of rank 1. We could reshape to handle it, but we don't really need the
# values applied to these, so we just skip their modification.
lst += [acc]
else:
lst += [alias_inplace_update(acc, t, array_ops.expand_dims(x, 0))]
return nest.pack_sequence_as(struct_acc, lst)
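# For example (illustrative): if every tensor in struct_acc has a leading time
# axis of size T, _Update(struct_acc, struct_x, t) returns accumulators whose
# row t has been overwritten in place by the corresponding tensor in struct_x
# (expanded to shape [1, ...] before the inplace update).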
def _SeqLenDim(struct):
"""Returns the 0-th dim size of tensors in a structure of tensors.
This is the max sequence length according to the shape of the inputs.
Args:
struct: A structure of tensors. Every tensor's 0-th dim has the same size.
Returns:
A scalar tensor which is the size of the 0-th dim of every tensor in struct.
"""
xs = nest.flatten(struct)
assert xs
dim0 = array_ops.shape(xs[0])[0]
return dim0
def _Flatten(struct):
"""Flattens a structure."""
return nest.flatten(struct)
def _Pack(elements, struct_template):
"""Packs the list of tensors according to the structure.
In the event that `elements` should be a scalar, `struct_template` must
contain exactly one non-trivial element (for instance, `[[], {'x':elt}]`).
Args:
elements: Elements to be packed. A list of tensor, or a single tensor.
struct_template: The container structure in which to pack them.
Returns:
A python structure of the same type as `struct_template`, containing
`elements` as its contained elements.
"""
if not nest.is_sequence(elements):
return nest.pack_sequence_as(struct_template, [elements])
return nest.pack_sequence_as(struct_template, elements)
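# For example (illustrative): _Pack([t1, t2], [None, {'x': None}]) returns
# [t1, {'x': t2}], mirroring nest.pack_sequence_as.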
def _EmptyAcc(slen, struct_template):
"""Creates a set of accumulators for tensors in structure.
Args:
slen: The sequence length. A scalar tensor.
struct_template: A structure of tensors.
Returns:
A structure congruent to `struct_template`. Say ret is a returned
dictionary. Then, `ret.key`, a tensor, has the same dtype as
`struct_template.key`. The tensor's shape has 1 more dimension
than the tensor `struct_template.key`. The extra 0-th dimension is of size
`slen`. E.g., if `slen=10` and `struct_template.key`'s shape is `[3, 5]`,
then, `ret.key`'s shape is `[10, 3, 5]`.
"""
def _EmptyAccForTensor(tensor):
return inplace_ops.empty(
array_ops.concat([[slen], array_ops.shape(tensor)], axis=0),
tensor.dtype,
init=True)
return nest.map_structure(_EmptyAccForTensor, struct_template)
def _EmptyLike(struct):
"""Creates a set of empty initialized tensors.
Args:
struct: A structure of tensors.
Returns:
A struct of tensors. Each tensor has the same shape and dtype as
its corresponding tensor in `struct`. And each tensor is initialized.
"""
return nest.map_structure(
lambda x: inplace_ops.empty_like(x, init=True), struct)
def _Add(struct_x, struct_y):
"""Adds tensors in `struct_x` with respective tensors in `struct_y`.
Args:
struct_x: A struct of tensors.
struct_y: A struct of tensors congruent to `struct_x`.
Returns:
A struct of tensors. Each element of the returned value
equals `x + y`, with corresponding values in `struct_x` and `struct_y`.
"""
list_x = nest.flatten(struct_x)
list_y = nest.flatten(struct_y)
z = []
for x, y in zip(list_x, list_y):
z += [math_ops.add(x, y)]
return nest.pack_sequence_as(struct_x, z)
def _Dtypes(struct):
"""Returns all tensors' data types in a list."""
return [x.dtype for x in nest.flatten(struct)]
def _ConvertNoneGradientToZeros(xs, dxs):
"""Sanitize dxs so that None becomes zeros appropriately.
Args:
xs: A list of tensors.
dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.
Returns:
A structure the same as `dxs`, with `None` replaced by a zero tensor.
"""
list_xs = nest.flatten(xs)
list_dxs = nest.flatten(dxs)
# If x does not get any backprop-ed gradient, propagate zeros.
rets = []
for (x, dx) in zip(list_xs, list_dxs):
if dx is None:
rets.append(array_ops.zeros_like(x))
else:
rets.append(dx)
return nest.pack_sequence_as(dxs, rets)
# All structures are flattened for use internally. This is for simplicity
# and also to use the Defun construct.
# In the forward pass (inference), the computation is structured as follows.
# Forward: [gradient = _Recurrent.Grad]
# Flatten structures, create accumulators.
# for t = 0..max_input_length:
# Defun ForwardLoopBody:
# Defun Fwd: flatten/pack around cell_fn
# state1 = Fwd(inputs[t], state0)
# acc_state += [state1]
# Pack structures.
# During the backward pass (backpropping the gradient from the last time
# step to the first, through the structure), the computation is structured
# as follows.
# Grad:
# Flatten structures.
# Defun Backward:
# Create accumulated derivatives: d_theta, d_inputs, d_acc_state.
# Regarding the note at the top of the file, there is only one accumulator
# for d_theta accumulated over the whole sequence.
# for t = max_input_length -1..0:
# Defun BackwardLoopBody:
# Retrieve acc_state[t] computed in the forward pass.
# Defun Bak: flatten/pack around cell_fn_grad.
# d_state1 is d_state0 from the previous step (i.e., the next time step).
# d_acc_state[dev_t] += d_state1
# d_theta_t, d_state0, d_inputs_t = Bak()
# d_inputs[dev_t] += d_inputs
# d_theta += d_theta_t
# d_acc_state[t] += d_state1
# Pack structures and return.
class _Recurrent(object):
"""A helper class to construct a recurrent neural net."""
def __init__(self, cell_fn, cell_grad, theta, state0, inputs,
max_input_length, extras, use_tpu):
"""RNN helper class.
Args:
cell_fn: A python function, which computes:
state1, extras = cell_fn(theta, state0, inputs[t, :])
cell_grad: A python function which computes:
dtheta, dstate0, dinputs[t, :] = cell_grad(
theta, state0, inputs[t, :], extras, dstate1)
theta: weights. A structure of tensors.
state0: initial state. A structure of tensors.
inputs: inputs. A structure of tensors.
max_input_length: None, or the maximum effective length of the input over
all batches. A scalar tensor.
extras: A structure of tensors. The 2nd return value of every
invocation of cell_fn is a structure of tensors whose keys and shapes
match this `extras`.
use_tpu: A boolean indicating whether the computation is meant to
run on a TPU.
"""
self._theta = theta
self._state = state0
self._inputs = inputs
self._max_input_length = self._MaybeComputeMaxInputLength(
inputs, max_input_length)
self._cell_fn = cell_fn
self._cell_grad = cell_grad
self._extras = extras
# pylint: disable=unbalanced-tuple-unpacking
# NOTE: TF Function (Fwd, Bak, ForwardLoopBody, BackwardLoopBody,
# Forward and Backward defined below) simply takes a list of
# Tensors and returns a list of Tensors. When we pass in a
# structure (a list of structures of Tensors), we use _Flatten to
convert the structure into a list of tensors. Conversely, the
# following code often uses _Pack to formulate a structure from a
# list of tensors based on a "template".
# Wraps cell_fn in a TF Function:
# state1 = cell_fn(theta, state0, inputs)
fwd_sig = [self._theta, self._state, self._inputs]
compiled = use_tpu
noinline = not compiled
dev_t_type = dtypes.int32 if use_tpu else dtypes.int64
@function.Defun(*_Dtypes(fwd_sig))
def Fwd(*args):
(theta, state0, inputs) = _Pack(args, fwd_sig)
state1, extras = self._cell_fn(theta, state0, inputs)
assert not function.get_extra_args(), (
'cell_fn is not pure with extra args: %s.' %
(function.get_extra_args()))
_AssertIsCompatible(state1, self._state)
_AssertIsCompatible(extras, self._extras)
return _Flatten([state1, extras])
# Wraps cell_fn in a TF Function as a for-loop's body.
#
# The loop state is composed of:
# t: The loop variable. Timestep id.
# dev_t: The loop variable mirrored on the device.
# theta: the recurrent net's weights.
# state0: the previous recurrent state.
# inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
# acc_state: Each timestep's computed new state is also stashed into
# acc_state.
# acc_extras: Each timestep's computed extras is stashed into acc_extras
fwdloop_sig = [
self._theta, self._state, self._inputs, self._state, self._extras
]
@function.Defun(dtypes.int32, dev_t_type, *_Dtypes(fwdloop_sig))
def ForwardLoopBody(*args):
"""The body of forward loop."""
t, dev_t = args[0], args[1]
(theta, state0, inputs, acc_state, acc_extras) = _Pack(
args[2:], fwdloop_sig)
inputs_t = _Index(inputs, t) # external input at time step t.
fwd = Fwd(*_Flatten([theta, state0, inputs_t]))
state1, extras = _Pack(fwd, [self._state, self._extras])
# Saves state1 and extras in their accumulators.
acc_state = _Update(acc_state, state1, dev_t)
acc_extras = _Update(acc_extras, extras, dev_t)
return [math_ops.add(dev_t, 1)] + _Flatten(
[theta, state1, inputs, acc_state, acc_extras])
def Grad(op, *args):
"""The python grad function for the Forward function."""
# NOTE: tf.gradients backprops None for int32/int64 while zeros
# for float32/float64. For consistency, we always backprop
# zeros.
args = list(args)
for i, dy in enumerate(args):
if dy is None:
args[i] = array_ops.zeros_like(op.outputs[i])
# TODO(drpng): getting the extra state here?
op_inputs = [x for x in op.inputs]
op_struct = [
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
]
(theta, state0, inputs, max_input_length, _) = _Pack(op_inputs, op_struct)
# acc_state and acc_extras are computed by the Forward pass and
# needed by the Backward pass.
acc_state, _, acc_extras = _Pack([x for x in op.outputs],
[self._state, self._state, self._extras])
# Forward computes acc_state, the final state and
# acc_extras. tf.gradients gives us their gradients w.r.t. the
# final loss. Because acc_extras is not exposed by Compute(),
# it has no gradient w.r.t. the final loss (i.e., by
# construction, it must be zeros).
d_acc_state, d_state1, _ = _Pack(args,
[self._state, self._state, self._extras])
return Backward(*_Flatten([
theta, state0, inputs, max_input_length, acc_state, acc_extras,
d_acc_state, d_state1
]))
# Forward calls ForwardLoopBody n times. Each time computes one
# time step of the recurrent net.
forward_sig = [
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
]
@function.Defun(
*_Dtypes(forward_sig), python_grad_func=Grad, noinline=noinline)
def Forward(*args):
"""Forward pass of the recurrent net."""
theta, state0, inputs, max_input_length, extras = _Pack(args, forward_sig)
slen_dim = _SeqLenDim(inputs)
# Creates accumulators for state0 and extras.
acc_state = _EmptyAcc(slen_dim, state0)
acc_extras = _EmptyAcc(slen_dim, extras)
dev_t = array_ops.constant(0, dtype=dev_t_type)
run = functional_ops.For(
start=0,
limit=max_input_length,
delta=1,
inputs=[dev_t] + _Flatten(
[theta, state0, inputs, acc_state, acc_extras]),
body=ForwardLoopBody,
rewrite_with_while=compiled)
_, state1, _, acc_state, acc_extras = _Pack(
run[1:],
[self._theta, self._state, self._inputs, self._state, self._extras])
return _Flatten([acc_state, state1, acc_extras])
# The per-step backward computes:
# d_theta, d_state0, d_inputs = cell_grad(
# theta, state0, inputs, extras, d_state1)
# where d_state1 is the backprop-ed gradient for state1, and
    # extras is computed by the forward step to facilitate the
# backward step.
bak_sig = [
self._theta, self._state, self._inputs, self._extras, self._state
]
@function.Defun(*_Dtypes(bak_sig))
def Bak(*args):
"""Backward step."""
(theta, state0, inputs, extras, d_state1) = _Pack(args, bak_sig)
(dtheta, dstate0, dinputs) = self._cell_grad(theta, state0, inputs,
extras, d_state1)
assert not function.get_extra_args(), (
'cell_grad is not pure with extra args: %s.' %
(function.get_extra_args()))
_AssertIsCompatible(dtheta, self._theta)
_AssertIsCompatible(dstate0, self._state)
_AssertIsCompatible(dinputs, self._inputs)
return _Flatten(
_ConvertNoneGradientToZeros([theta, state0, inputs],
[dtheta, dstate0, dinputs]))
# Define defuns used by a functional_ops.If in BackwardLoopBody.
state_if_sig = [self._state, self._state]
@function.Defun(*_Dtypes(state_if_sig))
def ReturnOrigState0(*args):
"""Returns original state0 from inputs."""
(_, orig_state0) = _Pack(args, state_if_sig)
return nest.flatten(orig_state0)
@function.Defun(*_Dtypes(state_if_sig))
def ReturnAccState(*args):
"""Returns acc_state[t-1] from inputs."""
(acc_state, _) = _Pack(args, state_if_sig)
return nest.flatten(acc_state)
# Wraps cell_grad gradient function in a TF Function as a
# for-loop's body for the Backward pass.
#
# The loop state is composed of:
# t: The loop variable. Timestep id.
# state0: the initial state for the entire backward loop.
# dev_t: The loop variable mirrored on the device.
# theta: the recurrent net's weights.
# inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
# acc_state: Each timestep's computed new state was stashed into
# acc_state by the Forward pass.
# acc_extras: Each timestep's computed extras was stashed into
# acc_extras by the Forward pass.
# d_theta: All timestep's gradient for theta is accumulated (added) into
# d_theta.
    # d_state1: The backprop-ed gradient for the new state computed by
# timestep t.
# d_inputs: d_inputs[t, :] is populated by the backward time step t.
# d_acc_state: The backprop-ed gradient for acc_state.
bakloop_sig = [
self._theta, self._state, self._inputs, self._state, self._extras,
self._theta, self._state, self._inputs, self._state
]
@function.Defun(dtypes.int32, dev_t_type, *_Dtypes(bakloop_sig))
def BackwardLoopBody(*args):
"""Backward loop body function."""
t, dev_t = args[0], args[1]
(theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state1,
d_inputs, d_acc_state) = _Pack(args[2:], bakloop_sig)
# The input recurrent state for time step t is previous time step's
# output, or the original state0 when on time step 0.
state_from_acc = _Index(acc_state, math_ops.maximum(0, t - 1))
state0 = functional_ops.If(
math_ops.equal(t, array_ops.constant(0, dtypes.int32)),
_Flatten([state_from_acc, orig_state0]), ReturnOrigState0,
ReturnAccState)
state0 = nest.pack_sequence_as(orig_state0, state0)
# The external inputs for time step t.
inputs_t = _Index(inputs, t)
# The extras for time step t.
extras_t = _Index(acc_extras, t)
d_state1 = _Add(_Index(d_acc_state, t), d_state1)
(d_theta_t, d_state0, d_inputs_t) = _Pack(
Bak(*_Flatten([theta, state0, inputs_t, extras_t, d_state1])),
[self._theta, self._state, self._inputs])
d_theta = _Add(d_theta, d_theta_t)
d_inputs = _Update(d_inputs, d_inputs_t, dev_t)
return [math_ops.subtract(dev_t, 1)] + _Flatten([
theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state0,
d_inputs, d_acc_state
])
# Backward calls BackwardLoopBody n times. Each time computes the backprop
# for one time step of the recurrent net.
backward_sig = [
self._theta, self._state, self._inputs, self._max_input_length,
self._state, self._extras, self._state, self._state
]
@function.Defun(*_Dtypes(backward_sig), noinline=noinline)
def Backward(*args):
"""Backward pass for the recurrent net."""
# theta, state0, inputs are Forward's inputs.
# acc_state is the accumulated 1st output of Forward.
# acc_extras is the accumulated 2nd output of Forward.
# d_acc_state is the gradient for acc_state.
# d_state1 is the gradient for the final state computed by Forward.
(theta, state0, inputs, max_input_length, acc_state, acc_extras,
d_acc_state, d_state1) = _Pack(args, backward_sig)
# Accumulators for gradients.
d_theta = _EmptyLike(theta)
d_inputs = _EmptyLike(inputs)
      # Loop backwards. Note that the loop's limit of -1 is exclusive, so
      # the loop runs through t=0.
t = max_input_length - 1
dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
run = functional_ops.For(
start=t,
limit=-1,
delta=-1,
inputs=[dev_t] + _Flatten([
theta, state0, inputs, acc_state, acc_extras, d_theta, d_state1,
d_inputs, d_acc_state
]),
body=BackwardLoopBody,
rewrite_with_while=compiled)
(theta, state0, inputs, acc_state, acc_extras, d_theta, d_state0,
d_inputs, d_acc_state) = _Pack(run[1:], bakloop_sig)
d_max_input_length = array_ops.constant(0, dtype=max_input_length.dtype)
return _Flatten(
[d_theta, d_state0, d_inputs, d_max_input_length, acc_extras])
self._forward = Forward
def _MaybeComputeMaxInputLength(self, inputs, max_input_length):
if max_input_length is not None:
return max_input_length
return math_ops.reduce_max(array_ops.shape(nest.flatten(inputs)[0])[0])
def Compute(self):
return _Pack(
self._forward(*_Flatten([
self._theta, self._state, self._inputs, self._max_input_length,
self._extras
])), [self._state, self._state, self._extras])[:2]
def _GetCellGrad(cell_fn, cell_grad):
"""Returns the gradient function for cell_fn.
Args:
cell_fn: The recurrent neural net's cell function.
cell_grad: If not None, cell_fn's gradient function.
Returns:
Returns cell_grad if not None. Otherwise, assume cell_fn is a python
function representing the recurrent neural net's cell function, i.e.,
cell_fn: (theta, state0, inputs) -> (state1, extra)
returns its default gradient python function, i.e.,
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
"""
if cell_grad:
return cell_grad
def CellGrad(theta, state0, inputs, extras, dstate1):
"""Default gradient function for cell_fn."""
# NOTE: The default grad function recomputes the forward
# function and does not take advantage of 'extras' returned by
# the forward function.
del extras
state1, extras = cell_fn(theta, state0, inputs)
ys = _Flatten([state1])
xs = _Flatten([theta, state0, inputs])
grad_ys = _Flatten([dstate1])
grads = gradients_impl.gradients(ys=ys, xs=xs, grad_ys=grad_ys)
return _ConvertNoneGradientToZeros([theta, state0, inputs],
_Pack(grads, [theta, state0, inputs]))
return CellGrad
def _IsSingleTimeStep(inputs, max_input_length):
"""Returns True only if the time dimension of inputs is 1."""
if not isinstance(max_input_length, ops.Tensor):
return max_input_length == 1
for x in nest.flatten(inputs):
if x.shape.dims is None or x.shape[0].value != 1:
return False
return True
def Recurrent(theta,
state0,
inputs,
cell_fn,
cell_grad=None,
extras=None,
max_input_length=None,
use_tpu=False):
"""Compute a recurrent neural net.
Roughly, Recurrent() computes the following:
state = state0
for t in inputs' sequence length:
state = cell_fn(theta, state, inputs[t, :])
accumulate_state[t, :] = state
return accumulate_state, state
theta, state, inputs are all structures of tensors.
inputs[t, :] means taking a slice out from every tensor in the inputs.
accumulate_state[t, :] = state means that we stash every tensor in
'state' into a slice of the corresponding tensor in
accumulate_state.
cell_fn is a python callable computing (building up a TensorFlow
graph) the recurrent neural network's one forward step. Two calls of
cell_fn must describe two identical computations.
By construction, Recurrent()'s backward computation does not access
any intermediate values computed by cell_fn during forward
computation. We may extend Recurrent() to support that by taking a
customized backward function of cell_fn.
Args:
theta: weights. A structure of tensors.
state0: initial state. A structure of tensors.
inputs: inputs. A structure of tensors.
cell_fn: A python function, which computes:
state1, extras = cell_fn(theta, state0, inputs[t, :])
cell_grad: A python function which computes:
dtheta, dstate0, dinputs[t, :] = cell_grad(
theta, state0, inputs[t, :], extras, dstate1)
extras: A structure of tensors. The 2nd return value of every
invocation of cell_fn is a structure of tensors with matching keys
and shapes of this `extras`.
max_input_length: maximum length of effective input. This is used to
truncate the computation if the inputs have been allocated to a
larger size. A scalar tensor.
use_tpu: whether or not we are on TPU.
Returns:
accumulate_state and the final state.
"""
if cell_grad is None and _IsSingleTimeStep(inputs, max_input_length):
    # The sequence length is statically known to be 1. Hence, we just need to
# call cell_fn once without putting it into a loop.
inputs = nest.map_structure(lambda x: array_ops.squeeze(x, axis=0), inputs)
state1, _ = cell_fn(theta, state0, inputs)
acc_state = nest.map_structure(lambda x: array_ops.expand_dims(x, axis=0),
state1)
return acc_state, state1
# If cell_grad is not given, derives the gradient function from
# cell_fn.
cell_grad = _GetCellGrad(cell_fn, cell_grad)
if extras is None:
# Derives 'extras' so that we can allocate extras' accumulator.
_, extras = cell_fn(theta, state0, _Index(inputs, 0))
extras = nest.map_structure(array_ops.zeros_like, extras)
else:
_, actual = cell_fn(theta, state0, _Index(inputs, 0))
_AssertIsCompatible(extras, actual)
return _Recurrent(
cell_fn=cell_fn,
cell_grad=cell_grad,
theta=theta,
state0=state0,
inputs=inputs,
max_input_length=max_input_length,
extras=extras,
use_tpu=use_tpu).Compute()
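# Illustrative usage sketch (not part of the original module). It assumes the
# graph-mode TF 1.x API used above; the Elman-style cell, shapes and names
# below are made up purely for demonstration.
def _RecurrentUsageExample():
  """Builds a tiny recurrent net with Recurrent(); returns (acc_state, final)."""
  import collections
  import tensorflow as tf
  ElmanTheta = collections.namedtuple('ElmanTheta', ['w', 'b'])
  ElmanState = collections.namedtuple('ElmanState', ['h'])
  ElmanInputs = collections.namedtuple('ElmanInputs', ['x'])
  def ElmanCell(theta, state0, inputs):
    # One step: h1 = sigmoid([x_t, h0] . w + b). Both return values must be
    # structures of tensors; the second one plays the role of 'extras'.
    xh = tf.concat([inputs.x, state0.h], axis=1)
    h1 = tf.sigmoid(tf.matmul(xh, theta.w) + theta.b)
    state1 = ElmanState(h=h1)
    return state1, state1
  seq_len, batch, dims = 5, 2, 3
  theta = ElmanTheta(w=tf.random_normal([2 * dims, dims]), b=tf.zeros([dims]))
  state0 = ElmanState(h=tf.zeros([batch, dims]))
  inputs = ElmanInputs(x=tf.random_normal([seq_len, batch, dims]))
  # acc_state.h has shape [seq_len, batch, dims]; final.h has [batch, dims].
  acc_state, final = Recurrent(theta, state0, inputs, cell_fn=ElmanCell)
  return acc_state, final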
|
|
# Authors: Adam Li <adam2392@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
from collections import OrderedDict
from datetime import datetime, timezone
import numpy as np
from ..base import BaseRaw
from ..constants import FIFF
from ..meas_info import create_info
from ..utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import logger, verbose, fill_doc, warn
@fill_doc
def read_raw_persyst(fname, preload=False, verbose=None):
"""Reader for a Persyst (.lay/.dat) recording.
Parameters
----------
fname : str
Path to the Persyst header (.lay) file.
%(preload)s
%(verbose)s
Returns
-------
raw : instance of RawPersyst
A Raw object containing Persyst data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawPersyst(fname, preload, verbose)
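# Illustrative usage sketch (not part of MNE-Python proper). The file name is
# hypothetical; any Persyst .lay header with its matching .dat file will do.
def _example_read_persyst():
    """Read a Persyst recording and inspect its data and annotations."""
    import mne
    raw = mne.io.read_raw_persyst('sub-01_task-rest.lay', preload=True)
    print(raw.info['sfreq'], raw.ch_names[:5])
    print(raw.annotations)  # .lay comments are exposed as annotations
    data, times = raw[:10, :1000]  # first 10 channels, first 1000 samples
    return data, times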
@fill_doc
class RawPersyst(BaseRaw):
"""Raw object from a Persyst file.
Parameters
----------
fname : str
Path to the Persyst header (.lay) file.
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, fname, preload=False, verbose=None):
logger.info('Loading %s' % fname)
if not fname.endswith('.lay'):
fname = fname + '.lay'
curr_path, lay_fname = op.dirname(fname), op.basename(fname)
if not op.exists(fname):
            raise FileNotFoundError(f'The path you specified, '
                                    f'"{lay_fname}", does not exist.')
# sections and subsections currently unused
keys, data, sections = _read_lay_contents(fname)
# these are the section headers in the Persyst file layout
# Note: We do not make use of "SampleTimes" yet
fileinfo_dict = OrderedDict()
channelmap_dict = OrderedDict()
patient_dict = OrderedDict()
comments_dict = OrderedDict()
# loop through each line in the lay file
for key, val, section in zip(keys, data, sections):
if key == '':
continue
            # make sure keys are lowercase for everything except the
            # channel map (electrode names)
if key is not None and section != 'channelmap':
key = key.lower()
# FileInfo
if section == 'fileinfo':
# extract the .dat file name
if key == 'file':
dat_fname = val
dat_path = op.dirname(dat_fname)
dat_fpath = op.join(curr_path, op.basename(dat_fname))
# determine if .dat file exists where it should
error_msg = f'The data path you specified ' \
f'does not exist for the lay path, {lay_fname}'
if op.isabs(dat_path) and not op.exists(dat_fname):
raise FileNotFoundError(error_msg)
if not op.exists(dat_fpath):
raise FileNotFoundError(error_msg)
fileinfo_dict[key] = val
# ChannelMap
elif section == 'channelmap':
# channel map has <channel_name>=<number> for <key>=<val>
channelmap_dict[key] = val
# Patient (All optional)
elif section == 'patient':
patient_dict[key] = val
elif section == 'comments':
comments_dict[key] = val
# get numerical metadata
# datatype is either 7 for 32 bit, or 0 for 16 bit
datatype = fileinfo_dict.get('datatype')
cal = float(fileinfo_dict.get('calibration'))
n_chs = int(fileinfo_dict.get('waveformcount'))
# Store subject information from lay file in mne format
# Note: Persyst also records "Physician", "Technician",
# "Medications", "History", and "Comments1" and "Comments2"
# and this information is currently discarded
subject_info = _get_subjectinfo(patient_dict)
# set measurement date
testdate = patient_dict.get('testdate')
if testdate is not None:
# TODO: Persyst may change its internal date schemas
# without notice
            # These are the three datetime storage formats observed
            # so far in Persyst .lay files
if '/' in testdate:
testdate = datetime.strptime(testdate, '%m/%d/%Y')
elif '-' in testdate:
testdate = datetime.strptime(testdate, '%d-%m-%Y')
elif '.' in testdate:
testdate = datetime.strptime(testdate, '%Y.%m.%d')
if not isinstance(testdate, datetime):
warn('Cannot read in the measurement date due '
'to incompatible format. Please set manually '
'for %s ' % lay_fname)
meas_date = None
else:
testtime = datetime.strptime(patient_dict.get('testtime'),
'%H:%M:%S')
meas_date = datetime(
year=testdate.year, month=testdate.month,
day=testdate.day, hour=testtime.hour,
minute=testtime.minute, second=testtime.second,
tzinfo=timezone.utc)
# Create mne structure
ch_names = list(channelmap_dict.keys())
if n_chs != len(ch_names):
raise RuntimeError('Channels in lay file do not '
'match the number of channels '
'in the .dat file.') # noqa
# get rid of the "-Ref" in channel names
ch_names = [ch.upper().split('-REF')[0] for ch in ch_names]
# get the sampling rate and default channel types to EEG
sfreq = fileinfo_dict.get('samplingrate')
ch_types = 'eeg'
info = create_info(ch_names, sfreq, ch_types=ch_types)
info.update(subject_info=subject_info)
for idx in range(n_chs):
# calibration brings to uV then 1e-6 brings to V
info['chs'][idx]['cal'] = cal * 1.0e-6
info['meas_date'] = meas_date
# determine number of samples in file
# Note: We do not use the lay file to do this
# because clips in time may be generated by Persyst that
# DO NOT modify the "SampleTimes" section
with open(dat_fpath, 'rb') as f:
# determine the precision
if int(datatype) == 7:
# 32 bit
dtype = np.dtype('i4')
elif int(datatype) == 0:
# 16 bit
dtype = np.dtype('i2')
else:
raise RuntimeError(f'Unknown format: {datatype}')
# allow offset to occur
f.seek(0, os.SEEK_END)
n_samples = f.tell()
n_samples = n_samples // (dtype.itemsize * n_chs)
logger.debug(f'Loaded {n_samples} samples '
f'for {n_chs} channels.')
raw_extras = {
'dtype': dtype,
'n_chs': n_chs,
'n_samples': n_samples
}
# create Raw object
super(RawPersyst, self).__init__(
info, preload, filenames=[dat_fpath],
last_samps=[n_samples - 1],
raw_extras=[raw_extras], verbose=verbose)
# set annotations based on the comments read in
num_comments = len(comments_dict)
onset = np.zeros(num_comments, float)
duration = np.zeros(num_comments, float)
description = [''] * num_comments
for t_idx, (_description, (_onset, _duration)) in \
enumerate(comments_dict.items()):
# extract the onset, duration, description to
# create an Annotations object
onset[t_idx] = _onset
duration[t_idx] = _duration
description[t_idx] = _description
annot = Annotations(onset, duration, description)
self.set_annotations(annot)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file.
The Persyst software records raw data in either 16 or 32 bit
binary files. In addition, it stores the calibration to convert
data to uV in the lay file.
"""
dtype = self._raw_extras[fi]['dtype']
n_chs = self._raw_extras[fi]['n_chs']
dat_fname = self._filenames[fi]
# compute samples count based on start and stop
time_length_samps = stop - start
        # read data from the .dat file into an array of the correct size,
        # then calibrate; samples are stored as consecutive frames of
        # n_chs interleaved channel values
count = time_length_samps * n_chs
# seek the dat file
with open(dat_fname, 'rb') as dat_file_ID:
# allow offset to occur
dat_file_ID.seek(n_chs * dtype.itemsize * start, 1)
# read in the actual record starting at possibly offset
record = np.fromfile(dat_file_ID, dtype=dtype,
count=count)
# chs * rows
# cast as float32; more than enough precision
record = np.reshape(record, (n_chs, -1), 'F').astype(np.float32)
# calibrate to convert to V and handle mult
_mult_cal_one(data, record, idx, cals, mult)
def _get_subjectinfo(patient_dict):
    # attempt to parse out the birthdate; if it does not
    # meet the spec, it is set to None
    birthdate = patient_dict.get('birthdate')
    if birthdate and '/' in birthdate:
        try:
            birthdate = datetime.strptime(birthdate, '%m/%d/%y')
        except ValueError:
            print('Unable to process birthdate of %s ' % birthdate)
            birthdate = None
    elif birthdate and '-' in birthdate:
        try:
            birthdate = datetime.strptime(birthdate, '%d-%m-%y')
        except ValueError:
            print('Unable to process birthdate of %s ' % birthdate)
            birthdate = None
subject_info = {
'first_name': patient_dict.get('first'),
'middle_name': patient_dict.get('middle'),
'last_name': patient_dict.get('last'),
'sex': patient_dict.get('sex'),
'hand': patient_dict.get('hand'),
'his_id': patient_dict.get('id'),
'birthday': birthdate,
}
# Recode sex values
sex_dict = dict(
m=FIFF.FIFFV_SUBJ_SEX_MALE,
male=FIFF.FIFFV_SUBJ_SEX_MALE,
f=FIFF.FIFFV_SUBJ_SEX_FEMALE,
female=FIFF.FIFFV_SUBJ_SEX_FEMALE,
)
subject_info['sex'] = sex_dict.get(subject_info['sex'],
FIFF.FIFFV_SUBJ_SEX_UNKNOWN)
# Recode hand values
hand_dict = dict(
r=FIFF.FIFFV_SUBJ_HAND_RIGHT,
right=FIFF.FIFFV_SUBJ_HAND_RIGHT,
l=FIFF.FIFFV_SUBJ_HAND_LEFT,
left=FIFF.FIFFV_SUBJ_HAND_LEFT,
a=FIFF.FIFFV_SUBJ_HAND_AMBI,
ambidextrous=FIFF.FIFFV_SUBJ_HAND_AMBI,
ambi=FIFF.FIFFV_SUBJ_HAND_AMBI,
)
# no handedness is set when unknown
try:
subject_info['hand'] = hand_dict[subject_info['hand']]
except KeyError:
subject_info.pop('hand')
return subject_info
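# Illustrative sketch (not part of MNE-Python proper) of what
# _get_subjectinfo() returns for a minimal Patient section; the values below
# are made up.
def _example_subject_info():
    patient = {'first': 'Jane', 'last': 'Doe', 'sex': 'f', 'hand': 'r',
               'id': '001', 'birthdate': '01/02/90'}
    info = _get_subjectinfo(patient)
    # info['sex'] == FIFF.FIFFV_SUBJ_SEX_FEMALE
    # info['hand'] == FIFF.FIFFV_SUBJ_HAND_RIGHT
    # info['birthday'] == datetime(1990, 1, 2)
    return info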
def _read_lay_contents(fname):
"""Lay file are laid out like a INI file."""
# keep track of sections, keys and data
sections = []
keys, data = [], []
# initialize all section to empty str
section = ''
with open(fname, 'r') as fin:
for line in fin:
# break a line into a status, key and value
status, key, val = _process_lay_line(line, section)
# handle keys and values if they are
# Section, Subsections, or Line items
if status == 1: # Section was found
section = val.lower()
continue
# keep track of all sections, subsections,
# keys and the data of the file
sections.append(section)
data.append(val)
keys.append(key)
return keys, data, sections
def _process_lay_line(line, section):
"""Process a line read from the Lay (INI) file.
Each line in the .lay file will be processed
into a structured ``status``, ``key`` and ``value``.
Parameters
----------
line : str
The actual line in the Lay file.
section : str
The section in the Lay file.
Returns
-------
status : int
Returns the following integers based on status.
-1 => unknown string found
0 => empty line found
1 => section found
2 => key-value pair found
key : str
The string before the ``'='`` character. If section is "Comments",
then returns the text comment description.
value : str
The string from the line after the ``'='`` character. If section is
"Comments", then returns the onset and duration as a tuple.
"""
key = '' # default; only return value possibly not set
line = line.strip() # remove leading and trailing spaces
end_idx = len(line) - 1 # get the last index of the line
# empty sequence evaluates to false
if not line:
status = 0
key = ''
value = ''
return status, key, value
# section found
elif (line[0] == '[') and (line[end_idx] == ']') \
and (end_idx + 1 >= 3):
status = 1
value = line[1:end_idx].lower()
# key found
else:
# handle Comments section differently from all other sections
# TODO: utilize state and var_type in code.
# Currently not used
if section == 'comments':
# Persyst Comments output 5 variables "," separated
time_sec, duration, state, var_type, text = line.split(',')
status = 2
key = text
value = (time_sec, duration)
# all other sections
else:
if '=' not in line:
raise RuntimeError('The line %s does not conform '
'to the standards. Please check the '
'.lay file.' % line) # noqa
pos = line.index('=')
status = 2
# the line now is composed of a
# <key>=<value>
            key = line[0:pos]
            key = key.strip()
            value = line[pos + 1:end_idx + 1]
            value = value.strip()
return status, key, value
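# Illustrative sketch (not part of MNE-Python proper) of how
# _process_lay_line() classifies lines; the sample lines are made up but
# follow the INI-like layout described above.
def _example_process_lay_lines():
    lines = [
        '[FileInfo]',                  # -> status 1, a section header
        'SamplingRate=256',            # -> status 2, a key=value pair
        '',                            # -> status 0, an empty line
        '[Comments]',                  # -> status 1, switches the section
        '12.5,2.0,0,0,Seizure onset',  # -> status 2, key=text, value=(onset, duration)
    ]
    section = ''
    parsed = []
    for line in lines:
        status, key, value = _process_lay_line(line, section)
        if status == 1:
            section = value
        parsed.append((status, key, value))
    return parsed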
|
|
# sql/ddl.py
# Copyright (C) 2009-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provides the hierarchy of DDL-defining schema items as well as routines
to invoke them for a create/drop call.
"""
from .. import util
from .elements import ClauseElement
from .visitors import traverse
from .base import Executable, _generative, SchemaVisitor, _bind_or_error
from ..util import topological
from .. import event
from .. import exc
class _DDLCompiles(ClauseElement):
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.ddl_compiler(dialect, self, **kw)
class DDLElement(Executable, _DDLCompiles):
"""Base class for DDL expression constructs.
This class is the base for the general purpose :class:`.DDL` class,
as well as the various create/drop clause constructs such as
:class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
etc.
:class:`.DDLElement` integrates closely with SQLAlchemy events,
introduced in :ref:`event_toplevel`. An instance of one is
itself an event receiving callable::
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
.. seealso::
:class:`.DDL`
:class:`.DDLEvents`
:ref:`event_toplevel`
:ref:`schema_ddl_sequences`
"""
_execution_options = Executable.\
_execution_options.union({'autocommit': True})
target = None
on = None
dialect = None
callable_ = None
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_ddl(self, multiparams, params)
def execute(self, bind=None, target=None):
"""Execute this DDL immediately.
Executes the DDL statement in isolation using the supplied
:class:`.Connectable` or
:class:`.Connectable` assigned to the ``.bind``
property, if not supplied. If the DDL has a conditional ``on``
criteria, it will be invoked with None as the event.
:param bind:
Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
:class:`.Connectable` must be present in the
``.bind`` property.
:param target:
Optional, defaults to None. The target SchemaItem for the
execute call. Will be passed to the ``on`` callable if any,
and may also provide string expansion data for the
statement. See ``execute_at`` for more information.
"""
if bind is None:
bind = _bind_or_error(self)
if self._should_execute(target, bind):
return bind.execute(self.against(target))
else:
bind.engine.logger.info(
"DDL execution skipped, criteria not met.")
@util.deprecated("0.7", "See :class:`.DDLEvents`, as well as "
":meth:`.DDLElement.execute_if`.")
def execute_at(self, event_name, target):
"""Link execution of this DDL to the DDL lifecycle of a SchemaItem.
Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
executing it when that schema item is created or dropped. The DDL
statement will be executed using the same Connection and transactional
context as the Table create/drop itself. The ``.bind`` property of
this statement is ignored.
:param event:
One of the events defined in the schema item's ``.ddl_events``;
e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
:param target:
          The Table or MetaData instance with which this DDLElement will
          be associated.
A DDLElement instance can be linked to any number of schema items.
``execute_at`` builds on the ``append_ddl_listener`` interface of
:class:`.MetaData` and :class:`.Table` objects.
Caveat: Creating or dropping a Table in isolation will also trigger
any DDL set to ``execute_at`` that Table's MetaData. This may change
in a future release.
"""
def call_event(target, connection, **kw):
if self._should_execute_deprecated(event_name,
target, connection, **kw):
return connection.execute(self.against(target))
event.listen(target, "" + event_name.replace('-', '_'), call_event)
@_generative
def against(self, target):
"""Return a copy of this DDL against a specific schema item."""
self.target = target
@_generative
def execute_if(self, dialect=None, callable_=None, state=None):
"""Return a callable that will execute this
DDLElement conditionally.
Used to provide a wrapper for event listening::
event.listen(
metadata,
'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
:param dialect: May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something').execute_if(dialect='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something').execute_if(dialect=('postgresql', 'mysql'))
:param callable_: A callable, which will be invoked with
four positional arguments as well as optional keyword
arguments:
:ddl:
This DDL element.
:target:
The :class:`.Table` or :class:`.MetaData` object which is the
target of this event. May be None if the DDL is executed
explicitly.
:bind:
The :class:`.Connection` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
:state:
Optional keyword argument - will be the ``state`` argument
passed to this function.
:checkfirst:
Keyword argument, will be True if the 'checkfirst' flag was
set during the call to ``create()``, ``create_all()``,
``drop()``, ``drop_all()``.
If the callable returns a true value, the DDL statement will be
executed.
:param state: any value which will be passed to the callable\_
as the ``state`` keyword argument.
.. seealso::
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
self.dialect = dialect
self.callable_ = callable_
self.state = state
def _should_execute(self, target, bind, **kw):
if self.on is not None and \
not self._should_execute_deprecated(None, target, bind, **kw):
return False
if isinstance(self.dialect, util.string_types):
if self.dialect != bind.engine.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if bind.engine.name not in self.dialect:
return False
if self.callable_ is not None and \
not self.callable_(self, target, bind, state=self.state, **kw):
return False
return True
def _should_execute_deprecated(self, event, target, bind, **kw):
if self.on is None:
return True
elif isinstance(self.on, util.string_types):
return self.on == bind.engine.name
elif isinstance(self.on, (tuple, list, set)):
return bind.engine.name in self.on
else:
return self.on(self, event, target, bind, **kw)
def __call__(self, target, bind, **kw):
"""Execute the DDL as a ddl_listener."""
if self._should_execute(target, bind, **kw):
return bind.execute(self.against(target))
def _check_ddl_on(self, on):
if (on is not None and
(not isinstance(on, util.string_types + (tuple, list, set)) and
not util.callable(on))):
raise exc.ArgumentError(
"Expected the name of a database dialect, a tuple "
"of names, or a callable for "
"'on' criteria, got type '%s'." % type(on).__name__)
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class DDL(DDLElement):
"""A literal DDL statement.
Specifies literal SQL DDL to be executed by the database. DDL objects
function as DDL event listeners, and can be subscribed to those events
listed in :class:`.DDLEvents`, using either :class:`.Table` or
:class:`.MetaData` objects as targets. Basic templating support allows
a single DDL instance to handle repetitive tasks for multiple tables.
Examples::
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
    string substitutions are available::
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
%(fullname)s - the Table name including schema, quoted if needed
The DDL's "context", if any, will be combined with the standard
    substitutions noted above. Keys present in the context will override
the standard substitutions.
"""
__visit_name__ = "ddl"
def __init__(self, statement, on=None, context=None, bind=None):
"""Create a DDL statement.
:param statement:
A string or unicode string to be executed. Statements will be
processed with Python's string formatting operator. See the
``context`` argument and the ``execute_at`` method.
A literal '%' in a statement must be escaped as '%%'.
SQL bind parameters are not available in DDL statements.
:param on:
.. deprecated:: 0.7
See :meth:`.DDLElement.execute_if`.
Optional filtering criteria. May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something', on='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something', on=('postgresql', 'mysql'))
If a callable, it will be invoked with four positional arguments
as well as optional keyword arguments:
:ddl:
This DDL element.
:event:
The name of the event that has triggered this DDL, such as
            'after-create'. Will be None if the DDL is executed explicitly.
:target:
The ``Table`` or ``MetaData`` object which is the target of
this event. May be None if the DDL is executed explicitly.
:connection:
The ``Connection`` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
If the callable returns a true value, the DDL statement will be
executed.
:param context:
Optional dictionary, defaults to None. These values will be
available for use in string substitutions on the DDL statement.
:param bind:
Optional. A :class:`.Connectable`, used by
default when ``execute()`` is invoked without a bind argument.
.. seealso::
:class:`.DDLEvents`
:mod:`sqlalchemy.event`
"""
if not isinstance(statement, util.string_types):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'" %
statement)
self.statement = statement
self.context = context or {}
self._check_ddl_on(on)
self.on = on
self._bind = bind
def __repr__(self):
return '<%s@%s; %s>' % (
type(self).__name__, id(self),
', '.join([repr(self.statement)] +
['%s=%r' % (key, getattr(self, key))
for key in ('on', 'context')
if getattr(self, key)]))
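# Illustrative sketch (not part of SQLAlchemy): conditioning a DDL statement
# on a callable predicate plus a 'state' value, as described in
# DDLElement.execute_if() above. The table and statement are hypothetical.
def _example_conditional_ddl(metadata):
    def should_run(ddl, target, bind, state, **kw):
        # Only run when the caller opted in via 'state' and the backend is
        # not SQLite.
        return state and bind.engine.name != 'sqlite'
    spow = DDL('ALTER TABLE users SET secretpowers TRUE')
    event.listen(metadata, 'after_create',
                 spow.execute_if(callable_=should_run, state=True))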
class _CreateDropBase(DDLElement):
"""Base class for DDL constucts that represent CREATE and DROP or
equivalents.
The common theme of _CreateDropBase is a single
``element`` attribute which refers to the element
to be created or dropped.
"""
def __init__(self, element, on=None, bind=None):
self.element = element
self._check_ddl_on(on)
self.on = on
self.bind = bind
def _create_rule_disable(self, compiler):
"""Allow disable of _create_rule using a callable.
Pass to _create_rule using
util.portable_instancemethod(self._create_rule_disable)
to retain serializability.
"""
return False
class CreateSchema(_CreateDropBase):
"""Represent a CREATE SCHEMA statement.
.. versionadded:: 0.7.4
The argument here is the string name of the schema.
"""
__visit_name__ = "create_schema"
def __init__(self, name, quote=None, **kw):
"""Create a new :class:`.CreateSchema` construct."""
self.quote = quote
super(CreateSchema, self).__init__(name, **kw)
class DropSchema(_CreateDropBase):
"""Represent a DROP SCHEMA statement.
The argument here is the string name of the schema.
.. versionadded:: 0.7.4
"""
__visit_name__ = "drop_schema"
def __init__(self, name, quote=None, cascade=False, **kw):
"""Create a new :class:`.DropSchema` construct."""
self.quote = quote
self.cascade = cascade
super(DropSchema, self).__init__(name, **kw)
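# Illustrative sketch (not part of SQLAlchemy): emitting the schema DDL
# directly against an engine or connection; the schema name is hypothetical
# and the backend must support schemas.
def _example_schema_ddl(engine):
    engine.execute(CreateSchema('analytics'))
    engine.execute(DropSchema('analytics', cascade=True))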
class CreateTable(_CreateDropBase):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
def __init__(self, element, on=None, bind=None):
"""Create a :class:`.CreateTable` construct.
:param element: a :class:`.Table` that's the subject
of the CREATE
:param on: See the description for 'on' in :class:`.DDL`.
:param bind: See the description for 'bind' in :class:`.DDL`.
"""
super(CreateTable, self).__init__(element, on=on, bind=bind)
self.columns = [CreateColumn(column)
for column in element.columns
]
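# Illustrative sketch (not part of SQLAlchemy): rendering the CREATE TABLE
# statement for a table without executing it; the table definition is
# hypothetical.
def _example_render_create_table():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    # str() compiles against a default dialect; use
    # CreateTable(users).compile(dialect=...) for backend-specific DDL.
    return str(CreateTable(users))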
class _DropView(_CreateDropBase):
"""Semi-public 'DROP VIEW' construct.
Used by the test suite for dialect-agnostic drops of views.
This object will eventually be part of a public "view" API.
"""
__visit_name__ = "drop_view"
class CreateColumn(_DDLCompiles):
"""Represent a :class:`.Column` as rendered in a CREATE TABLE statement,
via the :class:`.CreateTable` construct.
This is provided to support custom column DDL within the generation
of CREATE TABLE statements, by using the
compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
to extend :class:`.CreateColumn`.
Typical integration is to examine the incoming :class:`.Column`
object, and to redirect compilation if a particular flag or condition
is found::
from sqlalchemy import schema
from sqlalchemy.ext.compiler import compiles
@compiles(schema.CreateColumn)
def compile(element, compiler, **kw):
column = element.element
if "special" not in column.info:
return compiler.visit_create_column(element, **kw)
text = "%s SPECIAL DIRECTIVE %s" % (
column.name,
compiler.type_compiler.process(column.type)
)
default = compiler.get_column_default_string(column)
if default is not None:
text += " DEFAULT " + default
if not column.nullable:
text += " NOT NULL"
if column.constraints:
text += " ".join(
compiler.process(const)
for const in column.constraints)
return text
The above construct can be applied to a :class:`.Table` as follows::
        from sqlalchemy import Table, MetaData, Column, Integer, String
        from sqlalchemy import schema
        metadata = MetaData()
        table = Table('mytable', metadata,
Column('x', Integer, info={"special":True}, primary_key=True),
Column('y', String(50)),
Column('z', String(20), info={"special":True})
)
metadata.create_all(conn)
Above, the directives we've added to the :attr:`.Column.info` collection
will be detected by our custom compilation scheme::
CREATE TABLE mytable (
x SPECIAL DIRECTIVE INTEGER NOT NULL,
y VARCHAR(50),
z SPECIAL DIRECTIVE VARCHAR(20),
PRIMARY KEY (x)
)
The :class:`.CreateColumn` construct can also be used to skip certain
columns when producing a ``CREATE TABLE``. This is accomplished by
creating a compilation rule that conditionally returns ``None``.
This is essentially how to produce the same effect as using the
``system=True`` argument on :class:`.Column`, which marks a column
as an implicitly-present "system" column.
For example, suppose we wish to produce a :class:`.Table` which skips
rendering of the Postgresql ``xmin`` column against the Postgresql backend,
but on other backends does render it, in anticipation of a triggered rule.
A conditional compilation rule could skip this name only on Postgresql::
from sqlalchemy.schema import CreateColumn
@compiles(CreateColumn, "postgresql")
def skip_xmin(element, compiler, **kw):
if element.element.name == 'xmin':
return None
else:
return compiler.visit_create_column(element, **kw)
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('xmin', Integer)
)
Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
which only includes the ``id`` column in the string; the ``xmin`` column
will be omitted, but only against the Postgresql backend.
.. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports
skipping of columns by returning ``None`` from a custom compilation rule.
.. versionadded:: 0.8 The :class:`.CreateColumn` construct was added
to support custom column creation styles.
"""
__visit_name__ = 'create_column'
def __init__(self, element):
self.element = element
class DropTable(_CreateDropBase):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
"""Represent a CREATE INDEX statement."""
__visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
"""Represent a DROP INDEX statement."""
__visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
"""Represent an ALTER TABLE ADD CONSTRAINT statement."""
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
class DropConstraint(_CreateDropBase):
"""Represent an ALTER TABLE DROP CONSTRAINT statement."""
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
class DDLBase(SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(self, dialect, connection, checkfirst=False,
tables=None, **kwargs):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or \
not self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_create_sequence(self, sequence):
return self.dialect.supports_sequences and \
(
(not self.dialect.sequences_optional or
not sequence.optional) and
(
not self.checkfirst or
not self.dialect.has_sequence(
self.connection,
sequence.name,
schema=sequence.schema)
)
)
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
collection = [t for t in sort_tables(tables)
if self._can_create_table(t)]
seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)]
metadata.dispatch.before_create(metadata, self.connection,
tables=collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for seq in seq_coll:
self.traverse_single(seq, create_ok=True)
for table in collection:
self.traverse_single(table, create_ok=True)
metadata.dispatch.after_create(metadata, self.connection,
tables=collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_table(self, table, create_ok=False):
if not create_ok and not self._can_create_table(table):
return
table.dispatch.before_create(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(CreateTable(table))
if hasattr(table, 'indexes'):
for index in table.indexes:
self.traverse_single(index)
table.dispatch.after_create(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
self.connection.execute(CreateSequence(sequence))
def visit_index(self, index):
self.connection.execute(CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(self, dialect, connection, checkfirst=False,
tables=None, **kwargs):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
self.memo = {}
def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
collection = [
t
for t in reversed(sort_tables(tables))
if self._can_drop_table(t)
]
seq_coll = [
s
for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)
]
metadata.dispatch.before_drop(
metadata, self.connection, tables=collection,
checkfirst=self.checkfirst, _ddl_runner=self)
for table in collection:
self.traverse_single(table, drop_ok=True)
for seq in seq_coll:
self.traverse_single(seq, drop_ok=True)
metadata.dispatch.after_drop(
metadata, self.connection, tables=collection,
checkfirst=self.checkfirst, _ddl_runner=self)
def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_drop_sequence(self, sequence):
return self.dialect.supports_sequences and \
((not self.dialect.sequences_optional or
not sequence.optional) and
(not self.checkfirst or
self.dialect.has_sequence(
self.connection,
sequence.name,
schema=sequence.schema))
)
def visit_index(self, index):
self.connection.execute(DropIndex(index))
def visit_table(self, table, drop_ok=False):
if not drop_ok and not self._can_drop_table(table):
return
table.dispatch.before_drop(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(DropTable(table))
table.dispatch.after_drop(table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self)
def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not self._can_drop_sequence(sequence):
return
self.connection.execute(DropSequence(sequence))
def sort_tables(tables, skip_fn=None, extra_dependencies=None):
"""sort a collection of Table objects in order of
their foreign-key dependency."""
tables = list(tables)
tuples = []
if extra_dependencies is not None:
tuples.extend(extra_dependencies)
def visit_foreign_key(fkey):
if fkey.use_alter:
return
elif skip_fn and skip_fn(fkey):
return
parent_table = fkey.column.table
if parent_table in tables:
child_table = fkey.parent.table
if parent_table is not child_table:
tuples.append((parent_table, child_table))
for table in tables:
traverse(table,
{'schema_visitor': True},
{'foreign_key': visit_foreign_key})
tuples.extend(
[parent, table] for parent in table._extra_dependencies
)
return list(topological.sort(tuples, tables))
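# Illustrative sketch (not part of SQLAlchemy): sort_tables() orders parent
# tables before the tables that reference them via foreign keys.
def _example_sort_tables():
    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    metadata = MetaData()
    parent = Table('parent', metadata,
                   Column('id', Integer, primary_key=True))
    child = Table('child', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer, ForeignKey('parent.id')))
    ordered = sort_tables([child, parent])
    assert ordered == [parent, child]
    return ordered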
|
|
# -*- coding: utf-8 -*-
"""core -- core behaviors for Owyl.
Copyright 2008 David Eyk. All rights reserved.
$Author$\n
$Rev$\n
$Date$
"""
__author__ = "$Author$"[9:-2]
__revision__ = "$Rev$"[6:-2]
__date__ = "$Date$"[7:-2]
import logging
try:
from mx.Stack import Stack, EmptyError
except ImportError:
from stack import Stack, EmptyError
RETURN_VALUES = set((True, False, None))
__all__ = ['wrap', 'task', 'taskmethod', 'parent_task', 'parent_taskmethod', 'visit',
'succeed', 'fail', 'succeedAfter', 'failAfter',
'sequence', 'selector', 'parallel', 'PARALLEL_SUCCESS',
'queue', 'parallel_queue',
'throw', 'catch',
'log',]
def wrap(func, *args, **kwargs):
"""Wrap a callable as a task. Yield the boolean of its result.
"""
def initTask(**initkwargs):
def makeIterator(**runkwargs):
result = func(*args, **kwargs)
yield bool(result)
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def task(func):
"""Task decorator.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(**initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(**runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def taskmethod(func):
"""Task decorator.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(self, **initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(self, **runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def parent_task(func):
"""Parent task decorator.
A parent task is a task that accepts children.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(*children, **initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(*children, **runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def parent_taskmethod(func):
"""Parent task decorator.
A parent task is a task that accepts children.
Decorate a generator function to produce a re-usable generator
factory for the given task.
"""
def initTask(self, *children, **initkwargs):
def makeIterator(**runkwargs):
runkwargs.update(initkwargs)
iterator = func(self, *children, **runkwargs)
return iterator
try: makeIterator.__name__ = func.__name__
except AttributeError: pass
try: makeIterator.__doc__ = func.__doc__
except AttributeError: pass
return makeIterator
try: initTask.__doc__ = func.__doc__
except AttributeError: pass
try: initTask.__name__ = func.__name__
except AttributeError: pass
return initTask
def visit(tree, **kwargs):
"""Iterate over a tree of nested iterators.
Apply the U{Visitor
Pattern<http://en.wikipedia.org/wiki/Visitor_pattern>} to a tree
of nested iterators. Iterators should yield True, False, None, or
a child iterator. Values of True or False are passed back to the
parent iterator. A value of None is silently ignored, and the
current iterator will be queried again on the next pass.
    The visitor yields None while the tree is still working and yields
    each True/False result as it bubbles up; the last value yielded
    before the visitor terminates with StopIteration is the tree's
    final result.
The visitor is essentially a micro-scheduler for a Behavior Tree
implemented as a tree of nested iterators. For more information,
see the discussion at
U{http://aigamedev.com/programming-tips/scheduler}.
"""
s = Stack()
return_values = RETURN_VALUES
current = tree(**kwargs)
send_value = None
send_ok = False
while True:
try:
if send_ok:
child = current.send(send_value)
send_value = None
send_ok = False
else:
child = current.next()
if child in return_values:
send_value = child
yield send_value
else:
# Descend into child node
s.push(current)
current = child
except StopIteration:
try:
current = s.pop()
send_ok = True
except EmptyError:
raise StopIteration
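# Illustrative sketch (not part of Owyl): build a tiny tree with the task
# factories defined below and drive it with visit(). The last non-None value
# yielded is the tree's overall result.
def _example_run_tree():
    tree = sequence(succeed(),
                    selector(fail(), succeed()))
    result = None
    for value in visit(tree):
        if value is not None:
            result = value
    return result  # True: both of the sequence's children succeeded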
@task
def succeed(**kwargs):
"""Always succeed.
"""
yield True
@task
def fail(**kwargs):
"""Always fail.
"""
yield False
@task
def stall(**kwargs):
"""Wrap a callable as a task. Yield the boolean of its result after 'after' iterations.
Yields 'None' 'after' times.
@keyword func: The callable to run.
@type func: callable
@keyword after: Run the callable after this many iterations.
@type after: int
"""
func = kwargs.pop('func')
after = kwargs.pop('after', 1)
for x in xrange(after):
yield None
yield bool(func())
@task
def succeedAfter(**kwargs):
"""Succeed after a given number of iterations.
Yields 'None' 'after' times.
@keyword after: How many iterations to succeed after.
@type after: int
"""
after = kwargs.pop('after', 1)
for x in xrange(after):
yield None
yield True
@task
def failAfter(**kwargs):
"""Fail after a given number of iterations.
Yields 'None' 'after' times.
@keyword after: How many iterations to fail after.
@type after: int
"""
after = kwargs.pop('after', 1)
for x in xrange(after):
yield None
yield False
@parent_task
def sequence(*children, **kwargs):
"""Run tasks in sequence until one fails.
The sequence will run each task in sequence until one fails,
    returning a failure. If all succeed, returns a success.
For more information, see the discussion at
U{http://aigamedev.com/hierarchical-logic/sequence}.
@param children: tasks to run in sequence as children.
"""
final_value = True
for child in children:
result = yield child(**kwargs)
if not result and result is not None:
final_value = False
break
yield final_value
@parent_task
def queue(queue, **kwargs):
"""Run tasks in the queue in sequence.
The queue will run each task in the queue in sequence. If the
queue is empty, it will stall until the queue receives new items.
Note: the queue task *never* returns a success or failure code.
The queue should be an object implementing pop(). If the queue has
items in it, it should evaluate to True, otherwise False. The
queue task will pop the next task in the queue and evaluate it in
the normal fashion.
@param queue: task queue.
@type queue: A sequence object implementing pop()
"""
while True:
if queue:
child = queue.pop()
yield child(**kwargs)
else:
yield None
@parent_task
def parallel_queue(queue, **kwargs):
"""Run tasks in the queue in parallel.
The queue will run each task in the queue in parallel. If the
queue is empty, it will stall until the queue receives new items.
Note: the queue task *never* returns a success or failure code.
The queue should be an object implementing pop(). If the queue has
items in it, it should evaluate to True, otherwise False. The
queue task will pop the next task in the queue and evaluate it in
the normal fashion.
@param queue: task queue.
"""
visits = [] # Canonical list of visited children
visiting = [] # Working list of visited children
while True:
if queue:
child = queue.pop()
visits.append(visit(child, **kwargs))
            visiting[:] = visits  # So we can remove from visits while iterating
for child in visiting:
try:
child.next()
except StopIteration:
visits.remove(child)
yield None
@parent_task
def selector(*children, **kwargs):
"""Run tasks in sequence until one succeeds.
The selector will run each task in sequence until one succeeds,
returning a success. If all fail, returns a failure.
For more information, see the discussion at
U{http://aigamedev.com/hierarchical-logic/selector}.
@param children: child tasks to select from.
"""
final_value = False
for child in children:
result = (yield child(**kwargs))
if result:
final_value = True
break
yield final_value
class Enum(object):
"""Enum/namespace class. Cannot be implemented.
Subclass and add class variables.
"""
def __init__(self):
raise NotImplementedError("_Enum class object. Do not instantiate.")
class PARALLEL_SUCCESS(Enum):
"""Success policy enumerator for parallel behavior.
C{REQUIRE_ALL}: All child tasks must succeed.
C{REQUIRE_ONE}: Only one child task must succeed.
"""
REQUIRE_ALL = "ALL"
REQUIRE_ONE = "ONE"
@parent_task
def parallel(*children, **kwargs):
"""Run tasks in parallel until the success policy is fulfilled or broken.
If the success policy is met, return a success. If the policy is
broken, return a failure.
For more information, see the discussion at
    U{http://aigamedev.com/hierarchical-logic/parallel}.
@param children: tasks to run in parallel as children.
@keyword policy: The success policy. All must succeed,
or only one must succeed.
@type policy: C{PARALLEL_SUCCESS.REQUIRE_ALL} or
C{PARALLEL_SUCCESS.REQUIRE_ONE}.
"""
return_values = set((True, False))
policy = kwargs.pop('policy', PARALLEL_SUCCESS.REQUIRE_ONE)
all_must_succeed = (policy == PARALLEL_SUCCESS.REQUIRE_ALL)
visits = [visit(arg, **kwargs) for arg in children]
final_value = True
while True:
try:
# Run one step on each child per iteration.
for child in visits:
result = child.next()
if result in return_values:
if not result and all_must_succeed:
final_value = False
break
elif result and not all_must_succeed:
final_value = True
break
else:
final_value = result
yield None
except StopIteration:
break
except EmptyError:
break
yield final_value
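# Illustrative sketch (not part of Owyl): run two branches concurrently and
# succeed as soon as one of them does.
def _example_parallel():
    tree = parallel(failAfter(after=3),
                    succeedAfter(after=1),
                    policy=PARALLEL_SUCCESS.REQUIRE_ONE)
    result = None
    for value in visit(tree):
        if value is not None:
            result = value
    return result  # True: the second branch succeeded first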
@task
def throw(**kwargs):
"""Throw (raise) an exception.
@keyword throws: An Exception to throw.
@type throws: C{Exception}
@keyword throws_message: Text to instantiate C{throws} with.
@type throws_message: C{str}
"""
throws = kwargs.pop('throws', Exception)
throws_message = kwargs.pop('throws_message', '')
class gen(object):
def __iter__(self):
return self
def next(self):
raise throws(throws_message)
return gen()
@parent_task
def catch(child, **kwargs):
"""Catch a raised exception from child and run an alternate branch.
Note: this will not catch exceptions raised in the branch.
@keyword caught: An Exception to catch.
@type caught: C{Exception}
@keyword branch: An alternate tree to visit when caught.
"""
caught = kwargs.pop('caught', Exception)
branch = kwargs.pop('branch', fail())
result = None
tree = visit(child, **kwargs)
try:
while result is None:
result = tree.next()
yield None
except caught:
while result is None:
result = (yield branch(**kwargs))
yield result
@parent_task
def log(message, **kwargs):
"""Log a message to the given logger.
@keyword name: The name of the logger to use.
@type name: str
@keyword level: The logging level to use.
@default level: logging.DEBUG
"""
name = kwargs.pop('name', None)
if name is None:
logger = logging.getLogger()
else:
logger = logging.getLogger(name)
level = kwargs.pop('level', logging.DEBUG)
logger.log(level, message)
yield True
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from six import moves
import testtools
from tempest.api.object_storage import base
from tempest import clients
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class AccountTest(base.BaseObjectTest):
containers = []
@classmethod
def resource_setup(cls):
super(AccountTest, cls).resource_setup()
for i in moves.xrange(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % chr(i))
cls.container_client.create_container(name)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
@classmethod
def resource_cleanup(cls):
cls.delete_containers(cls.containers)
super(AccountTest, cls).resource_cleanup()
@test.attr(type='smoke')
def test_list_containers(self):
# list of all containers should not be empty
resp, container_list = self.account_client.list_account_containers()
self.assertHeaders(resp, 'Account', 'GET')
self.assertIsNotNone(container_list)
for container_name in self.containers:
self.assertIn(container_name, container_list)
@test.attr(type='smoke')
def test_list_no_containers(self):
# List request to empty account
        # To test listing no containers, create a new user other than
        # the base user of this instance.
self.data.setup_test_user()
os_test_user = clients.Manager(self.data.test_credentials)
resp, container_list = \
os_test_user.account_client.list_account_containers()
# When sending a request to an account which has not received a PUT
# container request, the response does not contain 'accept-ranges'
# header. This is a special case, therefore the existence of response
# headers is checked without custom matcher.
self.assertIn('content-length', resp)
self.assertIn('x-timestamp', resp)
self.assertIn('x-account-bytes-used', resp)
self.assertIn('x-account-container-count', resp)
self.assertIn('x-account-object-count', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
self.assertEqual(len(container_list), 0)
@test.attr(type='smoke')
def test_list_containers_with_format_json(self):
# list containers setting format parameter to 'json'
params = {'format': 'json'}
resp, container_list = self.account_client.list_account_containers(
params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertIsNotNone(container_list)
self.assertTrue([c['name'] for c in container_list])
self.assertTrue([c['count'] for c in container_list])
self.assertTrue([c['bytes'] for c in container_list])
@test.attr(type='smoke')
def test_list_containers_with_format_xml(self):
# list containers setting format parameter to 'xml'
params = {'format': 'xml'}
resp, container_list = self.account_client.list_account_containers(
params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertIsNotNone(container_list)
self.assertEqual(container_list.tag, 'account')
self.assertTrue('name' in container_list.keys())
self.assertEqual(container_list.find(".//container").tag, 'container')
self.assertEqual(container_list.find(".//name").tag, 'name')
self.assertEqual(container_list.find(".//count").tag, 'count')
self.assertEqual(container_list.find(".//bytes").tag, 'bytes')
@test.attr(type='smoke')
@testtools.skipIf(
not CONF.object_storage_feature_enabled.discoverability,
'Discoverability function is disabled')
def test_list_extensions(self):
resp, extensions = self.account_client.list_extensions()
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
@test.attr(type='smoke')
def test_list_containers_with_limit(self):
        # list containers: one of them, half of them, then all of them
for limit in (1, self.containers_count / 2, self.containers_count):
params = {'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list), limit)
@test.attr(type='smoke')
def test_list_containers_with_marker(self):
# list containers using marker param
        # first expect to get 0 containers as we specified the last
        # container as the marker
        # second expect to get the bottom half of the containers
params = {'marker': self.containers[-1]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list), 0)
params = {'marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list), self.containers_count / 2 - 1)
@test.attr(type='smoke')
def test_list_containers_with_end_marker(self):
# list containers using end_marker param
        # first expect to get 0 containers as we specified the first container as
# end_marker
# second expect to get the top half of the containers
params = {'end_marker': self.containers[0]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list), 0)
params = {'end_marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list), self.containers_count / 2)
@test.attr(type='smoke')
def test_list_containers_with_marker_and_end_marker(self):
# list containers combining marker and end_marker param
params = {'marker': self.containers[0],
'end_marker': self.containers[self.containers_count - 1]}
resp, container_list = self.account_client.list_account_containers(
params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list), self.containers_count - 2)
@test.attr(type='smoke')
def test_list_containers_with_limit_and_marker(self):
# list containers combining marker and limit param
        # results are always limited by the limit regardless of the marker
        for marker in random.sample(self.containers, 2):
limit = random.randint(0, self.containers_count - 1)
params = {'marker': marker,
'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertTrue(len(container_list) <= limit, str(container_list))
@test.attr(type='smoke')
def test_list_containers_with_limit_and_end_marker(self):
# list containers combining limit and end_marker param
limit = random.randint(1, self.containers_count)
params = {'limit': limit,
'end_marker': self.containers[self.containers_count / 2]}
resp, container_list = self.account_client.list_account_containers(
params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list),
min(limit, self.containers_count / 2))
@test.attr(type='smoke')
def test_list_containers_with_limit_and_marker_and_end_marker(self):
# list containers combining limit, marker and end_marker param
limit = random.randint(1, self.containers_count)
params = {'limit': limit,
'marker': self.containers[0],
'end_marker': self.containers[self.containers_count - 1]}
resp, container_list = self.account_client.list_account_containers(
params=params)
self.assertHeaders(resp, 'Account', 'GET')
self.assertEqual(len(container_list),
min(limit, self.containers_count - 2))
@test.attr(type='smoke')
def test_list_account_metadata(self):
# list all account metadata
# set metadata to account
metadata = {'test-account-meta1': 'Meta1',
'test-account-meta2': 'Meta2'}
resp, _ = self.account_client.create_account_metadata(metadata)
resp, _ = self.account_client.list_account_metadata()
self.assertHeaders(resp, 'Account', 'HEAD')
self.assertIn('x-account-meta-test-account-meta1', resp)
self.assertIn('x-account-meta-test-account-meta2', resp)
self.account_client.delete_account_metadata(metadata)
@test.attr(type='smoke')
def test_list_no_account_metadata(self):
# list no account metadata
resp, _ = self.account_client.list_account_metadata()
self.assertHeaders(resp, 'Account', 'HEAD')
self.assertNotIn('x-account-meta-', str(resp))
@test.attr(type='smoke')
def test_update_account_metadata_with_create_metadata(self):
# add metadata to account
metadata = {'test-account-meta1': 'Meta1'}
resp, _ = self.account_client.create_account_metadata(metadata)
self.assertHeaders(resp, 'Account', 'POST')
resp, body = self.account_client.list_account_metadata()
self.assertIn('x-account-meta-test-account-meta1', resp)
self.assertEqual(resp['x-account-meta-test-account-meta1'],
metadata['test-account-meta1'])
self.account_client.delete_account_metadata(metadata)
@test.attr(type='smoke')
    def test_update_account_metadata_with_delete_metadata(self):
# delete metadata from account
metadata = {'test-account-meta1': 'Meta1'}
self.account_client.create_account_metadata(metadata)
resp, _ = self.account_client.delete_account_metadata(metadata)
self.assertHeaders(resp, 'Account', 'POST')
resp, _ = self.account_client.list_account_metadata()
self.assertNotIn('x-account-meta-test-account-meta1', resp)
@test.attr(type='smoke')
    def test_update_account_metadata_with_create_metadata_key(self):
# if the value of metadata is not set, the metadata is not
# registered at a server
metadata = {'test-account-meta1': ''}
resp, _ = self.account_client.create_account_metadata(metadata)
self.assertHeaders(resp, 'Account', 'POST')
resp, _ = self.account_client.list_account_metadata()
self.assertNotIn('x-account-meta-test-account-meta1', resp)
@test.attr(type='smoke')
    def test_update_account_metadata_with_delete_metadata_key(self):
# Although the value of metadata is not set, the feature of
# deleting metadata is valid
metadata_1 = {'test-account-meta1': 'Meta1'}
self.account_client.create_account_metadata(metadata_1)
metadata_2 = {'test-account-meta1': ''}
resp, _ = self.account_client.delete_account_metadata(metadata_2)
self.assertHeaders(resp, 'Account', 'POST')
resp, _ = self.account_client.list_account_metadata()
self.assertNotIn('x-account-meta-test-account-meta1', resp)
@test.attr(type='smoke')
def test_update_account_metadata_with_create_and_delete_metadata(self):
# Send a request adding and deleting metadata requests simultaneously
metadata_1 = {'test-account-meta1': 'Meta1'}
self.account_client.create_account_metadata(metadata_1)
metadata_2 = {'test-account-meta2': 'Meta2'}
resp, body = self.account_client.create_and_delete_account_metadata(
metadata_2,
metadata_1)
self.assertHeaders(resp, 'Account', 'POST')
resp, _ = self.account_client.list_account_metadata()
self.assertNotIn('x-account-meta-test-account-meta1', resp)
self.assertIn('x-account-meta-test-account-meta2', resp)
self.assertEqual(resp['x-account-meta-test-account-meta2'],
metadata_2['test-account-meta2'])
self.account_client.delete_account_metadata(metadata_2)
|
|
"""Linux mount(2) API wrapper module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import logging
import operator
import os
import ctypes
from ctypes import (
c_int,
c_char_p,
c_ulong,
c_void_p,
)
from ctypes.util import find_library
import enum
import six
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
###############################################################################
# Map the C interface
_LIBC_PATH = find_library('c')
_LIBC = ctypes.CDLL(_LIBC_PATH, use_errno=True)
if (not getattr(_LIBC, 'mount', None) or
not getattr(_LIBC, 'umount', None) or
not getattr(_LIBC, 'umount2', None)):
raise ImportError('Unsupported libc version found: %s' % _LIBC_PATH)
# int mount(const char *source, const char *target,
# const char *filesystemtype, unsigned long mountflags,
# const void *data);
_MOUNT_DECL = ctypes.CFUNCTYPE(
c_int,
c_char_p, # source
c_char_p, # target
c_char_p, # filesystem type
c_ulong, # mount flags
c_void_p, # data
use_errno=True
)
_MOUNT = _MOUNT_DECL(('mount', _LIBC))
def _mount(source, target, fs_type, mnt_flags, data):
res = _MOUNT(source, target, fs_type, mnt_flags, data)
if res < 0:
errno = ctypes.get_errno()
raise OSError(
errno, os.strerror(errno),
'mount(%r, %r, %r, 0x%x, %r)' % (
source,
target,
fs_type,
mnt_flags,
data
)
)
return res
# int umount(const char *target);
_UMOUNT_DECL = ctypes.CFUNCTYPE(
c_int,
c_char_p, # target
use_errno=True
)
_UMOUNT = _UMOUNT_DECL(('umount', _LIBC))
# int umount2(const char *target, int flags);
_UMOUNT2_DECL = ctypes.CFUNCTYPE(
c_int,
c_char_p, # target
c_int, # flags
use_errno=True
)
_UMOUNT2 = _UMOUNT2_DECL(('umount2', _LIBC))
def _umount(target):
"""Umount ``target``.
"""
res = _UMOUNT(target)
if res < 0:
errno = ctypes.get_errno()
raise OSError(
errno, os.strerror(errno),
'umount(%r)' % (target, )
)
def _umount2(target, flags=None):
res = _UMOUNT2(target, flags)
if res < 0:
errno = ctypes.get_errno()
raise OSError(
errno, os.strerror(errno),
'umount2(%r, %r)' % (target, flags)
)
###############################################################################
# NOTE: below values taken from mount kernel interface sys/mount.h
class MSFlags(enum.IntEnum):
"""All mount flags.
"""
#: MS_MGC_VAL is a flag marker, needs to be included in all calls.
MGC_VAL = 0xC0ED0000
#: Mount read-only.
RDONLY = 0x000001
#: Ignore suid and sgid bits.
NOSUID = 0x000002
#: Disallow access to device special files.
NODEV = 0x000004
#: Disallow program execution.
NOEXEC = 0x000008
#: Writes are synced at once.
SYNCHRONOUS = 0x000010
#: Alter flags of a mounted FS.
REMOUNT = 0x000020
#: Allow mandatory locks on an FS.
MANDLOCK = 0x000040
#: Directory modifications are synchronous.
DIRSYNC = 0x000080
#: Update atime relative to mtime/ctime
RELATIME = 0x200000
#: Do not update access times.
NOATIME = 0x000400
#: Do not update directory access times.
NODIRATIME = 0x000800
    #: Bind a mount point to a different place.
BIND = 0x001000
    #: Move a mount point to a different place.
MOVE = 0x002000
#: Recursively apply the UNBINDABLE, PRIVATE, SLAVE, or SHARED flags.
REC = 0x004000
# See https://www.kernel.org/doc/Documentation/filesystems/
# sharedsubtree.txt
#: unbindable mount
UNBINDABLE = 0x020000
#: private mount
PRIVATE = 0x040000
#: slave mount
SLAVE = 0x080000
#: shared mount
SHARED = 0x100000
#: Mount flag marker.
MS_MGC_VAL = MSFlags.MGC_VAL
#: Mount read-only.
MS_RDONLY = MSFlags.RDONLY
#: Ignore suid and sgid bits.
MS_NOSUID = MSFlags.NOSUID
#: Disallow access to device special files.
MS_NODEV = MSFlags.NODEV
#: Disallow program execution.
MS_NOEXEC = MSFlags.NOEXEC
#: Writes are synced at once.
MS_SYNCHRONOUS = MSFlags.SYNCHRONOUS
#: Alter flags of a mounted FS.
MS_REMOUNT = MSFlags.REMOUNT
#: Allow mandatory locks on an FS.
MS_MANDLOCK = MSFlags.MANDLOCK
#: Directory modifications are synchronous.
MS_DIRSYNC = MSFlags.DIRSYNC
#: Update atime relative to mtime/ctime
MS_RELATIME = MSFlags.RELATIME
#: Do not update access times.
MS_NOATIME = MSFlags.NOATIME
#: Do not update directory access times.
MS_NODIRATIME = MSFlags.NODIRATIME
#: Bind a mount point to a different place.
MS_BIND = MSFlags.BIND
#: Move a mount point to a different place.
MS_MOVE = MSFlags.MOVE
#: Recursively apply the UNBINDABLE, PRIVATE, SLAVE, or SHARED flags.
MS_REC = MSFlags.REC
# See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
#: unbindable mount
MS_UNBINDABLE = MSFlags.UNBINDABLE
#: private mount
MS_PRIVATE = MSFlags.PRIVATE
#: slave mount
MS_SLAVE = MSFlags.SLAVE
#: shared mount
MS_SHARED = MSFlags.SHARED
class MNTFlags(enum.IntEnum):
"""All umount2 operations flags.
"""
#: Force unmounting
FORCE = 0x1
#: Just detach from the tree
DETACH = 0x2
#: Mark for expiry
EXPIRE = 0x4
#: Force unmounting
MNT_FORCE = MNTFlags.FORCE
#: Just detach from the tree
MNT_DETACH = MNTFlags.DETACH
#: Mark for expiry
MNT_EXPIRE = MNTFlags.EXPIRE
###############################################################################
# Main mount/umount functions
def mount(source, target, fs_type, *mnt_opts_args,
mnt_flags=(), **mnt_opts_kwargs):
"""Mount ``source`` on ``target`` using filesystem type ``fs_type`` and
mount flags ``mnt_flags``.
    NOTE: The raw mount ``data`` argument is not exposed directly; any extra
    positional or keyword options are joined into the data string instead.
    :params `str` source:
        What to mount
    :params `str` target:
        Where to mount it
    :params `str` fs_type:
        Filesystem type (e.g. ``tmpfs``)
    :params mnt_flags:
        Iterable of ``MSFlags`` mount flags
    """
if source is not None:
source = source.encode()
if target is not None:
target = target.encode()
else:
target = source
if fs_type is not None:
fs_type = fs_type.encode()
# Fix up mount flags
mnt_flags = utils.get_iterable(mnt_flags)
flags = int(
six.moves.reduce(
operator.or_, mnt_flags, MS_MGC_VAL
)
)
# Fix up mount options
options = ','.join(
itertools.chain(
mnt_opts_args,
(
'%s=%s' % (key, value)
for (key, value) in six.iteritems(mnt_opts_kwargs)
)
)
)
if options:
options = options.encode()
else:
options = None
_LOGGER.debug('mount(%r, %r, %r, %r, %r)',
source, target, fs_type,
utils.parse_mask(flags, MSFlags), options)
return _mount(source, target, fs_type, flags, options)
def unmount(target, mnt_flags=()):
"""Umount ``target``.
"""
target = target.encode()
mnt_flags = utils.get_iterable(mnt_flags)
mnt_flags = six.moves.reduce(
operator.or_, mnt_flags, 0
)
_LOGGER.debug('umount(%r, %r)',
target, utils.parse_mask(mnt_flags, MNTFlags))
if not mnt_flags:
return _umount(target)
else:
return _umount2(target, mnt_flags)
###############################################################################
__all__ = [
'MNT_DETACH',
'MNT_EXPIRE',
'MNT_FORCE',
'MS_BIND',
'MS_DIRSYNC',
'MS_MANDLOCK',
'MS_MGC_VAL',
'MS_MOVE',
'MS_NOATIME',
'MS_NODEV',
'MS_NODIRATIME',
'MS_NOEXEC',
'MS_NOSUID',
'MS_PRIVATE',
'MS_RDONLY',
'MS_REC',
'MS_REMOUNT',
'MS_SHARED',
'MS_SLAVE',
'MS_SYNCHRONOUS',
'MS_UNBINDABLE',
'mount',
'unmount',
]
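# --- Usage sketch (illustrative, not part of the original module) ---
# Mounting requires privileges (CAP_SYS_ADMIN), so this demo only runs when
# the module is executed directly. The mount point path is hypothetical, and
# keyword options such as ``size`` are passed through in the ``data`` string.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    _DEMO_TARGET = '/mnt/treadmill-demo'  # hypothetical mount point
    if not os.path.isdir(_DEMO_TARGET):
        os.makedirs(_DEMO_TARGET)
    mount(
        source='tmpfs',
        target=_DEMO_TARGET,
        fs_type='tmpfs',
        mnt_flags=[MSFlags.NODEV, MSFlags.NOSUID],
        size='16M',
    )
    unmount(_DEMO_TARGET, mnt_flags=[MNTFlags.DETACH])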
|
|
from threading import Thread
import logging
import time
from datetime import datetime, timedelta
import random
import Settings
s = Settings.Settings()
LED_MIN = 0
LED_MAX = 4095
logger = logging.getLogger('__main__')
class Channel(Thread):
def __init__(self, c_id, pwm, channel_info):
super(Channel, self).__init__(name=str(c_id))
self.daemon = True
self.cancelled = False
# do other initialization here
self.ls = channel_info
self.pwm = pwm
self.c_id = c_id
self.curTime = datetime.now()
self.cur = 0
self.goal = 0
self.sleepTime = 1
self.delta = 0
self.weather = 'null'
self.sendInfo = {}
#self.weather = Settings.Settings().weather
def run(self):
"""Overloaded Thread.run"""
time.sleep(self.c_id)
while not self.cancelled:
self.curTime = datetime.now()
self.curHour = self.curTime.hour
self.nextHour = (self.curTime + timedelta(hours=1)).hour
self.goal = self.ls.get_pwm(self.c_id, self.nextHour)
# nextPwm = round(self.goal * ( (self.curTime.minute * 60 + self.curTime.second) / 3600))
lastGoal = self.ls.get_pwm(self.c_id, self.curHour)
newGoal = self.goal
newGoalWeight = (self.curTime.minute * 60 + self.curTime.second) / 3600
lastGoalWeight = 1 - newGoalWeight
currentTimeGoal = round((lastGoal * lastGoalWeight) +
(newGoal * newGoalWeight))
self.smoothTransition(currentTimeGoal)
if (self.ls.get_preview_status(self.c_id)):
self.preview_worker()
if (s.weather == "storm"):
self.thunderstorm_worker()
if (s.weather == "cloudy"):
self.new_cloud_worker()
time.sleep(self.sleepTime)
s.read_file()
def cancel(self):
"""End this timer thread"""
self.cancelled = True
time.sleep(.2)
def broadcast(self):
self.sendInfo['c_id'] = self.c_id
self.sendInfo['cur'] = self.cur
self.sendInfo['goal'] = self.goal
self.sendInfo['sleepTime'] = self.sleepTime
self.sendInfo['delta'] = self.delta
self.sendInfo['percent'] = round((self.cur / LED_MAX * 100))
self.sendInfo['weather'] = s.weather
self.sendInfo['alias'] = self.ls.get_alias(self.c_id)
return self.sendInfo
def preview_worker(self):
timeout_length_secs = s.preview_timeout
total_time_secs = 0
print("Preview started on channel {:d}...timeout {:d}".format(
self.c_id, timeout_length_secs))
while (self.ls.get_preview_status(self.c_id)
and total_time_secs < timeout_length_secs):
self.smoothTransition(self.ls.get_preview_pwm(self.c_id))
time.sleep(self.sleepTime)
total_time_secs += self.sleepTime
print("Preview ended on channel {:d}...total time {:d}".format(
self.c_id, total_time_secs))
self.ls.set_preview_status(self.c_id)
def new_cloud_worker(self):
speed = s.clouds_dim_speed
count = 0
time.sleep(self.c_id * 1.5)
while s.weather == "cloudy" and not self.cancelled:
if self.ls.get_iswhite(self.c_id):
#speed = random.randint(2,10)
speed = s.clouds_dim_speed
light_peak = random.randint(LED_MIN + 25, LED_MAX )
if count % 2 == 0:
light_peak = LED_MAX - 1000
else:
light_peak = LED_MIN + 25
self.smoothTransition(light_peak, speed)
else: #dim colored lights to something
self.smoothTransition(100, _speed=2)
time.sleep(1)
s.read_file()
count = count + 1
s.weather='normal'
s.dump_file()
def thunderstorm_worker(self):
'''makes a thunderstorm'''
self.smoothTransition(LED_MIN) #always fade to nothing at end
time.sleep(5)
if s.sound_on and self.c_id == 0:
try:
import simpleaudio as sa
wave_obj = sa.WaveObject.from_wave_file("sound/t1.wav")
# wave_obj = sa.WaveObject.from_wave_file("sound/t" + str(random.randint(1, 5)) + ".wav")
play_obj = wave_obj.play()
except ImportError:
logger.info(
"Cant import SimpleAudio / play thunderstorm audio")
while s.weather == "storm" and not self.cancelled:
s.read_file()
            if not self.ls.get_iswhite(self.c_id):  # don't do lightning strikes
                r_pwm = random.randint(1, 200)  # TODO: magic number
self.smoothTransition(r_pwm)
time.sleep(random.uniform(0, 2))
else: # do lightning strikes
if (random.randint(1, 5) == 3):
self.setPwm(LED_MIN)
time.sleep(random.uniform(0, 1))
self.setPwm(LED_MAX)
time.sleep(random.uniform(0, .02))
print(datetime.now().strftime('%H:%M:%S') + "|Channel = " +
str(self.c_id) + "|Lightning Strike!")
if random.randint(1, 5) == 2:
x = 0
r = random.randint(-200, 200)
y = random.randint(100, 2000)
while (x < y):
r = random.randint(-100, 200)
self.setPwm(x)
x = x + r
self.setPwm(LED_MIN)
time.sleep(random.uniform(0, .09))
time.sleep(random.uniform(0, 4))
self.smoothTransition(LED_MIN) #always fade to nothing at end
time.sleep(5)
def setPwm(self, pwmval):
'''sets pwm and value to hold it'''
if pwmval > LED_MAX or pwmval < LED_MIN:
logger.info("PWM value is out of range =" + str(pwmval))
pwmval = max(pwmval, LED_MIN)
pwmval = min(pwmval, LED_MAX)
self.cur = pwmval
self.pwm.set_s(self.c_id, pwmval)
logger.debug("set pwm to" + str(pwmval))
return self.cur
def smoothTransition(self, _end=0, _speed=1):
'''runs to smooth transitions'''
_start = self.cur
        if abs(_start - _end) > 1:
logger.debug("Channel %s - Transition started - Start=%s End=%s Speed=%s",
self.c_id, _start, _end, _speed)
if _start > _end:
_speed = _speed * -1
for pwm in range(_start, _end, _speed):
self.setPwm(pwm)
self.setPwm(_end) #ensure always get to end
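# --- Standalone sketch (illustrative, not part of the original module) ---
# Channel.run() blends this hour's PWM goal with the next hour's goal,
# weighted by how far into the current hour we are. The helper below
# reproduces just that arithmetic with hypothetical goal values.
def _interpolated_goal_demo(last_goal, next_goal, minute, second):
    new_goal_weight = (minute * 60 + second) / 3600.0
    last_goal_weight = 1 - new_goal_weight
    return round(last_goal * last_goal_weight + next_goal * new_goal_weight)


if __name__ == '__main__':
    # Halfway through the hour the goal is midway between the hourly values.
    print(_interpolated_goal_demo(1000, 3000, minute=30, second=0))  # 2000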
|
|
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
        (e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
    the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
    The magnitude response of all but type I filters is subject to the
    following constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
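# --- Usage sketch (illustrative, not part of the original module) ---
# Combining `kaiserord` with `firwin`: design a lowpass filter with at most
# 65 dB of ripple/attenuation and a transition width of 0.05 (relative to the
# Nyquist rate). The 0.3 cutoff below is an arbitrary example value.
#
# >>> ntaps, beta = kaiserord(ripple=65.0, width=0.05)
# >>> taps = firwin(ntaps, cutoff=0.3, window=('kaiser', beta))
# >>> len(taps) == ntaps
# True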
|
|
import asyncio
import unittest
from .http import *
from .http import read_headers
class HTTPAsyncTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.stream = asyncio.StreamReader(loop=self.loop)
def tearDown(self):
self.loop.close()
super().tearDown()
def test_read_request(self):
# Example from the protocol overview in RFC 6455
self.stream.feed_data(
b'GET /chat HTTP/1.1\r\n'
b'Host: server.example.com\r\n'
b'Upgrade: websocket\r\n'
b'Connection: Upgrade\r\n'
b'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n'
b'Origin: http://example.com\r\n'
b'Sec-WebSocket-Protocol: chat, superchat\r\n'
b'Sec-WebSocket-Version: 13\r\n'
b'\r\n'
)
path, headers = self.loop.run_until_complete(
read_request(self.stream))
self.assertEqual(path, '/chat')
self.assertEqual(headers['Upgrade'], 'websocket')
def test_read_response(self):
# Example from the protocol overview in RFC 6455
self.stream.feed_data(
b'HTTP/1.1 101 Switching Protocols\r\n'
b'Upgrade: websocket\r\n'
b'Connection: Upgrade\r\n'
b'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
b'Sec-WebSocket-Protocol: chat\r\n'
b'\r\n'
)
status_code, headers = self.loop.run_until_complete(
read_response(self.stream))
self.assertEqual(status_code, 101)
self.assertEqual(headers['Upgrade'], 'websocket')
def test_request_method(self):
self.stream.feed_data(b'OPTIONS * HTTP/1.1\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_request(self.stream))
def test_request_version(self):
self.stream.feed_data(b'GET /chat HTTP/1.0\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_request(self.stream))
def test_response_version(self):
self.stream.feed_data(b'HTTP/1.0 400 Bad Request\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_response(self.stream))
def test_response_status(self):
self.stream.feed_data(b'HTTP/1.1 007 My name is Bond\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_response(self.stream))
def test_response_reason(self):
self.stream.feed_data(b'HTTP/1.1 200 \x7f\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_response(self.stream))
def test_header_name(self):
self.stream.feed_data(b'foo bar: baz qux\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_headers(self.stream))
def test_header_value(self):
self.stream.feed_data(b'foo: \x00\x00\x0f\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_headers(self.stream))
def test_headers_limit(self):
self.stream.feed_data(b'foo: bar\r\n' * 257 + b'\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_headers(self.stream))
def test_line_limit(self):
# Header line contains 5 + 4090 + 2 = 4097 bytes.
self.stream.feed_data(b'foo: ' + b'a' * 4090 + b'\r\n\r\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_headers(self.stream))
def test_line_ending(self):
self.stream.feed_data(b'foo: bar\n\n')
with self.assertRaises(ValueError):
self.loop.run_until_complete(read_headers(self.stream))
class HeadersTests(unittest.TestCase):
def setUp(self):
self.headers = Headers([
('Connection', 'Upgrade'),
('Server', USER_AGENT),
])
def test_str(self):
self.assertEqual(
str(self.headers),
"Connection: Upgrade\r\nServer: {}\r\n\r\n".format(USER_AGENT),
)
def test_repr(self):
self.assertEqual(
repr(self.headers),
"Headers([('Connection', 'Upgrade'), "
"('Server', '{}')])".format(USER_AGENT),
)
def test_multiple_values_error_str(self):
self.assertEqual(
str(MultipleValuesError('Connection')),
"'Connection'",
)
self.assertEqual(
str(MultipleValuesError()),
"",
)
def test_contains(self):
self.assertIn('Server', self.headers)
def test_contains_case_insensitive(self):
self.assertIn('server', self.headers)
def test_contains_not_found(self):
self.assertNotIn('Date', self.headers)
def test_iter(self):
self.assertEqual(set(iter(self.headers)), {'connection', 'server'})
def test_len(self):
self.assertEqual(len(self.headers), 2)
def test_getitem(self):
self.assertEqual(self.headers['Server'], USER_AGENT)
def test_getitem_case_insensitive(self):
self.assertEqual(self.headers['server'], USER_AGENT)
def test_getitem_key_error(self):
with self.assertRaises(KeyError):
self.headers['Upgrade']
def test_getitem_multiple_values_error(self):
self.headers['Server'] = '2'
with self.assertRaises(MultipleValuesError):
self.headers['Server']
def test_setitem(self):
self.headers['Upgrade'] = 'websocket'
self.assertEqual(self.headers['Upgrade'], 'websocket')
def test_setitem_case_insensitive(self):
self.headers['upgrade'] = 'websocket'
self.assertEqual(self.headers['Upgrade'], 'websocket')
def test_setitem_multiple_values(self):
self.headers['Connection'] = 'close'
with self.assertRaises(MultipleValuesError):
self.headers['Connection']
def test_delitem(self):
del self.headers['Connection']
with self.assertRaises(KeyError):
self.headers['Connection']
def test_delitem_case_insensitive(self):
del self.headers['connection']
with self.assertRaises(KeyError):
self.headers['Connection']
def test_delitem_multiple_values(self):
self.headers['Connection'] = 'close'
del self.headers['Connection']
with self.assertRaises(KeyError):
self.headers['Connection']
def test_eq(self):
other_headers = self.headers.copy()
self.assertEqual(self.headers, other_headers)
def test_eq_not_equal(self):
self.assertNotEqual(self.headers, [])
def test_clear(self):
self.headers.clear()
self.assertFalse(self.headers)
self.assertEqual(self.headers, Headers())
def test_get_all(self):
self.assertEqual(self.headers.get_all('Connection'), ['Upgrade'])
def test_get_all_case_insensitive(self):
self.assertEqual(self.headers.get_all('connection'), ['Upgrade'])
def test_get_all_no_values(self):
self.assertEqual(self.headers.get_all('Upgrade'), [])
def test_get_all_multiple_values(self):
self.headers['Connection'] = 'close'
self.assertEqual(
self.headers.get_all('Connection'), ['Upgrade', 'close'])
def test_raw_items(self):
self.assertEqual(
list(self.headers.raw_items()),
[
('Connection', 'Upgrade'),
('Server', USER_AGENT),
],
)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
We want to make sure that our SR network's behavior in terms of degree assortativity is not an accident.
For that reason, we mix (randomize) the input CV vectors and calculate new "SR values",
with which we can recalculate "random" SR networks and see how they behave.
"""
from collections import defaultdict
import math
import json
import numpy as np
import glob, os
import time
import random
#####################################################
# input
#####################################################
WORKING_FOLDER = "../../../DATA/CV/"
F_IN = "CVs_usrs.json"
F_OUT_v1 = "randomize_" + F_IN
#####################################################
F_OUT_v2 = "scaled_randomize_v27s_" + F_IN
# For faster processing of all user pairs, we do not want to query MongoDB for the text
# more than once per user; so here we read in all the precalculated user CVs.
def read_all_user_CVs():
user_CVs = defaultdict(int)
#raw_CVs = defaultdict(int)
cnt = 0
with open(F_IN) as f:
for line in f:
line_dict = json.loads(line)
usr = line_dict["_id"]
user_CVs[cnt] = {}
user_CVs[cnt]["num_tweets"] = line_dict["num_tweets"]
CVa = line_dict["CV"]
user_CVs[cnt]["CV"] = { k: v for d in CVa for k, v in d.items() }
#raw_CVs[cnt] = { k: v for d in CVa for k, v in d.items() }
if cnt % 1000 == 0:
print cnt, usr
cnt += 1
return user_CVs #, raw_CVs
# the same as above, just scale the CVs for each user by the number of tweets
def read_all_user_CVs_v2():
user_CVs = defaultdict(int)
cnt = 0
with open(F_IN) as f:
for line in f:
line_dict = json.loads(line)
usr = line_dict["_id"]
user_CVs[cnt] = {}
user_CVs[cnt]["num_tweets"] = line_dict["num_tweets"]
scale_factor_num_tweets = float(user_CVs[cnt]["num_tweets"])
CVa = line_dict["CV"]
user_CVs[cnt]["CV"] = { k: float(v)/scale_factor_num_tweets for d in CVa for k, v in d.items() }
if cnt % 1000 == 0:
print cnt, usr
cnt += 1
return user_CVs
########################################################
# v1 randomize outputs a json valid for this save function
########################################################
def save_CVs(CVs, F_OUT):
f = open(F_OUT, 'w')
for usr in CVs:
cv_line = {}
cv_line["_id"] = str(usr)
cv_line["CV"] = [ {k:v} for k,v in CVs[usr]["CV"].items()]
cv_line["num_tweets"] = CVs[usr]["num_tweets"]
f.write(unicode(json.dumps(cv_line, ensure_ascii=False)) + '\n')
########################################################
# v1 randomizes only the user ids in the dictionary
# basically assigning the same CVs to shuffled user ids
########################################################
def randomize_v1():
randomized_user_CVs = defaultdict(int)
user_CVs = read_all_user_CVs()
SHUFFLE_USERS = user_CVs.keys()
#print SHUFFLE_USERS
random.shuffle(SHUFFLE_USERS)
#print SHUFFLE_USERS
for k in range(len(SHUFFLE_USERS)):
randomized_user_CVs[SHUFFLE_USERS[k]] = user_CVs[k]
for k in randomized_user_CVs:
if randomized_user_CVs[k] == user_CVs[k]:
print k
return randomized_user_CVs
########################################################
# v2 randomizes all the CVs as Aris suggested
# for a more efficient structure, we use numpy matrix to do the shuffle we need
########################################################
def randomize_v2():
# read in the original CVs json that we now want to randomize
user_CVs = read_all_user_CVs_v2()
# the output in a format that we can save to a json with save_CVs_v2()
randomized_user_CVs = defaultdict(int)
# for working with an np matrix, we need ids in range 0 .. num_of_concepts
# to save the TFs and shuffle easily
conceptID7s = defaultdict(int)
reverse_conceptID7s = defaultdict(int)
    # let's see what that num_of_concepts (distinct) is,
# that will be the number of columns in the numpy matrix
count_all_vectors = defaultdict(int)
for user in user_CVs:
for vec in user_CVs[user]["CV"]:
count_all_vectors[vec] = 1
N_all_vec = sum(count_all_vectors.values())
print "Number of distinct concepts in the data is %d " % N_all_vec
# for the numpy matrix, we also need the number of users, as the number of rows
N_users = len(user_CVs)
# conceptID7s will hold a map: concepts --> IDs, in the range N_all_vec
i = 0
for vec in count_all_vectors:
conceptID7s[vec] = i
reverse_conceptID7s[i] = vec
i += 1
# finally we can define an np matrix
    # default dtype=float64 is NOT OK due to the size limit; we'd need 90GB of RAM
shuffled_CVs = np.zeros((N_users,N_all_vec), dtype='float32')
# and we can add the CVs in it
for user in user_CVs:
CV = user_CVs[user]["CV"]
for concept in CV:
concept_id = conceptID7s[concept]
            # shuffled_CVs will hold concept_id --> TF
shuffled_CVs[user][concept_id] = CV[concept]
print "We have copied the CVs to a numpy array for a shufffffle ;) "
t0 = time.time()
# and shuffle them finally
for col_id in range(N_all_vec):
np.random.shuffle(shuffled_CVs[:,col_id])
print "took %.3f seconds " % (time.time() - t0)
print "You've got the CVs shuffled, well... "
t0 = time.time()
# for each user
for user in range(N_users):
# let us assign the desired output in a dict
randomized_user_CVs[user] = {}
        # first assign the necessary stuff from earlier
randomized_user_CVs[user]["num_tweets"] = user_CVs[user]["num_tweets"]
# next take the new random Cv for that user
single_CV = shuffled_CVs[user]
# and properly assign it (select only the nonzero entries from the numpy array)
randomized_user_CVs[user]["CV"] = { reverse_conceptID7s[k]:str(single_CV[k]) \
for k in np.nonzero(single_CV)[0] }
#randomized_user_CVs[user]["CV"] = [ {reverse_conceptID7s[k]:single_CV[k]} \
#for k in np.nonzero(single_CV)[0] ]
print "took %.3f seconds " % (time.time() - t0)
print "You've got the data ready for saving to .json, cool stuff ;) "
return randomized_user_CVs
def randomize_and_save_v1():
randomized_user_CVs = randomize_v1()
print "Data shuffled per user V1 :)"
save_CVs(randomized_user_CVs, F_OUT_v1)
print "Shuffled CVs V1 saved in %s :)" % F_OUT_v1
def randomize_and_save_v2():
randomized_user_CVs = randomize_v2()
print "Data shuffled per column V2 :)"
save_CVs(randomized_user_CVs, F_OUT_v2)
print "Shuffled CVs V2 saved in %s :)" % F_OUT_v2
def main():
os.chdir(WORKING_FOLDER)
#randomize_and_save_v1()
randomize_and_save_v2()
main()
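# --- Illustrative sketch (not part of the original script) ---
# The core of randomize_v2() is an independent per-column shuffle: every
# concept keeps its overall TF distribution, but the user-to-concept
# assignment is broken. On a tiny hypothetical matrix:
#
#     import numpy as np
#     m = np.arange(12, dtype='float32').reshape(4, 3)
#     for col_id in range(m.shape[1]):
#         np.random.shuffle(m[:, col_id])
#     # each column of m is now an independent permutation of its values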
|
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
# www.bluepines.org
# Copyright (c) 2012 42 Lines Inc., Jim Browne
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import exception
import random
import urllib
import uuid
import xml.sax
import boto
from boto.connection import AWSAuthConnection
from boto import handler
import boto.jsonresponse
from boto.route53.record import ResourceRecordSets
from boto.route53.zone import Zone
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
#boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
Version = '2013-04-01'
"""Route53 API version."""
XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
"""XML schema for this Route53 API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True, https_connection_factory=None,
profile_name=None):
super(Route53Connection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs,
https_connection_factory=https_connection_factory,
profile_name=profile_name)
def _required_auth_capability(self):
return ['route53']
def make_request(self, action, path, headers=None, data='', params=None):
if params:
pairs = []
for key, val in params.iteritems():
if val is None:
continue
pairs.append(key + '=' + urllib.quote(str(val)))
path += '?' + '&'.join(pairs)
return super(Route53Connection, self).make_request(action, path,
headers, data,
retry_handler=self._retry_handler)
# Hosted Zones
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e
def get_hosted_zone(self, hosted_zone_id):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_hosted_zone_by_name(self, hosted_zone_name):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
Zone
"""
if hosted_zone_name[-1] != '.':
hosted_zone_name += '.'
all_hosted_zones = self.get_all_hosted_zones()
for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
#check that they gave us the FQDN for their zone
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1])
def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
:type domain_name: str
:param domain_name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
            response to this request.
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
and that allows failed CreateHostedZone requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
:type comment: str
:param comment: Any comments you want to include about the hosted
zone.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'xmlns': self.XMLNameSpace}
xml_body = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status,
response.reason,
body)
def delete_hosted_zone(self, hosted_zone_id):
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
# Resource Record Sets
def get_all_rrsets(self, hosted_zone_id, type=None,
name=None, identifier=None, maxitems=None):
"""
Retrieve the Resource Record Sets defined for this Hosted Zone.
        Returns a ResourceRecordSets object parsed from the Route53 response.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type type: str
:param type: The type of resource record set to begin the record
listing from. Valid choices are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
Valid values for weighted resource record sets:
* A
* AAAA
* CNAME
* TXT
Valid values for Zone Apex Aliases:
* A
* AAAA
:type name: str
:param name: The first name in the lexicographic ordering of domain
names to be retrieved
:type identifier: str
:param identifier: In a hosted zone that includes weighted resource
record sets (multiple resource record sets with the same DNS
name and type that are differentiated only by SetIdentifier),
if results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record
set that has the current DNS name and type
:type maxitems: int
:param maxitems: The maximum number of records
"""
params = {'type': type, 'name': name,
'Identifier': identifier, 'maxitems': maxitems}
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def change_rrsets(self, hosted_zone_id, xml_body):
"""
Create or change the authoritative DNS information for this
Hosted Zone.
Returns a Python data structure with information about the set of
changes, including the Change ID.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type xml_body: str
:param xml_body: The list of changes to be made, defined in the
XML schema defined by the Route53 service.
"""
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'},
xml_body)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_change(self, change_id):
"""
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
"""
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def create_zone(self, name):
"""
Create a new Hosted Zone. Returns a Zone object for the newly
created Hosted Zone.
:type name: str
:param name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
response to this request.
"""
zone = self.create_hosted_zone(name)
return Zone(self, zone['CreateHostedZoneResponse']['HostedZone'])
def get_zone(self, name):
"""
Returns a Zone object for the specified Hosted Zone.
:param name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication.
"""
name = self._make_qualified(name)
for zone in self.get_zones():
if name == zone.name:
return zone
def get_zones(self):
"""
Returns a list of Zone objects, one for each of the Hosted
Zones defined for the AWS account.
"""
zones = self.get_all_hosted_zones()
return [Zone(self, zone) for zone in
zones['ListHostedZonesResponse']['HostedZones']]
def _make_qualified(self, value):
"""
Ensure passed domain names end in a period (.) character.
This will usually make a domain fully qualified.
"""
if type(value) in [list, tuple, set]:
new_list = []
for record in value:
if record and not record[-1] == '.':
new_list.append("%s." % record)
else:
new_list.append(record)
return new_list
else:
value = value.strip()
if value and not value[-1] == '.':
value = "%s." % value
return value
def _retry_handler(self, response, i, next_sleep):
status = None
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
code = response.getheader('Code')
if code and 'PriorRequestNotComplete' in code:
# This is a case where we need to ignore a 400 error, as
# Route53 returns this. See
# http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
msg = "%s, retry attempt %s" % (
'PriorRequestNotComplete',
i
)
next_sleep = random.random() * (2 ** i)
i += 1
status = (msg, i, next_sleep)
return status
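# Minimal usage sketch (not part of boto): it assumes AWS credentials are
# available to boto and that 'example.com.' is a hypothetical zone you
# control. It only exercises the methods defined above.
if __name__ == '__main__':
    conn = Route53Connection()
    # List every hosted zone for the account.
    for zone in conn.get_zones():
        print zone.name, zone.id
    # Create a zone, then read its record sets back through the raw API.
    new_zone = conn.create_zone('example.com.')
    for record in conn.get_all_rrsets(new_zone.id):
        print record.name, record.type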
|
|
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
from __future__ import division, absolute_import, print_function
import operator as op
from numbers import Number
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre,
Hermite, HermiteE)
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite, dec)
from numpy.testing.noseclasses import KnownFailure
from numpy.compat import long
classes = (
Polynomial, Legendre, Chebyshev, Laguerre,
Hermite, HermiteE)
def test_class_methods():
for Poly1 in classes:
for Poly2 in classes:
yield check_conversion, Poly1, Poly2
yield check_cast, Poly1, Poly2
for Poly in classes:
yield check_call, Poly
yield check_identity, Poly
yield check_basis, Poly
yield check_fromroots, Poly
yield check_fit, Poly
yield check_equal, Poly
yield check_not_equal, Poly
yield check_add, Poly
yield check_sub, Poly
yield check_mul, Poly
yield check_floordiv, Poly
yield check_truediv, Poly
yield check_mod, Poly
yield check_divmod, Poly
yield check_pow, Poly
yield check_integ, Poly
yield check_deriv, Poly
yield check_roots, Poly
yield check_linspace, Poly
yield check_mapparms, Poly
yield check_degree, Poly
yield check_copy, Poly
yield check_cutdeg, Poly
yield check_truncate, Poly
yield check_trim, Poly
#
# helper functions
#
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
try:
assert_(np.all(p1.domain == p2.domain))
assert_(np.all(p1.window == p2.window))
assert_almost_equal(p1.coef, p2.coef)
except AssertionError:
msg = "Result: %s\nTarget: %s", (p1, p2)
raise AssertionError(msg)
#
# conversion methods that depend on two classes
#
def check_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
def check_cast(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = Poly2.cast(p1, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
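# Illustrative sketch (not collected as a test): convert() is an instance
# method that takes the target class as `kind`, while cast() is a classmethod
# on the target class; both should represent the same underlying function, as
# the checks above verify.
def _example_convert_vs_cast():
    p = Polynomial([1, 2, 3])
    c1 = p.convert(kind=Chebyshev)  # instance method, target passed as kind
    c2 = Chebyshev.cast(p)          # classmethod on the target class
    x = np.linspace(-1, 1, 5)
    assert_almost_equal(c1(x), p(x))
    assert_almost_equal(c2(x), p(x))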
#
# methods that depend on one class
#
def check_identity(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
x = np.linspace(d[0], d[1], 11)
p = Poly.identity(domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_almost_equal(p(x), x)
def check_basis(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.basis(5, domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_equal(p.coef, [0]*5 + [1])
def check_fromroots(Poly):
# check that requested roots are zeros of a polynomial
# of correct degree, domain, and window.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
r = random((5,))
p1 = Poly.fromroots(r, domain=d, window=w)
assert_equal(p1.degree(), len(r))
assert_equal(p1.domain, d)
assert_equal(p1.window, w)
assert_almost_equal(p1(r), 0)
# check that polynomial is monic
pdom = Polynomial.domain
pwin = Polynomial.window
p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
assert_almost_equal(p2.coef[-1], 1)
def check_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
x = np.linspace(0, 3)
y = f(x)
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
# check with given domains and window
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape)*.25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
assert_almost_equal(p1(x), p2(x))
def check_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(p1 == p1)
assert_(not p1 == p2)
assert_(not p1 == p3)
assert_(not p1 == p4)
def check_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(not p1 != p1)
assert_(p1 != p2)
assert_(p1 != p3)
assert_(p1 != p4)
def check_add(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 + p2
assert_poly_almost_equal(p2 + p1, p3)
assert_poly_almost_equal(p1 + c2, p3)
assert_poly_almost_equal(c2 + p1, p3)
assert_poly_almost_equal(p1 + tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) + p1, p3)
assert_poly_almost_equal(p1 + np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) + p1, p3)
assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.add, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.add, p1, Polynomial([0]))
def check_sub(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 - p2
assert_poly_almost_equal(p2 - p1, -p3)
assert_poly_almost_equal(p1 - c2, p3)
assert_poly_almost_equal(c2 - p1, -p3)
assert_poly_almost_equal(p1 - tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) - p1, -p3)
assert_poly_almost_equal(p1 - np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) - p1, -p3)
assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.sub, p1, Polynomial([0]))
def check_mul(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 * p2
assert_poly_almost_equal(p2 * p1, p3)
assert_poly_almost_equal(p1 * c2, p3)
assert_poly_almost_equal(c2 * p1, p3)
assert_poly_almost_equal(p1 * tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) * p1, p3)
assert_poly_almost_equal(p1 * np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) * p1, p3)
assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mul, p1, Polynomial([0]))
def check_floordiv(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 // p2, p1)
assert_poly_almost_equal(p4 // c2, p1)
assert_poly_almost_equal(c4 // p2, p1)
assert_poly_almost_equal(p4 // tuple(c2), p1)
assert_poly_almost_equal(tuple(c4) // p2, p1)
assert_poly_almost_equal(p4 // np.array(c2), p1)
assert_poly_almost_equal(np.array(c4) // p2, p1)
assert_poly_almost_equal(2 // p2, Poly([0]))
assert_poly_almost_equal(p2 // 2, 0.5*p2)
assert_raises(
TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(
TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.floordiv, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
def check_truediv(Poly):
# true division is valid only if the denominator is a Number and
# not a python bool.
p1 = Poly([1,2,3])
p2 = p1 * 5
for stype in np.ScalarType:
if not issubclass(stype, Number) or issubclass(stype, bool):
continue
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in (int, long, float):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in [complex]:
s = stype(5, 0)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for s in [tuple(), list(), dict(), bool(), np.array([1])]:
assert_raises(TypeError, op.truediv, p2, s)
assert_raises(TypeError, op.truediv, s, p2)
for ptype in classes:
assert_raises(TypeError, op.truediv, p2, ptype(1))
def check_mod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 % p2, p3)
assert_poly_almost_equal(p4 % c2, p3)
assert_poly_almost_equal(c4 % p2, p3)
assert_poly_almost_equal(p4 % tuple(c2), p3)
assert_poly_almost_equal(tuple(c4) % p2, p3)
assert_poly_almost_equal(p4 % np.array(c2), p3)
assert_poly_almost_equal(np.array(c4) % p2, p3)
assert_poly_almost_equal(2 % p2, Poly([2]))
assert_poly_almost_equal(p2 % 2, Poly([0]))
assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mod, p1, Polynomial([0]))
def check_divmod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
quo, rem = divmod(p4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, c2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(c4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, tuple(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(tuple(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, np.array(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(np.array(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p2, 2)
assert_poly_almost_equal(quo, 0.5*p2)
assert_poly_almost_equal(rem, Poly([0]))
quo, rem = divmod(2, p2)
assert_poly_almost_equal(quo, Poly([0]))
assert_poly_almost_equal(rem, Poly([2]))
assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, divmod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, divmod, p1, Polynomial([0]))
def check_roots(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
tgt = np.sort(random((5,)))
res = np.sort(Poly.fromroots(tgt).roots())
assert_almost_equal(res, tgt)
def check_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
def check_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1 is not p2)
assert_(p1.coef is not p2.coef)
assert_(p1.domain is not p2.domain)
assert_(p1.window is not p2.window)
def check_integ(Poly):
P = Polynomial
# Check defaults
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
# Check with k
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(k=1))
p2 = P.cast(p0.integ(2, k=[1, 1]))
assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
# Check with lbnd
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(lbnd=1))
p2 = P.cast(p0.integ(2, lbnd=1))
assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
# Check scaling
d = 2*Poly.domain
p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
def check_deriv(Poly):
    # Check that the derivative is the inverse of integration. It is
    # assumed that the integration has been checked elsewhere.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p1 = Poly([1, 2, 3], domain=d, window=w)
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
def check_linspace(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly([1, 2, 3], domain=d, window=w)
# check default domain
xtgt = np.linspace(d[0], d[1], 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20)
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
# check specified domain
xtgt = np.linspace(0, 2, 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20, domain=[0, 2])
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
def check_pow(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
    tgt = Poly([1], domain=d, window=w)
    tst = Poly([1, 2, 3], domain=d, window=w)
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
assert_raises(ValueError, op.pow, tgt, 1.5)
assert_raises(ValueError, op.pow, tgt, -1)
def check_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
# Check defaults
p = Poly.cast(P([1, 2, 3]))
tgt = 1 + x*(2 + 3*x)
res = p(x)
assert_almost_equal(res, tgt)
def check_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(3)), 3)
assert_equal(len(p.cutdeg(2)), 3)
assert_equal(len(p.cutdeg(1)), 2)
assert_equal(len(p.cutdeg(0)), 1)
def check_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(4)), 3)
assert_equal(len(p.truncate(3)), 3)
assert_equal(len(p.truncate(2)), 2)
assert_equal(len(p.truncate(1)), 1)
def check_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-10).coef, c[:2])
assert_equal(p.trim(1e-5).coef, c[:1])
def check_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
p = Poly([1], domain=d, window=w)
assert_almost_equal([0, 1], p.mapparms())
#
w = 2*d + 1
p = Poly([1], domain=d, window=w)
assert_almost_equal([1, 2], p.mapparms())
if __name__ == "__main__":
run_module_suite()
|
|
# This file is part of Charlton
# Copyright (C) 2011 Nathaniel Smith <njs@pobox.com>
# See file COPYING for license information.
# This file defines the ModelDesc class, which describes a model at a high
# level, as a list of interactions of factors. It also has the code to convert
# a formula parse tree (from charlton.parse) into a ModelDesc.
from charlton import CharltonError
from charlton.parse import ParseNode, parse
from charlton.eval import EvalFactor
from charlton.util import to_unique_tuple
__all__ = ["Term", "ModelDesc", "INTERCEPT"]
# One might think it would make more sense for 'factors' to be a set, rather
# than a tuple-with-guaranteed-unique-entries-that-compares-like-a-set. The
# reason we do it this way is that it preserves the order that the user typed
# and is expecting, which then ends up producing nicer names in our final
# output, nicer column ordering, etc. (A similar comment applies to the
# ordering of terms in ModelDesc objects as a whole.)
class Term(object):
def __init__(self, factors):
self.factors = to_unique_tuple(factors)
def __eq__(self, other):
return (isinstance(other, Term)
and frozenset(other.factors) == frozenset(self.factors))
def __hash__(self):
return hash((Term, frozenset(self.factors)))
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.factors)
def name(self):
if self.factors:
return ":".join([f.name() for f in self.factors])
else:
return "1"
INTERCEPT = Term([])
class _MockFactor(object):
def __init__(self, name):
self._name = name
def name(self):
return self._name
def test_Term():
assert Term([1, 2, 1]).factors == (1, 2)
assert Term([1, 2]) == Term([2, 1])
assert hash(Term([1, 2])) == hash(Term([2, 1]))
f1 = _MockFactor("a")
f2 = _MockFactor("b")
assert Term([f1, f2]).name() == "a:b"
assert Term([f2, f1]).name() == "b:a"
assert Term([]).name() == "1"
class ModelDesc(object):
def __init__(self, input_code, lhs_terms, rhs_terms):
self.input_code = input_code
self.lhs_terms = to_unique_tuple(lhs_terms)
self.rhs_terms = to_unique_tuple(rhs_terms)
def __repr__(self):
return ("%s(%r, lhs_terms=%r, rhs_terms=%s)"
% (self.__class__.__name__,
self.input_code, self.lhs_terms, self.rhs_terms))
def describe(self):
def describe_side(terms):
items = []
if INTERCEPT not in terms:
items += "0"
items += [term.name() for term in terms]
return " + ".join(items)
result = " + ".join([term.name() for term in self.lhs_terms])
if result:
result += " ~ "
else:
result += "~ "
if self.rhs_terms == (INTERCEPT,):
result += "1"
else:
term_names = []
if INTERCEPT not in self.rhs_terms:
term_names.append("0")
term_names += [term.name() for term in self.rhs_terms
if term != INTERCEPT]
result += " + ".join(term_names)
return result
@classmethod
def from_formula(cls, tree_or_string):
if isinstance(tree_or_string, ParseNode):
tree = tree_or_string
else:
tree = parse(tree_or_string)
value = Evaluator().eval(tree, require_evalexpr=False)
assert isinstance(value, cls)
return value
def test_ModelDesc():
f1 = _MockFactor("a")
f2 = _MockFactor("b")
m = ModelDesc("asdf", [INTERCEPT, Term([f1])], [Term([f1]), Term([f1, f2])])
assert m.input_code == "asdf"
assert m.lhs_terms == (INTERCEPT, Term([f1]))
assert m.rhs_terms == (Term([f1]), Term([f1, f2]))
print m.describe()
assert m.describe() == "1 + a ~ 0 + a + a:b"
assert ModelDesc("", [], []).describe() == "~ 0"
assert ModelDesc("", [INTERCEPT], []).describe() == "1 ~ 0"
assert ModelDesc("", [INTERCEPT], [INTERCEPT]).describe() == "1 ~ 1"
assert (ModelDesc("", [INTERCEPT], [INTERCEPT, Term([f2])]).describe()
== "1 ~ b")
def test_ModelDesc_from_formula():
for input in ("y ~ x", parse("y ~ x")):
md = ModelDesc.from_formula(input)
assert md.input_code == "y ~ x"
assert md.lhs_terms == (Term([EvalFactor("y")]),)
assert md.rhs_terms == (INTERCEPT, Term([EvalFactor("x")]))
class IntermediateExpr(object):
"This class holds an intermediate result while we're evaluating a tree."
def __init__(self, intercept, intercept_origin, intercept_removed, terms):
self.intercept = intercept
self.intercept_origin = intercept_origin
        self.intercept_removed = intercept_removed
self.terms = to_unique_tuple(terms)
if self.intercept:
assert self.intercept_origin
assert not (self.intercept and self.intercept_removed)
def __repr__(self):
return "%s(%r, %r, %r, %r)" % (self.__class__.__name__,
self.intercept, self.intercept_origin,
self.intercept_removed,
self.terms)
def _maybe_add_intercept(doit, terms):
if doit:
return (INTERCEPT,) + terms
else:
return terms
def _eval_any_tilde(evaluator, tree):
exprs = [evaluator.eval(arg) for arg in tree.args]
if len(exprs) == 1:
# Formula was like: "~ foo"
# We pretend that instead it was like: "0 ~ foo"
exprs.insert(0, IntermediateExpr(False, None, True, []))
assert len(exprs) == 2
return ModelDesc(tree.origin.code,
# Note that only the RHS gets an implicit intercept:
_maybe_add_intercept(exprs[0].intercept, exprs[0].terms),
_maybe_add_intercept(not exprs[1].intercept_removed,
exprs[1].terms))
def _eval_binary_plus(evaluator, tree):
left_expr = evaluator.eval(tree.args[0])
if tree.args[1] == "0":
return IntermediateExpr(False, None, True, left_expr.terms)
elif tree.args[1] == "1":
return IntermediateExpr(True, tree.args[1], False, left_expr.terms)
else:
right_expr = evaluator.eval(tree.args[1])
if right_expr.intercept:
return IntermediateExpr(True, right_expr.intercept_origin, False,
left_expr.terms + right_expr.terms)
else:
return IntermediateExpr(left_expr.intercept,
left_expr.intercept_origin,
left_expr.intercept_removed,
left_expr.terms + right_expr.terms)
def _eval_binary_minus(evaluator, tree):
left_expr = evaluator.eval(tree.args[0])
if tree.args[1] == "0":
return IntermediateExpr(True, tree.args[1], False,
left_expr.terms)
elif tree.args[1] == "1":
return IntermediateExpr(False, None, True, left_expr.terms)
else:
right_expr = evaluator.eval(tree.args[1])
terms = [term for term in left_expr.terms
if term not in right_expr.terms]
if right_expr.intercept:
return IntermediateExpr(False, None, True, terms)
else:
return IntermediateExpr(left_expr.intercept,
left_expr.intercept_origin,
left_expr.intercept_removed,
terms)
def _check_interactable(expr):
if expr.intercept:
raise CharltonError("intercept term cannot interact with "
"anything else", expr.intercept_origin)
def _interaction(left_expr, right_expr):
for expr in (left_expr, right_expr):
_check_interactable(expr)
terms = []
for l_term in left_expr.terms:
for r_term in right_expr.terms:
terms.append(Term(l_term.factors + r_term.factors))
return IntermediateExpr(False, None, False, terms)
def _eval_binary_prod(evaluator, tree):
exprs = [evaluator.eval(arg) for arg in tree.args]
return IntermediateExpr(False, None, False,
exprs[0].terms
+ exprs[1].terms
+ _interaction(*exprs).terms)
# Division (nesting) is right-ward distributive:
# a / (b + c) -> a/b + a/c -> a + a:b + a:c
# But left-ward, in S/R it has a quirky behavior:
# (a + b)/c -> a + b + a:b:c
# This is because it's meaningless for a factor to be "nested" under two
# different factors. (This is documented in Chambers and Hastie (page 30) as a
# "Slightly more subtle..." rule, with no further elaboration. Hopefully we
# will do better.)
def _eval_binary_div(evaluator, tree):
left_expr = evaluator.eval(tree.args[0])
right_expr = evaluator.eval(tree.args[1])
terms = list(left_expr.terms)
_check_interactable(left_expr)
# Build a single giant combined term for everything on the left:
left_factors = []
for term in left_expr.terms:
left_factors += list(term.factors)
left_combined_expr = IntermediateExpr(False, None, False,
[Term(left_factors)])
# Then interact it with everything on the right:
terms += list(_interaction(left_combined_expr, right_expr).terms)
return IntermediateExpr(False, None, False, terms)
def _eval_binary_interact(evaluator, tree):
exprs = [evaluator.eval(arg) for arg in tree.args]
return _interaction(*exprs)
def _eval_binary_power(evaluator, tree):
left_expr = evaluator.eval(tree.args[0])
_check_interactable(left_expr)
power = -1
try:
power = int(tree.args[1])
except (ValueError, TypeError):
pass
if power < 1:
raise CharltonError("'**' requires a positive integer", tree.args[1])
all_terms = left_expr.terms
big_expr = left_expr
# Small optimization: (a + b)**100 is just the same as (a + b)**2.
power = min(len(left_expr.terms), power)
for i in xrange(1, power):
big_expr = _interaction(left_expr, big_expr)
all_terms = all_terms + big_expr.terms
return IntermediateExpr(False, None, False, all_terms)
def _eval_unary_plus(evaluator, tree):
return evaluator.eval(tree.args[0])
def _eval_unary_minus(evaluator, tree):
if tree.args[0] == "0":
return IntermediateExpr(True, tree.origin, False, [])
elif tree.args[0] == "1":
return IntermediateExpr(False, None, True, [])
else:
raise CharltonError("Unary minus can only be applied to 1 or 0", tree)
class Evaluator(object):
def __init__(self):
self._evaluators = {}
self.add_op("~", 2, _eval_any_tilde)
self.add_op("~", 1, _eval_any_tilde)
self.add_op("+", 2, _eval_binary_plus)
self.add_op("-", 2, _eval_binary_minus)
self.add_op("*", 2, _eval_binary_prod)
self.add_op("/", 2, _eval_binary_div)
self.add_op(":", 2, _eval_binary_interact)
self.add_op("**", 2, _eval_binary_power)
self.add_op("+", 1, _eval_unary_plus)
self.add_op("-", 1, _eval_unary_minus)
self.stash = {}
    # This should not be considered a public API yet (for actually adding new
    # operator semantics), because some of the relevant code was written
    # speculatively and isn't actually tested.
def add_op(self, op, arity, evaluator):
self._evaluators[op, arity] = evaluator
def _is_a(self, f, v):
try:
f(v)
except ValueError:
return False
else:
return True
def eval(self, tree, require_evalexpr=True):
result = None
if isinstance(tree, str):
if tree == "0":
result = IntermediateExpr(False, None, True, [])
elif tree == "1":
result = IntermediateExpr(True, tree.origin, False, [])
elif self._is_a(int, tree) or self._is_a(float, tree):
raise CharltonError("numbers besides '0' and '1' are "
"only allowed with **", tree)
else:
# Guess it's a Python expression
result = IntermediateExpr(False, None, False,
[Term([EvalFactor(tree)])])
else:
assert isinstance(tree, ParseNode)
key = (tree.op.token, len(tree.args))
if key not in self._evaluators:
raise CharltonError("I don't know how to evaluate "
"this '%s' operator" % (tree.op.token,),
tree.op)
result = self._evaluators[key](self, tree)
if require_evalexpr and not isinstance(result, IntermediateExpr):
if isinstance(result, ModelDesc):
raise CharltonError("~ can only be used once, and "
"only at the top level",
tree)
else:
raise CharltonError("custom operator returned an "
"object that I don't know how to "
"handle", tree)
return result
#############
_eval_tests = {
"": (True, []),
" ": (True, []),
" \n ": (True, []),
"a": (True, ["a"]),
"1": (True, []),
"0": (False, []),
"- 1": (False, []),
"- 0": (True, []),
"+ 1": (True, []),
"+ 0": (False, []),
"0 + 1": (True, []),
"1 + 0": (False, []),
"1 - 0": (True, []),
"0 - 1": (False, []),
"1 + a": (True, ["a"]),
"0 + a": (False, ["a"]),
"a - 1": (False, ["a"]),
"a - 0": (True, ["a"]),
"1 - a": (True, []),
"a + b": (True, ["a", "b"]),
"(a + b)": (True, ["a", "b"]),
"a + ((((b))))": (True, ["a", "b"]),
"a + ((((+b))))": (True, ["a", "b"]),
"a + ((((b - a))))": (True, ["a", "b"]),
"a + a + a": (True, ["a"]),
"a + (b - a)": (True, ["a", "b"]),
"a + np.log(a, base=10)": (True, ["a", "np.log(a, base=10)"]),
# Note different spacing:
"a + np.log(a, base=10) - np . log(a , base = 10)": (True, ["a"]),
"a + (I(b) + c)": (True, ["a", "I(b)", "c"]),
"a + I(b + c)": (True, ["a", "I(b + c)"]),
"a:b": (True, [("a", "b")]),
"a:b:a": (True, [("a", "b")]),
"a:(b + c)": (True, [("a", "b"), ("a", "c")]),
"(a + b):c": (True, [("a", "c"), ("b", "c")]),
"a:(b - c)": (True, [("a", "b")]),
"c + a:c + a:(b - c)": (True, ["c", ("a", "c"), ("a", "b")]),
"(a - b):c": (True, [("a", "c")]),
"b + b:c + (a - b):c": (True, ["b", ("b", "c"), ("a", "c")]),
"a:b - a:b": (True, []),
"a:b - b:a": (True, []),
"1 - (a + b)": (True, []),
"a + b - (a + b)": (True, []),
"a * b": (True, ["a", "b", ("a", "b")]),
"a * b * a": (True, ["a", "b", ("a", "b")]),
"a * (b + c)": (True, ["a", "b", "c", ("a", "b"), ("a", "c")]),
"(a + b) * c": (True, ["a", "b", "c", ("a", "c"), ("b", "c")]),
"a * (b - c)": (True, ["a", "b", ("a", "b")]),
"c + a:c + a * (b - c)": (True, ["c", ("a", "c"), "a", "b", ("a", "b")]),
"(a - b) * c": (True, ["a", "c", ("a", "c")]),
"b + b:c + (a - b) * c": (True, ["b", ("b", "c"), "a", "c", ("a", "c")]),
"a/b": (True, ["a", ("a", "b")]),
"(a + b)/c": (True, ["a", "b", ("a", "b", "c")]),
"b + b:c + (a - b)/c": (True, ["b", ("b", "c"), "a", ("a", "c")]),
"a/(b + c)": (True, ["a", ("a", "b"), ("a", "c")]),
"a ** 2": (True, ["a"]),
"(a + b + c + d) ** 2": (True, ["a", "b", "c", "d",
("a", "b"), ("a", "c"), ("a", "d"),
("b", "c"), ("b", "d"), ("c", "d")]),
"(a + b + c + d) ** 3": (True, ["a", "b", "c", "d",
("a", "b"), ("a", "c"), ("a", "d"),
("b", "c"), ("b", "d"), ("c", "d"),
("a", "b", "c"), ("a", "b", "d"),
("a", "c", "d"), ("b", "c", "d")]),
"a + +a": (True, ["a"]),
"~ a + b": (True, ["a", "b"]),
"~ a*b": (True, ["a", "b", ("a", "b")]),
"~ a*b + 0": (False, ["a", "b", ("a", "b")]),
"~ -1": (False, []),
"0 ~ a + b": (True, ["a", "b"]),
"1 ~ a + b": (True, [], True, ["a", "b"]),
"y ~ a + b": (False, ["y"], True, ["a", "b"]),
"0 + y ~ a + b": (False, ["y"], True, ["a", "b"]),
"0 + y * z ~ a + b": (False, ["y", "z", ("y", "z")], True, ["a", "b"]),
"-1 ~ 1": (False, [], True, []),
"1 + y ~ a + b": (True, ["y"], True, ["a", "b"]),
# Check precedence:
"a + b * c": (True, ["a", "b", "c", ("b", "c")]),
"a * b + c": (True, ["a", "b", ("a", "b"), "c"]),
"a * b - a": (True, ["b", ("a", "b")]),
"a + b / c": (True, ["a", "b", ("b", "c")]),
"a / b + c": (True, ["a", ("a", "b"), "c"]),
"a*b:c": (True, ["a", ("b", "c"), ("a", "b", "c")]),
"a:b*c": (True, [("a", "b"), "c", ("a", "b", "c")]),
# Intercept handling:
"~ 1 + 1 + 0 + 1": (True, []),
"~ 0 + 1 + 0": (False, []),
"~ 0 - 1 - 1 + 0 + 1": (True, []),
"~ 1 - 1": (False, []),
"~ 0 + a + 1": (True, ["a"]),
"~ 1 + (a + 0)": (True, ["a"]), # This is correct, but perhaps surprising!
"~ 0 + (a + 1)": (True, ["a"]), # Also correct!
"~ 1 - (a + 1)": (False, []),
}
# <> mark off where the error should be reported:
_eval_error_tests = [
"a <+>",
"a + <(>",
"b + (<-a>)",
"a:<1>",
"(a + <1>)*b",
"a + <2>",
"a + <1.0>",
# eh, catching this is a hassle, we'll just leave the user some rope if
# they really want it:
#"a + <0x1>",
"a ** <b>",
"a ** (<1 + 1>)",
"a + b <# asdf>",
"<)>",
"a + <)>",
"<*> a",
"a + <*>",
"a + <foo[bar>",
"a + <foo{bar>",
"a + <foo(bar>",
"a + <[bar>",
"a + <{bar>",
"a + <{bar[]>",
"a + foo<]>bar",
"a + foo[]<]>bar",
"a + foo{}<}>bar",
"a + foo<)>bar",
"a + b<)>",
"(a) <.>",
"<(>a + b",
"<y ~ a> ~ b",
"y ~ (<a ~ b>)",
"<~ a> ~ b",
"~ (<a ~ b>)",
# XX FIXME: this one is sort of ugly:
"1 + <-(a + b>)",
"<- a>",
"a + <-a**2>",
]
def _assert_terms_match(terms, expected_intercept, expecteds):
if expected_intercept:
expecteds = [()] + expecteds
assert len(terms) == len(expecteds)
for term, expected in zip(terms, expecteds):
if isinstance(term, Term):
if isinstance(expected, str):
expected = (expected,)
assert term.factors == tuple([EvalFactor(s) for s in expected])
else:
assert term == expected
def _do_eval_formula_tests(tests):
for code, result in tests.iteritems():
if len(result) == 2:
result = (False, []) + result
model_desc = ModelDesc.from_formula(code)
print repr(code)
print result
print model_desc
lhs_intercept, lhs_terms, rhs_intercept, rhs_terms = result
_assert_terms_match(model_desc.lhs_terms, lhs_intercept, lhs_terms)
_assert_terms_match(model_desc.rhs_terms, rhs_intercept, rhs_terms)
def test_eval_formula():
_do_eval_formula_tests(_eval_tests)
from charlton.parse import _parsing_error_test
def test_eval_formula_error_reporting():
_parsing_error_test(ModelDesc.from_formula, _eval_error_tests)
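# Small usage sketch, guarded so it only runs when this module is executed
# directly; the formula string here is arbitrary and chosen for illustration.
if __name__ == "__main__":
    desc = ModelDesc.from_formula("y ~ a * b")
    # describe() renders the parsed formula back into a canonical string,
    # e.g. "y ~ a + b + a:b" (the right-hand intercept is implicit).
    print desc.describe()
    print desc.rhs_terms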
|
|
import urllib
import sys
import os
from cStringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.dispatch import dispatcher
from django.http import SimpleCookie, HttpRequest
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
class ClientHandler(BaseHandler):
"""
    An HTTP handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object.
"""
def __call__(self, environ):
from django.conf import settings
from django.core import signals
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
dispatcher.send(signal=signals.request_started)
try:
request = WSGIRequest(environ)
response = self.get_response(request)
# Apply response middleware
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
finally:
dispatcher.send(signal=signals.request_finished)
return response
def store_rendered_templates(store, signal, sender, template, context):
"A utility function for storing templates and contexts that are rendered"
store.setdefault('template',[]).append(template)
store.setdefault('context',[]).append(context)
def encode_multipart(boundary, data):
"""
A simple method for encoding multipart POST data from a dictionary of
form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
for (key, value) in data.items():
if isinstance(value, file):
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % (to_str(key), to_str(os.path.basename(value.name))),
'Content-Type: application/octet-stream',
'',
value.read()
])
else:
if not isinstance(value, basestring) and is_iterable(value):
for item in value:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(item)
])
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(value)
])
lines.extend([
'--' + boundary + '--',
'',
])
return '\r\n'.join(lines)
class Client:
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, **defaults):
self.handler = ClientHandler()
self.defaults = defaults
self.cookies = SimpleCookie()
self.exc_info = None
def store_exc_info(self, *args, **kwargs):
"""
Utility method that can be used to store exceptions when they are
generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"Obtain the current session variables"
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': None,
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
# Curry a data dictionary into an instance of
# the template renderer callback function
data = {}
on_template_render = curry(store_rendered_templates, data)
dispatcher.connect(on_template_render, signal=signals.template_rendered)
# Capture exceptions created by the handler
dispatcher.connect(self.store_exc_info, signal=got_request_exception)
try:
response = self.handler(environ)
except TemplateDoesNotExist, e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response
response.client = self
response.request = request
# Add any rendered template detail to the response
# If there was only one template rendered (the most likely case),
# flatten the list to a single element
for detail in ('template', 'context'):
if data.get(detail):
if len(data[detail]) == 1:
                    setattr(response, detail, data[detail][0])
else:
setattr(response, detail, data[detail])
else:
setattr(response, detail, None)
# Update persistent cookie data
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(self, path, data={}, **extra):
"Request a response from the server using GET."
r = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': urllib.unquote(path),
'QUERY_STRING': urlencode(data, doseq=True),
'REQUEST_METHOD': 'GET',
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
"Request a response from the server using POST."
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
post_data = data
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': urllib.unquote(path),
'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO(post_data),
}
r.update(extra)
return self.request(**r)
def login(self, **credentials):
"""Set the Client to appear as if it has sucessfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
# Create a fake request to store login details
request = HttpRequest()
request.session = engine.SessionStore()
login(request, user)
# Set the cookie to represent the session
self.cookies[settings.SESSION_COOKIE_NAME] = request.session.session_key
self.cookies[settings.SESSION_COOKIE_NAME]['max-age'] = None
self.cookies[settings.SESSION_COOKIE_NAME]['path'] = '/'
self.cookies[settings.SESSION_COOKIE_NAME]['domain'] = settings.SESSION_COOKIE_DOMAIN
self.cookies[settings.SESSION_COOKIE_NAME]['secure'] = settings.SESSION_COOKIE_SECURE or None
self.cookies[settings.SESSION_COOKIE_NAME]['expires'] = None
# Save the session values
request.session.save()
return True
else:
return False
def logout(self):
"""Removes the authenticated user's cookies.
Causes the authenticated user to be logged out.
"""
session = __import__(settings.SESSION_ENGINE, {}, {}, ['']).SessionStore()
session.delete(session_key=self.cookies[settings.SESSION_COOKIE_NAME].value)
self.cookies = SimpleCookie()
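# Minimal usage sketch (assumes a configured Django project; the URLs and
# credentials below are hypothetical and only illustrate the API above).
if __name__ == '__main__':
    client = Client()
    response = client.get('/some/view/', {'q': 'term'})
    print response.status_code, response.template
    if client.login(username='alice', password='secret'):
        response = client.post('/submit/', {'title': 'hello'})
        print response.status_code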
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
from contextlib import closing
from datetime import datetime
import logging
from time import sleep
import uuid
from celery.exceptions import SoftTimeLimitExceeded
from contextlib2 import contextmanager
from flask_babel import lazy_gettext as _
import simplejson as json
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from superset import app, dataframe, db, results_backend, security_manager
from superset.models.sql_lab import Query
from superset.sql_parse import ParsedQuery
from superset.tasks.celery_app import app as celery_app
from superset.utils.core import (
json_iso_dttm_ser,
QueryStatus,
sources,
zlib_compress,
)
from superset.utils.dates import now_as_float
from superset.utils.decorators import stats_timing
config = app.config
stats_logger = config.get('STATS_LOGGER')
SQLLAB_TIMEOUT = config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC', 600)
log_query = config.get('QUERY_LOGGER')
class SqlLabException(Exception):
pass
class SqlLabSecurityException(SqlLabException):
pass
class SqlLabTimeoutException(SqlLabException):
pass
def handle_query_error(msg, query, session, payload=None):
"""Local method handling error while processing the SQL"""
payload = payload or {}
troubleshooting_link = config['TROUBLESHOOTING_LINK']
query.error_message = msg
query.status = QueryStatus.FAILED
query.tmp_table_name = None
session.commit()
payload.update({
'status': query.status,
'error': msg,
})
if troubleshooting_link:
payload['link'] = troubleshooting_link
return payload
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query
@contextmanager
def session_scope(nullpool):
"""Provide a transactional scope around a series of operations."""
if nullpool:
engine = sqlalchemy.create_engine(
app.config.get('SQLALCHEMY_DATABASE_URI'), poolclass=NullPool)
session_class = sessionmaker()
session_class.configure(bind=engine)
session = session_class()
else:
session = db.session()
session.commit() # HACK
try:
yield session
session.commit()
except Exception as e:
session.rollback()
logging.exception(e)
raise
finally:
session.close()
@celery_app.task(name='sql_lab.get_sql_results',
bind=True,
soft_time_limit=SQLLAB_TIMEOUT)
def get_sql_results(
ctask, query_id, rendered_query, return_results=True, store_results=False,
user_name=None, start_time=None):
"""Executes the sql query returns the results."""
with session_scope(not ctask.request.called_directly) as session:
try:
return execute_sql_statements(
ctask, query_id, rendered_query, return_results, store_results, user_name,
session=session, start_time=start_time)
except Exception as e:
logging.exception(e)
stats_logger.incr('error_sqllab_unhandled')
query = get_query(query_id, session)
return handle_query_error(str(e), query, session)
def execute_sql_statement(
sql_statement, query, user_name, session,
cursor, return_results=False):
"""Executes a single SQL statement"""
database = query.database
db_engine_spec = database.db_engine_spec
parsed_query = ParsedQuery(sql_statement)
sql = parsed_query.stripped()
SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')
if not parsed_query.is_readonly() and not database.allow_dml:
raise SqlLabSecurityException(
_('Only `SELECT` statements are allowed against this database'))
if query.select_as_cta:
if not parsed_query.is_select():
raise SqlLabException(_(
'Only `SELECT` statements can be used with the CREATE TABLE '
'feature.'))
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
query.tmp_table_name = 'tmp_{}_table_{}'.format(
query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
sql = parsed_query.as_create_table(query.tmp_table_name)
query.select_as_cta_used = True
if parsed_query.is_select():
if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
query.limit = SQL_MAX_ROWS
if query.limit:
sql = database.apply_limit_to_sql(sql, query.limit)
# Hook to allow environment-specific mutation (usually comments) to the SQL
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
try:
if log_query:
log_query(
query.database.sqlalchemy_uri,
query.executed_sql,
query.schema,
user_name,
__name__,
security_manager,
)
query.executed_sql = sql
with stats_timing('sqllab.query.time_executing_query', stats_logger):
logging.info('Running query: \n{}'.format(sql))
db_engine_spec.execute(cursor, sql, async_=True)
logging.info('Handling cursor')
db_engine_spec.handle_cursor(cursor, query, session)
with stats_timing('sqllab.query.time_fetching_results', stats_logger):
logging.debug('Fetching data for query object: {}'.format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
raise SqlLabTimeoutException(
"SQL Lab timeout. This environment's policy is to kill queries "
'after {} seconds.'.format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
raise SqlLabException(db_engine_spec.extract_error_message(e))
logging.debug('Fetching cursor description')
cursor_description = cursor.description
return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)
def execute_sql_statements(
ctask, query_id, rendered_query, return_results=True, store_results=False,
user_name=None, session=None, start_time=None,
):
"""Executes the sql query returns the results."""
if store_results and start_time:
# only asynchronous queries
stats_logger.timing(
'sqllab.query.time_pending', now_as_float() - start_time)
query = get_query(query_id, session)
payload = dict(query_id=query_id)
database = query.database
db_engine_spec = database.db_engine_spec
db_engine_spec.patch()
if store_results and not results_backend:
raise SqlLabException("Results backend isn't configured.")
# Breaking down into multiple statements
parsed_query = ParsedQuery(rendered_query)
statements = parsed_query.get_statements()
logging.info(f'Executing {len(statements)} statement(s)')
logging.info("Set query to 'running'")
query.status = QueryStatus.RUNNING
query.start_running_time = now_as_float()
engine = database.get_sqla_engine(
schema=query.schema,
nullpool=True,
user_name=user_name,
source=sources.get('sql_lab', None),
)
# Sharing a single connection and cursor across the
# execution of all statements (if many)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
statement_count = len(statements)
for i, statement in enumerate(statements):
# TODO CHECK IF STOPPED
msg = f'Running statement {i+1} out of {statement_count}'
logging.info(msg)
query.set_extra_json_key('progress', msg)
session.commit()
is_last_statement = i == len(statements) - 1
try:
cdf = execute_sql_statement(
statement, query, user_name, session, cursor,
return_results=is_last_statement and return_results)
msg = f'Running statement {i+1} out of {statement_count}'
except Exception as e:
msg = str(e)
if statement_count > 1:
msg = f'[Statement {i+1} out of {statement_count}] ' + msg
payload = handle_query_error(msg, query, session, payload)
return payload
# Success, updating the query entry in database
query.rows = cdf.size
query.progress = 100
query.set_extra_json_key('progress', None)
query.status = QueryStatus.SUCCESS
if query.select_as_cta:
query.select_sql = database.select_star(
query.tmp_table_name,
limit=query.limit,
schema=database.force_ctas_schema,
show_cols=False,
latest_partition=False)
query.end_time = now_as_float()
session.commit()
payload.update({
'status': query.status,
'data': cdf.data if cdf.data else [],
'columns': cdf.columns if cdf.columns else [],
'query': query.to_dict(),
})
if store_results:
key = str(uuid.uuid4())
logging.info(f'Storing results in results backend, key: {key}')
with stats_timing('sqllab.query.results_backend_write', stats_logger):
json_payload = json.dumps(
payload, default=json_iso_dttm_ser, ignore_nan=True)
cache_timeout = database.cache_timeout
if cache_timeout is None:
cache_timeout = config.get('CACHE_DEFAULT_TIMEOUT', 0)
results_backend.set(key, zlib_compress(json_payload), cache_timeout)
query.results_key = key
session.commit()
if return_results:
return payload
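# Illustrative sketch only: how a caller might dispatch the Celery task above.
# The query id, SQL text and user name are hypothetical; in Superset the SQL
# Lab view layer is responsible for creating the Query row and submitting it.
if __name__ == '__main__':
    async_result = get_sql_results.delay(
        query_id=42,
        rendered_query='SELECT 1',
        return_results=False,
        store_results=True,
        user_name='admin',
        start_time=now_as_float(),
    )
    logging.info('Submitted SQL Lab query, task id: %s', async_result.id)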
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.tpu_v2alpha1.types import cloud_tpu
from google.longrunning import operations_pb2 # type: ignore
from .base import TpuTransport, DEFAULT_CLIENT_INFO
from .grpc import TpuGrpcTransport
class TpuGrpcAsyncIOTransport(TpuTransport):
"""gRPC AsyncIO backend transport for Tpu.
    Manages TPU nodes and other resources (TPU API v2alpha1).
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "tpu.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "tpu.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_nodes(
self,
) -> Callable[[cloud_tpu.ListNodesRequest], Awaitable[cloud_tpu.ListNodesResponse]]:
r"""Return a callable for the list nodes method over gRPC.
Lists nodes.
Returns:
Callable[[~.ListNodesRequest],
Awaitable[~.ListNodesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_nodes" not in self._stubs:
self._stubs["list_nodes"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/ListNodes",
request_serializer=cloud_tpu.ListNodesRequest.serialize,
response_deserializer=cloud_tpu.ListNodesResponse.deserialize,
)
return self._stubs["list_nodes"]
@property
def get_node(
self,
) -> Callable[[cloud_tpu.GetNodeRequest], Awaitable[cloud_tpu.Node]]:
r"""Return a callable for the get node method over gRPC.
Gets the details of a node.
Returns:
Callable[[~.GetNodeRequest],
Awaitable[~.Node]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_node" not in self._stubs:
self._stubs["get_node"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/GetNode",
request_serializer=cloud_tpu.GetNodeRequest.serialize,
response_deserializer=cloud_tpu.Node.deserialize,
)
return self._stubs["get_node"]
@property
def create_node(
self,
) -> Callable[[cloud_tpu.CreateNodeRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create node method over gRPC.
Creates a node.
Returns:
Callable[[~.CreateNodeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_node" not in self._stubs:
self._stubs["create_node"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/CreateNode",
request_serializer=cloud_tpu.CreateNodeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_node"]
@property
def delete_node(
self,
) -> Callable[[cloud_tpu.DeleteNodeRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete node method over gRPC.
Deletes a node.
Returns:
Callable[[~.DeleteNodeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_node" not in self._stubs:
self._stubs["delete_node"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/DeleteNode",
request_serializer=cloud_tpu.DeleteNodeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_node"]
@property
def stop_node(
self,
) -> Callable[[cloud_tpu.StopNodeRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the stop node method over gRPC.
Stops a node. This operation is only available with
single TPU nodes.
Returns:
Callable[[~.StopNodeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "stop_node" not in self._stubs:
self._stubs["stop_node"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/StopNode",
request_serializer=cloud_tpu.StopNodeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["stop_node"]
@property
def start_node(
self,
) -> Callable[[cloud_tpu.StartNodeRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the start node method over gRPC.
Starts a node.
Returns:
Callable[[~.StartNodeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "start_node" not in self._stubs:
self._stubs["start_node"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/StartNode",
request_serializer=cloud_tpu.StartNodeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["start_node"]
@property
def update_node(
self,
) -> Callable[[cloud_tpu.UpdateNodeRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update node method over gRPC.
Updates the configurations of a node.
Returns:
Callable[[~.UpdateNodeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_node" not in self._stubs:
self._stubs["update_node"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/UpdateNode",
request_serializer=cloud_tpu.UpdateNodeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_node"]
@property
def generate_service_identity(
self,
) -> Callable[
[cloud_tpu.GenerateServiceIdentityRequest],
Awaitable[cloud_tpu.GenerateServiceIdentityResponse],
]:
r"""Return a callable for the generate service identity method over gRPC.
Generates the Cloud TPU service identity for the
project.
Returns:
Callable[[~.GenerateServiceIdentityRequest],
Awaitable[~.GenerateServiceIdentityResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_service_identity" not in self._stubs:
self._stubs["generate_service_identity"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/GenerateServiceIdentity",
request_serializer=cloud_tpu.GenerateServiceIdentityRequest.serialize,
response_deserializer=cloud_tpu.GenerateServiceIdentityResponse.deserialize,
)
return self._stubs["generate_service_identity"]
@property
def list_accelerator_types(
self,
) -> Callable[
[cloud_tpu.ListAcceleratorTypesRequest],
Awaitable[cloud_tpu.ListAcceleratorTypesResponse],
]:
r"""Return a callable for the list accelerator types method over gRPC.
Lists accelerator types supported by this API.
Returns:
Callable[[~.ListAcceleratorTypesRequest],
Awaitable[~.ListAcceleratorTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_accelerator_types" not in self._stubs:
self._stubs["list_accelerator_types"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/ListAcceleratorTypes",
request_serializer=cloud_tpu.ListAcceleratorTypesRequest.serialize,
response_deserializer=cloud_tpu.ListAcceleratorTypesResponse.deserialize,
)
return self._stubs["list_accelerator_types"]
@property
def get_accelerator_type(
self,
) -> Callable[
[cloud_tpu.GetAcceleratorTypeRequest], Awaitable[cloud_tpu.AcceleratorType]
]:
r"""Return a callable for the get accelerator type method over gRPC.
Gets AcceleratorType.
Returns:
Callable[[~.GetAcceleratorTypeRequest],
Awaitable[~.AcceleratorType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_accelerator_type" not in self._stubs:
self._stubs["get_accelerator_type"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/GetAcceleratorType",
request_serializer=cloud_tpu.GetAcceleratorTypeRequest.serialize,
response_deserializer=cloud_tpu.AcceleratorType.deserialize,
)
return self._stubs["get_accelerator_type"]
@property
def list_runtime_versions(
self,
) -> Callable[
[cloud_tpu.ListRuntimeVersionsRequest],
Awaitable[cloud_tpu.ListRuntimeVersionsResponse],
]:
r"""Return a callable for the list runtime versions method over gRPC.
Lists runtime versions supported by this API.
Returns:
Callable[[~.ListRuntimeVersionsRequest],
Awaitable[~.ListRuntimeVersionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_runtime_versions" not in self._stubs:
self._stubs["list_runtime_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/ListRuntimeVersions",
request_serializer=cloud_tpu.ListRuntimeVersionsRequest.serialize,
response_deserializer=cloud_tpu.ListRuntimeVersionsResponse.deserialize,
)
return self._stubs["list_runtime_versions"]
@property
def get_runtime_version(
self,
) -> Callable[
[cloud_tpu.GetRuntimeVersionRequest], Awaitable[cloud_tpu.RuntimeVersion]
]:
r"""Return a callable for the get runtime version method over gRPC.
Gets a runtime version.
Returns:
Callable[[~.GetRuntimeVersionRequest],
Awaitable[~.RuntimeVersion]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_runtime_version" not in self._stubs:
self._stubs["get_runtime_version"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/GetRuntimeVersion",
request_serializer=cloud_tpu.GetRuntimeVersionRequest.serialize,
response_deserializer=cloud_tpu.RuntimeVersion.deserialize,
)
return self._stubs["get_runtime_version"]
@property
def get_guest_attributes(
self,
) -> Callable[
[cloud_tpu.GetGuestAttributesRequest],
Awaitable[cloud_tpu.GetGuestAttributesResponse],
]:
r"""Return a callable for the get guest attributes method over gRPC.
Retrieves the guest attributes for the node.
Returns:
Callable[[~.GetGuestAttributesRequest],
Awaitable[~.GetGuestAttributesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_guest_attributes" not in self._stubs:
self._stubs["get_guest_attributes"] = self.grpc_channel.unary_unary(
"/google.cloud.tpu.v2alpha1.Tpu/GetGuestAttributes",
request_serializer=cloud_tpu.GetGuestAttributesRequest.serialize,
response_deserializer=cloud_tpu.GetGuestAttributesResponse.deserialize,
)
return self._stubs["get_guest_attributes"]
def close(self):
return self.grpc_channel.close()
__all__ = ("TpuGrpcAsyncIOTransport",)
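# --- Hedged usage sketch (not part of the generated client) -----------------
# A minimal illustration of wiring the transport above by hand: build the
# AsyncIO channel with the classmethod, hand it to the constructor, await one
# stub, then close. Application default credentials are assumed to be
# available when the coroutine is actually run, the parent string is a
# hypothetical project/location path, and the coroutine is not invoked here.
async def _example_transport_usage():
    channel = TpuGrpcAsyncIOTransport.create_channel()
    transport = TpuGrpcAsyncIOTransport(channel=channel)
    # Each RPC property returns an awaitable stub bound to the shared channel.
    response = await transport.list_nodes(
        cloud_tpu.ListNodesRequest(
            parent="projects/my-project/locations/us-central1-a"
        )
    )
    await transport.close()
    return response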
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, List, Optional, Tuple
from flask import current_app, request
from marshmallow import ValidationError
from sqlalchemy import and_, func
from airflow.api.common.experimental.mark_tasks import set_state
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import BadRequest, NotFound
from airflow.api_connexion.parameters import format_datetime, format_parameters
from airflow.api_connexion.schemas.task_instance_schema import (
TaskInstanceCollection,
TaskInstanceReferenceCollection,
clear_task_instance_form,
set_task_instance_state_form,
task_instance_batch_form,
task_instance_collection_schema,
task_instance_reference_collection_schema,
task_instance_schema,
)
from airflow.exceptions import SerializedDagNotFound
from airflow.models import SlaMiss
from airflow.models.dagrun import DagRun as DR
from airflow.models.taskinstance import TaskInstance as TI, clear_task_instances
from airflow.security import permissions
from airflow.utils.session import provide_session
from airflow.utils.state import State
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def get_task_instance(dag_id: str, dag_run_id: str, task_id: str, session=None):
"""Get task instance"""
query = (
session.query(TI)
.filter(TI.dag_id == dag_id)
.join(DR, and_(TI.dag_id == DR.dag_id, TI.execution_date == DR.execution_date))
.filter(DR.run_id == dag_run_id)
.filter(TI.task_id == task_id)
.outerjoin(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.execution_date == TI.execution_date,
SlaMiss.task_id == TI.task_id,
),
)
.add_entity(SlaMiss)
)
task_instance = query.one_or_none()
if task_instance is None:
raise NotFound("Task instance not found")
return task_instance_schema.dump(task_instance)
def _apply_array_filter(query, key, values):
if values is not None:
query = query.filter(key.in_(values))
return query
def _apply_range_filter(query, key, value_range: Tuple[Any, Any]):
gte_value, lte_value = value_range
if gte_value is not None:
query = query.filter(key >= gte_value)
if lte_value is not None:
query = query.filter(key <= lte_value)
return query
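# --- Hedged, self-contained sketch (not part of the Airflow API) ------------
# Shows how the two helpers above compose onto an ordinary SQLAlchemy query,
# silently skipping bounds that are None, which is how the endpoints below
# pass through unset request parameters. The _Demo model and the in-memory
# SQLite engine are hypothetical and exist only for illustration (assumes
# SQLAlchemy 1.4+); the function is not called anywhere.
def _example_filter_composition():
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base
    Base = declarative_base()
    class _Demo(Base):
        __tablename__ = '_demo'
        id = Column(Integer, primary_key=True)
        state = Column(String)
        duration = Column(Integer)
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all(
            [
                _Demo(state='success', duration=5),
                _Demo(state='failed', duration=50),
                _Demo(state='success', duration=500),
            ]
        )
        session.commit()
        query = session.query(_Demo)
        # None bounds are no-ops, mirroring unset query parameters above.
        query = _apply_array_filter(query, key=_Demo.state, values=['success'])
        query = _apply_range_filter(query, key=_Demo.duration, value_range=(None, 100))
        return [row.id for row in query.all()]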
@format_parameters(
{
"execution_date_gte": format_datetime,
"execution_date_lte": format_datetime,
"start_date_gte": format_datetime,
"start_date_lte": format_datetime,
"end_date_gte": format_datetime,
"end_date_lte": format_datetime,
}
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def get_task_instances(
limit: int,
dag_id: Optional[str] = None,
dag_run_id: Optional[str] = None,
execution_date_gte: Optional[str] = None,
execution_date_lte: Optional[str] = None,
start_date_gte: Optional[str] = None,
start_date_lte: Optional[str] = None,
end_date_gte: Optional[str] = None,
end_date_lte: Optional[str] = None,
duration_gte: Optional[float] = None,
duration_lte: Optional[float] = None,
state: Optional[str] = None,
pool: Optional[List[str]] = None,
queue: Optional[List[str]] = None,
offset: Optional[int] = None,
session=None,
): # pylint: disable=too-many-arguments
"""Get list of task instances."""
base_query = session.query(TI)
if dag_id != "~":
base_query = base_query.filter(TI.dag_id == dag_id)
if dag_run_id != "~":
base_query = base_query.join(DR, and_(TI.dag_id == DR.dag_id, TI.execution_date == DR.execution_date))
base_query = base_query.filter(DR.run_id == dag_run_id)
base_query = _apply_range_filter(
base_query,
key=DR.execution_date,
value_range=(execution_date_gte, execution_date_lte),
)
base_query = _apply_range_filter(
base_query, key=TI.start_date, value_range=(start_date_gte, start_date_lte)
)
base_query = _apply_range_filter(base_query, key=TI.end_date, value_range=(end_date_gte, end_date_lte))
base_query = _apply_range_filter(base_query, key=TI.duration, value_range=(duration_gte, duration_lte))
base_query = _apply_array_filter(base_query, key=TI.state, values=state)
base_query = _apply_array_filter(base_query, key=TI.pool, values=pool)
base_query = _apply_array_filter(base_query, key=TI.queue, values=queue)
# Count elements before joining extra columns
total_entries = base_query.with_entities(func.count('*')).scalar()
# Add join
base_query = base_query.join(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == TI.execution_date,
),
isouter=True,
)
ti_query = base_query.add_entity(SlaMiss)
task_instances = ti_query.offset(offset).limit(limit).all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def get_task_instances_batch(session=None):
"""Get list of task instances."""
body = request.get_json()
try:
data = task_instance_batch_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
base_query = session.query(TI)
base_query = _apply_array_filter(base_query, key=TI.dag_id, values=data["dag_ids"])
base_query = _apply_range_filter(
base_query,
key=TI.execution_date,
value_range=(data["execution_date_gte"], data["execution_date_lte"]),
)
base_query = _apply_range_filter(
base_query,
key=TI.start_date,
value_range=(data["start_date_gte"], data["start_date_lte"]),
)
base_query = _apply_range_filter(
base_query, key=TI.end_date, value_range=(data["end_date_gte"], data["end_date_lte"])
)
base_query = _apply_range_filter(
base_query, key=TI.duration, value_range=(data["duration_gte"], data["duration_lte"])
)
base_query = _apply_array_filter(base_query, key=TI.state, values=data["state"])
base_query = _apply_array_filter(base_query, key=TI.pool, values=data["pool"])
base_query = _apply_array_filter(base_query, key=TI.queue, values=data["queue"])
# Count elements before joining extra columns
total_entries = base_query.with_entities(func.count('*')).scalar()
# Add join
base_query = base_query.join(
SlaMiss,
and_(
SlaMiss.dag_id == TI.dag_id,
SlaMiss.task_id == TI.task_id,
SlaMiss.execution_date == TI.execution_date,
),
isouter=True,
)
ti_query = base_query.add_entity(SlaMiss)
task_instances = ti_query.all()
return task_instance_collection_schema.dump(
TaskInstanceCollection(task_instances=task_instances, total_entries=total_entries)
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def post_clear_task_instances(dag_id: str, session=None):
"""Clear task instances."""
body = request.get_json()
try:
data = clear_task_instance_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
error_message = f"Dag id {dag_id} not found"
raise NotFound(error_message)
reset_dag_runs = data.pop('reset_dag_runs')
task_instances = dag.clear(get_tis=True, **data)
if not data["dry_run"]:
clear_task_instances(
task_instances, session, dag=dag, dag_run_state=State.RUNNING if reset_dag_runs else False
)
task_instances = task_instances.join(
DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
).add_column(DR.run_id)
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=task_instances.all())
)
@security.requires_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
]
)
@provide_session
def post_set_task_instances_state(dag_id, session):
"""Set a state of task instances."""
body = request.get_json()
try:
data = set_task_instance_state_form.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
error_message = f"Dag ID {dag_id} not found"
try:
dag = current_app.dag_bag.get_dag(dag_id)
if not dag:
raise NotFound(error_message)
except SerializedDagNotFound:
# If DAG is not found in serialized_dag table
raise NotFound(error_message)
task_id = data['task_id']
task = dag.task_dict.get(task_id)
if not task:
error_message = f"Task ID {task_id} not found"
raise NotFound(error_message)
tis = set_state(
tasks=[task],
execution_date=data["execution_date"],
upstream=data["include_upstream"],
downstream=data["include_downstream"],
future=data["include_future"],
past=data["include_past"],
state=data["new_state"],
commit=not data["dry_run"],
)
execution_dates = {ti.execution_date for ti in tis}
execution_date_to_run_id_map = dict(
session.query(DR.execution_date, DR.run_id).filter(
DR.dag_id == dag_id, DR.execution_date.in_(execution_dates)
)
)
tis_with_run_id = [(ti, execution_date_to_run_id_map.get(ti.execution_date)) for ti in tis]
return task_instance_reference_collection_schema.dump(
TaskInstanceReferenceCollection(task_instances=tis_with_run_id)
)
|
|
#!/usr/bin/python
# this defaults to python 2 on my machine
# (c) 2017 Treadco software.
#
# python version of the fuzzy rbm
# supports the non-fuzzy version.
#
#
license ='''
Copyright (c) 2017 Treadco LLC, Amelia Treader, Robert W Harrison
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import numpy as np
import sys,os
from math import exp as exp
import fuzzy
#
# use pickle.dump(instance,file)
# and pickle.load(file)
#
# to save and restore data. file is a Python FILE object so
# it's opened first.
#
#
class rbm: #the basic rbm
def __init__(me, number_visible, number_hidden):
me.nvis = number_visible
me.nhid = number_hidden
me.layers = []
me.energies = []
me.hidden = []
me.symmetric_encoding = False
# initialize the space
# making essentially empty lists means that we can avoid using append etc
me.scratch = np.full(number_visible,0.0)
for i in range(0,number_hidden):
me.layers.append(np.float32(np.zeros(number_visible)))
me.hidden.append(0)
me.energies.append(0.)
def add_fuzzy(me, thmin,thmax,thenumber):
me.fuzz = []
for i in range(0, me.nhid):
me.fuzz.append( fuzzy.fuzzy(thmin,thmax,thenumber))
def reinitialize_fuzzy(me):
for i in range(0, me.nhid):
me.fuzz[i].initialize_counts()
def reconstruct(me, data, use_best = True):
the_layer = me.the_best_layer(data,use_best)
ib = the_layer[0]
a = me.layers[ib]
sign = 1.
if me.hidden[ib] < 0.:
sign = -1.
#
# there may be a clever numpy solution for this loop
#
for i in range(0,me.nvis):
me.scratch[i] = -1.
if a[i] < 0.:
me.scratch[i] = 1.
return me.scratch.__mul__(sign)
def its_symmetric(me):
me.symmetric_encoding = True
def the_best_layer(me, data, use_best = True):
if use_best:
me.assign_hidden_and_reconstruction_energy(data)
else:
me.assign_hidden_and_energy(data)
ib = np.argmin(me.energies)
eb = me.energies[ib]
# ib = 0
# eb = me.energies[0]
# for i in range(1,me.nhid):
# if me.energies[i] <= eb:
# ib = i
# eb = me.energies[i]
return ib,eb
def the_best_built_layer(me, data, use_best = True):
if use_best:
me.assign_hidden_and_reconstruction_energy(data)
else:
me.assign_hidden_and_energy(data)
# ib = 0
# eb = 10.e10
# for i in range(0,me.nhid):
# if me.energies[i] < -1.0 and use_best:
# me.energies[i] = 10.e10
# if me.energies[i] <= eb:
# ib = i
# eb = me.energies[i]
ib = np.argmin(me.energies)
eb = me.energies[ib]
while use_best and eb < -1.:
me.energies[ib] = 10.e10
ib = np.argmin(me.energies)
eb = me.energies[ib]
return ib,eb
def estimate_EV( me, data, use_best = True):
ib = me.the_best_layer(data,use_best)[0]
return me.fuzz[ib].expected_value()
def assign_hidden_and_reconstruction_energy(me, data):
for i in range(0, me.nhid):
eraw = np.dot( data, me.layers[i])
ebest = np.dot( data.__abs__(), (me.layers[i]).__abs__())
if ebest == 0.0:
# ebest = 1.0
# this forces the RBM to train this layer.
me.energies[i] = -10.e10
# me.energies[i] = 0.0
me.hidden[i] = 1.0
elif me.symmetric_encoding:
me.hidden[i] = 1.0
me.energies[i] = eraw/ebest
else:
if eraw > 0.:
me.hidden[i] = -1.0
me.energies[i] = -eraw/ebest
else:
me.hidden[i] = 1.0
me.energies[i] = eraw/ebest
def assign_hidden_and_energy(me, data):
for i in range(0, me.nhid):
eraw = np.dot( data, me.layers[i])
if me.symmetric_encoding:
me.hidden[i] = 1.0
me.energies[i] = eraw
else:
if eraw > 0.:
me.hidden[i] = -1.0
me.energies[i] = -eraw
else:
me.hidden[i] = 1.0
me.energies[i] = eraw
def trainOmatic(me,data,beta,learning_rate,use_best = True):
me.train(data,beta,learning_rate,use_best)
me.antitrain(data,beta,learning_rate*0.1,use_best)
# this is the online one pass algorithm
def train_fuzzy(me,data,beta,learning_rate,dependent_value, use_best = True):
if len(me.fuzz) == 0:
print("You Must define fuzzy first to use this")
if use_best:
me.assign_hidden_and_reconstruction_energy(data)
else:
me.assign_hidden_and_energy(data)
# select the row to train.
imin = 0
emin = me.energies[0]
for i in range(1,me.nhid):
# print( emin, me.energies[i])
if emin >= me.energies[i] :
imin = i
emin = me.energies[i]
#
# emin,imin now point to the best row
#
hsign = me.hidden[imin]
alayer = me.layers[imin]
# print(me.fuzz[imin].counts)
# fdamp = me.fuzz[imin].damp(dependent_value)
# if fdamp > 0:
me.fuzz[imin].add(dependent_value)
# print(me.fuzz[imin].counts)
# sys.stdout.flush()
# the products with hsign keep the +- straight.
# for the gradients that is.
# learning_rate = learning_rate*fdamp
for i in range(0, me.nvis): # over the row
ef = alayer[i]*hsign*data[i]
ep = ef*beta*hsign
em = -ep
fp = exp(-ep)
fm = exp(-em)
damp = (fp-fm)/(fp+fm) *hsign *data[i]
hv = hsign *data[i]
alayer[i] += learning_rate*( -hv + damp)
return emin
def train(me,data,beta,learning_rate, use_best = True):
if use_best:
me.assign_hidden_and_reconstruction_energy(data)
else:
me.assign_hidden_and_energy(data)
# select the row to train.
imin = 0
emin = me.energies[0]
for i in range(1,me.nhid):
if emin >= me.energies[i] :
imin = i
emin = me.energies[i]
# print(emin)
# sys.stdout.flush()
#
# emin,imin now point to the best row
#
hsign = me.hidden[imin]
alayer = me.layers[imin]
# the products with hsign keep the +- straight.
# for the gradients that is.
for i in range(0, me.nvis): # over the row
ef = alayer[i]*hsign*data[i]
ep = ef*beta*hsign
em = -ep
fp = exp(-ep)
fm = exp(-em)
damp = (fp-fm)/(fp+fm) *hsign *data[i]
hv = hsign *data[i]
alayer[i] += learning_rate*( -hv + damp)
return emin
def antitrain(me,data,beta,learning_rate,use_best = True):
if use_best:
me.assign_hidden_and_reconstruction_energy(data)
else:
me.assign_hidden_and_energy(data)
# select the row to train.
imax = 0
emax = me.energies[0]
for i in range(1,me.nhid):
if emax <= me.energies[i] :
imax = i
emax = me.energies[i]
#
# emin,imin now point to the best row
#
hsign = me.hidden[imax]
alayer = me.layers[imax]
# the products with hsign keep the +- straight.
# for the gradients that is.
for i in range(0, me.nvis): # over the row
ef = alayer[i]*hsign*data[i]
ep = ef*beta*hsign
em = -ep
fp = exp(-ep)
fm = exp(-em)
damp = (fp-fm)/(fp+fm) *hsign *data[i]
hv = hsign *data[i]
alayer[i] -= learning_rate*( -hv + damp)
def main():
print("this is the main routine, set up for testing")
my_rbm = rbm(2,2)
print(my_rbm.layers)
d = np.full(2,1.)
d[0] = -1.0
# my_rbm.train(d, 0.1, 0.1)
# print(my_rbm.layers)
for i in range(1,10):
d[0] = 1.; d[1] = -1.
my_rbm.train(d, 0.1, 0.1)
d[0] = 1.; d[1] = 1.
my_rbm.train(d, 0.1, 0.1)
print(my_rbm.layers)
d[0] = 0.
print(my_rbm.reconstruct(d))
d[0] = 1.
d[1] = 0.
print(my_rbm.reconstruct(d))
my_rbm.layers[0] = np.array([-1.,1.])
my_rbm.layers[1] = np.array([1.,1.])
my_rbm.add_fuzzy(-1., 1., 20)
print(my_rbm.layers)
d[0] = 1.; d[1] = -1.
my_rbm.train_fuzzy(d, 0.1, 0.1, 0.4)
d[0] = 1.; d[1] = 1.
my_rbm.train_fuzzy(d, 0.1, 0.1, -0.4)
print(my_rbm.layers)
print(d,my_rbm.estimate_EV(d))
d[0] = 1.; d[1] = -1.
print(d,my_rbm.estimate_EV(d))
#main()
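# Hedged usage sketch (not from the original file): the header comment above
# says instances are saved and restored with pickle.dump and pickle.load, so
# this is a minimal round-trip. The file name is chosen purely for
# illustration and the function is not called anywhere.
def _example_pickle_roundtrip(path='rbm_demo.pickle'):
    import pickle
    machine = rbm(4, 2)
    with open(path, 'wb') as handle:
        pickle.dump(machine, handle)
    with open(path, 'rb') as handle:
        restored = pickle.load(handle)
    return restored.nvis, restored.nhid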
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
from py4j.java_gateway import java_import, JavaObject
from pyspark import RDD, SparkConf
from pyspark.serializers import NoOpSerializer, UTF8Deserializer, CloudPickleSerializer
from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.dstream import DStream
from pyspark.streaming.util import TransformFunction, TransformFunctionSerializer
__all__ = ["StreamingContext"]
class StreamingContext(object):
"""
Main entry point for Spark Streaming functionality. A StreamingContext
represents the connection to a Spark cluster, and can be used to create
    L{DStream}s from various input sources. It can be created from an existing L{SparkContext}.
After creating and transforming DStreams, the streaming computation can
be started and stopped using `context.start()` and `context.stop()`,
respectively. `context.awaitTermination()` allows the current thread
to wait for the termination of the context by `stop()` or by an exception.
"""
_transformerSerializer = None
# Reference to a currently active StreamingContext
_activeContext = None
def __init__(self, sparkContext, batchDuration=None, jssc=None):
"""
Create a new StreamingContext.
@param sparkContext: L{SparkContext} object.
@param batchDuration: the time interval (in seconds) at which streaming
data will be divided into batches
"""
self._sc = sparkContext
self._jvm = self._sc._jvm
self._jssc = jssc or self._initialize_context(self._sc, batchDuration)
def _initialize_context(self, sc, duration):
self._ensure_initialized()
return self._jvm.JavaStreamingContext(sc._jsc, self._jduration(duration))
def _jduration(self, seconds):
"""
Create Duration object given number of seconds
"""
return self._jvm.Duration(int(seconds * 1000))
@classmethod
def _ensure_initialized(cls):
SparkContext._ensure_initialized()
gw = SparkContext._gateway
java_import(gw.jvm, "org.apache.spark.streaming.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.java.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.python.*")
from pyspark.java_gateway import ensure_callback_server_started
ensure_callback_server_started(gw)
# register serializer for TransformFunction
# it happens before creating SparkContext when loading from checkpointing
cls._transformerSerializer = TransformFunctionSerializer(
SparkContext._active_spark_context, CloudPickleSerializer(), gw)
@classmethod
def getOrCreate(cls, checkpointPath, setupFunc):
"""
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a new context.
@param checkpointPath: Checkpoint directory used in an earlier streaming program
@param setupFunc: Function to create a new context and setup DStreams
"""
cls._ensure_initialized()
gw = SparkContext._gateway
# Check whether valid checkpoint information exists in the given path
ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath)
if ssc_option.isEmpty():
ssc = setupFunc()
ssc.checkpoint(checkpointPath)
return ssc
jssc = gw.jvm.JavaStreamingContext(ssc_option.get())
# If there is already an active instance of Python SparkContext use it, or create a new one
if not SparkContext._active_spark_context:
jsc = jssc.sparkContext()
conf = SparkConf(_jconf=jsc.getConf())
SparkContext(conf=conf, gateway=gw, jsc=jsc)
sc = SparkContext._active_spark_context
# update ctx in serializer
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
@classmethod
def getActive(cls):
"""
Return either the currently active StreamingContext (i.e., if there is a context started
but not stopped) or None.
"""
activePythonContext = cls._activeContext
if activePythonContext is not None:
# Verify that the current running Java StreamingContext is active and is the same one
# backing the supposedly active Python context
activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode()
activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive()
if activeJvmContextOption.isEmpty():
cls._activeContext = None
elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId:
cls._activeContext = None
raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext "
"backing the action Python StreamingContext. This is unexpected.")
return cls._activeContext
@classmethod
def getActiveOrCreate(cls, checkpointPath, setupFunc):
"""
Either return the active StreamingContext (i.e. currently started but not stopped),
or recreate a StreamingContext from checkpoint data or create a new StreamingContext
using the provided setupFunc function. If the checkpointPath is None or does not contain
valid checkpoint data, then setupFunc will be called to create a new context and setup
DStreams.
@param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be
None if the intention is to always create a new context when there
is no active context.
@param setupFunc: Function to create a new JavaStreamingContext and setup DStreams
"""
if setupFunc is None:
raise Exception("setupFunc cannot be None")
activeContext = cls.getActive()
if activeContext is not None:
return activeContext
elif checkpointPath is not None:
return cls.getOrCreate(checkpointPath, setupFunc)
else:
return setupFunc()
@property
def sparkContext(self):
"""
Return SparkContext which is associated with this StreamingContext.
"""
return self._sc
def start(self):
"""
Start the execution of the streams.
"""
self._jssc.start()
StreamingContext._activeContext = self
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def awaitTerminationOrTimeout(self, timeout):
"""
Wait for the execution to stop. Return `true` if it's stopped; or
throw the reported error during the execution; or `false` if the
waiting time elapsed before returning from the method.
@param timeout: time to wait in seconds
"""
return self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
        @param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
"""
self._jssc.stop(stopSparkContext, stopGraceFully)
StreamingContext._activeContext = None
if stopSparkContext:
self._sc.stop()
def remember(self, duration):
"""
        Set each DStream in this context to remember RDDs it generated
        in the last given duration. DStreams remember RDDs only for a
        limited duration of time and release them for garbage collection.
This method allows the developer to specify how long to remember
the RDDs (if the developer wishes to query old data outside the
DStream computation).
@param duration: Minimum duration (in seconds) that each DStream
should remember its RDDs
"""
self._jssc.remember(self._jduration(duration))
def checkpoint(self, directory):
"""
Sets the context to periodically checkpoint the DStream operations for master
fault-tolerance. The graph will be checkpointed every batch interval.
@param directory: HDFS-compatible directory where the checkpoint data
will be reliably stored
"""
self._jssc.checkpoint(directory)
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2):
"""
Create an input from TCP source hostname:port. Data is received using
        a TCP socket and the received bytes are interpreted as UTF8 encoded ``\\n`` delimited
lines.
@param hostname: Hostname to connect to for receiving data
@param port: Port to connect to for receiving data
@param storageLevel: Storage level to use for storing the received objects
"""
jlevel = self._sc._getJavaStorageLevel(storageLevel)
return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
UTF8Deserializer())
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
        for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
"""
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
NoOpSerializer())
def _check_serializers(self, rdds):
# make sure they have same serializer
if len(set(rdd._jrdd_deserializer for rdd in rdds)) > 1:
for i in range(len(rdds)):
# reset them to sc.serializer
rdds[i] = rdds[i]._reserialize()
def queueStream(self, rdds, oneAtATime=True, default=None):
"""
Create an input stream from a queue of RDDs or list. In each batch,
it will process either one or all of the RDDs returned by the queue.
.. note:: Changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
@param oneAtATime: pick one rdd each time or pick all of them once.
@param default: The default rdd if no more in rdds
"""
if default and not isinstance(default, RDD):
default = self._sc.parallelize(default)
if not rdds and default:
rdds = [rdds]
if rdds and not isinstance(rdds[0], RDD):
rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)
queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
if default:
default = default._reserialize(rdds[0]._jrdd_deserializer)
jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
def transform(self, dstreams, transformFunc):
"""
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
"""
jdstreams = [d._jdstream for d in dstreams]
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
lambda t, *rdds: transformFunc(rdds),
*[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
for i in range(0, len(dstreams)):
jdstreams[i] = dstreams[i]._jdstream
return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
def addStreamingListener(self, streamingListener):
"""
Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
receiving system events related to streaming.
"""
self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper(
self._jvm.PythonStreamingListenerWrapper(streamingListener)))
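# Hedged usage sketch (not part of PySpark): the start/stop cycle described in
# the StreamingContext docstring above -- read lines from a TCP socket, print
# a sample of every batch, then block until the context is stopped. The host
# and port are placeholders and the function is not called anywhere.
def _example_network_stream(batch_interval=1, host='localhost', port=9999):
    sc = SparkContext(appName='StreamingContextExample')
    ssc = StreamingContext(sc, batch_interval)
    lines = ssc.socketTextStream(host, port)
    lines.pprint()  # prints the first elements of each batch
    ssc.start()
    ssc.awaitTermination()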
|
|
import sys
import inspect
import argparse
from dipy.workflows.docstring_parser import NumpyDocString
def get_args_default(func):
if sys.version_info[0] >= 3:
sig_object = inspect.signature(func)
params = sig_object.parameters.values()
names = [param.name for param in params if param.name != 'self']
defaults = [param.default for param in params
if param.default is not inspect._empty]
else:
specs = inspect.getargspec(func)
names = specs.args[1:]
defaults = specs.defaults
return names, defaults
class IntrospectiveArgumentParser(argparse.ArgumentParser):
def __init__(self, prog=None, usage=None, description=None, epilog=None,
parents=[], formatter_class=argparse.RawTextHelpFormatter,
prefix_chars='-', fromfile_prefix_chars=None,
argument_default=None, conflict_handler='resolve',
add_help=True):
""" Augmenting the argument parser to allow automatic creation of
arguments from workflows
Parameters
-----------
prog : None
The name of the program (default: sys.argv[0])
usage : None
A usage message (default: auto-generated from arguments)
description : str
A description of what the program does
epilog : str
Text following the argument descriptions
parents : list
Parsers whose arguments should be copied into this one
formatter_class : obj
HelpFormatter class for printing help messages
prefix_chars : str
Characters that prefix optional arguments
fromfile_prefix_chars : None
Characters that prefix files containing additional arguments
argument_default : None
The default value for all arguments
conflict_handler : str
String indicating how to handle conflicts
add_help : bool
            Add a -h/--help option
"""
iap = IntrospectiveArgumentParser
if epilog is None:
epilog =\
("References: \n"
"Garyfallidis, E., M. Brett, B. Amirbekian, A. Rokem,"
" S. Van Der Walt, M. Descoteaux, and I. Nimmo-Smith. Dipy, a"
" library for the analysis of diffusion MRI data. Frontiers"
" in Neuroinformatics, 1-18, 2014.")
super(iap, self).__init__(prog=prog, usage=usage,
description=description,
epilog=epilog, parents=parents,
formatter_class=formatter_class,
prefix_chars=prefix_chars,
fromfile_prefix_chars=fromfile_prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler,
add_help=add_help)
self.doc = None
def add_workflow(self, workflow):
""" Take a workflow object and use introspection to extract the parameters,
types and docstrings of its run method. Then add these parameters
        to the current argparser's own params to parse. If the workflow is of
type combined_workflow, the optional input parameters of its
sub workflows will also be added.
Parameters
-----------
workflow : dipy.workflows.workflow.Workflow
Workflow from which to infer parameters.
Returns
-------
sub_flow_optionals : dictionary of all sub workflow optional parameters
"""
doc = inspect.getdoc(workflow.run)
npds = NumpyDocString(doc)
self.doc = npds['Parameters']
self.description = '{0}\n\n{1}'.format(
' '.join(npds['Summary']),
' '.join(npds['Extended Summary']))
if npds['References']:
ref_text = [text if text else "\n" for text in npds['References']]
ref_idx = self.epilog.find('References: \n') + \
len('References: \n')
self.epilog = "{0}{1}\n{2}".format(self.epilog[:ref_idx],
''.join(ref_text),
self.epilog[ref_idx:])
self._output_params = [param for param in npds['Parameters']
if 'out_' in param[0]]
self._positional_params = [param for param in npds['Parameters']
if 'optional' not in param[1] and
'out_' not in param[0]]
self._optional_params = [param for param in npds['Parameters']
if 'optional' in param[1]]
args, defaults = get_args_default(workflow.run)
        output_args = self.add_argument_group('output arguments (optional)')
len_args = len(args)
len_defaults = len(defaults)
nb_positional_variable = 0
if len_args != len(self.doc):
raise ValueError(
self.prog + ": Number of parameters in the "
"doc string and run method does not match. "
"Please ensure that the number of parameters "
"in the run method is same as the doc string.")
for i, arg in enumerate(args):
prefix = ''
is_optional = i >= len_args - len_defaults
if is_optional:
prefix = '--'
typestr = self.doc[i][1]
dtype, isnarg = self._select_dtype(typestr)
help_msg = ' '.join(self.doc[i][2])
_args = ['{0}{1}'.format(prefix, arg)]
_kwargs = {'help': help_msg,
'type': dtype,
'action': 'store'}
if is_optional:
_kwargs['metavar'] = dtype.__name__
if dtype is bool:
_kwargs['action'] = 'store_true'
default_ = dict()
default_[arg] = False
self.set_defaults(**default_)
del _kwargs['type']
del _kwargs['metavar']
elif dtype is bool:
_kwargs['type'] = int
_kwargs['choices'] = [0, 1]
if dtype is tuple:
_kwargs['type'] = str
if isnarg:
if is_optional:
_kwargs['nargs'] = '*'
else:
_kwargs['nargs'] = '+'
nb_positional_variable += 1
if 'out_' in arg:
output_args.add_argument(*_args, **_kwargs)
else:
self.add_argument(*_args, **_kwargs)
if nb_positional_variable > 1:
raise ValueError(self.prog + " : All positional arguments present"
" are gathered into a list. It does not make"
"much sense to have more than one positional"
" argument with 'variable string' as dtype."
" Please, ensure that 'variable (type)'"
" appears only once as a positional argument."
)
return self.add_sub_flow_args(workflow.get_sub_runs())
def add_sub_flow_args(self, sub_flows):
""" Take an array of workflow objects and use introspection to extract
the parameters, types and docstrings of their run method. Only the
optional input parameters are extracted for these as they are treated
as sub workflows.
Parameters
-----------
sub_flows : array of dipy.workflows.workflow.Workflow
Workflows to inspect.
Returns
-------
sub_flow_optionals : dictionary of all sub workflow optional parameters
"""
sub_flow_optionals = dict()
for name, flow, short_name in sub_flows:
sub_flow_optionals[name] = {}
doc = inspect.getdoc(flow)
npds = NumpyDocString(doc)
_doc = npds['Parameters']
args, defaults = get_args_default(flow)
len_args = len(args)
len_defaults = len(defaults)
flow_args = \
self.add_argument_group('{0} arguments (optional)'.
format(name))
for i, arg_name in enumerate(args):
is_not_optional = i < len_args - len_defaults
if 'out_' in arg_name or is_not_optional:
continue
arg_name = '{0}.{1}'.format(short_name, arg_name)
sub_flow_optionals[name][arg_name] = None
prefix = '--'
typestr = _doc[i][1]
dtype, isnarg = self._select_dtype(typestr)
help_msg = ''.join(_doc[i][2])
_args = ['{0}{1}'.format(prefix, arg_name)]
_kwargs = {'help': help_msg,
'type': dtype,
'action': 'store'}
_kwargs['metavar'] = dtype.__name__
if dtype is bool:
_kwargs['action'] = 'store_true'
default_ = dict()
default_[arg_name] = False
self.set_defaults(**default_)
del _kwargs['type']
del _kwargs['metavar']
elif dtype is bool:
_kwargs['type'] = int
_kwargs['choices'] = [0, 1]
if dtype is tuple:
_kwargs['type'] = str
if isnarg:
_kwargs['nargs'] = '*'
flow_args.add_argument(*_args, **_kwargs)
return sub_flow_optionals
def _select_dtype(self, text):
""" Analyses a docstring parameter line and returns the good argparser
type.
Parameters
-----------
text : string
Parameter text line to inspect.
Returns
-------
arg_type : The type found by inspecting the text line.
is_nargs : Whether or not this argument is nargs
(argparse's multiple-values argument)
"""
text = text.lower()
nargs_str = 'variable'
is_nargs = nargs_str in text
arg_type = None
if 'str' in text:
arg_type = str
if 'int' in text:
arg_type = int
if 'float' in text:
arg_type = float
if 'bool' in text:
arg_type = bool
if 'tuple' in text:
arg_type = tuple
return arg_type, is_nargs
def get_flow_args(self, args=None, namespace=None):
""" Returns the parsed arguments as a dictionary that will be used
as a workflow's run method arguments.
"""
ns_args = self.parse_args(args, namespace)
dct = vars(ns_args)
return dict((k, v) for k, v in dct.items() if v is not None)
def update_argument(self, *args, **kargs):
self.add_argument(*args, **kargs)
def show_argument(self, dest):
for act in self._actions[1:]:
if act.dest == dest:
print(act)
def add_epilogue(self):
pass
def add_description(self):
pass
@property
def output_parameters(self):
return self._output_params
@property
def positional_parameters(self):
return self._positional_params
@property
def optional_parameters(self):
return self._optional_params
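# Illustrative note (not part of the original module): _select_dtype keys off keywords
# in the numpydoc type string, so hypothetical doc lines map as follows:
#     "int, optional"    -> (int, False)
#     "variable string"  -> (str, True)   # 'variable' is what enables nargs handling
#     "bool, optional"   -> (bool, False) # booleans with a default become store_true flags
# add_workflow then combines this with the run() signature to build the CLI arguments.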
|
|
import csv
import os
from json import loads
from os.path import exists
from old.project import CassandraInsert
from old.project import CassandraUtils
cassandra = CassandraUtils()
QTD_STS_KEY = 'quotedStatus'
RTD_STS_KEY = 'retweetedStatus'
MT_STS_KEY = 'userMentionEntities'
def check_mention_entities(tweet):
return RTD_STS_KEY not in tweet and MT_STS_KEY in tweet and len(tweet[MT_STS_KEY]) > 0
def remove_retweets(tts_rows):
tts = map(lambda tt_row: loads(tt_row.tweet), tts_rows)
return filter(lambda tt: QTD_STS_KEY not in tt and RTD_STS_KEY not in tt, tts)
def find_retweets(tts_rows):
tts = map(lambda tt_row: loads(tt_row.tweet), tts_rows)
rts = []
rts.extend(map(lambda tt: tt[RTD_STS_KEY], filter(lambda tt: RTD_STS_KEY in tt, tts)))
rts.extend(map(lambda tt: tt[QTD_STS_KEY], filter(lambda tt: QTD_STS_KEY in tt, tts)))
return rts
def find_mentions(tts_rows):
tts = map(lambda tt_row: loads(tt_row.tweet), tts_rows)
return filter(lambda tt: check_mention_entities(tt), tts)
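# Illustrative sketch (not part of the original script): the helpers above expect
# Cassandra rows whose `tweet` column holds a status serialized as JSON; a namedtuple
# stands in for such a row here. Defined only as documentation; never called.
def _example_filter_helpers():
    from collections import namedtuple
    from json import dumps
    Row = namedtuple('Row', ['tweet'])
    plain = Row(dumps({'id': 1, 'user': {'id': 10}}))
    retweet = Row(dumps({'id': 2, 'user': {'id': 11},
                         RTD_STS_KEY: {'id': 3, 'user': {'id': 12}}}))
    rows = [plain, retweet]
    # remove_retweets keeps only the plain status; find_retweets extracts the
    # embedded retweeted/quoted statuses.
    return remove_retweets(rows), find_retweets(rows)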
def mount_path(dirfile, user_id):
return '/home/joao/Dev/Data/Twitter/' + dirfile + str(user_id) + '.csv'
def check_file(path):
return exists(path)
def save_friends(user_id, friends_rows):
path = mount_path('friends/', user_id)
with open(path, 'w') as writer:
friends_ids = map(lambda friend: friend.friend_id, friends_rows)
for friend_id in friends_ids:
writer.write(str(friend_id) + '\n')
writer.flush()
writer.close()
def delete_file(path):
if os.stat(path).st_size == 0:
os.remove(path)
def friends2file():
print "Saving Friends..."
seeds = cassandra.find_seeds()
c = 0
for seeds_row in seeds:
user_id = seeds_row.user_id
friends_rows = cassandra.find_friends(user_id=user_id)
ci = CassandraInsert()
for friend_row in friends_rows:
ci.insert_friendship({'user_id': user_id, 'friend_id': friend_row.friend_id})
c = c + 1
print 'Users: ', c
print "Friends Saved..."
def save_likes(user_id, likes_rows):
lks_tts = map(lambda row: loads(row.tweet), likes_rows)
with open(mount_path('likes/', user_id), 'w') as csvfile:
fieldnames = ['alter_id', 'tweet_id']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for tt in lks_tts:
writer.writerow({
'alter_id': tt['user']['id'],
'tweet_id': tt['id']
})
def likes2file():
print "Saving likes..."
seeds = cassandra.find_seeds()
c = 0
for seeds_row in seeds:
user_id = seeds_row.user_id
ci = CassandraInsert()
likes = map(lambda lk: loads(lk.tweet), cassandra.find_likes(user_id=user_id))
for like in likes:
lk = {
'user_id': user_id,
'tweet_id': like['id'],
'alter_id': like['user']['id']}
ci.insert_lk_interaction(lk)
tt = {
'tweet_id': like['id'],
'date': like['createdAt'],
'lang': like['lang'],
'text': like['text'],
'user_id': like['user']['id']}
ci.insert_tweet(tt)
usr = {
'id': like['user']['id'],
'flw_count': like['user']['followersCount'],
'frd_count': like['user']['friendsCount'],
'is_protected': like['user']['isProtected'],
'is_verified': like['user']['isVerified'],
'lk_count': like['user']['favouritesCount'],
'lang': like['user']['lang'],
'tt_count': like['user']['statusesCount']}
ci.insert_user(usr)
c = c + 1
print 'Users: ', c
print "Likes saved"
def save_retweets(ci, user_id, tweets_rows):
retweets = find_retweets(tweets_rows)
for tt in retweets:
rt = {
'user_id': user_id,
'tweet_id': tt['id'],
'alter_id': tt['user']['id']}
ci.insert_rt_interaction(rt)
_tt = {
'tweet_id': tt['id'],
'date': tt['createdAt'],
'lang': tt['lang'],
'text': tt['text'],
'user_id': tt['user']['id']}
ci.insert_tweet(_tt)
usr = {
'id': tt['user']['id'],
'flw_count': tt['user']['followersCount'],
'frd_count': tt['user']['friendsCount'],
'is_protected': tt['user']['isProtected'],
'is_verified': tt['user']['isVerified'],
'lk_count': tt['user']['favouritesCount'],
'lang': tt['user']['lang'],
'tt_count': tt['user']['statusesCount']}
ci.insert_user(usr)
def save_mentions(ci, user_id, tweets_rows):
tweets = find_mentions(tweets_rows)
for tt in tweets:
for me in tt[MT_STS_KEY]:
mt = {
'user_id': user_id,
'tweet_id': tt['id'],
'alter_id': me['id']}
ci.insert_mt_interaction(mt)
_tt = {
'tweet_id': tt['id'],
'date': tt['createdAt'],
'lang': tt['lang'],
'text': tt['text'],
'user_id': tt['user']['id']}
ci.insert_tweet(_tt)
usr = {
'id': tt['user']['id'],
'flw_count': tt['user']['followersCount'],
'frd_count': tt['user']['friendsCount'],
'is_protected': tt['user']['isProtected'],
'is_verified': tt['user']['isVerified'],
'lk_count': tt['user']['favouritesCount'],
'lang': tt['user']['lang'],
'tt_count': tt['user']['statusesCount']}
ci.insert_user(usr)
def save_tweets(ci, user_id, tweets_rows):
save_retweets(ci, user_id, tweets_rows)
save_mentions(ci, user_id, tweets_rows)
tweets = remove_retweets(tweets_rows)
for tt in tweets:
_tt = {
'tweet_id': tt['id'],
'date': tt['createdAt'],
'lang': tt['lang'],
'text': tt['text'],
'user_id': tt['user']['id']}
ci.insert_tweet(_tt)
usr = {
'id': tt['user']['id'],
'flw_count': tt['user']['followersCount'],
'frd_count': tt['user']['friendsCount'],
'is_protected': tt['user']['isProtected'],
'is_verified': tt['user']['isVerified'],
'lk_count': tt['user']['favouritesCount'],
'lang': tt['user']['lang'],
'tt_count': tt['user']['statusesCount']}
ci.insert_user(usr)
def tweets2file():
print "Saving Tweets..."
seeds = cassandra.find_seeds()
ci = CassandraInsert()
c = 0
for seeds_row in seeds:
user_id = seeds_row.user_id
tweets_rows = cassandra.find_tweets(user_id=user_id)
save_tweets(ci=ci, user_id=user_id, tweets_rows=tweets_rows)
c = c + 1
print 'Users: ', c
print "Tweets saved"
|
|
"""
Test suite for Cython utilities.
"""
import numpy as np
def interp_conserve(x, xp, fp, left=0., right=0.):
"""
Interpolate the `xp`,`fp` arrays to the output `x` array, conserving flux.
`xp` can be irregularly spaced.
"""
midpoint = (x[1:]-x[:-1])/2.+x[:-1]
midpoint = np.append(midpoint, np.array([x[0],x[-1]]))
midpoint = midpoint[np.argsort(midpoint)]
int_midpoint = np.interp(midpoint, xp, fp, left=left, right=right)
int_midpoint[midpoint > xp.max()] = 0.
int_midpoint[midpoint < xp.min()] = 0.
fullx = np.append(xp, midpoint)
fully = np.append(fp, int_midpoint)
so = np.argsort(fullx)
fullx, fully = fullx[so], fully[so]
outy = x*0.
dx = midpoint[1:]-midpoint[:-1]
for i in range(len(x)):
in_bin = (fullx >= midpoint[i]) & (fullx <= midpoint[i+1])
outy[i] = np.trapz(fully[in_bin], fullx[in_bin])/dx[i]
return outy
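# Minimal self-contained check (illustrative, not part of the original suite):
# rebinning a smooth spectrum onto a coarser grid with interp_conserve should
# roughly preserve the integrated flux over the overlapping range.
def _example_interp_conserve():
    xp = np.linspace(0., 10., 1001)
    fp = np.sin(xp) + 2.
    x = np.linspace(0.5, 9.5, 51)
    fint = interp_conserve(x, xp, fp)
    in_range = (xp >= x[0]) & (xp <= x[-1])
    print('rebinned: %.4f  input: %.4f' % (np.trapz(fint, x),
                                           np.trapz(fp[in_range], xp[in_range])))
    return fint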
def test_nmf():
import interp_c
import numpy as np
import time
import matplotlib.pyplot as plt
x = np.arange(0,16,0.5)*np.pi*2
NFILT = len(x)
NTEMP = 7
coeffs = np.random.random(NTEMP)
y = x*0.
templates = np.zeros((NTEMP, NFILT))
norms = np.zeros(NTEMP)
for i in range(0,NTEMP):
templates[i,:] = np.sin(x/((i+1)*1.6))+1.1
norms[i] = np.trapz(templates[i,:], x)
y = np.dot(coeffs.reshape((1,NTEMP)), templates).flatten()
err = y*0.+np.median(y)/10.
yerr = y+np.random.normal(size=NFILT)*err
t0 = time.time()
amatrix = interp_c.prepare_nmf_amatrix(err**2, templates)
t1 = time.time()
coeffs_fit = interp_c.run_nmf(yerr, err**2, templates, amatrix, toler=1.e-5)
t2 = time.time()
print 'Prepare: %.4f' %(t1-t0)
print 'Fit : %.4f' %(t2-t1)
yfit = np.dot(coeffs_fit.reshape((1,-1)), templates).flatten()
fig = plt.figure()
ax = fig.add_subplot(311)
ax.plot(x, y/np.median(y), color='blue')
ax.errorbar(x, yerr/np.median(y), err/np.median(y), color='blue', marker='o', linestyle='None')
ax.plot(x, yfit/np.median(y), color='red')
ax.set_ylabel('"Flux"')
ax = fig.add_subplot(312)
ax.plot(x, y-yfit, color='blue')
ax.errorbar(x, yerr-yfit, err, color='blue', marker='o', linestyle='None')
ax.plot(x, yfit-yfit, color='red')
ax.set_ylabel(r'$\Delta$(obs-fit)')
chi2 = np.sum((yerr-yfit)**2/err**2)/(len(x)-1)
ax.text(0.1,0.8,r'$\chi^2_\nu$=%.3f' %(chi2), transform=ax.transAxes)
ax = fig.add_subplot(313)
ax.plot(np.log10(coeffs/coeffs_fit), color='orange')
ax.set_ylabel(r'$\log$($\Delta$coeff)')
#### Is sum of normalizations conserved?
norm_input = np.sum(norms*coeffs)
norm_fit = np.sum(norms*coeffs_fit)
int_fit = np.trapz(yfit, x)
print 'Norm_in: %.2f, Norm_fit: %.2f, trapz_fit: %.2f' %(norm_input, norm_fit, int_fit)
fig.savefig('/tmp/nmf.png')
def test():
import interp_c
import time
import scipy
import threedhst
import numpy as np
N = int(1.e6)
xfull = np.arange(0,N+1,1)*1.
#yfull = np.sin(xfull/(N/1239.)*2*np.pi)+1
yfull = np.sin(xfull/np.pi/2/20)+0.2
# coeffs = np.random.random(size=12)*5
# yfull = scipy.polyval(coeffs, xfull)
xint = np.arange(0,N+1,N/100)*1.
tstart = time.time()
denom = np.trapz(yfull,xfull)
tstart = time.time()
yint_0 = np.interp(xint, xfull, yfull)
t0 = time.time()
print 'Linear : %.3f (%.4e)' %(t0-tstart, np.trapz(yint_0, xint)/denom-1)
yint_x = interp_c.interp_c(xint, xfull, yfull)
tx = time.time()
print 'Linear(c) : %.3f (%.4e)' %(tx-t0, np.trapz(yint_x, xint)/denom-1)
xreverse = xint[::-1]
yint_y = interp_c.interp_c(xreverse, xfull, yfull, assume_sorted=0)
ty = time.time()
print 'Linear(c) rev : %.3f (%.4e)' %(ty-tx, np.trapz(yint_y, xint)/denom-1)
yint_1 = threedhst.utils.interp_conserve(xint, xfull, yfull)
t1 = time.time()
print 'Conserve : %.3f (%.4e)' %(t1-ty, np.trapz(yint_1, xint)/denom-1)
yint_2 = interp_c.interp_conserve(xint, xfull, yfull)
t2 = time.time()
print 'Conserve (Cython): %.3f (%.4e)' %(t2-t1, np.trapz(yint_2, xint)/denom-1)
yint_3 = interp_c.interp_conserve_c(xint, xfull, yfull)
t3 = time.time()
print 'Conserve (more c): %.3f (%.4e)' %(t3-t2, np.trapz(yint_3, xint)/denom-1)
yint_4 = threedhst.utils.interp_conserve_c(xint, xfull, yfull)
t4 = time.time()
print 'Inline c : %.3f (%.4e)' %(t4-t3, np.trapz(yint_4, xint)/denom-1)
#### Test interpolation
threedhst.showMessage('Interpolation')
#### Faster while n(int)/n(full) < 1./50
xint = xfull[1000:-1000:40]
tstart = time.time()
yint = np.interp(xint, xfull, yfull, left=0., right=0.)
t0 = time.time()
print 'Python : %.4f' %(t0-tstart)
yint1 = interp_c.interp_c(xint, xfull, yfull, extrapolate=0.)
t1 = time.time()
print 'Cython rewrite : %.4f (%.2e)' %(t1-t0, np.sum((yint1-yint)**2))
#### Test midpoint definition --- slices work better than by hand
threedhst.showMessage('Midpoint')
xmid = xfull
tstart = time.time()
midpoint = (xmid[1:]+xmid[:-1])/2.
midpoint = np.append(midpoint, np.array([xmid[0],xmid[-1]]))
midpoint = midpoint[np.argsort(midpoint)]
t0 = time.time()
print 'Python : %.3f %.2e' %(t0-tstart, np.sum((midpoint-midpoint)**2))
midpoint_c1 = interp_c.midpoint(xmid)
t1 = time.time()
print 'Cython : %.3f %.2e' %(t1-t0, np.sum((midpoint_c1-midpoint)**2))
midpoint_c2 = interp_c.midpoint_c(xmid, N+1)
t2 = time.time()
print 'Cython (opt): %.3f %.2e' %(t2-t1, np.sum((midpoint_c2-midpoint)**2))
# Compare cython to numba
#@autojit
def interpolate_tempfilt(tempfilt, zgrid, zi, output):
"""
interpolate_tempfilt(tempfilt, zgrid, zi, output)
Linear interpolate an Eazy "tempfilt" grid at z=zi.
`tempfilt` is [NFILT, NTEMP, NZ] integrated flux matrix
`zgrid` is [NZ] redshift grid
Result is stored in the input variable `output`, which needs shape [NFILT, NTEMP]
"""
#cdef unsigned long NT, NF, NZ, itemp, ifilt, iz
#cdef double dz, fint, fint2
#cdef extern from "math.h":
# double fabs(double)
sh = tempfilt.shape
NF, NT, NZ = sh[0], sh[1], sh[2]
#### Output array
#cdef np.ndarray[DTYPE_t, ndim=2] tempfilt_interp = np.zeros((NF, NT), dtype=DTYPE)
for iz in range(NZ-1):
dz = zgrid[iz+1]-zgrid[iz]
fint = 1 - (zi-zgrid[iz])/dz
if (fint > 0) & (fint <= 1):
fint2 = 1 - (zgrid[iz+1]-zi)/dz
# print iz, zgrid[iz], fint, fint2
for ifilt in range(NF):
for itemp in range(NT):
#print ifilt, itemp
output[ifilt, itemp] = tempfilt[ifilt, itemp, iz]*fint + tempfilt[ifilt, itemp, iz+1]*fint2
#
break
return output
#
from numba import double, jit
fast_interpolate_tempfilt = jit(double[:,:](double[:,:,:], double[:], double, double[:,:]))(interpolate_tempfilt)
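# Illustrative usage sketch (not from the original file): interpolate a small random
# "tempfilt" grid at a redshift between two grid points; shapes follow the docstring
# above ([NFILT, NTEMP, NZ] grid, [NZ] redshift grid, [NFILT, NTEMP] output).
def _example_interpolate_tempfilt():
    NFILT, NTEMP, NZ = 3, 2, 5
    tempfilt = np.random.random((NFILT, NTEMP, NZ))
    zgrid = np.linspace(0., 4., NZ)
    output = np.zeros((NFILT, NTEMP))
    interpolate_tempfilt(tempfilt, zgrid, 1.3, output)
    return output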
if __name__ == "__main__":
test()
|
|
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
if sys.version_info[0] >= 3:
xrange = range
__all__ = ['libsvm', 'svm_problem', 'svm_parameter',
'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 'C_SVC',
'EPSILON_SVR', 'LINEAR', 'NU_SVC', 'NU_SVR', 'ONE_CLASS',
'POLY', 'PRECOMPUTED', 'PRINT_STRING_FUN', 'RBF',
'SIGMOID', 'c_double', 'svm_model']
try:
dirname = path.dirname(path.abspath(__file__))
if sys.platform == 'win32':
libsvm = CDLL(path.join(dirname, r'..\windows\libsvm.dll'))
else:
libsvm = CDLL(path.join(dirname, './libsvm.so.2'))
except:
# For unix the prefix 'lib' is not considered.
if find_library('svm'):
libsvm = CDLL(find_library('svm'))
elif find_library('libsvm'):
libsvm = CDLL(find_library('libsvm'))
else:
raise Exception('LIBSVM library not found.')
C_SVC = 0
NU_SVC = 1
ONE_CLASS = 2
EPSILON_SVR = 3
NU_SVR = 4
LINEAR = 0
POLY = 1
RBF = 2
SIGMOID = 3
PRECOMPUTED = 4
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class svm_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def __str__(self):
return '%d:%g' % (self.index, self.value)
def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
if not isKernel:
xi = [0] + xi # idx should start from 1
index_range = range(len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if not isKernel:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (svm_node * (len(index_range)+1))()
ret[-1].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range:
max_idx = index_range[-1]
return ret, max_idx
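# Illustrative sketch (not part of the original wrapper): build a node array from a
# sparse feature dict and walk it back out; the index -1 entry terminates the C array.
def _example_gen_svm_nodearray():
    nodes, max_idx = gen_svm_nodearray({1: 0.5, 3: -1.25})
    out = []
    i = 0
    while nodes[i].index != -1:
        out.append(str(nodes[i]))
        i += 1
    return out, max_idx  # expected: (['1:0.5', '3:-1.25'], 3)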
class svm_problem(Structure):
_names = ["l", "y", "x"]
_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, isKernel=None):
if len(y) != len(x):
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_svm_nodearray(xi,isKernel=isKernel)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = yi
self.x = (POINTER(svm_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
class svm_parameter(Structure):
_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
"nu", "p", "shrinking", "probability"]
_types = [c_int, c_int, c_int, c_double, c_double,
c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
c_double, c_double, c_int, c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def __str__(self):
s = ''
attrs = svm_parameter._names + list(self.__dict__.keys())
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
s += (' %s: %s\n' % (attr, val))
s = s.strip()
return s
def set_to_default_values(self):
self.svm_type = C_SVC
self.kernel_type = RBF
self.degree = 3
self.gamma = 0
self.coef0 = 0
self.nu = 0.5
self.cache_size = 100
self.C = 1
self.eps = 0.001
self.p = 0.1
self.shrinking = 1
self.probability = 0
self.nr_weight = 0
self.weight_label = None
self.weight = None
self.cross_validation = False
self.nr_fold = 0
self.print_func = cast(None, PRINT_STRING_FUN)
def parse_options(self, options):
if isinstance(options, list):
argv = options
elif isinstance(options, str):
argv = options.split()
else:
raise TypeError("arg 1 should be a list or a str.")
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv):
if argv[i] == "-s":
i = i + 1
self.svm_type = int(argv[i])
elif argv[i] == "-t":
i = i + 1
self.kernel_type = int(argv[i])
elif argv[i] == "-d":
i = i + 1
self.degree = int(argv[i])
elif argv[i] == "-g":
i = i + 1
self.gamma = float(argv[i])
elif argv[i] == "-r":
i = i + 1
self.coef0 = float(argv[i])
elif argv[i] == "-n":
i = i + 1
self.nu = float(argv[i])
elif argv[i] == "-m":
i = i + 1
self.cache_size = float(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-h":
i = i + 1
self.shrinking = int(argv[i])
elif argv[i] == "-b":
i = i + 1
self.probability = int(argv[i])
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2:
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
else:
raise ValueError("Wrong options")
i += 1
libsvm.svm_set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
class svm_model(Structure):
_names = ['param', 'nr_class', 'l', 'SV', 'sv_coef', 'rho',
'probA', 'probB', 'sv_indices', 'label', 'nSV', 'free_sv']
_types = [svm_parameter, c_int, c_int, POINTER(POINTER(svm_node)),
POINTER(POINTER(c_double)), POINTER(c_double),
POINTER(c_double), POINTER(c_double), POINTER(c_int),
POINTER(c_int), POINTER(c_int), c_int]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
libsvm.svm_free_and_destroy_model(pointer(self))
def get_svm_type(self):
return libsvm.svm_get_svm_type(self)
def get_nr_class(self):
return libsvm.svm_get_nr_class(self)
def get_svr_probability(self):
return libsvm.svm_get_svr_probability(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
libsvm.svm_get_labels(self, labels)
return labels[:nr_class]
def get_sv_indices(self):
total_sv = self.get_nr_sv()
sv_indices = (c_int * total_sv)()
libsvm.svm_get_sv_indices(self, sv_indices)
return sv_indices[:total_sv]
def get_nr_sv(self):
return libsvm.svm_get_nr_sv(self)
def is_probability_model(self):
return (libsvm.svm_check_probability_model(self) == 1)
def get_sv_coef(self):
return [tuple(self.sv_coef[j][i] for j in xrange(self.nr_class - 1))
for i in xrange(self.l)]
def get_SV(self):
result = []
for sparse_sv in self.SV[:self.l]:
row = dict()
i = 0
while True:
row[sparse_sv[i].index] = sparse_sv[i].value
if sparse_sv[i].index == -1:
break
i += 1
result.append(row)
return result
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_sv_indices, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_nr_sv, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN])
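# Minimal end-to-end sketch (illustrative, not part of the original wrapper), assuming
# the shared library was successfully loaded above: train a linear C-SVC on two points
# and predict the label of one of them. Defined only as documentation; never called.
def _example_train_and_predict():
    y = [1.0, -1.0]
    x = [{1: 1.0, 2: 1.0}, {1: -1.0, 2: -1.0}]
    prob = svm_problem(y, x)
    param = svm_parameter('-s 0 -t 0 -c 1 -q')
    model = toPyModel(libsvm.svm_train(prob, param))
    nodes, _ = gen_svm_nodearray({1: 1.0, 2: 1.0})
    label = libsvm.svm_predict(model, nodes)  # returns a Python float
    return label  # expected: 1.0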
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.layers.preprocessing import preprocessing_test_utils
from keras.layers.preprocessing import string_lookup
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name": "test_strings_soft_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
},
"expected_output": [[1], [2], [3], [4], [4], [3], [1], [0]],
"input_dtype":
tf.string
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class StringLookupLayerTest(test_combinations.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
use_dataset, expected_output,
input_dtype):
cls = string_lookup.StringLookup
expected_output_dtype = tf.int64
input_shape = input_data.shape
if use_dataset:
# Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# StringLookup), the concatenation fails. In real use cases, this may
# not be an issue because users are likely to pipe the preprocessing layer
# into other keras layers instead of predicting it directly. A workaround
# for these unit tests is to have the dataset only contain one batch, so
# no concatenation needs to happen with the result. For consistency with
# numpy input, we should make `predict` join differently shaped results
# together sensibly, with 0 padding.
input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
input_shape[0])
vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0])
output_data = test_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=input_dtype,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data)
self.assertAllClose(expected_output, output_data)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class StringLookupVocabularyTest(test_combinations.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_int_output_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_with_special_tokens(self):
vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data, mask_token="")
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_no_oov(self):
vocab_data = ["earth", "wind", "and", "fire"]
valid_input = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", ""]])
invalid_input = np.array([["earth", "wind", "and", "michigan"],
["fire", "and", "earth", "michigan"]])
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, mask_token="", num_oov_indices=0)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(valid_input)
self.assertAllEqual(expected_output, output_data)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
"found OOV values.*michigan"):
_ = model.predict(invalid_input)
def test_no_vocab(self):
with self.assertRaisesRegex(RuntimeError,
"you must set the layer's vocabulary"):
layer = string_lookup.StringLookup(output_mode="binary")
layer([["a"]])
def test_one_hot_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth", "wind", "and", "fire", "michigan"])
expected_output = [
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="one_hot")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[0, 1, 1, 1, 1], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="multi_hot")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "earth", "fire", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[0, 2, 0, 0, 2], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="count")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_sparse_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="multi_hot", sparse=True)
res = layer(input_data)
self.assertEqual(res.__class__.__name__, "SparseKerasTensor")
def test_get_vocab_returns_str(self):
vocab_data = ["earth", "wind", "and", "fire"]
expected_vocab = ["[UNK]", "earth", "wind", "and", "fire"]
layer = string_lookup.StringLookup(vocabulary=vocab_data)
layer_vocab = layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], str)
inverse_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True)
layer_vocab = inverse_layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], str)
def test_int_output_explicit_vocab_from_file(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_from_file_via_setter(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup()
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_non_unique_vocab_fails(self):
vocab_data = ["earth", "wind", "and", "fire", "fire"]
with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
_ = string_lookup.StringLookup(vocabulary=vocab_data)
def test_non_unique_vocab_from_file_fails(self):
vocab_list = ["earth", "wind", "and", "fire", "earth"]
vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
with self.assertRaisesRegex(
tf.errors.FailedPreconditionError,
"HashTable has different value for same key.*earth"):
_ = string_lookup.StringLookup(vocabulary=vocab_path)
def test_inverse_layer(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", ""]])
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, invert=True, mask_token="")
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_inverse_layer_from_file(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 0]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = string_lookup.StringLookup(vocabulary=vocab_path, invert=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_inverse_layer_from_file_with_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[M]"]])
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = string_lookup.StringLookup(
vocabulary=vocab_path, invert=True, mask_token="[M]")
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
invert_layer = string_lookup.StringLookup(
vocabulary=vocab_data, invert=True)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_adapted_vocab(self):
adapt_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup()
layer.adapt(adapt_data)
invert_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_ragged_string_input_multi_bucket(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.ragged.constant([["earth", "wind", "fire"],
["fire", "and", "earth", "ohio"]])
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
layer = string_lookup.StringLookup(num_oov_indices=2)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_tensor_vocab(self):
vocab_data = ["[UNK]", "wind", "and", "fire"]
vocab_tensor = tf.constant(vocab_data)
layer = string_lookup.StringLookup(vocabulary=vocab_tensor)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
self.assertAllEqual(layer.vocabulary_size(), 4)
fn = tf.function(lambda: layer.set_vocabulary(vocab_tensor))
with self.assertRaisesRegex(RuntimeError, "Cannot set a tensor vocabulary"):
fn()
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/env python
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import glob
import json
import os
import signal
import struct
import subprocess
import sys
import tempfile
import termios
import time
import yaml
from pexpect import pxssh
from requests_toolbelt import MultipartEncoderMonitor
try:
# Python 3
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
import requests
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
def find_credentials(target):
if not target.endswith('.yml'):
target += '.yml'
if '/' in target:
return target
dir = get_credential_dir(update=True)
return os.path.join(dir, target)
def find_credential_dir():
dirname = 'pie-credentials'
parent = '.'
while not os.path.samefile(parent, '/'):
candidate = os.path.join(parent, dirname)
if os.path.exists(candidate):
return candidate
pattern = os.path.join(parent, '*', dirname)
matches = glob.glob(pattern)
if len(matches) > 0:
return matches[0]
parent = os.path.join('..', parent)
raise Exception('Did not find a target repository named ' + dirname)
def get_credential_dir(update=False):
dir = find_credential_dir()
if update:
devnull = open(os.devnull,"w")
subprocess.call(['git', 'pull'], cwd=dir, stdout=devnull, stderr=devnull)
return dir
def is_poolsmiths_env(creds):
return 'ops_manager' in creds
def get_credentials(target=None, non_interactive=False):
if get_credentials.credentials is not None:
return get_credentials.credentials
ssh_key = None
if target is not None:
credential_dir = get_credential_dir(update=(not non_interactive))
credential_file = os.path.join(credential_dir, target + '.yml')
private_key_file = os.path.join(credential_dir, target + '.opsman_rsa')
if os.path.isfile(private_key_file):
with open(private_key_file, 'rb') as keyfile:
ssh_key = keyfile.read()
else:
# This default handles the case where we are executing from within a
# concourse pool-resource repository, where the claimed PCF instance
# metadata is available in a file named './metadata'
credential_file = 'metadata'
try:
with open(credential_file) as cred_file:
creds = yaml.safe_load(cred_file)
if is_poolsmiths_env(creds):
creds['opsmgr'] = creds['ops_manager']
creds['opsmgr']['ssh_key'] = creds['ops_manager_private_key']
# Touch the required keys so a missing one raises KeyError and is reported below.
creds['opsmgr']
creds['opsmgr']['url']
creds['opsmgr']['username']
creds['opsmgr']['password']
creds['opsmgr']['ssh_key'] = creds['opsmgr'].get('ssh_key', ssh_key)
get_credentials.credentials = creds
except KeyError as e:
raise Exception('Credential file is missing a value: ' + str(e))
except IOError as e:
if target is not None:
raise Exception('No target named {} found in {}'.format(target, credential_dir))
else:
raise Exception(
'You must either specify a target using the --target option,\n'
'or execute this command from within a directory that has credentials\n'
'in a file named "metadata" (like a claimed Concourse pool resource)\n'
)
return creds
get_credentials.credentials = None
def set_credentials(credentials):
get_credentials.credentials = credentials
class auth(requests.auth.AuthBase):
def __init__(self, creds):
self.creds = creds
def __call__(self, request):
url = self.creds.get('opsmgr').get('url') + '/uaa/oauth/token'
username = self.creds.get('opsmgr').get('username')
password = self.creds.get('opsmgr').get('password')
headers = { 'Accept': 'application/json' }
data = {
'grant_type': 'password',
'client_id': 'opsman',
'client_secret': '',
'username': username,
'password': password,
'response_type': 'token',
}
response = requests.post(url, data=data, verify=False, headers=headers)
if response.status_code != requests.codes.ok:
return requests.auth.HTTPBasicAuth(username, password)(request)
response = response.json()
access_token = response.get('access_token')
token_type = response.get('token_type')
request.headers['Authorization'] = token_type + ' ' + access_token
return request
def get(url, stream=False, check=True):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
headers = { 'Accept': 'application/json' }
response = requests.get(url, auth=auth(creds), verify=False, headers=headers, stream=stream)
check_response(response, check=check)
return response
def put(url, payload, check=True):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
response = requests.put(url, auth=auth(creds), verify=False, data=payload)
check_response(response, check=check)
return response
def put_json(url, payload):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
response = requests.put(url, auth=auth(creds), verify=False, json=payload)
check_response(response)
return response
def post(url, payload, files=None, check=True):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
response = requests.post(url, auth=auth(creds), verify=False, data=payload, files=files)
check_response(response, check)
return response
def post_yaml(url, filename, payload):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
files = { filename: yaml.safe_dump(payload) }
response = requests.post(url, auth=auth(creds), verify=False, files=files)
check_response(response)
return response
class ProgressBar:
def __init__(self):
self.last_update = 0
self.update_every = 2 * 1024 * 1024
sys.stdout.write('0%')
sys.stdout.flush()
def update(self, monitor):
self.update_every = min(self.update_every, monitor.len / 30)
if monitor.bytes_read - self.last_update >= self.update_every:
sys.stdout.write('.')
old_percent = float(self.last_update) / monitor.len
new_percent = float(monitor.bytes_read) / monitor.len
for step in range(90, 0, -10):
step /= 100.0
if new_percent >= step > old_percent:
sys.stdout.write('{:.0%}'.format(step))
break
sys.stdout.flush()
self.last_update = monitor.bytes_read
def upload(url, filename, check=True):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
multipart = MultipartEncoderMonitor.from_fields(
fields={
'product[file]': ('product[file]', open(filename, 'rb'), 'application/octet-stream')
},
callback=ProgressBar().update
)
response = requests.post(url,
auth=auth(creds),
verify=False,
data=multipart,
headers={ 'Content-Type': multipart.content_type }
)
sys.stdout.write('.100%\n')
sys.stdout.flush()
if response.status_code == 422:
errors = response.json()["errors"]
try:
product = errors.get('product', [])
for reason in product:
if reason.startswith('Metadata already exists for'):
print('-','version already uploaded')
return response
except:
pass
check_response(response, check)
return response
def delete(url, check=True):
creds = get_credentials()
url = creds.get('opsmgr').get('url') + url
response = requests.delete(url, auth=auth(creds), verify=False)
check_response(response, check=check)
return response
def check_response(response, check=True):
if check and response.status_code != requests.codes.ok:
message = '- {} {}\n'.format(response.status_code, response.request.url)
try:
errors = response.json()["errors"]
for line in json.dumps(errors, indent=4).splitlines():
message += '- ' + line + '\n'
except:
message += response.text
raise Exception(message)
def ssh(command=None, login_to_bosh=True, quiet=False):
def print_if(message):
if not quiet: print(message)
# Note that the prompt matching uses regex
bosh2_username_prompt = r'Email \(\): '
bosh2_password_prompt = r'Password \(\): '
sudo_prompt = r'\[sudo\] password for .*:'
sudo_fail_prompt = 'Sorry, try again.'
prompt_wait_timeout = 3
creds = get_credentials()
url = creds.get('opsmgr').get('url')
host = urlparse(url).hostname
ssh_key = creds.get('opsmgr').get('ssh_key', None)
print_if('Attempting to connect to %s...' % host)
global session # Needs to be a global to be used in sigwinch_passthrough.
session = pxssh.pxssh(options={
"StrictHostKeyChecking": "no",
"UserKnownHostsFile": "/dev/null"})
if ssh_key is not None:
print_if('Logging in with a key file...')
with tempfile.NamedTemporaryFile('wb') as keyfile:
keyfile.write(ssh_key)
keyfile.flush()
session.login(host, username='ubuntu', ssh_key=keyfile.name, quiet=True)
else:
print_if('Logging in using a username and password...')
session.login(host, username='ubuntu',
password=creds.get('opsmgr').get('password'), quiet=True)
if login_to_bosh:
# Setup the env
print_if('Exporting needed bosh environment variables...')
director_creds = get('/api/v0/deployed/director/credentials/director_credentials').json()
director_manifest = get('/api/v0/deployed/director/manifest').json()
if 'jobs' in director_manifest: # PCF 2.2 and earlier
director_address = director_manifest['jobs'][0]['properties']['director']['address']
else: # PCF 2.3 and later
director_address = director_manifest['instance_groups'][0]['properties']['director']['address']
session.sendline('export BOSH_ENVIRONMENT="{}"'.format(director_address))
session.sendline('export BOSH_CA_CERT="/var/tempest/workspaces/default/root_ca_certificate"')
bosh2_username = director_creds['credential']['value']['identity']
print_if('Logging into bosh2 as %s...' % bosh2_username)
session.sendline('which bosh2 || alias bosh2=bosh') # In Ops Manager 2.0+, there is just bosh (which is v2).
session.sendline('bosh2 login')
session.expect(bosh2_username_prompt, timeout=prompt_wait_timeout)
session.send(bosh2_username)
session.sendcontrol('m') # For some reason bosh2 login requires to send enter manually
session.expect(bosh2_password_prompt, timeout=prompt_wait_timeout)
session.send(director_creds['credential']['value']['password'])
session.sendcontrol('m') # For some reason bosh2 login requires to send enter manually
if command:
session.sync_original_prompt()
print_if('Sending command: "%s"...' % command)
session.sendline(command)
# Try to be smart about sudo
if 'sudo' in command:
resp = session.expect([sudo_prompt, session.PROMPT], timeout=prompt_wait_timeout)
if resp == 0: # We got the sudo password prompt
print_if('A sudo password prompt was detected. Attempting to login...')
session.sendline(creds.get('opsmgr').get('password'))
resp = session.expect([sudo_fail_prompt, session.PROMPT], timeout=prompt_wait_timeout)
if resp == 0: # Password was wrong
raise Exception('UNAUTHORIZED: Password was incorrect.')
print(session.before.strip(command).strip())
else:
session.prompt(timeout=prompt_wait_timeout)
print(session.before.strip(command).strip())
else:
# Get us a native prompt
print_if('Sourcing .bashrc for a correct shell..')
session.sendline('source .bashrc')
# This is the recommended way to keep parent window resizes in sync with the child
# http://pexpect.sourceforge.net/pxssh.html
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
session.setwinsize(a[0],a[1])
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
os.kill(os.getpid(), signal.SIGWINCH) # Set initial window size.
# Hand the shell off and make it interactive
session.interact()
def get_products():
available_products = get('/api/products').json()
installed_products = get('/api/installation_settings').json()['products']
products = [{
'guid': p['guid'],
'name': p['identifier'],
'product_version': p['product_version'],
'installed': True,
} for p in installed_products ]
for p in available_products:
installed = [ i for i in products if p['name'] == i['name'] and p['product_version'] == i['product_version'] ]
if len(installed) == 0:
p['installed'] = False
products += [ p ]
return products
def get_version():
# 1.7 and 1.8 have version in the diagnostic report.
response = get('/api/v0/diagnostic_report', check=False)
if response.status_code == requests.codes.ok:
diag = response.json()
version = diag['versions']['release_version']
version = version.split('-')[0]
return [ int(x) for x in version.split('.') ]
raise Exception('Error: could not determine Ops Manager version.')
def get_job_guid(job_identifier, jobs_settings):
for job in jobs_settings:
if job.get('identifier', None) == job_identifier:
return job['guid']
print('Warning: Could not find job with identifier', job_identifier, file=sys.stderr)
return None
def configure(product, properties, strict=False, skip_validation=False, network=None):
settings = get('/api/installation_settings').json()
infrastructure = settings['infrastructure']
product_settings = [ p for p in settings['products'] if p['identifier'] == product ]
if len(product_settings) < 1:
raise Exception('Product {} does not appear to be installed'.format(product))
product_settings = product_settings[0]
properties = properties if properties is not None else {}
#
# Make sure Elastic Runtime tile is installed
#
cf = [ p for p in settings['products'] if p['identifier'] == 'cf' ]
if len(cf) < 1:
raise Exception('Required product Elastic Runtime is missing')
#
# Use the Elastic Runtime stemcell (unless the --strict option was used)
#
if not strict:
if 'stemcell' in cf[0]:
stemcell = cf[0]['stemcell']
elif 'stemcells' in cf[0]:
stemcell = cf[0]['stemcells'][0]
else:
raise Exception("Cannot find cf stemcell to use")
print('- Using stemcell', stemcell['name'], 'version', stemcell['version'])
product_settings['stemcell'] = stemcell
post_yaml('/api/installation_settings', 'installation[file]', settings)
#
# Use the first availability zone (skip this for Azure, which doesn't use them)
#
availability_zones = infrastructure.get('availability_zones', [])
if 'availability_zones' in infrastructure:
product_settings['availability_zone_references'] = [ az['guid'] for az in availability_zones ]
product_settings['singleton_availability_zone_reference'] = availability_zones[0]['guid']
#
# Insert supplied properties
#
jobs_properties = properties.pop('jobs', {})
missing_properties = []
for job in product_settings.get('jobs', []):
job_properties = jobs_properties.get(job['identifier'], {})
for job_property in job.get('properties', []):
property_name = job_property['identifier']
if property_name == 'app_credentials':
# app_credentials are generated in opsmgr; skip.
continue
if property_name in job_properties:
job_property['value'] = job_properties[property_name]
else:
if job_property.get('value', None) is None:
missing_properties.append('.'.join(('jobs', job['identifier'], property_name)))
for p in product_settings.get('properties', []):
key = p['identifier']
value = properties.get(key, None)
if value is not None:
p['value'] = value
else:
if p.get('value', None) is None:
missing_properties += [ key ]
if not skip_validation and len(missing_properties) > 0:
message = 'Input file is missing required properties:\n'
message += '- ' + '\n- '.join(missing_properties)
raise Exception(message)
#
# Normalize az properties
#
for az in availability_zones:
if az.get('name', None) is None:
az['name'] = az['iaas_identifier']
#
# Default networks if not provided (preserves prior behavior)
#
if network is None:
network = infrastructure['networks'][0]['name']
service_network = ([ n for n in infrastructure['networks'] if n.get('service_network', False) ] + [ None ])[0]
if service_network is not None:
service_network = service_network['name']
#
# Update using the appropriate API for the Ops Manager version
#
version = get_version()
if version[0] > 1 or (version[0] == 1 and version[1] >= 8):
url = '/api/v0/staged/products/' + product_settings['guid']
if 'availability_zones' in infrastructure:
networks_and_azs = {
'networks_and_azs': {
'singleton_availability_zone': { 'name': availability_zones[0]['name'] },
'other_availability_zones': [ { 'name': az['name'] } for az in availability_zones ],
'network': { 'name': network },
}
}
if service_network is not None:
networks_and_azs['networks_and_azs']['service_network'] = { 'name': service_network }
put_json(url + '/networks_and_azs', networks_and_azs)
scoped_properties = {}
resource_config = {}
for job, job_properties in jobs_properties.items():
if 'resource_config' in job_properties:
job_resource_config = job_properties.pop('resource_config')
job_guid = get_job_guid(job, product_settings.get('jobs', []))
if job_guid is None:
continue
resource_config[job_guid] = job_resource_config
for name, value in job_properties.items():
key = '.'.join(('', job, name))
scoped_properties[key] = value
for key in properties:
value = properties[key]
if not key.startswith('.'):
key = '.properties.' + key
scoped_properties[key] = { 'value': value }
properties = { 'properties': scoped_properties }
put_json(url + '/properties', properties)
for job_guid, job_resource_config in resource_config.items():
resource_config_url = url + '/jobs/' + job_guid + '/resource_config'
merged_job_resource_config = get(resource_config_url).json()
merged_job_resource_config.update(job_resource_config)
put_json(url + '/jobs/' + job_guid + '/resource_config', merged_job_resource_config)
else:
print("PCF version ({}) is unsupported, but we'll give it a try".format('.'.join(str(x) for x in version)))
try:
post_yaml('/api/installation_settings', 'installation[file]', settings)
except:
raise Exception('Configuration failed, probably due to incompatible PCF version.')
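# Illustrative sketch (not part of the original module; names are hypothetical):
# the `properties` argument to configure() is a flat dict of product properties,
# optionally with a 'jobs' sub-dict keyed by job identifier whose entries may carry
# a 'resource_config' section, e.g.:
#
#   configure('my-product', {
#       'org': 'system',
#       'jobs': {
#           'my-job': {
#               'some_job_property': 'value',
#               'resource_config': {'instances': 2},
#           },
#       },
#   })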
def get_changes(product = None, deploy_errands = None, delete_errands = None):
return build_changes(deploy_errands, delete_errands)
def build_changes(deploy_errands, delete_errands):
changes = get('/api/v0/staged/pending_changes').json()
for product_change in changes['product_changes']:
if product_change['action'] in ['install', 'update']:
product_change['errands'] = [
e for e in product_change['errands']
if deploy_errands is None or e['name'] in deploy_errands
]
for product_change in changes['product_changes']:
if product_change['action'] == 'delete':
product_change['errands'] = [
e for e in product_change['errands']
if delete_errands is None or e['name'] in delete_errands
]
return changes
def build_changes_1_7(product, deploy_errands, delete_errands):
if deploy_errands is None and delete_errands is None:
raise Exception(
'You must specify --deploy-errands or --delete-errands on PCF 1.7,\n'
'since we cannot reliably discover them on that version'
)
deployed = [p for p in get('/api/v0/deployed/products').json()]
staged = [p for p in get('/api/v0/staged/products').json()]
install = [p for p in staged if p["guid"] not in [g["guid"] for g in deployed]]
delete = [p for p in deployed if p["guid"] not in [g["guid"] for g in staged]]
update = [p for p in deployed if p["guid"] in [g["guid"] for g in staged if not g["guid"].startswith('cf-')]]
# update = []
if product is not None:
install = [p for p in install if p["guid"].startswith(product + '-')]
delete = [p for p in delete if p["guid"].startswith(product + '-')]
update = [p for p in update if p["guid"].startswith(product + '-')]
for p in install + update:
p['errands'] = []
if deploy_errands is None:
deploy_errands = []
for deploy_errand in deploy_errands:
p['errands'].append({'name': deploy_errand, 'post_deploy': True})
for p in delete:
p['errands'] = []
if delete_errands is None:
delete_errands = []
for delete_errand in delete_errands:
p['errands'].append({'name': delete_errand, 'pre_delete': True})
changes = {'product_changes': [{
'guid': p['guid'],
'errands': p.get('errands', []),
'action': 'install' if p in install else 'delete' if p in delete else 'update'
}
for p in install + delete + update
]}
return changes
def get_cfinfo():
settings = get('/api/installation_settings').json()
cf_settings = [ p for p in settings['products'] if p['identifier'] == 'cf' ]
if len(cf_settings) < 1:
raise Exception('Elastic Runtime is not installed')
cf_settings = cf_settings[0]
jobs = cf_settings['jobs']
cc_properties = [ j for j in jobs if j['identifier'] == 'cloud_controller' ][0]['properties']
system_domain = [ p for p in cc_properties if p['identifier'] == 'system_domain' ][0]['value']
apps_domain = [ p for p in cc_properties if p['identifier'] == 'apps_domain' ][0]['value']
uaa_properties = [ j for j in jobs if j['identifier'] == 'uaa' ][0]['properties']
admin_credentials = [ c for c in uaa_properties if c['identifier'] == 'admin_credentials' ][0]['value']
system_services_credentials = [ c for c in uaa_properties if c['identifier'] == 'system_services_credentials' ][0]['value']
return {
'system_domain': system_domain,
'apps_domain': apps_domain,
'schema_version': settings.get("installation_schema_version"),
'admin_username': admin_credentials['identity'],
'admin_password': admin_credentials.get('password', None),
'system_services_username': system_services_credentials['identity'],
'system_services_password': system_services_credentials.get('password', None)
}
def logs(install_id):
if install_id is None:
install_id = last_install()
if install_id == 0:
raise Exception('No installation has ever been performed')
lines_shown = 0
running = True
while running:
install_status = get('/api/installation/' + str(install_id)).json()['status']
running = install_status == 'running'
log_lines = get('/api/installation/' + str(install_id) + '/logs').json()['logs'].splitlines()
for line in log_lines[lines_shown:]:
if not line.startswith('{'):
print(' ', line.encode('utf-8'))
lines_shown = len(log_lines)
if running:
time.sleep(1)
if not install_status.startswith('succ'):
raise Exception('- install finished with status: {}'.format(install_status))
def install_exists(id):
response = get('/api/installation/' + str(id), check=False)
return response.status_code == requests.codes.ok
def last_install(lower=0, upper=1, check=install_exists):
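    # Prefer the v0 installations list; if that endpoint is unavailable (older
    # Ops Manager versions), fall back to probing installation ids with an
    # exponential search followed by a binary search via check().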
try:
installations = get('/api/v0/installations', check=False).json()['installations']
installations = [ i['id'] for i in installations]
return sorted([ 0 ] + installations)[-1]
except:
pass
if lower == upper:
return lower
if check(upper):
return last_install(upper, upper * 2, check=check)
middle = (lower + upper + 1) // 2
if check(middle):
return last_install(middle, upper, check=check)
else:
return last_install(lower, middle - 1, check=check)
def get_history():
try:
installations = get('/api/v0/installations', check=False).json()['installations']
return installations
except:
pass
installations = []
install_id = 1
while True:
response = get('/api/installation/' + str(install_id), check=False)
if response.status_code != requests.codes.ok:
break
installations += [ response.json() ]
install_id += 1
return installations
def get_status():
id = last_install()
if id == 0:
return { 'status': 'idle' }
return get('/api/installation/' + str(id)).json()
def unlock():
creds = get_credentials()
passphrase = creds.get('opsmgr').get('password')
body = { 'passphrase': passphrase }
waiting = False
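    # Keep retrying until the Ops Manager API responds; 502/503 responses and
    # connection errors just mean it is still starting up.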
while True:
try:
response = put('/api/v0/unlock', body, check=False)
if response.status_code == requests.codes.ok:
if waiting:
print(' ok')
return
if response.status_code == 404:
if waiting:
print(' ok')
print("Unlock not required for this version")
return
if response.status_code != 503 and response.status_code != 502:
message = '- {} {}\n'.format(response.status_code, response.request.url)
try:
errors = response.json()["errors"]
message += '- '+('\n- '.join(json.dumps(errors, indent=4).splitlines()))
except:
message += response.text
raise Exception(message)
except requests.exceptions.ConnectionError:
pass
if waiting:
sys.stdout.write('.')
sys.stdout.flush()
else:
sys.stdout.write('Waiting for ops manager ')
sys.stdout.flush()
waiting = True
time.sleep(5)
continue
def get_stemcells():
response = get('/api/v0/diagnostic_report', check=False)
if response.status_code == requests.codes.ok:
diag = response.json()
stemcells = diag.get('stemcells') or diag.get('available_stemcells') or []
return stemcells
return []
|
|
"""Implements a layer of neurons."""
from parameter import *
import matplotlib.pyplot as plt
plt.ion()
class Layer(Parameter):
def __init__(self, proto, t_op=None, tied_to=None):
super(Layer, self).__init__()
self.tied_to = tied_to
if proto.tied:
tied_to.num_shares += 1
proto = util.LoadMissing(proto, tied_to.proto)
self.proto = proto
self.state = None
self.params = {}
self.hyperparams = proto.hyperparams
self.incoming_edge = []
self.outgoing_edge = []
self.outgoing_neighbour = []
self.incoming_neighbour = []
self.use_suff_stats = False
self.fast_dropout_partner = None
if t_op:
self.batchsize = t_op.batchsize
self.use_suff_stats = t_op.optimizer == deepnet_pb2.Operation.PCD \
or t_op.optimizer == deepnet_pb2.Operation.CD
else:
self.batchsize = 0
self.name = proto.name
self.dimensions = proto.dimensions
self.numlabels = proto.numlabels
self.activation = proto.hyperparams.activation
self.is_input = proto.is_input
self.is_output = proto.is_output
self.loss_function = proto.loss_function
self.loss_weight = proto.loss_weight
self.train_data_handler = None
self.validation_data_handler = None
self.test_data_handler = None
self.tied_to = None
self.data_tied_to = None
self.data = None
self.deriv = None
self.prefix = proto.prefix
self.marker = 0
self.fig = visualize.GetFigId()
self.tiny = 1e-10
self.replicated_neighbour = None
self.is_initialized = proto.is_initialized
self.t_op = t_op
self.learn_precision = False
self.sample_input = self.hyperparams.sample_input
self.LoadParams(proto, t_op=t_op, tied_to=tied_to)
if self.batchsize > 0:
self.AllocateMemory(self.batchsize)
def LoadParams(self, proto, **kwargs):
assert proto
for param in proto.param:
if not param.dimensions:
param.dimensions.extend([proto.numlabels * proto.dimensions, 1])
elif len(param.dimensions) == 1:
param.dimensions.append(1)
super(Layer, self).LoadParams(proto, **kwargs)
def LoadPretrained(self, param):
node_name = param.pretrained_model_node1
if node_name == '':
node_name = self.proto.name
mat = None
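    # Parameters are accumulated from every listed pretrained model and then
    # averaged (see the division by len(param.pretrained_model) below).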
for pretrained_model in param.pretrained_model:
model_file = os.path.join(self.prefix, pretrained_model)
ext = os.path.splitext(pretrained_model)[1]
if ext == '.npz':
npzfile = np.load(model_file)
if param.name == 'bias':
this_mat = np.nan_to_num(npzfile['mean'] / npzfile['std'])
elif param.name == 'precision':
this_mat = np.nan_to_num(1. / npzfile['std'])
elif ext == '.npy':
this_mat = np.load(model_file)
else:
model = util.ReadModel(model_file)
# Find the relevant node in the model.
node = next(n for n in model.layer if n.name == node_name)
# Find the relevant parameter in the node.
pretrained_param = next(p for p in node.param if p.name == param.name)
assert pretrained_param.mat != '',\
'Pretrained param %s in layer %s of model %s is empty!!' % (
pretrained_param.name, node.name, pretrained_model)
this_mat = util.ParameterAsNumpy(pretrained_param)
if len(this_mat.shape) == 1:
this_mat = this_mat.reshape(-1, 1)
if mat is None:
mat = this_mat
else:
mat += this_mat
return mat / len(param.pretrained_model)
def SetData(self, data):
self.data = data
def AddIncomingEdge(self, edge):
if edge not in self.incoming_edge:
self.incoming_edge.append(edge)
if self == edge.node1:
neighbour = edge.node2
else:
neighbour = edge.node1
self.incoming_neighbour.append(neighbour)
if neighbour.proto.replicate_bias and neighbour.activation == deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX:
self.replicated_neighbour = neighbour
def AddOutgoingEdge(self, edge):
if edge not in self.outgoing_edge:
self.outgoing_edge.append(edge)
if self == edge.node1:
self.outgoing_neighbour.append(edge.node2)
else:
self.outgoing_neighbour.append(edge.node1)
def PrintNeighbours(self):
for n in self.incoming_neighbour:
print "Incoming edge from %s" % n.name
for n in self.outgoing_neighbour:
print "Outgoing edge to %s" % n.name
def ResetState(self, rand=False):
if rand:
self.state.fill_with_randn()
self.ApplyActivation()
else:
self.state.assign(0)
def GetData(self):
self.state.assign(self.data)
def GetSparsityGradient(self):
h = self.hyperparams
damping = h.sparsity_damping
target = h.sparsity_target
cost = h.sparsity_cost
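    # Running estimate: rho_hat <- damping * rho_hat + (1 - damping) * mean_over_batch(state);
    # sparsity gradient = cost * (rho_hat - target) / divisor.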
# Update \hat{\rho}.
self.means.mult(damping)
self.means.add_sums(self.state, axis=1, mult=(1-damping)/self.batchsize)
# Compute gradient.
self.means.subtract(target, target=self.sparsity_gradient)
div = self.GetSparsityDivisor()
self.sparsity_gradient.divide(div)
self.sparsity_gradient.mult(cost)
# Return gradient.
return self.sparsity_gradient
def AllocateMemory(self, batchsize):
self.AllocateBatchsizeDependentMemory(batchsize)
dimensions = self.dimensions
numlabels = self.numlabels
numdims = dimensions * numlabels
self.dimsize = cm.CUDAMatrix(np.zeros((numdims, 1)))
if self.hyperparams.sparsity:
tgt = self.hyperparams.sparsity_target
self.means = cm.CUDAMatrix(tgt + np.zeros((numdims, 1)))
self.sparsity_gradient = cm.CUDAMatrix(np.zeros((numdims, 1)))
self.means_temp2 = cm.CUDAMatrix(np.zeros((numdims, 1)))
self.gradient = cm.CUDAMatrix(np.zeros((numdims, 1)))
self.gradient_history = cm.CUDAMatrix(np.zeros((numdims, 1)))
def AllocateBatchsizeDependentMemory(self, batchsize):
if self.data:
self.data.free_device_memory()
if self.deriv:
self.deriv.free_device_memory()
self.batchsize = batchsize
dimensions = self.dimensions
numlabels = self.numlabels
numdims = dimensions * numlabels
self.statesize = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.batchsize_temp = cm.CUDAMatrix(np.zeros((1, batchsize)))
self.state = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.deriv = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
if self.t_op:
if self.t_op.optimizer == deepnet_pb2.Operation.PCD:
self.pos_state = self.state
self.pos_sample = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.neg_state = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.neg_sample = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.sample = self.pos_sample
self.suff_stats = cm.empty((numdims, 1))
elif self.t_op.optimizer == deepnet_pb2.Operation.CD:
self.sample = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.suff_stats = cm.empty((numdims, 1))
else:
self.state = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
if self.is_input or self.is_initialized or self.is_output:
self.data = cm.CUDAMatrix(np.zeros((dimensions, batchsize)))
if self.hyperparams.dropout:
self.mask = cm.CUDAMatrix(np.zeros(self.state.shape))
def CollectSufficientStatistics(self, neg=False):
"""Collect sufficient statistics for this layer."""
h = self.hyperparams
if not neg:
self.state.sum(axis=1, target=self.suff_stats)
if h.sparsity:
sparsity_gradient = self.GetSparsityGradient()
self.suff_stats.add_mult(sparsity_gradient, -self.batchsize)
else:
self.suff_stats.add_sums(self.state, axis=1, mult=-1.0)
if not neg and h.sparsity:
return self.means.sum()/self.means.shape[0]
def Show(self, train=False):
"""Displays useful statistics about the model."""
if not self.proto.hyperparams.enable_display:
return
f = 1
if self.hyperparams.dropout and not train:
f = 1 / (1 - self.hyperparams.dropout_prob)
if self.is_input:
visualize.display_hidden(self.state.asarray(), self.fig, title=self.name)
#visualize.display_w(self.neg_sample.asarray(), 28, 10, self.state.shape[1]/10, self.fig, title=self.name, vmax=1, vmin=0)
#visualize.show_hist(self.params['bias'].asarray(), self.fig)
else:
visualize.display_hidden(f*self.state.asarray(), self.fig, title=self.name)
#visualize.show_hist(self.params['bias'].asarray(), self.fig)
"""
plt.figure(self.fig)
plt.clf()
plt.subplot(1, 3, 1)
plt.title('pos_probabilities')
plt.imshow(self.pos_state.asarray(), cmap = plt.cm.gray, interpolation = 'nearest', vmax=1, vmin=0)
plt.subplot(1, 3, 2)
plt.title('neg_probabilities')
plt.imshow(self.neg_state.asarray(), cmap = plt.cm.gray, interpolation = 'nearest', vmax=1, vmin=0)
plt.subplot(1, 3, 3)
plt.title('neg_samples')
plt.imshow(self.neg_sample.asarray(), cmap = plt.cm.gray, interpolation = 'nearest', vmax=1, vmin=0)
plt.suptitle(self.name)
plt.draw()
"""
#visualize.display_w(self.neg_sample.asarray(), 1, 1, self.state.shape[1], self.fig, title=self.name)
def ComputeDeriv(self):
pass
def GetLoss(self, get_deriv=False):
pass
def Sample(self):
pass
def ApplyActivation(self):
pass
def GetSparsityDivisor(self):
self.means_temp2.assign(1)
return self.means_temp2
|
|
import random
from django.contrib.auth.models import AnonymousUser, Group, User
from django.db import connection
from django.test import RequestFactory
from django.test.utils import override_settings
import mock
import waffle
from test_app import views
from waffle.middleware import WaffleMiddleware
from waffle.models import Flag, Sample, Switch
from waffle.tests.base import TestCase
def get(**kw):
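    """Build a GET request to /foo with the given query params and an anonymous user."""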
request = RequestFactory().get('/foo', data=kw)
request.user = AnonymousUser()
return request
def process_request(request, view):
response = view(request)
return WaffleMiddleware().process_response(request, response)
class WaffleTests(TestCase):
def test_persist_active_flag(self):
Flag.objects.create(name='myflag', percent='0.1')
request = get()
# Flag stays on.
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('True', response.cookies['dwf_myflag'].value)
def test_persist_inactive_flag(self):
Flag.objects.create(name='myflag', percent='99.9')
request = get()
# Flag stays off.
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('False', response.cookies['dwf_myflag'].value)
def test_no_set_unused_flag(self):
"""An unused flag shouldn't have its cookie reset."""
request = get()
request.COOKIES['dwf_unused'] = 'True'
response = process_request(request, views.flag_in_view)
assert 'dwf_unused' not in response.cookies
def test_superuser(self):
"""Test the superuser switch."""
Flag.objects.create(name='myflag', superusers=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
superuser = User(username='foo', is_superuser=True)
request.user = superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_superuser = User(username='bar', is_superuser=False)
non_superuser.save()
request.user = non_superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_staff(self):
"""Test the staff switch."""
Flag.objects.create(name='myflag', staff=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
staff = User(username='foo', is_staff=True)
request.user = staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_staff = User(username='foo', is_staff=False)
non_staff.save()
request.user = non_staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_languages(self):
Flag.objects.create(name='myflag', languages='en,fr')
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
request.LANGUAGE_CODE = 'en'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
request.LANGUAGE_CODE = 'de'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_user(self):
"""Test the per-user switch."""
user = User.objects.create(username='foo')
flag = Flag.objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User.objects.create(username='someone_else')
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_user_cache(self):
user = User.objects.create(username='foo')
flag = Flag.objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
with self.assertNumQueries(1):
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
def test_group(self):
"""Test the per-group switch."""
group = Group.objects.create(name='foo')
user = User.objects.create(username='bar')
user.groups.add(group)
flag = Flag.objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='someone_else')
request.user.save()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_group_cache(self):
group = Group.objects.create(name='foo')
user = User.objects.create(username='bar')
user.groups.add(group)
flag = Flag.objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
with self.assertNumQueries(2):
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
def test_authenticated(self):
"""Test the authenticated/anonymous switch."""
Flag.objects.create(name='myflag', authenticated=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='foo')
assert request.user.is_authenticated()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_on(self):
"""Test the 'everyone' switch on."""
Flag.objects.create(name='myflag', everyone=True)
request = get()
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='foo')
assert request.user.is_authenticated()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_off(self):
"""Test the 'everyone' switch off."""
Flag.objects.create(name='myflag', everyone=False,
authenticated=True)
request = get()
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = User(username='foo')
assert request.user.is_authenticated()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_percent(self):
"""If you have no cookie, you get a cookie!"""
Flag.objects.create(name='myflag', percent='50.0')
request = get()
response = process_request(request, views.flag_in_view)
assert 'dwf_myflag' in response.cookies
@mock.patch.object(random, 'uniform')
def test_reroll(self, uniform):
"""Even without a cookie, calling flag_is_active twice should return
the same value."""
Flag.objects.create(name='myflag', percent='50.0')
# Make sure we're not really random.
request = get() # Create a clean request.
assert not hasattr(request, 'waffles')
uniform.return_value = '10' # < 50. Flag is True.
assert waffle.flag_is_active(request, 'myflag')
assert hasattr(request, 'waffles') # We should record this flag.
assert 'myflag' in request.waffles
assert request.waffles['myflag'][0]
uniform.return_value = '70' # > 50. Normally, Flag would be False.
assert waffle.flag_is_active(request, 'myflag')
assert request.waffles['myflag'][0]
def test_undefined(self):
"""Undefined flags are always false."""
request = get()
assert not waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_undefined_default(self):
"""WAFFLE_FLAG_DEFAULT controls undefined flags."""
request = get()
assert waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_OVERRIDE=True)
def test_override(self):
request = get(foo='1')
Flag.objects.create(name='foo') # Off for everyone.
assert waffle.flag_is_active(request, 'foo')
def test_testing_flag(self):
Flag.objects.create(name='foo', testing=True)
request = get(dwft_foo='1')
assert waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert request.waffle_tests['foo']
# GET param should override cookie
request = get(dwft_foo='0')
request.COOKIES['dwft_foo'] = 'True'
assert not waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert not request.waffle_tests['foo']
def test_testing_disabled_flag(self):
Flag.objects.create(name='foo')
request = get(dwft_foo='1')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
request = get(dwft_foo='0')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
def test_set_then_unset_testing_flag(self):
Flag.objects.create(name='myflag', testing=True)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=0')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
class SwitchTests(TestCase):
def test_switch_active(self):
switch = Switch.objects.create(name='myswitch', active=True)
assert waffle.switch_is_active(switch.name)
def test_switch_inactive(self):
switch = Switch.objects.create(name='myswitch', active=False)
assert not waffle.switch_is_active(switch.name)
def test_switch_active_from_cache(self):
"""Do not make two queries for an existing active switch."""
switch = Switch.objects.create(name='myswitch', active=True)
# Get the value once so that it will be put into the cache
assert waffle.switch_is_active(switch.name)
queries = len(connection.queries)
assert waffle.switch_is_active(switch.name)
self.assertEqual(queries, len(connection.queries), 'We should only make one query.')
def test_switch_inactive_from_cache(self):
"""Do not make two queries for an existing inactive switch."""
switch = Switch.objects.create(name='myswitch', active=False)
# Get the value once so that it will be put into the cache
assert not waffle.switch_is_active(switch.name)
queries = len(connection.queries)
assert not waffle.switch_is_active(switch.name)
self.assertEqual(queries, len(connection.queries), 'We should only make one query.')
def test_undefined(self):
assert not waffle.switch_is_active('foo')
@override_settings(WAFFLE_SWITCH_DEFAULT=True)
def test_undefined_default(self):
assert waffle.switch_is_active('foo')
@override_settings(DEBUG=True)
def test_no_query(self):
"""Do not make two queries for a non-existent switch."""
assert not Switch.objects.filter(name='foo').exists()
queries = len(connection.queries)
assert not waffle.switch_is_active('foo')
assert len(connection.queries) == queries+1, 'We should make one query.'
class SampleTests(TestCase):
def test_sample_100(self):
sample = Sample.objects.create(name='sample', percent='100.0')
assert waffle.sample_is_active(sample.name)
def test_sample_0(self):
sample = Sample.objects.create(name='sample', percent='0.0')
assert not waffle.sample_is_active(sample.name)
def test_undefined(self):
assert not waffle.sample_is_active('foo')
@override_settings(WAFFLE_SAMPLE_DEFAULT=True)
def test_undefined_default(self):
assert waffle.sample_is_active('foo')
|
|
import os
import logging
import logging.handlers
import environment
import logconfig
# If using a separate Python package (e.g. a submodule in vendor/) to share
# logic between applications, you can also share settings. Just create another
# settings file in your package and import it like so:
#
# from comrade.core.settings import *
#
# The top half of this settings.py file is copied from comrade for clarity. We
# use the import method in actual deployments.
# Make filepaths relative to settings.
path = lambda root,*a: os.path.join(root, *a)
ROOT = os.path.dirname(os.path.abspath(__file__))
# List of admin e-mails - we use Hoptoad to collect error notifications, so this
# is usually blank.
ADMINS = ()
MANAGERS = ADMINS
# Deployment Configuration
class DeploymentType:
PRODUCTION = "PRODUCTION"
DEV = "DEV"
SOLO = "SOLO"
STAGING = "STAGING"
dict = {
SOLO: 1,
PRODUCTION: 2,
DEV: 3,
STAGING: 4
}
if 'DEPLOYMENT_TYPE' in os.environ:
DEPLOYMENT = os.environ['DEPLOYMENT_TYPE'].upper()
else:
DEPLOYMENT = DeploymentType.SOLO
SITE_ID = DeploymentType.dict[DEPLOYMENT]
DEBUG = DEPLOYMENT != DeploymentType.PRODUCTION
STATIC_MEDIA_SERVER = DEPLOYMENT == DeploymentType.SOLO
TEMPLATE_DEBUG = DEBUG
SSL_ENABLED = DEBUG
INTERNAL_IPS = ('127.0.0.1',)
# Logging
if DEBUG:
LOG_LEVEL = logging.DEBUG
else:
LOG_LEVEL = logging.INFO
# Only log to syslog if this is not a solo developer server.
USE_SYSLOG = DEPLOYMENT != DeploymentType.SOLO
# Cache Backend
CACHE_TIMEOUT = 3600
MAX_CACHE_ENTRIES = 10000
CACHE_MIDDLEWARE_SECONDS = 3600
CACHE_MIDDLEWARE_KEY_PREFIX = ''
# Don't require developers to install memcached, and also make debugging easier
# because cache is automatically wiped when the server reloads.
if DEPLOYMENT == DeploymentType.SOLO:
CACHE_BACKEND = ('locmem://?timeout=%(CACHE_TIMEOUT)d'
'&max_entries=%(MAX_CACHE_ENTRIES)d' % locals())
else:
CACHE_BACKEND = ('memcached://127.0.0.1:11211/?timeout=%(CACHE_TIMEOUT)d'
'&max_entries=%(MAX_CACHE_ENTRIES)d' % locals())
# E-mail Server
if DEPLOYMENT != DeploymentType.SOLO:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'YOU@YOUR-SITE.com'
EMAIL_HOST_PASSWORD = 'PASSWORD'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = "Bueda Support <support@bueda.com>"
SERVER_EMAIL = "Bueda Operations <ops@bueda.com>"
CONTACT_EMAIL = 'support@bueda.com'
# Internationalization
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
# Testing
# Use nosetests instead of unittest
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Paths
MEDIA_ROOT = path(ROOT, 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin'
ROOT_URLCONF = 'urls'
# Version Information
# Grab the current commit SHA from git - handy for confirming the version
# deployed on a remote server is the one you think it is.
#import subprocess
#GIT_COMMIT = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
# stdout=subprocess.PIPE).communicate()[0].strip()
#del subprocess
# Database
DATABASES = {}
if DEPLOYMENT == DeploymentType.PRODUCTION:
DATABASES['default'] = {
'NAME': 'boilerplate',
'ENGINE': 'django.db.backends.mysql',
'HOST': 'your-database.com',
'PORT': '',
'USER': 'boilerplate',
'PASSWORD': 'your-password'
}
elif DEPLOYMENT == DeploymentType.DEV:
DATABASES['default'] = {
'NAME': 'boilerplate_dev',
'ENGINE': 'django.db.backends.mysql',
'HOST': 'your-database.com',
'PORT': '',
'USER': 'boilerplate',
'PASSWORD': 'your-password'
}
elif DEPLOYMENT == DeploymentType.STAGING:
DATABASES['default'] = {
'NAME': 'boilerplate_staging',
'ENGINE': 'django.db.backends.mysql',
'HOST': 'your-database.com',
'PORT': '',
'USER': 'boilerplate',
'PASSWORD': 'your-password'
}
else:
DATABASES['default'] = {
'NAME': 'db',
'ENGINE': 'django.db.backends.sqlite3',
'HOST': '',
'PORT': '',
'USER': '',
'PASSWORD': ''
}
# Message Broker (for Celery)
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_USER = "boilerplate"
BROKER_PASSWORD = "boilerplate"
BROKER_VHOST = "boilerplate"
CELERY_RESULT_BACKEND = "amqp"
# Run tasks eagerly in development, so developers don't have to keep a celeryd
# processing running.
CELERY_ALWAYS_EAGER = DEPLOYMENT == DeploymentType.SOLO
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# South
# Speed up testing when you have lots of migrations.
SOUTH_TESTS_MIGRATE = False
SKIP_SOUTH_TESTS = True
# Logging
SYSLOG_FACILITY = logging.handlers.SysLogHandler.LOG_LOCAL0
SYSLOG_TAG = "boilerplate"
# See PEP 391 and logconfig.py for formatting help. Each section of LOGGING
# will get merged into the corresponding section of log_settings.py.
# Handlers and log levels are set up automatically based on LOG_LEVEL and DEBUG
# unless you set them here. Messages will not propagate through a logger
# unless propagate: True is set.
LOGGERS = {
'loggers': {
'boilerplate': {},
},
}
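# Illustrative only (the logger name below is hypothetical): to force verbose
# output for a single logger regardless of LOG_LEVEL, add an explicit
# dictConfig-style entry before initialize_logging runs, e.g.:
# LOGGERS['loggers']['boilerplate.images'] = {'level': 'DEBUG', 'propagate': True}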
logconfig.initialize_logging(SYSLOG_TAG, SYSLOG_FACILITY, LOGGERS, LOG_LEVEL,
USE_SYSLOG)
# Debug Toolbar
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# Application Settings
CBIR_PATH = path('../cbir')
GALLERY_PATH = os.path.join(MEDIA_ROOT, 'images', 'gallery')
GALLERY_URL = MEDIA_URL + 'images/gallery/'
GALLERY_SIZE = 10000 # Number of images in gallery
# Maximum length of the filename. Forms should use this and raise
# ValidationError if the length is exceeded.
# @see http://code.djangoproject.com/ticket/9893
# Columns are 250 but this leaves 50 chars for the upload_to prefix
MAX_FILENAME_LENGTH = 200
MAX_FILEPATH_LENGTH = 250
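# Illustrative sketch only (clean_image and the form it lives in are
# hypothetical, not part of this project): a form method that enforces the
# limit above:
#
#   def clean_image(self):
#       image = self.cleaned_data['image']
#       if len(image.name) > settings.MAX_FILENAME_LENGTH:
#           raise forms.ValidationError('Filename is too long.')
#       return image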
IMAGE_UPLOAD_PATH = 'uploads/images/'
IMAGE_UPLOAD_URL = MEDIA_URL + IMAGE_UPLOAD_PATH
IMAGE_UPLOAD_PATH_FULL = os.path.join(MEDIA_ROOT, IMAGE_UPLOAD_PATH)
IMAGE_ALLOWED_TYPES = {'.pgm': 'image/x-portable-graymap'}
IMAGE_ALLOWED_EXTENSIONS = IMAGE_ALLOWED_TYPES.keys()
IMAGE_ALLOWED_MIMETYPES = IMAGE_ALLOWED_TYPES.values()
SECRET_KEY = 'some-super-secret-token'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
# Sessions
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# Middleware
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
if DEPLOYMENT != DeploymentType.SOLO:
MIDDLEWARE_CLASSES += (
'django.middleware.transaction.TransactionMiddleware',
)
# Templates
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
if DEPLOYMENT != DeploymentType.SOLO:
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', TEMPLATE_LOADERS),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
)
TEMPLATE_DIRS = (
    path(ROOT, 'templates'),  # trailing comma keeps TEMPLATE_DIRS a tuple, not a string
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.markup',
'django.contrib.messages',
'images',
)
if DEPLOYMENT == DeploymentType.SOLO:
INSTALLED_APPS += (
'django_extensions',
'django_nose',
)
|
|
#!/usr/bin/env python2.7
#
# This is the Telegraf build script.
#
# Current caveats:
# - Does not checkout the correct commit/branch (for now, you will need to do so manually)
# - Has external dependencies for packaging (fpm) and uploading (boto)
#
import sys
import os
import subprocess
import time
import datetime
import shutil
import tempfile
import hashlib
import re
try:
import boto
from boto.s3.key import Key
except ImportError:
pass
# PACKAGING VARIABLES
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
CONFIG_DIR = "/etc/telegraf"
LOGROTATE_DIR = "/etc/logrotate.d"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/telegraf.service"
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
DEFAULT_CONFIG = "etc/telegraf.conf"
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
# SCRIPT START
prereqs = [ 'git', 'go' ]
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--license {} \
--maintainer {} \
--config-files {} \
--config-files {} \
--after-install {} \
--before-install {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
PACKAGE_LICENSE,
MAINTAINER,
CONFIG_DIR + '/telegraf.conf',
LOGROTATE_DIR + '/telegraf',
POSTINST_SCRIPT,
PREINST_SCRIPT,
DESCRIPTION)
targets = {
'telegraf' : './cmd/telegraf/telegraf.go',
}
supported_builds = {
# TODO(rossmcdonald): Add support for multiple GOARM values
'darwin': [ "amd64", "386" ],
# 'windows': [ "amd64", "386", "arm", "arm64" ],
'linux': [ "amd64", "386", "arm" ]
}
supported_go = [ '1.5.1' ]
supported_packages = {
"darwin": [ "tar", "zip" ],
"linux": [ "deb", "rpm", "tar", "zip" ],
"windows": [ "tar", "zip" ],
}
def run(command, allow_failure=False, shell=False):
out = None
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print ""
print ""
print "Executed command failed!"
print "-- Command run was: {}".format(command)
print "-- Failure was: {}".format(e.output)
if allow_failure:
print "Continuing..."
return None
else:
print ""
print "Stopping."
sys.exit(1)
except OSError as e:
print ""
print ""
print "Invalid command!"
print "-- Command run was: {}".format(command)
print "-- Failure was: {}".format(e)
if allow_failure:
print "Continuing..."
return out
else:
print ""
print "Stopping."
sys.exit(1)
else:
return out
def create_temp_dir():
return tempfile.mkdtemp(prefix="telegraf-build.")
def get_current_version():
command = "git describe --always --tags --abbrev=0"
out = run(command)
return out.strip()
def get_current_commit(short=False):
command = None
if short:
command = "git log --pretty=format:'%h' -n 1"
else:
command = "git rev-parse HEAD"
out = run(command)
return out.strip('\'\n\r ')
def get_current_branch():
command = "git rev-parse --abbrev-ref HEAD"
out = run(command)
return out.strip()
def get_system_arch():
arch = os.uname()[4]
if arch == "x86_64":
arch = "amd64"
return arch
def get_system_platform():
if sys.platform.startswith("linux"):
return "linux"
else:
return sys.platform
def get_go_version():
out = run("go version")
matches = re.search('go version go(\S+)', out)
if matches is not None:
return matches.groups()[0].strip()
return None
def check_path_for(b):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
full_path = os.path.join(path, b)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
def check_environ(build_dir = None):
print "\nChecking environment:"
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
print "\t- {} -> {}".format(v, os.environ.get(v))
cwd = os.getcwd()
if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
print "\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures."
def check_prereqs():
print "\nChecking for dependencies:"
for req in prereqs:
print "\t- {} ->".format(req),
path = check_path_for(req)
if path:
print "{}".format(path)
else:
print "?"
for req in optional_prereqs:
print "\t- {} (optional) ->".format(req),
path = check_path_for(req)
if path:
print "{}".format(path)
else:
print "?"
print ""
def upload_packages(packages, nightly=False):
print "Uploading packages to S3..."
print ""
c = boto.connect_s3()
# TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly
bucket = c.get_bucket('telegraf-nightly')
for p in packages:
name = os.path.basename(p)
if bucket.get_key(name) is None or nightly:
print "\t - Uploading {}...".format(name),
k = Key(bucket)
k.key = name
if nightly:
n = k.set_contents_from_filename(p, replace=True)
else:
n = k.set_contents_from_filename(p, replace=False)
k.make_public()
print "[ DONE ]"
else:
print "\t - Not uploading {}, already exists.".format(p)
print ""
def run_tests(race, parallel, timeout, no_vet):
get_command = "go get -d -t ./..."
print "Retrieving Go dependencies...",
sys.stdout.flush()
run(get_command)
print "done."
print "Running tests:"
print "\tRace: ", race
if parallel is not None:
print "\tParallel:", parallel
if timeout is not None:
print "\tTimeout:", timeout
sys.stdout.flush()
p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if len(out) > 0 or len(err) > 0:
print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors."
print out
print err
return False
if not no_vet:
p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if len(out) > 0 or len(err) > 0:
print "Go vet failed. Please run 'go vet ./...' and fix any errors."
print out
print err
return False
else:
print "Skipping go vet ..."
sys.stdout.flush()
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
code = os.system(test_command)
if code != 0:
print "Tests Failed"
return False
else:
print "Tests Passed"
return True
def build(version=None,
branch=None,
commit=None,
platform=None,
arch=None,
nightly=False,
rc=None,
race=False,
clean=False,
outdir=".",
goarm_version="6"):
print "-------------------------"
print ""
print "Build plan:"
print "\t- version: {}".format(version)
if rc:
print "\t- release candidate: {}".format(rc)
print "\t- commit: {}".format(commit)
print "\t- branch: {}".format(branch)
print "\t- platform: {}".format(platform)
print "\t- arch: {}".format(arch)
if arch == 'arm' and goarm_version:
print "\t- ARM version: {}".format(goarm_version)
print "\t- nightly? {}".format(str(nightly).lower())
print "\t- race enabled? {}".format(str(race).lower())
print ""
if not os.path.exists(outdir):
os.makedirs(outdir)
elif clean and outdir != '/':
print "Cleaning build directory..."
shutil.rmtree(outdir)
os.makedirs(outdir)
if rc:
# If a release candidate, update the version information accordingly
version = "{}rc{}".format(version, rc)
print "Starting build..."
for b, c in targets.iteritems():
print "\t- Building '{}'...".format(os.path.join(outdir, b)),
build_command = ""
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
if arch == "arm" and goarm_version:
if goarm_version not in ["5", "6", "7", "arm64"]:
print "!! Invalid ARM build version: {}".format(goarm_version)
build_command += "GOARM={} ".format(goarm_version)
build_command += "go build -o {} ".format(os.path.join(outdir, b))
if race:
build_command += "-race "
go_version = get_go_version()
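        # Go 1.5 changed the -X linker flag syntax from "-X name value" to
        # "-X name=value", hence the two ldflags variants below.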
if "1.4" in go_version:
build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat())
build_command += "-X main.Version {} ".format(version)
build_command += "-X main.Branch {} ".format(branch)
build_command += "-X main.Commit {}\" ".format(get_current_commit())
else:
build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat())
build_command += "-X main.Version={} ".format(version)
build_command += "-X main.Branch={} ".format(branch)
build_command += "-X main.Commit={}\" ".format(get_current_commit())
build_command += c
run(build_command, shell=True)
print "[ DONE ]"
print ""
def create_dir(path):
try:
os.makedirs(path)
except OSError as e:
print e
def rename_file(fr, to):
try:
os.rename(fr, to)
except OSError as e:
print e
# Return the original filename
return fr
else:
# Return the new filename
return to
def copy_file(fr, to):
try:
shutil.copy(fr, to)
except OSError as e:
print e
def create_package_fs(build_root):
print "\t- Creating a filesystem hierarchy from directory: {}".format(build_root)
# Using [1:] for the path names due to them being absolute
# (will overwrite previous paths, per 'os.path.join' documentation)
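    # e.g. os.path.join('/a/build', '/usr/bin') -> '/usr/bin', whereas
    #      os.path.join('/a/build', 'usr/bin') -> '/a/build/usr/bin'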
dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ]
for d in dirs:
create_dir(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0755)
def package_scripts(build_root):
print "\t- Copying scripts and sample configuration to build directory"
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644)
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644)
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0644)
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0644)
def go_get(update=False):
get_command = None
if update:
get_command = "go get -u -f -d ./..."
else:
get_command = "go get -d ./..."
print "Retrieving Go dependencies...",
run(get_command)
print "done.\n"
def generate_md5_from_file(path):
m = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
m.update(chunk)
return m.hexdigest()
def build_packages(build_output, version, nightly=False, rc=None, iteration=1):
outfiles = []
tmp_build_dir = create_temp_dir()
try:
print "-------------------------"
print ""
print "Packaging..."
for p in build_output:
# Create top-level folder displaying which platform (linux, etc)
create_dir(os.path.join(tmp_build_dir, p))
for a in build_output[p]:
current_location = build_output[p][a]
                # Create second-level directory displaying the architecture (amd64, etc)
build_root = os.path.join(tmp_build_dir, p, a)
# Create directory tree to mimic file system of package
create_dir(build_root)
create_package_fs(build_root)
# Copy in packaging and miscellaneous scripts
package_scripts(build_root)
# Copy newly-built binaries to packaging directory
for b in targets:
if p == 'windows':
b = b + '.exe'
fr = os.path.join(current_location, b)
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
print "\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)
copy_file(fr, to)
# Package the directory structure
for package_type in supported_packages[p]:
print "\t- Packaging directory '{}' as '{}'...".format(build_root, package_type),
name = "telegraf"
package_version = version
package_iteration = iteration
if package_type in ['zip', 'tar']:
if nightly:
name = '{}-nightly_{}_{}'.format(name, p, a)
else:
name = '{}-{}_{}_{}'.format(name, version, p, a)
if package_type == 'tar':
                        # Append '.tar.gz' to the output path so the tar package is written gzip-compressed
current_location = os.path.join(current_location, name + '.tar.gz')
if rc is not None:
package_iteration = "0.rc{}".format(rc)
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
fpm_common_args,
name,
a,
package_type,
package_version,
package_iteration,
build_root,
current_location)
if package_type == "rpm":
fpm_command += "--depends coreutils "
out = run(fpm_command, shell=True)
matches = re.search(':path=>"(.*)"', out)
outfile = None
if matches is not None:
outfile = matches.groups()[0]
if outfile is None:
print "[ COULD NOT DETERMINE OUTPUT ]"
else:
# Strip nightly version (the unix epoch) from filename
if nightly and package_type in ['deb', 'rpm']:
outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
outfiles.append(os.path.join(os.getcwd(), outfile))
print "[ DONE ]"
# Display MD5 hash for generated package
print "\t\tMD5 = {}".format(generate_md5_from_file(outfile))
print ""
return outfiles
finally:
# Cleanup
shutil.rmtree(tmp_build_dir)
def print_usage():
print "Usage: ./build.py [options]"
print ""
print "Options:"
print "\t --outdir=<path> \n\t\t- Send build output to a specified path. Defaults to ./build."
print "\t --arch=<arch> \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all"
print "\t --goarm=<arm version> \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6"
print "\t --platform=<platform> \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all"
print "\t --version=<version> \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag."
print "\t --commit=<commit> \n\t\t- Use specific commit for build (currently a NOOP)."
print "\t --branch=<branch> \n\t\t- Build from a specific branch (currently a NOOP)."
print "\t --rc=<rc number> \n\t\t- Whether or not the build is a release candidate (affects version information)."
print "\t --iteration=<iteration number> \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)."
print "\t --race \n\t\t- Whether the produced build should have race detection enabled."
print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)."
print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)."
print "\t --update \n\t\t- Whether dependencies should be updated prior to building."
print "\t --test \n\t\t- Run Go tests. Will not produce a build."
print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified."
print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s."
print "\t --clean \n\t\t- Clean the build output directory prior to creating build."
print ""
def print_package_summary(packages):
print packages
def main():
# Command-line arguments
outdir = "build"
commit = None
target_platform = None
target_arch = None
nightly = False
race = False
branch = None
version = get_current_version()
rc = None
package = False
update = False
clean = False
upload = False
test = False
parallel = None
timeout = None
iteration = 1
no_vet = False
goarm_version = "6"
for arg in sys.argv[1:]:
        if '--outdir' in arg:
            # Output directory. If none is specified, builds are placed in ./build.
            outdir = arg.split("=")[1]
        elif '--commit' in arg:
            # Commit to build from. If none is specified, then it will build from the most recent commit.
            commit = arg.split("=")[1]
        elif '--branch' in arg:
            # Branch to build from. If none is specified, then it will build from the current branch.
            branch = arg.split("=")[1]
elif '--arch' in arg:
# Target architecture. If none is specified, then it will build for the current arch.
target_arch = arg.split("=")[1]
elif '--platform' in arg:
# Target platform. If none is specified, then it will build for the current platform.
target_platform = arg.split("=")[1]
elif '--version' in arg:
# Version to assign to this build (0.9.5, etc)
version = arg.split("=")[1]
elif '--rc' in arg:
# Signifies that this is a release candidate build.
rc = arg.split("=")[1]
elif '--race' in arg:
# Signifies that race detection should be enabled.
race = True
elif '--package' in arg:
# Signifies that packages should be built.
package = True
elif '--nightly' in arg:
# Signifies that this is a nightly build.
nightly = True
elif '--update' in arg:
# Signifies that dependencies should be updated.
update = True
elif '--upload' in arg:
# Signifies that the resulting packages should be uploaded to S3
upload = True
elif '--test' in arg:
# Run tests and exit
test = True
elif '--parallel' in arg:
# Set parallel for tests.
parallel = int(arg.split("=")[1])
elif '--timeout' in arg:
# Set timeout for tests.
timeout = arg.split("=")[1]
elif '--clean' in arg:
# Signifies that the outdir should be deleted before building
clean = True
elif '--iteration' in arg:
iteration = arg.split("=")[1]
elif '--no-vet' in arg:
no_vet = True
elif '--goarm' in arg:
# Signifies GOARM flag to pass to build command when compiling for ARM
goarm_version = arg.split("=")[1]
elif '--help' in arg:
print_usage()
return 0
else:
print "!! Unknown argument: {}".format(arg)
print_usage()
return 1
if nightly:
if rc:
print "!! Cannot be both nightly and a release candidate! Stopping."
return 1
# In order to support nightly builds on the repository, we are adding the epoch timestamp
# to the version so that version numbers are always greater than the previous nightly.
version = "{}.n{}".format(version, int(time.time()))
# Pre-build checks
check_environ()
check_prereqs()
if not commit:
commit = get_current_commit(short=True)
if not branch:
branch = get_current_branch()
if not target_arch:
if 'arm' in get_system_arch():
# Prevent uname from reporting ARM arch (eg 'armv7l')
target_arch = "arm"
else:
target_arch = get_system_arch()
if not target_platform:
target_platform = get_system_platform()
if rc or nightly:
# If a release candidate or nightly, set iteration to 0 (instead of 1)
iteration = 0
build_output = {}
# TODO(rossmcdonald): Prepare git repo for build (checking out correct branch/commit, etc.)
# prepare(branch=branch, commit=commit)
if test:
if not run_tests(race, parallel, timeout, no_vet):
return 1
return 0
go_get(update=update)
platforms = []
single_build = True
if target_platform == 'all':
platforms = supported_builds.keys()
single_build = False
else:
platforms = [target_platform]
for platform in platforms:
build_output.update( { platform : {} } )
archs = []
if target_arch == "all":
single_build = False
archs = supported_builds.get(platform)
else:
archs = [target_arch]
for arch in archs:
od = outdir
if not single_build:
od = os.path.join(outdir, platform, arch)
build(version=version,
branch=branch,
commit=commit,
platform=platform,
arch=arch,
nightly=nightly,
rc=rc,
race=race,
clean=clean,
outdir=od,
goarm_version=goarm_version)
build_output.get(platform).update( { arch : od } )
# Build packages
if package:
if not check_path_for("fpm"):
print "!! Cannot package without command 'fpm'. Stopping."
return 1
packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration)
# TODO(rossmcdonald): Add nice output for print_package_summary()
# print_package_summary(packages)
# Optionally upload to S3
if upload:
upload_packages(packages, nightly=nightly)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Tests for L{udplog.redis}.
"""
from __future__ import division, absolute_import
import simplejson
from twisted.application.internet import TCPClient
from twisted.internet import defer
from twisted.trial import unittest
from udplog import redis
from udplog.twisted import Dispatcher
class FakeRedisClient(object):
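    """
    In-memory stand-in for a Redis client. It records lpush calls and can
    simulate a dropped connection via the disconnected flag.
    """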
def __init__(self):
self.pushes = []
self.disconnected = False
def lpush(self, key, *values, **kwargs):
if self.disconnected:
return defer.fail(RuntimeError("Not connected"))
else:
self.pushes.append((key, values, kwargs))
return defer.succeed(len(self.pushes))
class RedisPublisherServiceTest(unittest.TestCase):
def setUp(self):
self.dispatcher = Dispatcher()
self.client = FakeRedisClient()
self.publisher = redis.RedisPublisher(self.dispatcher,
self.client,
"test_list")
def test_startService(self):
"""
The publisher registers itself with the dispatcher.
"""
event = {'message': 'test'}
self.dispatcher.eventReceived(event)
self.assertEqual(0, len(self.client.pushes))
self.publisher.startService()
self.dispatcher.eventReceived(event)
self.assertEqual(1, len(self.client.pushes))
def test_stopService(self):
"""
        The publisher unregisters itself from the dispatcher when the service stops.
"""
event = {'message': 'test'}
self.publisher.startService()
self.dispatcher.eventReceived(event)
self.publisher.stopService()
self.dispatcher.eventReceived(event)
self.assertEqual(1, len(self.client.pushes))
def test_sendEvent(self):
"""
An event is pushed as a JSON string.
"""
event = {'category': u'test',
'message': u'test',
'timestamp': 1340634165}
self.publisher.sendEvent(event)
output = self.client.pushes[-1]
self.assertEqual('test_list', output[0])
eventDict = simplejson.loads(output[1][0])
self.assertEqual(u'test', eventDict['message'])
def test_sendEventUnserializable(self):
"""
An event that cannot be serialized is dropped and an error logged.
"""
class Object(object):
pass
event = {'category': u'test',
'message': Object(),
'timestamp': 1340634165}
self.publisher.sendEvent(event)
self.assertEqual(0, len(self.client.pushes))
self.assertEqual(1, len(self.flushLoggedErrors(TypeError)))
def test_sendEventNoClient(self):
"""
        If no Redis client is connected, the event is dropped without logging an error.
"""
event = {'category': u'test',
'message': u'test',
'timestamp': 1340634165}
def lpush(key, *args, **kwargs):
return defer.fail(redis.NoClientError())
self.patch(self.client, "lpush", lpush)
self.publisher.sendEvent(event)
self.assertEqual(0, len(self.client.pushes))
self.assertEqual(0, len(self.flushLoggedErrors()),
"Unexpected error logged")
class FakeFactory(object):
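    """
    Fake connection factory whose deferred fires with the client once it is
    (re)connected; disconnect() flags the client and resets the deferred.
    """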
def __init__(self, client):
self.client = client
self.deferred = defer.Deferred()
if client is not None:
self.deferred.callback(client)
def disconnect(self):
self.client.disconnected = True
self.deferred = defer.Deferred()
class RedisPushMultiClientTest(unittest.TestCase):
def test_lpush(self):
"""
An lpush is passed on to the factory client.
"""
client = FakeRedisClient()
factories = [FakeFactory(client)]
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
def cb(result):
self.assertEqual(1, result)
output = client.pushes[-1]
self.assertEqual('test_list', output[0])
self.assertEqual(value, output[1][0])
d = multiClient.lpush('test_list', value)
d.addCallback(cb)
return d
def test_lpushNoFactories(self):
"""
If the list of factories is empty, NoClientError is raised.
"""
factories = []
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
d = multiClient.lpush('test_list', value)
self.assertFailure(d, redis.NoClientError)
return d
def test_lpushNoClient(self):
"""
If a factory's client is not connected, it is removed from the pool.
"""
factories = [FakeFactory(None)]
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
d = multiClient.lpush('test_list', value)
self.assertFailure(d, redis.NoClientError)
return d
def test_lpushRuntimeError(self):
"""
        Errors other than NoClientError are propagated to the caller.
"""
client = FakeRedisClient()
factories = [FakeFactory(client)]
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
def lpush(key, *args, **kwargs):
return defer.fail(RuntimeError("something"))
self.patch(client, "lpush", lpush)
d = multiClient.lpush('test_list', value)
self.assertFailure(d, RuntimeError)
return d
def test_lpushMultiple(self):
"""
Pushes are distributed over multiple clients.
"""
client1 = FakeRedisClient()
client2 = FakeRedisClient()
factories = [FakeFactory(client1), FakeFactory(client2)]
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
def cb(result):
self.assertNotEqual(0, len(client1.pushes), "No pushes to client1")
self.assertNotEqual(0, len(client2.pushes), "No pushes to client2")
self.assertEqual(50, len(client1.pushes) + len(client2.pushes))
d = defer.gatherResults([multiClient.lpush('test_list', value)
for i in xrange(50)])
d.addCallback(cb)
return d
def test_lpushMultipleOneDisconnected(self):
"""
If a client is disconnected, its factory is removed from the pool.
"""
client1 = FakeRedisClient()
factory1 = FakeFactory(client1)
client2 = FakeRedisClient()
factory2 = FakeFactory(client2)
factories = [factory1, factory2]
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
def cb(result):
self.assertNotIn(factory1, multiClient.factories)
self.assertEqual(0, len(client1.pushes))
self.assertEqual(50, len(client2.pushes))
factory1.disconnect()
d = defer.gatherResults([multiClient.lpush('test_list', value)
for i in xrange(50)])
d.addCallback(cb)
return d
def test_lpushMultipleReconnected(self):
"""
If a factory reconnects, it is added back to the pool.
"""
client1 = FakeRedisClient()
factory1 = FakeFactory(client1)
client2 = FakeRedisClient()
factory2 = FakeFactory(client2)
factories = [factory1, factory2]
multiClient = redis.RedisPushMultiClient(factories)
value = '{"message": "test"}'
def onDisconnected(result):
self.assertNotIn(factory1, multiClient.factories)
self.assertEqual(0, len(client1.pushes))
self.assertEqual(50, len(client2.pushes))
client1.disconnected = False
factory1.deferred.callback(client1)
self.assertIn(factory1, multiClient.factories)
factory1.disconnect()
d = defer.gatherResults([multiClient.lpush('test_list', value)
for i in xrange(50)])
d.addCallback(onDisconnected)
return d
class MakeServiceTest(unittest.TestCase):
"""
Tests for L{redis.makeService}.
"""
def test_services(self):
"""
The right type and number of services are created.
"""
config = {'redis-hosts': set(['10.0.0.2', '10.0.0.3']),
'redis-port': 6379,
'redis-key': 'udplog'}
dispatcher = Dispatcher()
multiService = redis.makeService(config, dispatcher)
services = list(multiService)
self.assertEqual(3, len(services))
for service in services[:-1]:
self.assertIsInstance(service, TCPClient)
self.assertIsInstance(services[-1], redis.RedisPublisher)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Computes a header file to be used with SELECTIVE_REGISTRATION.
See the executable wrapper, print_selective_registration_header.py, for more
information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python import _pywrap_kernel_registry
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
# Usually, we use each graph node to induce registration of an op and
# corresponding kernel; nodes without a corresponding kernel (perhaps due to
# attr types) generate a warning but are otherwise ignored. Ops in this set are
# registered even if there's no corresponding kernel.
OPS_WITHOUT_KERNEL_ALLOWLIST = frozenset([
# AccumulateNV2 is rewritten away by AccumulateNV2RemovePass; see
# core/common_runtime/accumulate_n_optimizer.cc.
'AccumulateNV2'
])
FLEX_PREFIX = b'Flex'
FLEX_PREFIX_LENGTH = len(FLEX_PREFIX)
def _get_ops_from_ops_list(input_file):
"""Gets the ops and kernels needed from the ops list file."""
ops = set()
ops_list_str = gfile.GFile(input_file, 'r').read()
if not ops_list_str:
raise Exception('Input file should not be empty')
ops_list = json.loads(ops_list_str)
for op, kernel in ops_list:
op_and_kernel = (op, kernel if kernel else None)
ops.add(op_and_kernel)
return ops
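# Illustrative ops_list file contents (hypothetical, not shipped with this
# module): a JSON list of [op, kernel] pairs, where the kernel entry may be
# null when only the op registration is needed:
#   [["Transpose", "TransposeCpuOp"], ["NoOp", null]]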
def _get_ops_from_graphdef(graph_def):
"""Gets the ops and kernels needed from the tensorflow model."""
ops = set()
for node_def in graph_def.node:
if not node_def.device:
node_def.device = '/cpu:0'
kernel_class = _pywrap_kernel_registry.TryFindKernelClass(
node_def.SerializeToString())
op = str(node_def.op)
if kernel_class or op in OPS_WITHOUT_KERNEL_ALLOWLIST:
op_and_kernel = (op, str(kernel_class.decode('utf-8'))
if kernel_class else None)
ops.add(op_and_kernel)
else:
print('Warning: no kernel found for op %s' % node_def.op, file=sys.stderr)
return ops
def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):
"""Gets the ops and kernels needed from the model files."""
ops = set()
for proto_file in proto_files:
tf_logging.info('Loading proto file %s', proto_file)
# Load ops list file.
if proto_fileformat == 'ops_list':
ops = ops.union(_get_ops_from_ops_list(proto_file))
continue
# Load GraphDef.
file_data = gfile.GFile(proto_file, 'rb').read()
if proto_fileformat == 'rawproto':
graph_def = graph_pb2.GraphDef.FromString(file_data)
else:
assert proto_fileformat == 'textproto'
graph_def = text_format.Parse(file_data, graph_pb2.GraphDef())
ops = ops.union(_get_ops_from_graphdef(graph_def))
# Add default ops.
if default_ops_str and default_ops_str != 'all':
for s in default_ops_str.split(','):
op, kernel = s.split(':')
op_and_kernel = (op, kernel)
if op_and_kernel not in ops:
ops.add(op_and_kernel)
return list(sorted(ops))
def get_header_from_ops_and_kernels(ops_and_kernels,
include_all_ops_and_kernels):
"""Returns a header for use with tensorflow SELECTIVE_REGISTRATION.
Args:
ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.
include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op
kernels are included.
Returns:
the string of the header that should be written as ops_to_register.h.
"""
ops = set(op for op, _ in ops_and_kernels)
result_list = []
def append(s):
result_list.append(s)
_, script_name = os.path.split(sys.argv[0])
append('// This file was autogenerated by %s' % script_name)
append('#ifndef OPS_TO_REGISTER')
append('#define OPS_TO_REGISTER')
if include_all_ops_and_kernels:
append('#define SHOULD_REGISTER_OP(op) true')
append('#define SHOULD_REGISTER_OP_KERNEL(clz) true')
append('#define SHOULD_REGISTER_OP_GRADIENT true')
else:
line = """
namespace {
constexpr const char* skip(const char* x) {
return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;
}
constexpr bool isequal(const char* x, const char* y) {
return (*skip(x) && *skip(y))
? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))
: (!*skip(x) && !*skip(y));
}
template<int N>
struct find_in {
static constexpr bool f(const char* x, const char* const y[N]) {
return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);
}
};
template<>
struct find_in<0> {
static constexpr bool f(const char* x, const char* const y[]) {
return false;
}
};
} // end namespace
"""
line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\n'
for _, kernel_class in ops_and_kernels:
if kernel_class is None:
continue
line += '"%s",\n' % kernel_class
line += '};'
append(line)
append('#define SHOULD_REGISTER_OP_KERNEL(clz) '
'(find_in<sizeof(kNecessaryOpKernelClasses) '
'/ sizeof(*kNecessaryOpKernelClasses)>::f(clz, '
'kNecessaryOpKernelClasses))')
append('')
append('constexpr inline bool ShouldRegisterOp(const char op[]) {')
append(' return false')
for op in sorted(ops):
append(' || isequal(op, "%s")' % op)
append(' ;')
append('}')
append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)')
append('')
append('#define SHOULD_REGISTER_OP_GRADIENT ' +
('true' if 'SymbolicGradient' in ops else 'false'))
append('#endif')
return '\n'.join(result_list)
def get_header(graphs,
proto_fileformat='rawproto',
default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'):
"""Computes a header for use with tensorflow SELECTIVE_REGISTRATION.
Args:
graphs: a list of paths to GraphDef files to include.
proto_fileformat: optional format of proto file, either 'textproto',
      'rawproto' (default) or 'ops_list'. An ops_list is a file containing the
      list of ops in JSON format, e.g.: "[["Transpose", "TransposeCpuOp"]]".
default_ops: optional comma-separated string of operator:kernel pairs to
always include implementation for. Pass 'all' to have all operators and
kernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'.
Returns:
the string of the header that should be written as ops_to_register.h.
"""
ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops)
if not ops_and_kernels:
print('Error reading graph!')
return 1
return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')
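# Hypothetical usage sketch (the graph path below is a placeholder, not part of
# this module): generate the header from a frozen GraphDef and write it out.
#
#   header = get_header(['/tmp/frozen_graph.pb'], proto_fileformat='rawproto')
#   if isinstance(header, str):
#       with open('ops_to_register.h', 'w') as f:
#           f.write(header)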
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Emote Trainer Wrapper / Mass Analyzer
# Command line interface for mass analysis with Emote, and for training Emote's
# classification system using TextBlob's API.
# Note: training features have not been implemented yet; currently only mass
# analysis of text files through the CLI is working.
import os
import sys
import time
import gc
import re
import csv
import shelve  # used by addToDatabase() below
from emote import emote
from nltk.tokenize import sent_tokenize
data = ""
trainingFile = ""
trainingData = []
testData = {}
openingCSV = False
openingText = False
openingPDF = False
csvOutputData = False
csvData = []
csvTextData = []
csvResults = []
csvFile = {}
massResults = []
sentences = []
def startInterface():
global openingCSV
global openingText
global openingPDF
print("\n\tNow starting Emote Mass Analyzer..")
option = input("\n\tTo analyze a file with Emote, enter in the type name ('PDF', 'Text', or 'CSV'), or enter 'Train' or 'Test': ")
option = option.lower()
if option == 'pdf':
# print("PDF file input.")
openingPDF = True
openPDF(path, data)
elif option == 'text':
# print("Text file input.")
openingText = True
openFile(path, data)
elif option == 'csv':
# print("CSV file input.")
openingCSV = True
        parseText(path)
elif option == 'train':
addToDatabase(trainingFile, trainingData)
elif option == 'test':
testEmote(testData)
else:
print("\n\tBad command entered. Please try again.")
startInterface()
def openFile(path, data):
global csvOutputData
    path = input("\n\tEnter the name (with extension) of the text, CSV, or PDF file (must be in the 'texts' directory) to be mass analyzed by Emote: ")
try:
p = os.getcwd()
p = os.path.join(p, 'texts', path)
print(p)
file = open(p, 'r')
data = file.read()
text = data
split_into_sentences(text)
return data
except IOError as err:
print("Error opening path to file.")
# self.openFile(path, data)
# print("\n\tI/O error({0}): {1}".format(errno, strerror))
option = input("\n\tOutput classification results to CSV file? (Yes / No)")
option = option.lower()
if option == 'yes':
csvOutputData = True
elif option == 'no':
csvOutputData = False
else:
("Commad not understood!")
startInterface()
return
def parseText(path):
csvData = []
csvTextData = []
file = open(path, 'r')
csv_file = csv.reader(file, delimiter = ",")
for row in csv_file:
csvData.append(row[0])
csvTextData.append(row[1])
file.close()
    analyzeText(csvData, csvTextData, csvFile)
print("\n\t", csvData)
print("\n\t", csvTextData)
return csvData, csvTextData
def analyzeText(csvData, csvTextData, csvFile):
print("\n\t",csvTextData)
print("\n\t", csvData)
global massResults
massResults = []
csvResults = {}
csvFile = {}
for i in range(len(csvTextData)):
emote.getInput(csvTextData[i])
# print(emote.normalizedProbValues)
massResults.append(emote.normalizedProbValues)
csvFile = open('static/results.csv', 'w', newline='')
for i in range(len(massResults)):
# with open('static/results.csv', 'w', newline='') as csvFile:
csvIndRowList = []
csvResults = csv.writer(csvFile, delimiter = ',')
csvIndRowList.append(csvData[i])
csvIndRowList.append(csvTextData[i])
csvIndRowList.append(massResults[i][0])
csvIndRowList.append(massResults[i][1])
csvIndRowList.append(massResults[i][2])
csvIndRowList.append(massResults[i][3])
csvIndRowList.append(massResults[i][4])
csvIndRowList.append(massResults[i][5])
print("\n\tROW LIST", csvIndRowList)
csvResults.writerow(csvIndRowList)
csvFile.close()
return csvResults, csvFile
def openPDF(path, data):
return
# def openCSV(path, data):
# path = input("\n\tEnter the name of the text and extension of the text, CSV, or PDF file (has to be in same directory) to be mass analyzed in Emote: ")
# try:
# with open(path) as csvfile:
# reader = csv.reader(csvfile, dialect='excel', quoting=csv.QUOTE_NONNUMERIC)
# datalist = []
# datalist = list(reader)
# return datalist
# except IOError as err:
# print("Error opening path to file.")
# startInterface()
# return
def split_into_sentences(text):
print("CLASSIFYING MULTIPLE SENTENCES")
global sentences
sentences = []
# Code below splits sentences without NLTK Tokenizer
# START
# caps = "([A-Z])"
# prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
# suffixes = "(Inc|Ltd|Jr|Sr|Co)"
# starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
# acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
# websites = "[.](com|net|org|io|gov)"
# digits = "([0-9])"
# print("\n\tTaking input file, converting to text, and splitting it up into sentences..")
# text = " " + text + " "
# text = text.replace("\n"," ")
# text = re.sub(prefixes,"\\1<prd>",text)
# text = re.sub(websites,"<prd>\\1",text)
# if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
# text = re.sub("\s" + caps + "[.] "," \\1<prd> ",text)
# text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
# text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
# text = re.sub(caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>",text)
# text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
# text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
# text = re.sub(" " + caps + "[.]"," \\1<prd>",text)
# text = re.sub(digits + "[.]" + digits,"\\1<prd>\\2",text)
# if '"' in text: text = text.replace('."','".')
# if "\"" in text: text = text.replace(".\"","\".")
# if "!" in text: text = text.replace("!\"","\"!")
# if "?" in text: text = text.replace("?\"","\"?")
# text = text.replace(".",".<stop>")
# text = text.replace("?","?<stop>")
# text = text.replace("!","!<stop>")
# text = text.replace("<prd>",".")
# sentences = text.split("<stop>")
# sentences = sentences[:-1]
# sentences = [s.strip() for s in sentences]
# END
sentences = sent_tokenize(text)
print("\n\tSENTENCES", sentences)
sendToEmote()
# return sentences
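# Minimal, self-contained sketch (not part of this tool) of the NLTK sentence
# splitting used above; it assumes the 'punkt' tokenizer data is available:
#
#   import nltk
#   nltk.download('punkt')
#   from nltk.tokenize import sent_tokenize
#   sent_tokenize("Hello world. How are you?")
#   # -> ['Hello world.', 'How are you?']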
def sendToEmote():
# pickledData = pickle.load(open("base_corpus.pickle", "wb"))
# print(emote.objectToPickle)
# print(sentences)
global sentences
global massResults
massResults = []
print("""\n\n\tEach sentence found in the text will now be output with the strongest classification associated with it on screen.""")
print("""\n\tEmote will classify: """ + str(len(sentences)) + " detected sentences.")
countSentences = int(len(sentences))
print("COUNT: ", countSentences)
# sentences = []
# for num, sent in enumerate(sentences):
for m in sentences:
print("\n\t", m)
# print("\n\tMESSAGE, message")
# gc.disable()
# print("\n\tI", i)
emote.getInput(m)
# print("\n")
# print("\n\tH1", message)
# print("\n\tH2", emote.normalizedProbValues)
# print("\n"+emote.pre_result)
# gc.enable()
# return emote.normalizedProbVals
massResults.append(emote.normalizedProbValues)
# massResult.append(emote.result)
print("\n\tMASS:" , massResults)
# returnAsCSV(massresults)
# timeTotal = time.time() - timeToClassify
# m, s = divmod(s, 60)
# timeTotal = m + " :" + s + " "
# print("""\n\n\n\tEmote took """ + str(timeTotal) + " time to classify the text of " + sentences.length + " detected sentences.")
# outputOption(massResults)
# return massResults
# Code below is not functional
def outputOption(massResults):
option = input("\n\tOutput results into data for 'training' or 'spreadsheet' (enter one): ")
option = option.lower()
if option == 'training':
print("\n\tOutputting data into traininable format.")
outputTraining(massResults)
elif option == 'spreadsheet':
print("\n\tOutputting data into CSV with full results.")
outputSpreadsheet(massResults)
else:
print("\n\tCommand not understood!")
outputOption(massResults)
def outputTraining(massResults):
    startInterface()
def outputSpreadsheet(massResults):
    startInterface()
# Code below is not functional
def addToDatabase(trainingFile, trainingData):
# USING SHELF
try:
pickledData = shelve.open('base_corpus.db', writeback = True)
train = pickledData["base"]
# train = pickle.load(open("base_corpus.pickle", "rb" ) )
# cl = NaiveBayesClassifier(train)
print("\n\tLoaded pickled default database corpus.")
# pickledData.close()
except IOError as err:
# print("\n\tI/O error({0}): {1}".format(errno, strerror))
print("\n\tError training pickle file.. system will exit. Go into the directory, delete the corrupt pickle file, and retry this script to train a new copy.")
# sys.exit()
    trainingFile = input("\n\tEnter the directory / name of the text file to train (with extension): ")
with open(trainingFile, "rb") as fp:
for i in fp.readlines():
# tmp = i.decode('string_escape')
# tmp = i.decode('utf-8').strip()
tmp = i.strip()
# tmp = i.replace('\r', ' ').replace('\n', '')
# tmp = i.replace('\\', ' ').replace('\'', '')
# tmp = i.replace('\\', ' ').replace('\"', '')
# i = i.strip()
tmp = tmp.split(",")
try:
trainingData.append((str(tmp[0]), str(tmp[1])))
except:
pass
for data in trainingData:
try:
pickledData["base"].append(data)
except:
pass
pickledData.sync()
pickledData.close()
print("\n\tTraining data added to the default database corpus.")
startInterface()
# Code below is not functional
def testEmote(testData):
testFileLoc = input("Enter the file name of the text you would like to test against Emote for accuracy (Must be in the same directory and labelled as .txt):")
try:
print(str(testFileLoc))
except:
print("Did not find the text file.")
startInterface()
# for line in open (str(testFileLoc)):
# print(str(testFileLoc))
# testDataTemp = line.split()
# testDataTemp = str(testDataTemp)
# print(testDataTemp)
# testData.append(testDataTemp)
# print(testData)
# print(testData)
with open(str(testFileLoc)) as testData:
print(testData)
emote.testAccuracy(testData)
print("\n\n\t\tTesting done.")
    startInterface()
# print("\n\tNo test data file found.")
# startInterface()
if __name__ == '__main__':
path = ""
data = ""
trainingFile = ""
trainingData = []
csvData = []
csvTextData = []
csvResults = []
csvIndRowList = []
testData = {}
csvFile = {}
massResults = []
sentences = []
startInterface()
else:
path = ""
data = ""
trainingFile = ""
trainingData = []
csvData = []
csvTextData = []
csvResults = []
csvIndRowList = []
testData = {}
csvFile = {}
massResults = []
sentences = []
|
|
import fnmatch
import glob
import os
import re
import tempfile
from datetime import datetime
from gppylib import gplog
from gppylib.commands.base import WorkerPool, Command, REMOTE
from gppylib.commands.unix import Scp
from gppylib.db import dbconn
from gppylib.db.dbconn import execSQL
from gppylib.gparray import GpArray
from gppylib.mainUtils import gp
from gppylib import pgconf
from optparse import Values
from pygresql import pg
import gzip
logger = gplog.get_default_logger()
class Context(Values, object):
filename_dict = {
"ao": ("dump", "_ao_state_file"), "cdatabase": ("cdatabase_%(content)d_%(dbid)s", ""), "co": ("dump", "_co_state_file"), "dirty_table": ("dump", "_dirty_list"),
"dump": ("dump_%(content)d_%(dbid)s", ""), "files": ("dump", "_regular_files"), "filter": ("dump", "_filter"), "global": ("global_%(content)d_%(dbid)s", ""),
"increments": ("dump", "_increments"), "last_operation": ("dump", "_last_operation"), "master_config": ("master_config_files", ".tar"),
"metadata": ("dump_%(content)d_%(dbid)s", ""), "partition_list": ("dump", "_table_list"), "pipes": ("dump", "_pipes"), "plan": ("restore", "_plan"),
"postdata": ("dump_%(content)d_%(dbid)s", "_post_data"), "report": ("dump", ".rpt"), "schema": ("dump", "_schema"),
"segment_config": ("segment_config_files_%(content)d_%(dbid)s", ".tar"), "stats": ("statistics_%(content)d_%(dbid)s", ""), "table": ("dump", "_table"),
"status": ("dump_status_%(content)d_%(dbid)s", ""),
}
defaults = {
"backup_dir": None, "batch_default": 64, "change_schema": None, "cleanup_date": None, "cleanup_total": None, "clear_catalog_dumps": False,
"clear_dumps": False, "clear_dumps_only": False, "compress": True, "db_host_path": None, "ddboost": False, "ddboost_backupdir": None, "ddboost_config_remove": False,
"ddboost_hosts": None, "ddboost_ping": True, "ddboost_remote": False, "ddboost_show_config": False, "ddboost_storage_unit": None, "ddboost_user": None,
"ddboost_verify": False, "drop_db": False, "dump_config": False, "dump_databases": [], "dump_dir": "db_dumps", "dump_global": False, "dump_prefix": "",
"dump_schema": "", "dump_stats": False, "encoding": None, "exclude_dump_schema": "", "exclude_dump_tables": "", "exclude_dump_tables_file": "",
"exclude_schema_file": "", "free_space_percent": None, "history": True, "include_dump_tables": "", "include_dump_tables_file": "",
"include_schema_file": "", "incremental": False, "list_filter_tables": False, "local_dump_prefix": None, "masterDataDirectory": None,
"master_port": 0, "max_streams": None, "netbackup_block_size": None, "netbackup_keyword": None, "netbackup_policy": None, "netbackup_schedule": None,
"netbackup_service_host": None, "metadata_only": False, "no_analyze": False, "no_ao_stats": False, "no_plan": False, "no_validate_table_name": False,
"output_options": [], "post_script": "", "redirected_restore_db": None, "report_dir": "", "report_status_dir": "", "restore_global": False, "restore_schemas":
None, "restore_stats": None, "restore_tables": [], "target_db": None, "timestamp": None, "timestamp_key": None, "full_dump_timestamp": None,
}
def __init__(self, values=None):
if values:
self.defaults.update(values.__dict__) # Ensure that context has default values for all unset variables
super(self.__class__, self).__init__(vars(Values(self.defaults)))
if self.masterDataDirectory:
self.master_datadir = self.masterDataDirectory
else:
self.master_datadir = gp.get_masterdatadir()
self.master_port = self.get_master_port()
if self.local_dump_prefix:
self.dump_prefix = self.local_dump_prefix + "_"
else:
self.dump_prefix = ""
if not self.include_dump_tables: self.include_dump_tables = []
if not self.exclude_dump_tables: self.exclude_dump_tables = []
if not self.output_options: self.output_options = []
if not self.dump_schema: self.dump_schema = []
if not self.exclude_dump_schema: self.exclude_dump_schema = []
self.gparray = GpArray.initFromCatalog(dbconn.DbURL(dbname="template1", port=self.master_port), utility=True)
self.use_old_filename_format = False # Use new filename format by default
self.content_map = self.setup_content_map()
def get_master_port(self):
pgconf_dict = pgconf.readfile(self.master_datadir + "/postgresql.conf")
return pgconf_dict.int('port')
def setup_content_map(self):
content_map = {}
content_map[1] = -1 #for master
for seg in self.gparray.getDbList():
content_map[seg.dbid] = seg.content
return content_map
def generate_filename(self, filetype, dbid=1, content=None, timestamp=None, directory=None, use_old_format=None, use_compress=True):
"""
"Old format" filename format: <prefix>gp_<infix>_<1 if master|0 if segment>_<dbid>_<timestamp><suffix>
"New format" filename format: <prefix>gp_<infix>_<content>_<dbid>_<timestamp><suffix>
The "content" parameter is used to generate a filename pattern for finding files of that content id, not a single filename
"""
if timestamp is None:
timestamp = self.timestamp
if directory:
use_dir = directory
elif dbid == 1:
use_dir = self.get_backup_dir(timestamp)
else:
use_dir = self.get_backup_dir(timestamp, segment_dir=self.get_datadir_for_dbid(dbid))
if use_old_format is None:
use_old_format = self.use_old_filename_format
format_str = "%s/%sgp_%s_%s%s" % (use_dir, self.dump_prefix, "%s", timestamp, "%s")
filename = format_str % (self.filename_dict[filetype][0], self.filename_dict[filetype][1])
if "%(content)d_%(dbid)s" in filename:
if use_old_format:
if content is not None: # Doesn't use "if not content" because 0 is a valid content id
dbids = ["%d" % id for id in self.content_map if self.content_map[id] == content]
filename = filename % {"content": 1 if content == -1 else 0, "dbid": "[%s]" % ("|".join(dbids))}
elif dbid == 1:
filename = filename % {"content": 1, "dbid": 1}
else:
filename = filename % {"content": 0, "dbid": dbid}
else:
if content is not None:
filename = filename % {"content": content, "dbid": "*"}
else:
filename = filename % {"content": self.content_map[dbid], "dbid": dbid}
if self.compress and filetype in ["metadata", "dump", "postdata"] and use_compress:
filename += ".gz"
return filename
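    # Illustrative results (hypothetical timestamp and directory, no prefix):
    #   generate_filename("report")                            -> <backup_dir>/gp_dump_20170101010101.rpt
    #   generate_filename("dump", dbid=1)                      -> <backup_dir>/gp_dump_-1_1_20170101010101.gz   (new format, master)
    #   generate_filename("dump", dbid=1, use_old_format=True) -> <backup_dir>/gp_dump_1_1_20170101010101.gz    (old format, master)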
def generate_prefix(self, filetype, dbid=1, content=None, use_old_format=None):
format_str = "%sgp_%s_" % (self.dump_prefix, "%s")
filename = format_str % (self.filename_dict[filetype][0])
if "%(content)d_%(dbid)s" in filename:
if use_old_format:
if dbid == 1:
filename = filename % {"content": 1, "dbid": 1}
else:
filename = filename % {"content": 0, "dbid": dbid}
else:
if content is None:
content = self.content_map[dbid]
filename = filename % {"content": content, "dbid": dbid}
return filename
def get_datadir_for_dbid(self, dbid):
for seg in self.gparray.getDbList():
if seg.getSegmentDbId() == dbid:
return seg.getSegmentDataDirectory()
raise Exception("Segment with dbid %d not found" % dbid)
def get_current_primaries(self):
return [seg for seg in self.gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
def is_timestamp_in_old_format(self, timestamp=None):
if not timestamp:
timestamp = self.timestamp
dump_dirs = self.get_dump_dirs()
report_file = None
using_nbu = (self.netbackup_service_host is not None)
if using_nbu:
restore_file_with_nbu(self, "report", timestamp=timestamp)
report_file = self.generate_filename('report', timestamp=timestamp)
else:
for dump_dir in dump_dirs:
report_file_attempt = self.generate_filename('report', timestamp=timestamp, directory=dump_dir)
if os.path.exists(report_file_attempt):
report_file = report_file_attempt
break
if not report_file:
raise Exception("Unable to locate report file for timestamp %s" % timestamp)
report_contents = get_lines_from_file(report_file)
old_metadata = self.generate_filename("metadata", timestamp=timestamp, use_old_format=True, use_compress=False)
old_format = False
for line in report_contents:
if old_metadata in line:
return True
return False
def get_backup_dir(self, timestamp=None, segment_dir=None):
if self.backup_dir and not self.ddboost:
use_dir = self.backup_dir
elif segment_dir is not None:
use_dir = segment_dir
elif self.master_datadir:
use_dir = self.master_datadir
else:
raise Exception("Cannot locate backup directory with existing parameters")
if timestamp:
use_timestamp = timestamp
else:
use_timestamp = self.timestamp
if not use_timestamp:
raise Exception("Cannot locate backup directory without timestamp")
if not validate_timestamp(use_timestamp):
raise Exception('Invalid timestamp: "%s"' % use_timestamp)
return "%s/%s/%s" % (use_dir, self.dump_dir, use_timestamp[0:8])
def get_backup_root(self):
if self.backup_dir and not self.ddboost:
return self.backup_dir
else:
return self.master_datadir
def get_gpd_path(self):
gpd_path = os.path.join(self.dump_dir, self.timestamp[0:8])
if self.backup_dir:
gpd_path = os.path.join(self.backup_dir, gpd_path)
return gpd_path
def get_date_dir(self):
if self.db_date_dir:
date_dir = self.db_date_dir
else:
date_dir = self.timestamp[0:8]
return os.path.join(self.get_backup_root(), self.dump_dir, date_dir)
def backup_dir_is_writable(self):
if self.backup_dir and not self.report_status_dir:
try:
check_dir_writable(self.get_backup_dir())
except Exception as e:
logger.warning('Backup directory %s is not writable. Error %s' % (self.get_backup_dir(), str(e)))
logger.warning('Since --report-status-dir option is not specified, report and status file will be written in segment data directory.')
return False
return True
def generate_dump_timestamp(self):
if self.timestamp_key:
timestamp_key = self.timestamp_key
else:
timestamp_key = datetime.now().strftime("%Y%m%d%H%M%S")
if not validate_timestamp(timestamp_key):
raise Exception('Invalid timestamp key')
year = int(timestamp_key[:4])
month = int(timestamp_key[4:6])
day = int(timestamp_key[6:8])
hours = int(timestamp_key[8:10])
minutes = int(timestamp_key[10:12])
seconds = int(timestamp_key[12:14])
self.timestamp = timestamp_key
self.db_date_dir = "%4d%02d%02d" % (year, month, day)
self.timestamp_object = datetime(year, month, day, hours, minutes, seconds)
def get_dump_dirs(self):
use_dir = self.get_backup_root()
dump_path = os.path.join(use_dir, self.dump_dir)
if not os.path.isdir(dump_path):
return []
initial_list = os.listdir(dump_path)
initial_list = fnmatch.filter(initial_list, '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]')
dirnames = []
for d in initial_list:
pth = os.path.join(dump_path, d)
if os.path.isdir(pth):
dirnames.append(pth)
dirnames = sorted(dirnames, key=lambda x: int(os.path.basename(x)), reverse=True)
return dirnames
def get_report_files_and_paths(self, backup_root):
reports = []
prefix = "%s*.rpt" % self.generate_prefix("report")
for path, dirs, files in os.walk(backup_root):
matching = fnmatch.filter(files, "%s*" % prefix)
reports.extend([(path, report_file) for report_file in matching])
if len(reports) == 0:
raise Exception("No report files located")
return reports
def get_compress_and_dbname_from_report_file(self, report_file):
contents = get_lines_from_file(report_file)
compress = None
target_db = ""
name_pattern = re.compile(r'Port [0-9]+ Database (.*) BackupFile')
for line in contents:
if "Compression Program: gzip" in line:
compress = True
elif "Compression Program: None" in line:
compress = False
matching = name_pattern.search(line)
if matching and matching.group(1):
target_db = matching.group(1)
if compress is None or not target_db:
raise Exception("Could not determine database name and compression type from report file %s" % report_file)
return compress, target_db
def get_filename_for_content(context, filetype, content, remote_directory=None, host=None):
filetype_glob = context.generate_filename(filetype, content=content, directory=remote_directory)
if remote_directory:
if not host:
raise Exception("Must supply name of remote host to check for %s file" % filetype)
cmd = Command(name = "Find file of type %s for content %d on host %s" % (filetype, content, host),
cmdStr = 'python -c "import glob; print glob.glob(\'%s\')[0]"' % filetype_glob, ctxt = REMOTE, remoteHost = host)
cmd.run()
if cmd.get_results().rc == 0 and cmd.get_results().stdout:
return cmd.get_results().stdout
return None
else:
filenames = glob.glob(filetype_glob)
if filenames and len(filenames) > 0:
return filenames[0]
return None
def expand_partitions_and_populate_filter_file(context, partition_list, file_prefix):
expanded_partitions = expand_partition_tables(context, partition_list)
dump_partition_list = list(set(expanded_partitions + partition_list))
return create_temp_file_from_list(dump_partition_list, file_prefix)
def populate_filter_tables(table, rows, non_partition_tables, partition_leaves):
if not rows:
non_partition_tables.append(table)
else:
for (schema_name, partition_leaf_name) in rows:
partition_leaf = schema_name.strip() + '.' + partition_leaf_name.strip()
partition_leaves.append(partition_leaf)
return (non_partition_tables, partition_leaves)
def get_all_parent_tables(dbname):
SQL = "SELECT DISTINCT (schemaname || '.' || tablename) FROM pg_partitions"
data = []
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, SQL)
data = curs.fetchall()
return set([d[0] for d in data])
def list_to_quoted_string(conn, filter_tables):
filter_string = "'" + "', '".join([escape_string(t, conn) for t in filter_tables]) + "'"
return filter_string
def convert_parents_to_leafs(context, parents):
partition_leaves_sql = """
SELECT x.partitionschemaname || '.' || x.partitiontablename
FROM (
SELECT distinct schemaname, tablename, partitionschemaname, partitiontablename, partitionlevel
FROM pg_partitions
WHERE schemaname || '.' || tablename in (%s)
) as X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel
FROM pg_partitions
group by (tablename, schemaname)
) as Y
WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel = Y.maxlevel;
"""
if not parents:
return []
conn = dbconn.connect(dbconn.DbURL(dbname=context.target_db))
partition_sql = partition_leaves_sql % list_to_quoted_string(conn, parents)
curs = dbconn.execSQL(conn, partition_sql)
rows = curs.fetchall()
curs.close()
return [r[0] for r in rows]
# input: list of tables to be filtered
# output: the same list, but with parent tables expanded into their leaf partitions
def expand_partition_tables(context, filter_tables):
if not filter_tables or len(filter_tables) == 0:
return filter_tables
parent_tables = list()
non_parent_tables = list()
expanded_list = list()
all_parent_tables = get_all_parent_tables(context.target_db)
for table in filter_tables:
if table in all_parent_tables:
parent_tables.append(table)
else:
non_parent_tables.append(table)
expanded_list += non_parent_tables
local_batch_size = 1000
for (s, e) in get_batch_from_list(len(parent_tables), local_batch_size):
tmp = convert_parents_to_leafs(context, parent_tables[s:e])
expanded_list += tmp
return expanded_list
def get_batch_from_list(length, batch_size):
indices = []
for i in range(0, length, batch_size):
indices.append((i, i+batch_size))
return indices
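# Example: get_batch_from_list(5, 2) -> [(0, 2), (2, 4), (4, 6)]
# (the last pair may run past the end of the list, which is safe for slicing)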
def create_temp_file_from_list(entries, prefix):
"""
When writing the entries into temp file, don't do any strip as there might be
white space in schema name and table name.
"""
if len(entries) == 0:
return None
fd = tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=False)
for entry in entries:
fd.write(entry + '\n')
tmp_file_name = fd.name
fd.close()
return tmp_file_name
def create_temp_file_with_tables(table_list):
return create_temp_file_from_list(table_list, 'table_list_')
def create_temp_file_with_schemas(schema_list):
return create_temp_file_from_list(schema_list, 'schema_file_')
def validate_timestamp(timestamp):
if not timestamp:
return False
if len(timestamp) != 14:
return False
if timestamp.isdigit():
return True
else:
return False
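# Examples: validate_timestamp("20170101010101") -> True  (14 digits, YYYYMMDDHHMMSS)
#           validate_timestamp("2017-01-01")     -> False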
def check_successful_dump(report_file_contents):
for line in report_file_contents:
if line.strip() == 'gp_dump utility finished successfully.':
return True
return False
# raise exception for bad data
def convert_report_filename_to_cdatabase_filename(context, report_file):
timestamp = report_file[-18:-4]
ddboost_parent_dir = None
if context.ddboost:
# We pass in segment_dir='' because we don't want it included in our path for ddboost
ddboost_parent_dir = context.get_backup_dir(timestamp=timestamp, segment_dir='')
old_format = context.is_timestamp_in_old_format(timestamp=timestamp)
return context.generate_filename("cdatabase", timestamp=timestamp, use_old_format=old_format, directory=ddboost_parent_dir)
def get_lines_from_dd_file(filename, ddboost_storage_unit):
cmdStr = 'gpddboost --readFile --from-file=%s' % filename
if ddboost_storage_unit:
cmdStr += ' --ddboost-storage-unit=%s' % ddboost_storage_unit
cmd = Command('DDBoost copy of master dump file', cmdStr)
cmd.run(validateAfter=True)
contents = cmd.get_results().stdout.splitlines()
return contents
def check_cdatabase_exists(context, report_file):
try:
filename = convert_report_filename_to_cdatabase_filename(context, report_file)
    except Exception as err:
return False
if context.ddboost:
cdatabase_contents = get_lines_from_dd_file(filename, context.ddboost_storage_unit)
elif context.netbackup_service_host:
restore_file_with_nbu(context, path=filename)
cdatabase_contents = get_lines_from_file(filename)
else:
cdatabase_contents = get_lines_from_file(filename, context)
dbname = escapeDoubleQuoteInSQLString(context.target_db, forceDoubleQuote=False)
for line in cdatabase_contents:
if 'CREATE DATABASE' in line:
dump_dbname = get_dbname_from_cdatabaseline(line)
if dump_dbname is None:
continue
else:
if dbname == checkAndRemoveEnclosingDoubleQuote(dump_dbname):
return True
return False
def get_dbname_from_cdatabaseline(line):
"""
Line format: CREATE DATABASE "DBNAME" WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = gpadmin;
    To get the dbname, take the substring between the end of the leading
    "CREATE DATABASE " and the start of the first " WITH TEMPLATE = " that is
    not inside double quotes. A double quote inside a name is escaped by
    doubling it, so exactly one " WITH TEMPLATE = " lies outside all quotes:
    the text before it and the text after it each contain an even number of
    double quotes.
    Note: the OWNER name can also contain special characters and double quotes.
"""
cdatabase = "CREATE DATABASE "
try:
start = line.index(cdatabase)
except Exception as e:
logger.error('Failed to find substring %s in line %s, error: %s' % (cdatabase, line, str(e)))
return None
keyword = " WITH TEMPLATE = "
pos = get_nonquoted_keyword_index(line, keyword, '"', len(keyword))
if pos != -1:
dbname = line[start+len(cdatabase) : pos]
return dbname
return None
def get_nonquoted_keyword_index(line, keyword, quote, keyword_len):
# quote can be single quote or double quote
all_positions = get_all_occurrences(keyword, line)
if all_positions != None and len(all_positions) > 0:
for pos in all_positions:
pre_string = line[:pos]
post_string = line[pos + keyword_len:]
quotes_before = get_all_occurrences('%s' % quote, pre_string)
quotes_after = get_all_occurrences('%s' % quote, post_string)
num_quotes_before = 0 if (quotes_before is None or len(quotes_before) == 0) else len(quotes_before)
num_quotes_after = 0 if (quotes_after is None or len(quotes_after) == 0) else len(quotes_after)
if num_quotes_before % 2 == 0 and num_quotes_after % 2 == 0:
return pos
return -1
def get_all_occurrences(substr, line):
# substr is used for generating the pattern, escape those special chars in regexp
if substr is None or line is None or len(substr) > len(line):
return None
return [m.start() for m in re.finditer('(?=%s)' % substr, line)]
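# Example: get_all_occurrences('ab', 'abcab') -> [0, 3]; overlapping matches are
# included because the pattern is a zero-width lookahead.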
def get_type_ts_from_report_file(context, report_file, backup_type):
report_file_contents = get_lines_from_file(report_file)
if not check_successful_dump(report_file_contents):
return None
if not check_cdatabase_exists(context, report_file):
return None
if check_backup_type(report_file_contents, backup_type):
return get_timestamp_val(report_file_contents)
return None
def get_full_ts_from_report_file(context, report_file):
return get_type_ts_from_report_file(context, report_file, 'Full')
def get_incremental_ts_from_report_file(context, report_file):
return get_type_ts_from_report_file(context, report_file, 'Incremental')
def get_timestamp_val(report_file_contents):
for line in report_file_contents:
if line.startswith('Timestamp Key'):
timestamp = line.split(':')[-1].strip()
if not validate_timestamp(timestamp):
raise Exception('Invalid timestamp value found in report_file')
return timestamp
return None
def check_backup_type(report_file_contents, backup_type):
for line in report_file_contents:
if line.startswith('Backup Type'):
if line.split(':')[-1].strip() == backup_type:
return True
return False
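# Example: report contents containing the line "Backup Type: Full" make
# check_backup_type(contents, 'Full') return True.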
def get_lines_from_zipped_file(fname):
"""
Don't strip white space here as it may be part of schema name and table name
"""
content = []
fd = gzip.open(fname, 'r')
try:
for line in fd:
content.append(line.strip('\n'))
except Exception as err:
raise Exception("Error reading from file %s: %s" % (fname, err))
finally:
fd.close()
return content
def get_lines_from_file(fname, context=None):
"""
Don't strip white space here as it may be part of schema name and table name
"""
content = []
if context and context.ddboost:
contents = get_lines_from_dd_file(fname, context.ddboost_storage_unit)
return contents
else:
with open(fname) as fd:
for line in fd:
content.append(line.strip('\n'))
return content
def write_lines_to_file(filename, lines):
"""
Don't do strip in line for white space in case it is part of schema name or table name
"""
with open(filename, 'w') as fp:
for line in lines:
fp.write("%s\n" % line.strip('\n'))
def verify_lines_in_file(fname, expected):
lines = get_lines_from_file(fname)
if lines != expected:
raise Exception("After writing file '%s' contents not as expected.\nLines read from file: %s\nLines expected from file: %s\n" % (fname, lines, expected))
def check_dir_writable(directory):
fp = None
try:
tmp_file = os.path.join(directory, 'tmp_file')
fp = open(tmp_file, 'w')
except IOError as e:
raise Exception('No write access permission on %s' % directory)
except Exception as e:
raise Exception(str(e))
finally:
if fp is not None:
fp.close()
if os.path.isfile(tmp_file):
os.remove(tmp_file)
def execute_sql(query, master_port, dbname):
dburl = dbconn.DbURL(port=master_port, dbname=dbname)
conn = dbconn.connect(dburl)
cursor = execSQL(conn, query)
return cursor.fetchall()
def execute_sql_with_connection(query, conn):
return execSQL(conn, query).fetchall()
def get_latest_report_timestamp(context):
dump_dirs = context.get_dump_dirs()
for d in dump_dirs:
latest = get_latest_report_in_dir(d, context.dump_prefix)
if latest:
return latest
return None
def get_latest_report_in_dir(report_dir, dump_prefix):
files = os.listdir(report_dir)
if len(files) == 0:
return None
dump_report_files = fnmatch.filter(files, '%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].rpt' % dump_prefix)
if len(dump_report_files) == 0:
return None
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
return dump_report_files[0][-18:-4]
def get_timestamp_from_increments_filename(filename, dump_prefix):
fname = os.path.basename(filename)
parts = fname.split('_')
# Check for 4 underscores if there is no prefix, or more than 4 if there is a prefix
if not ((not dump_prefix and len(parts) == 4) or (dump_prefix and len(parts) > 4)):
raise Exception("Invalid increments file '%s' passed to get_timestamp_from_increments_filename" % filename)
return parts[-2].strip()
def get_full_timestamp_for_incremental(context):
full_timestamp = None
if context.netbackup_service_host:
full_timestamp = get_full_timestamp_for_incremental_with_nbu(context)
else:
pattern = '%s/%s/[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]/%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]_increments' % \
(context.get_backup_root(), context.dump_dir, context.dump_prefix)
increments_files = glob.glob(pattern)
for increments_file in increments_files:
if os.path.exists(increments_file):
increment_ts = get_lines_from_file(increments_file)
else:
continue
if context.timestamp in increment_ts:
full_timestamp = get_timestamp_from_increments_filename(increments_file, context.dump_prefix)
break
if not full_timestamp:
raise Exception("Could not locate full backup associated with timestamp '%s'. "
"Either increments file or full backup is missing.\n"
% (context.timestamp))
return full_timestamp
# backup_dir will be either MDD or some other directory depending on call
def get_latest_full_dump_timestamp(context):
dump_dirs = context.get_dump_dirs()
for dump_dir in dump_dirs:
files = sorted(os.listdir(dump_dir))
if len(files) == 0:
logger.warn('Dump directory %s is empty' % dump_dir)
continue
dump_report_files = fnmatch.filter(files, '%sgp_dump_[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].rpt' % context.dump_prefix)
if len(dump_report_files) == 0:
logger.warn('No dump report files found in dump directory %s' % dump_dir)
continue
dump_report_files = sorted(dump_report_files, key=lambda x: int(x.split('_')[-1].split('.')[0]), reverse=True)
for dump_report_file in dump_report_files:
report_path = os.path.join(dump_dir, dump_report_file)
logger.debug('Checking for latest timestamp in report file %s' % report_path)
timestamp = get_full_ts_from_report_file(context, report_path)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
raise Exception('No full backup found for incremental')
def get_all_segment_addresses(context):
addresses = [seg.getSegmentAddress() for seg in context.gparray.getDbList() if seg.isSegmentPrimary(current_role=True)]
return list(set(addresses))
def scp_file_to_hosts(host_list, filename, batch_default):
pool = WorkerPool(numWorkers=min(len(host_list), batch_default))
for hname in host_list:
pool.addCommand(Scp('Copying table_filter_file to %s' % hname,
srcFile=filename,
dstFile=filename,
dstHost=hname))
pool.join()
pool.haltWork()
pool.check_results()
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
pool = WorkerPool(numWorkers=min(len(host_list), batch_default))
for host in host_list:
cmd = Command(host, cmd_str, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
if check_results:
pool.check_results()
def check_funny_chars_in_names(names, is_full_qualified_name=True):
"""
    '\n' inside a table name makes it hard to specify the object name on the shell
    command line; this could be worked around with a table file, but currently we
    read input line by line.
    '!' inside a table name interferes with shell history expansion.
    ',' is used to separate tables in the plan file during incremental restore.
    '.' is currently used to separate schema and table in a fully qualified name.
"""
if names and len(names) > 0:
for name in names:
if ('\t' in name or '\n' in name or '!' in name or ',' in name or
(is_full_qualified_name and name.count('.') > 1) or (not is_full_qualified_name and name.count('.') > 0)):
raise Exception('Name has an invalid character "\\t" "\\n" "!" "," ".": "%s"' % name)
def backup_file_with_ddboost(context, filetype=None, dbid=1, timestamp=None):
if filetype is None:
raise Exception("Cannot call backup_file_with_ddboost without a filetype argument")
if timestamp is None:
timestamp = context.timestamp
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
copy_file_to_dd(context, path, timestamp)
def copy_file_to_dd(context, filename, timestamp=None):
if timestamp is None:
timestamp = context.timestamp
basefilename = os.path.basename(filename)
cmdStr = "gpddboost --copyToDDBoost --from-file=%s --to-file=%s/%s/%s" % (filename, context.dump_dir, context.timestamp[0:8], basefilename)
if context.ddboost_storage_unit:
cmdStr += " --ddboost-storage-unit=%s" % context.ddboost_storage_unit
cmd = Command('copy file %s to DD machine' % basefilename, cmdStr)
cmd.run(validateAfter=True)
#Form and run command line to backup individual file with NBU
def backup_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to backup_file_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call backup_file_with_nbu with no type or path argument")
if timestamp is None:
timestamp = context.timestamp
if filetype:
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
command_string = "cat %s | gp_bsa_dump_agent --netbackup-service-host %s --netbackup-policy %s --netbackup-schedule %s --netbackup-filename %s" % \
(path, context.netbackup_service_host, context.netbackup_policy, context.netbackup_schedule, path)
if context.netbackup_block_size is not None:
command_string += " --netbackup-block-size %s" % context.netbackup_block_size
if context.netbackup_keyword is not None:
command_string += " --netbackup-keyword %s" % context.netbackup_keyword
logger.debug("Command string inside backup_%s_file_with_nbu: %s\n", filetype, command_string)
if hostname is None:
Command("dumping metadata files from master", command_string).run(validateAfter=True)
else:
Command("dumping metadata files from segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
logger.debug("Command ran successfully\n")
#Form and run command line to restore individual file with NBU
def restore_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to restore_file_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call restore_file_with_nbu with no type or path argument")
if timestamp is None:
timestamp = context.timestamp
if filetype:
path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
command_string = "gp_bsa_restore_agent --netbackup-service-host %s" % context.netbackup_service_host
if context.netbackup_block_size is not None:
command_string += " --netbackup-block-size %s" % context.netbackup_block_size
command_string += " --netbackup-filename %s > %s" % (path, path)
logger.debug("Command string inside restore_file_with_nbu: %s\n", command_string)
if hostname is None:
Command("restoring metadata files to master", command_string).run(validateAfter=True)
else:
Command("restoring metadata files to segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
def check_file_dumped_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None):
if filetype and path:
raise Exception("Cannot supply both a file type and a file path to check_file_dumped_with_nbu")
if filetype is None and path is None:
raise Exception("Cannot call check_file_dumped_with_nbu with no type or path argument")
if filetype:
path = context.generate_filename(filetype, dbid=dbid)
command_string = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (context.netbackup_service_host, path)
logger.debug("Command string inside 'check_file_dumped_with_nbu': %s\n", command_string)
if hostname is None:
cmd = Command("Querying NetBackup server to check for dumped file", command_string)
else:
cmd = Command("Querying NetBackup server to check for dumped file", command_string, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
if cmd.get_results().stdout.strip() == path:
return True
else:
return False
def get_full_timestamp_for_incremental_with_nbu(context):
if context.dump_prefix:
get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*_increments" % (context.netbackup_service_host, context.dump_prefix)
else:
get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*_increments" % context.netbackup_service_host
cmd = Command("Query NetBackup server to get the list of increments files backed up", get_inc_files_cmd)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.strip().split('\n')
for line in files_list:
fname = line.strip()
restore_file_with_nbu(context, path=fname)
contents = get_lines_from_file(fname)
if context.timestamp in contents:
full_timestamp = get_timestamp_from_increments_filename(fname, context.dump_prefix)
return full_timestamp
return None
def get_latest_full_ts_with_nbu(context):
if context.dump_prefix:
get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt" % \
(context.netbackup_service_host, context.dump_prefix)
else:
get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt" % context.netbackup_service_host
cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
cmd.run(validateAfter=True)
files_list = cmd.get_results().stdout.strip().split('\n')
for line in files_list:
fname = line.strip()
if fname == '':
continue
if context.backup_dir is not None and context.backup_dir not in fname:
continue
if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
return None
restore_file_with_nbu(context, path=fname)
timestamp = get_full_ts_from_report_file(context, report_file=fname)
logger.debug('Timestamp = %s' % timestamp)
if timestamp is not None:
return timestamp
raise Exception('No full backup found for given incremental on the specified NetBackup server')
def getRows(conn, exec_sql):
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
curs.close()
return results
def check_change_schema_exists(context, use_redirect):
with dbconn.connect(dbconn.DbURL(port=context.master_port, dbname=context.target_db)) as conn:
schemaname = escape_string(context.change_schema, conn)
dbname = context.target_db if not use_redirect else context.redirected_restore_db
schema_check_sql = "select * from pg_catalog.pg_namespace where nspname='%s';" % schemaname
if len(getRows(conn, schema_check_sql)) < 1:
return False
return True
def escape_string(string, conn):
return pg.DB(db=conn).escape_string(string)
def unescape_string(string):
if string:
string = string.replace('\\\\', '\\').replace("''", "'")
return string
def isDoubleQuoted(string):
if len(string) > 2 and string[0] == '"' and string[-1] == '"':
return True
return False
def checkAndRemoveEnclosingDoubleQuote(string):
if isDoubleQuoted(string):
string = string[1 : len(string) - 1]
return string
def checkAndAddEnclosingDoubleQuote(string):
if not isDoubleQuoted(string):
string = '"' + string + '"'
return string
def escapeDoubleQuoteInSQLString(string, forceDoubleQuote=True):
"""
    Accept a true (un-escaped) database, schema, or table name, escape any double
    quotes inside the name, and add enclosing double quotes by default.
"""
string = string.replace('"', '""')
if forceDoubleQuote:
string = '"' + string + '"'
return string
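# Example: escapeDoubleQuoteInSQLString('my "quoted" db') -> '"my ""quoted"" db"'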
def removeEscapingDoubleQuoteInSQLString(string, forceDoubleQuote=True):
"""
Remove the escaping double quote in database/schema/table name.
"""
if string is None:
return string
string = string.replace('""', '"')
if forceDoubleQuote:
string = '"' + string + '"'
return string
def formatSQLString(rel_file, isTableName=False):
"""
    Read the fully qualified schema or table names from rel_file; if the entries
    are table names, split each into schema and table, and escape any double
    quotes inside the names properly.
"""
relnames = []
if rel_file and os.path.exists(rel_file):
with open(rel_file, 'r') as fr:
lines = fr.read().strip('\n').split('\n')
for line in lines:
if isTableName:
schema, table = split_fqn(line)
schema = escapeDoubleQuoteInSQLString(schema)
table = escapeDoubleQuoteInSQLString(table)
relnames.append(schema + '.' + table)
else:
schema = escapeDoubleQuoteInSQLString(line)
relnames.append(schema)
if len(relnames) > 0:
write_lines_to_file(rel_file, relnames)
return rel_file
def split_fqn(fqn_name):
"""
    Split a fully qualified table name into schema and table on the '.' separator.
"""
try:
schema, table = fqn_name.split('.')
except Exception as e:
logger.error("Failed to split name %s into schema and table, please check the format is schema.table" % fqn_name)
raise Exception('%s' % str(e))
return schema, table
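# Example: split_fqn('public.sales') -> ('public', 'sales'); names with extra
# dots raise an exception, since exactly one '.' separator is expected.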
def remove_file_on_segments(context, filename):
addresses = get_all_segment_addresses(context)
try:
cmd = 'rm -f %s' % filename
run_pool_command(addresses, cmd, context.batch_default, check_results=False)
except Exception as e:
logger.error("cleaning up file failed: %s" % e.__str__())
def get_table_info(line):
"""
    Splitting is ambiguous when a table, schema, user, or tablespace name contains
    the full text of one of the other fields; this is very unlikely, but if it
    happens we return None.
    Since we only care about the table name, type, and schema name, stripping the
    input is safe here.
    line: contains the true (un-escaped) schema name, table name, and user name.
"""
COMMENT_EXPR = '-- Name: '
TYPE_EXPR = '; Type: '
SCHEMA_EXPR = '; Schema: '
OWNER_EXPR = '; Owner: '
TABLESPACE_EXPR = '; Tablespace: '
temp = line.strip('\n')
type_start = get_all_occurrences(TYPE_EXPR, temp)
schema_start = get_all_occurrences(SCHEMA_EXPR, temp)
owner_start = get_all_occurrences(OWNER_EXPR, temp)
tblspace_start = get_all_occurrences(TABLESPACE_EXPR, temp)
if len(type_start) != 1 or len(schema_start) != 1 or len(owner_start) != 1:
return (None, None, None, None)
name = temp[len(COMMENT_EXPR) : type_start[0]]
type = temp[type_start[0] + len(TYPE_EXPR) : schema_start[0]]
schema = temp[schema_start[0] + len(SCHEMA_EXPR) : owner_start[0]]
if not tblspace_start:
tblspace_start.append(None)
owner = temp[owner_start[0] + len(OWNER_EXPR) : tblspace_start[0]]
return (name, type, schema, owner)
def validate_netbackup_params(param_dict):
max_len = 127
for label, param in param_dict.iteritems():
if param and len(param) > max_len:
raise Exception("Netbackup {0} ({1}) exceeds the maximum length of {2} characters".format(label, param, max_len))
|
|
"""Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations.
This can also prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
If an array is passed, it must have the same number of entries as the
data used for fitting and is used as datapoint-dependent noise level.
Note that this is equivalent to adding a WhiteKernel with c=alpha.
        Allowing the noise level to be specified directly as a parameter is
        mainly for convenience and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
        By default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values becomes zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
        zero. When enabled, the normalization effectively modifies the GP's
        prior based on the data, which contradicts the likelihood principle;
        normalization is thus disabled by default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
        The random number generator used for sampling the initial kernel
        hyperparameters of the optimizer restarts. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
@property
@deprecated("Attribute rng was deprecated in version 0.19 and "
"will be removed in 0.21.")
def rng(self):
return self._rng
@property
@deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
"will be removed in 0.21.")
def y_train_mean(self):
return self._y_train_mean
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self._y_train_mean
else:
self._y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also
        be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
if self.kernel is None:
kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self._y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the
random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
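# A minimal usage sketch, not part of the original module; it assumes the imports at
# the top of this file (np, RBF, C) and simply exercises fit/predict on toy data.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = np.linspace(0, 5, 25)[:, np.newaxis]
    y = np.sin(X).ravel() + 0.1 * rng.randn(25)
    gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=1e-6,
                                   random_state=0).fit(X, y)
    y_mean, y_std = gpr.predict(X, return_std=True)       # posterior mean and std
    lml = gpr.log_marginal_likelihood(gpr.kernel_.theta)  # fitted log-marginal likelihood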
|
|
"""
snackPacket.py
class for collected snack icon that follows dino to station after collection
"""
import pygame
import dinosInSpace
import infoGraphic56
import gfx56
import fpsSwitch
WHITE = (255,255,255)
BLACK = (0,0,0)
FONTCOLOR = BLACK
FONTSIZE = 16
RELATIVE_TO_DINO_CENTER = (-40,-40)
SCALESTEP = 2
SCALESTARTSIZE = (2,2)
BOUNCEMAX = 30 # size above 1x scale before image contracts
#SPINSTEP = 10
class ImgLib(object):
""" image library to load and access local images """
imgDict = None
def __init__(self):
if not ImgLib.imgDict:
ImgLib.imgDict = {
"SP_FRAME" : dinosInSpace.loadImage("packetFrame.png", "2X", (0,0)) # pygame.Surface((40,40))
}
# ImgLib.imgDict["SP_FRAME"].fill(WHITE)
@staticmethod
def getImage(name):
if name in ImgLib.imgDict:
return ImgLib.imgDict[name].copy()
else:
print "image, " + name + " not found"
def initImgLib():
ImgLib()
class SnackPacket(pygame.sprite.Sprite):
""" collected snack icon that follow dino to statoin after collection
- can 'combine' with other packets to increase count
- are only created if dino collides with a snack
"""
packetList = []
packetAtStation = {}
def __init__(self, dino):
pygame.sprite.Sprite.__init__(self)
self.originalFrame = ImgLib.getImage("SP_FRAME")
self.count = 1
self.dino = dino
self.image = self.renderCurrentCount()
self.rect = self.image.get_rect()
self.followDino()
self.atStation = None
self.isScalingIntoView = True
self.isBouncing = False
self.isSpinning = False
        self.bounceReset = False # switch for bounce interruption handling
self.scaleStep = SCALESTEP
self.scaleSize = SCALESTARTSIZE
# self.spinDistance = 0
# self.spinStep = SPINSTEP
self.originalFrameSize = self.originalFrame.get_size()
self.bounceMax = self.originalFrameSize[0] + BOUNCEMAX
self.scaleDirection = 1 # 1 or -1 switch for self.bounce
if fpsSwitch.FPSSwitch._fps == 60:
self.scaleStep *= 2
# self.spinStep *= 2
SnackPacket.packetList.append(self)
def renderCurrentCount(self):
""" return image with current count """
textSurface = infoGraphic56.TextObject("+ " + str(self.count), FONTSIZE, FONTCOLOR).image
image = gfx56.centerBlit(self.originalFrame.copy(), textSurface)
return image
def update(self):
if self.atStation:
self.followStation()
else:
self.followDino()
if self.isScalingIntoView:
self.scaleIntoView()
if self.isBouncing:
self.bounce()
# if self.isSpinning:
# self.spin()
def followDino(self):
self.rect.center = (self.dino.rect.center[0] + RELATIVE_TO_DINO_CENTER[0], self.dino.rect.center[1] + RELATIVE_TO_DINO_CENTER[1])
def followStation(self):
self.rect.center = (self.atStation.rect.center[0] + RELATIVE_TO_DINO_CENTER[0], self.atStation.rect.center[1] + RELATIVE_TO_DINO_CENTER[1])
def addCount(self, count=1):
self.count += count
self.image = self.renderCurrentCount()
self.isBouncing = True
self.bounceReset = True
def hookToStation(self, station):
self.dino = None
if station in SnackPacket.packetAtStation:
SnackPacket.packetAtStation[station].addCount(self.count)
self.kill()
else:
SnackPacket.packetAtStation[station] = self
self.atStation = station
self.followStation()
# def spin(self):
# """ update callback
# - spins image once
# """
# # increment spinDistance
# self.spinDistance += self.spinStep
#
# # ending condition
# if self.spinDistance >= 360:
# self.spinDistance = 0
# self.image = pygame.transform.scale(self.originalFrame, self.scaleSize)
# self.isSpinning = False
# else:
# image = self.originalFrame.copy()
# if self.scaleSize != self.originalFrameSize:
# image = pygame.transform.scale(self.originalFrame.copy(), self.scaleSize)
# self.image = pygame.transform.rotate(image, self.spinDistance)
def scaleIntoView(self):
""" update callback
- scales self.image up by self.scaleStep
- when original image size is met, self.isScalingIntoView set to False
"""
self.centerScale(self.scaleSize)
self.scaleSize = (self.scaleSize[0] + self.scaleStep, self.scaleSize[1] + self.scaleStep)
if self.scaleSize[0] >= self.originalFrameSize[0]:
self.isScalingIntoView = False
self.isBouncing = True
# if self.scaleSize >= self.originalFrameSize:
# center = self.rect.center
# self.image = self.renderCurrentCount()
# self.rect = self.image.get_rect()
# self.rect.center = center
# self.isScalingIntoView = False
# else:
# self.centerScale(self.scaleSize)
# self.scaleSize = (self.scaleSize[0] + self.scaleStep, self.scaleSize[1] + self.scaleStep)
def bounce(self):
""" update callback
- scales image up to bounce max, then down to original size
- when original image size is met, self.isBouncing sets to False
"""
        # anything not following scaleIntoView resets the image in case of interruptions
if self.bounceReset:
center = self.rect.center
self.image = self.renderCurrentCount()
self.rect = self.image.get_rect()
self.rect.center = center
self.scaleSize = self.originalFrameSize
self.bounceReset = False
# scale image
else:
self.centerScale(self.scaleSize)
# bounce ending condition
if self.scaleSize[0] < self.originalFrameSize[0]:
center = self.rect.center
self.image = self.renderCurrentCount()
self.rect = self.image.get_rect()
self.rect.center = center
self.scaleSize = self.originalFrameSize
self.isBouncing = False
self.scaleDirection = 1 # reset so can bounce again
# bounce reaches changing point
elif self.scaleSize[0] >= self.bounceMax:
self.scaleDirection = -1
# increment scaleSize based on direction
self.scaleSize = (self.scaleSize[0] + self.scaleDirection * self.scaleStep, self.scaleSize[1] + self.scaleDirection * self.scaleStep)
def centerScale(self, size):
""" scale self.image in place from center """
center = self.rect.center
self.image = pygame.transform.scale(self.renderCurrentCount(), size)
self.rect = self.image.get_rect()
self.rect.center = center
@staticmethod
def wipe():
for p in SnackPacket.packetList:
p.kill()
SnackPacket.packetList = []
SnackPacket.packetAtStation = {}
@staticmethod
def quickReset():
SnackPacket.wipe()
def quickReset():
SnackPacket.quickReset()
def wipe():
SnackPacket.wipe()
|
|
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import base64
from collections import namedtuple
import json
import logging
import hashlib
from c7n_gcp.client import errors
from c7n.mu import custodian_archive as base_archive
from c7n.utils import local_session
from googleapiclient.errors import HttpError
log = logging.getLogger('c7n_gcp.mu')
def custodian_archive(packages=None):
if not packages:
packages = []
packages.append('c7n_gcp')
archive = base_archive(packages)
    # Requirements are fetched server-side, which helps for binary extensions,
    # but for pure python packages, if we have a local install and it's
    # relatively small, it might be faster to just upload.
#
requirements = set()
requirements.add('jmespath')
requirements.add('retrying')
requirements.add('python-dateutil')
requirements.add('ratelimiter>=1.2.0.post0')
requirements.add('google-auth>=1.4.1')
requirements.add('google-auth-httplib2>=0.0.3')
requirements.add('google-api-python-client>=1.7.3')
archive.add_contents(
'requirements.txt',
'\n'.join(sorted(requirements)))
return archive
class CloudFunctionManager(object):
def __init__(self, session_factory, region):
self.session_factory = session_factory
self.session = local_session(session_factory)
self.client = self.session.client(
'cloudfunctions', 'v1', 'projects.locations.functions')
self.region = region
def list_functions(self, prefix=None):
"""List extant cloud functions."""
return self.client.execute_command(
'list',
{'parent': "projects/{}/locations/{}".format(
self.session.get_default_project(),
self.region)}
).get('functions', [])
def remove(self, func):
project = self.session.get_default_project()
# delete event sources
for e in func.events:
e.remove(func)
func_name = "projects/{}/locations/{}/functions/{}".format(
project, self.region, func.name)
try:
return self.client.execute_command('delete', {'name': func_name})
except errors.HttpError as e:
if e.resp.status != 404:
raise
def publish(self, func):
"""publish the given function."""
project = self.session.get_default_project()
func_name = "projects/{}/locations/{}/functions/{}".format(
project, self.region, func.name)
func_info = self.get(func.name)
source_url = None
archive = func.get_archive()
if not func_info or self._delta_source(archive, func_name):
source_url = self._upload(archive, self.region)
config = func.get_config()
config['name'] = func_name
if source_url:
config['sourceUploadUrl'] = source_url
# todo - we'll really need before() and after() for pre-provisioning of
# resources (ie topic for function stream on create) and post provisioning (schedule
# invocation of extant function).
#
# convergent event source creation
for e in func.events:
e.add(func)
if func_info is None:
log.info("creating function")
response = self.client.execute_command(
'create', {
'location': "projects/{}/locations/{}".format(
project, self.region),
'body': config})
else:
delta = delta_resource(func_info, config, ('httpsTrigger',))
if not delta:
response = None
else:
update_mask = ','.join(delta)
log.info("updating function config %s", update_mask)
response = self.client.execute_command(
'patch', {
'name': func_name,
'body': config,
'updateMask': update_mask})
return response
def metrics(self, funcs, start, end, period=5 * 60):
"""Get the metrics for a set of functions."""
def logs(self, func, start, end):
"""Get the logs for a given function."""
def get(self, func_name, qualifier=None):
"""Get the details on a given function."""
project = self.session.get_default_project()
func_name = "projects/{}/locations/{}/functions/{}".format(
project, self.region, func_name)
try:
return self.client.execute_query('get', {'name': func_name})
except errors.HttpError as e:
if e.resp.status != 404:
raise
def _get_http_client(self, client):
# Upload source, we need a class sans credentials as we're
# posting to a presigned url.
return self.client.get_http()
def _delta_source(self, archive, func_name):
checksum = archive.get_checksum(hasher=hashlib.md5)
source_info = self.client.execute_command(
'generateDownloadUrl', {'name': func_name, 'body': {}})
http = self._get_http_client(self.client)
source_headers, _ = http.request(source_info['downloadUrl'], 'HEAD')
# 'x-goog-hash': 'crc32c=tIfQ9A==, md5=DqrN06/NbVGsG+3CdrVK+Q=='
deployed_checksum = source_headers['x-goog-hash'].split(',')[-1].split('=', 1)[-1]
modified = deployed_checksum != checksum
log.debug("archive modified:%s checksum %r deployed checksum %r",
modified, checksum, deployed_checksum)
return modified
def _upload(self, archive, region):
"""Upload function source and return source url
"""
# Generate source upload url
url = self.client.execute_command(
'generateUploadUrl',
{'parent': 'projects/{}/locations/{}'.format(
self.session.get_default_project(),
region)}).get('uploadUrl')
log.debug("uploading function code %s", url)
http = self._get_http_client(self.client)
headers, response = http.request(
url, method='PUT',
headers={
'content-type': 'application/zip',
'Content-Length': '%d' % archive.size,
'x-goog-content-length-range': '0,104857600'
},
body=open(archive.path, 'rb')
)
log.info("function code uploaded")
if headers['status'] != '200':
raise RuntimeError("%s\n%s" % (headers, response))
return url
def delta_resource(old_config, new_config, ignore=()):
found = []
for k in new_config:
if k in ignore:
continue
if new_config[k] != old_config[k]:
found.append(k)
return found
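# A hedged illustration, not part of the original module: delta_resource() returns the
# keys whose values changed, which publish() and diff_job() elsewhere in this module
# join into an updateMask; the config values below are hypothetical.
def _demo_delta_resource():
    old = {'timeout': '60s', 'availableMemoryMb': 256, 'httpsTrigger': {}}
    new = {'timeout': '120s', 'availableMemoryMb': 256, 'httpsTrigger': {'url': 'x'}}
    return delta_resource(old, new, ignore=('httpsTrigger',))  # -> ['timeout']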
class CloudFunction(object):
def __init__(self, func_data, archive=None):
self.func_data = func_data
self.archive = archive
@property
def name(self):
return self.func_data['name']
@property
def timeout(self):
return self.func_data.get('timeout', '60s')
@property
def memory_size(self):
return self.func_data.get('memory-size', 512)
@property
def service_account(self):
return self.func_data.get('service-account', None)
@property
def runtime(self):
return self.func_data.get('runtime', 'python37')
@property
def labels(self):
return dict(self.func_data.get('labels', {}))
@property
def environment(self):
return self.func_data.get('environment', {})
@property
def network(self):
return self.func_data.get('network')
@property
def max_instances(self):
return self.func_data.get('max-instances')
@property
def events(self):
return [e for e in self.func_data.get('events', ())]
def get_archive(self):
return self.archive
def get_config(self):
labels = self.labels
labels['deployment-tool'] = 'custodian'
conf = {
'name': self.name,
'timeout': self.timeout,
'entryPoint': 'handler',
'runtime': self.runtime,
'labels': labels,
'availableMemoryMb': self.memory_size}
if self.environment:
conf['environmentVariables'] = self.environment
if self.network:
conf['network'] = self.network
if self.max_instances:
conf['maxInstances'] = self.max_instances
if self.service_account:
conf['serviceAccountEmail'] = self.service_account
for e in self.events:
conf.update(e.get_config(self))
return conf
PolicyHandlerTemplate = """\
import base64
import json
import traceback
import os
import logging
import sys
def run(event, context=None):
logging.info("starting function execution")
trigger_type = os.environ.get('FUNCTION_TRIGGER_TYPE', '')
if trigger_type == 'HTTP_TRIGGER':
event = {'request': event}
else:
event = json.loads(base64.b64decode(event['data']).decode('utf-8'))
print("Event: %s" % (event,))
try:
from c7n_gcp.handler import run
result = run(event, context)
logging.info("function execution complete")
if trigger_type == 'HTTP_TRIGGER':
return json.dumps(result), 200, (('Content-Type', 'application/json'),)
return result
except Exception as e:
traceback.print_exc()
raise
"""
class PolicyFunction(CloudFunction):
def __init__(self, policy, archive=None, events=()):
self.policy = policy
self.func_data = self.policy.data['mode']
self.archive = archive or custodian_archive()
self._events = events
@property
def name(self):
return self.policy.name
@property
def events(self):
return self._events
def get_archive(self):
self.archive.add_contents('main.py', PolicyHandlerTemplate)
self.archive.add_contents(
'config.json', json.dumps(
{'policies': [self.policy.data]}, indent=2))
self.archive.close()
return self.archive
def get_config(self):
config = super(PolicyFunction, self).get_config()
config['entryPoint'] = 'run'
return config
class EventSource(object):
def __init__(self, session, data=None):
self.data = data
self.session = session
@property
def prefix(self):
return self.data.get('prefix', 'custodian-auto-')
def add(self, func):
"""Default no-op
"""
def remove(self, func):
"""Default no-op
"""
def get_config(self, func):
return {}
class HTTPEvent(EventSource):
"""Internet exposed http endpoint for cloud function"""
def get_config(self, func):
return {'httpsTrigger': {}}
class BucketEvent(EventSource):
trigger = 'google.storage.object.finalize'
collection_id = 'cloudfunctions.projects.buckets'
events = [
# finalize is basically on write
'google.storage.object.finalize',
'google.storage.object.archive',
'google.storage.object.delete',
'google.storage.object.metadataUpdate',
'providers/cloud.storage/eventTypes/object.change']
def get_config(self, func):
return {
'eventTrigger': {
'eventType': self.data.get('event', self.trigger),
'resource': self.data['bucket']}}
class PubSubSource(EventSource):
trigger = 'providers/cloud.pubsub/eventTypes/topic.publish'
collection_id = 'pubsub.projects.topics'
# data -> topic
def get_config(self, func):
return {
'eventTrigger': {
'eventType': self.trigger,
'failurePolicy': {},
'service': 'pubsub.googleapis.com',
'resource': self.get_topic_param()}}
def get_topic_param(self, topic=None, project=None):
return 'projects/{}/topics/{}'.format(
project or self.session.get_default_project(),
topic or self.data['topic'])
def ensure_topic(self):
"""Verify the pub/sub topic exists.
Returns the topic qualified name.
"""
client = self.session.client('pubsub', 'v1', 'projects.topics')
topic = self.get_topic_param()
try:
client.execute_command('get', {'topic': topic})
except HttpError as e:
if e.resp.status != 404:
raise
else:
return topic
        # Bug in the discovery doc: the API says the body must be empty, but it's
        # required by the discovery API for create.
client.execute_command('create', {'name': topic, 'body': {}})
return topic
def ensure_iam(self, publisher=None):
"""Ensure the given identities are in the iam role bindings for the topic.
"""
topic = self.get_topic_param()
client = self.session.client('pubsub', 'v1', 'projects.topics')
policy = client.execute_command('getIamPolicy', {'resource': topic})
policy.pop('etag')
found = False
        for binding in policy.get('bindings', []):
if binding['role'] != 'roles/pubsub.publisher':
continue
if publisher in binding['members']:
return
found = binding
        if not found:
            policy.setdefault('bindings', []).append(
                {'members': [publisher], 'role': 'roles/pubsub.publisher'})
else:
found['members'].append(publisher)
client.execute_command('setIamPolicy', {'resource': topic, 'body': {'policy': policy}})
    def add(self, func):
        self.ensure_topic()
    def remove(self, func):
        if not self.data.get('topic').startswith(self.prefix):
            return
        client = self.session.client('pubsub', 'v1', 'projects.topics')
        client.execute_command('delete', {'topic': self.get_topic_param()})
class PeriodicEvent(EventSource):
"""Periodic serverless execution.
Supports both http and pub/sub triggers.
Note periodic requires the setup of app engine and is restricted
to app engine locations.
https://cloud.google.com/scheduler/docs/setup
Schedule can be specified in either cron syntax or app engine schedule expression.
https://cloud.google.com/scheduler/docs/configuring/cron-job-schedules
Examples of schedule expressions.
https://cloud.google.com/appengine/docs/standard/python/config/cronref
"""
def __init__(self, session, data, region):
self.session = session
self.data = data
self.region = region
@property
def target_type(self):
return self.data.get('target-type', 'http')
def get_config(self, func):
return self.get_target(func).get_config(func)
def add(self, func):
target = self.get_target(func)
target.add(func)
job = self.get_job_config(func, target)
client = self.session.client(
'cloudscheduler', 'v1beta1', 'projects.locations.jobs')
delta = self.diff_job(client, job)
if delta:
log.info("update periodic function - %s" % (", ".join(delta)))
return client.execute_command(
'patch', {
'name': job['name'],
'updateMask': ','.join(delta),
'body': job})
elif delta is not None:
return
return client.execute_command(
'create', {
'parent': 'projects/{}/locations/{}'.format(
self.session.get_default_project(),
self.region),
'body': job})
def remove(self, func):
target = self.get_target(func)
target.remove(func)
job = self.get_job_config(func, target)
if not job['name'].rsplit('/', 1)[-1].startswith(self.prefix):
return
client = self.session.client(
'cloudscheduler', 'v1beta1', 'projects.locations.jobs')
return client.execute_command('delete', {'name': job['name']})
# Periodic impl
def diff_job(self, client, target_job):
try:
job = client.execute_query('get', {'name': target_job['name']})
except HttpError as e:
if e.resp.status != 404:
raise
return None
delta = delta_resource(job, target_job, ignore=('httpTarget', 'pubSubTarget'))
if not delta:
return False
return delta
def get_target(self, func):
if self.target_type == 'http':
return HTTPEvent(self.session, self.data)
elif self.target_type == 'pubsub':
config = dict(self.data)
config['topic'] = '{}{}'.format(self.prefix, func.name)
return PubSubSource(self.session, config)
else:
raise ValueError("Unknown periodic target: %s" % self.target_type)
def get_job_config(self, func, target):
job = {
'name': "projects/{}/locations/{}/jobs/{}".format(
self.session.get_default_project(),
self.region,
self.data.get('name', '{}{}'.format(self.prefix, func.name))),
'schedule': self.data['schedule'],
'timeZone': self.data.get('tz', 'Etc/UTC')}
if self.target_type == 'http':
job['httpTarget'] = {
'uri': 'https://{}-{}.cloudfunctions.net/{}'.format(
self.region,
self.session.get_default_project(),
func.name)
}
elif self.target_type == 'pubsub':
job['pubsubTarget'] = {
'topicName': target.get_topic_param(),
}
return job
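# A hedged sketch, not part of the original module, of the data dict PeriodicEvent
# consumes above; the keys are the ones read by get_config/get_job_config, the values
# are hypothetical.
_EXAMPLE_PERIODIC_DATA = {
    'schedule': 'every 5 minutes',  # cron or App Engine schedule expression
    'tz': 'Etc/UTC',                # optional, defaults to Etc/UTC
    'target-type': 'pubsub',        # 'http' (default) or 'pubsub'
}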
LogInfo = namedtuple('LogInfo', 'name scope_type scope_id id')
class LogSubscriber(EventSource):
"""Composite as a log sink
subscriber = LogSubscriber(dict(
log='projects/custodian-1291/logs/cloudaudit.googleapis.com%2Factivity'))
function = CloudFunction(dict(name='log-sub', events=[subscriber])
"""
# filter, log, topic, name
# optional scope, scope_id (if scope != default)
# + pub sub
def __init__(self, session, data):
self.data = data
self.session = session
self.pubsub = PubSubSource(session, data)
def get_log(self):
scope_type, scope_id, _, log_id = self.data['log'].split('/', 3)
return LogInfo(
scope_type=scope_type, scope_id=scope_id,
id=log_id, name=self.data['log'])
def get_log_filter(self):
return self.data.get('filter')
def get_parent(self, log_info):
"""Get the parent container for the log sink"""
if self.data.get('scope', 'log') == 'log':
if log_info.scope_type != 'projects':
raise ValueError("Invalid log subscriber scope")
parent = "%s/%s" % (log_info.scope_type, log_info.scope_id)
elif self.data['scope'] == 'project':
parent = 'projects/{}'.format(
self.data.get('scope_id', self.session.get_default_project()))
elif self.data['scope'] == 'organization':
parent = 'organizations/{}'.format(self.data['scope_id'])
elif self.data['scope'] == 'folder':
parent = 'folders/{}'.format(self.data['scope_id'])
elif self.data['scope'] == 'billing':
parent = 'billingAccounts/{}'.format(self.data['scope_id'])
else:
raise ValueError(
'invalid log subscriber scope %s' % (self.data))
return parent
def get_sink(self, topic_info=""):
log_info = self.get_log()
parent = self.get_parent(log_info)
log_filter = self.get_log_filter()
scope = parent.split('/', 1)[0]
sink = {
'parent': parent,
'uniqueWriterIdentity': False,
# Sink body
'body': {
'name': self.data['name'],
'destination': "pubsub.googleapis.com/%s" % topic_info
}
}
if log_filter is not None:
sink['body']['filter'] = log_filter
if scope != 'projects':
sink['body']['includeChildren'] = True
sink['uniqueWriterIdentity'] = True
sink_path = '%s/sinks/%s' % (sink['parent'], sink['body']['name'])
return scope, sink_path, sink
def ensure_sink(self):
"""Ensure the log sink and its pub sub topic exist."""
topic_info = self.pubsub.ensure_topic()
scope, sink_path, sink_info = self.get_sink(topic_info)
client = self.session.client('logging', 'v2', '%s.sinks' % scope)
try:
sink = client.execute_command('get', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise
sink = client.execute_command('create', sink_info)
else:
delta = delta_resource(sink, sink_info['body'])
if delta:
sink_info['updateMask'] = ','.join(delta)
sink_info['sinkName'] = sink_path
sink_info.pop('parent')
sink = client.execute_command('update', sink_info)
else:
return sink_path
self.pubsub.ensure_iam(publisher=sink['writerIdentity'])
return sink_path
def add(self, func):
"""Create any configured log sink if doesn't exist."""
return self.ensure_sink()
def remove(self, func):
"""Remove any provisioned log sink if auto created"""
if not self.data['name'].startswith(self.prefix):
return
parent = self.get_parent(self.get_log())
_, sink_path, _ = self.get_sink()
client = self.session.client(
'logging', 'v2', '%s.sinks' % (parent.split('/', 1)[0]))
try:
client.execute_command(
'delete', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise
def get_config(self, func):
return self.pubsub.get_config(func)
class ApiSubscriber(EventSource):
"""Subscribe to individual api calls
via audit log -> filtered sink -> pub/sub topic -> cloud function.
"""
# https://cloud.google.com/logging/docs/reference/audit/auditlog/rest/Shared.Types/AuditLog
# scope - project
# api calls
def __init__(self, session, data):
self.data = data
self.session = session
def get_subscription(self, func):
log_name = "{}/{}/logs/cloudaudit.googleapis.com%2Factivity".format(
self.data.get('scope', 'projects'),
self.session.get_default_project())
log_filter = 'logName = "%s"' % log_name
log_filter += " AND protoPayload.methodName = (%s)" % (
' OR '.join(['"%s"' % m for m in self.data['methods']]))
return {
'topic': '{}audit-{}'.format(self.prefix, func.name),
'name': '{}audit-{}'.format(self.prefix, func.name),
'log': log_name,
'filter': log_filter}
def add(self, func):
return LogSubscriber(self.session, self.get_subscription(func)).add(func)
def remove(self, func):
return LogSubscriber(self.session, self.get_subscription(func)).remove(func)
def get_config(self, func):
return LogSubscriber(self.session, self.get_subscription(func)).get_config(func)
|
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import socket
import logging
from ryu.ofproto import ofproto_v1_0
from ryu.lib import hub
from ryu.lib.mac import haddr_to_bin, haddr_to_str
LOG = logging.getLogger('ryu.lib.ofctl_v1_0')
DEFAULT_TIMEOUT = 1.0 # TODO:XXX
def to_actions(dp, acts):
actions = []
for a in acts:
action_type = a.get('type')
if action_type == 'OUTPUT':
port = int(a.get('port', ofproto_v1_0.OFPP_NONE))
# NOTE: The reason of this magic number (0xffe5)
# is because there is no good constant in of1.0.
# The same value as OFPCML_MAX of of1.2 and of1.3 is used.
max_len = int(a.get('max_len', 0xffe5))
actions.append(dp.ofproto_parser.OFPActionOutput(port, max_len))
elif action_type == 'SET_VLAN_VID':
vlan_vid = int(a.get('vlan_vid', 0xffff))
actions.append(dp.ofproto_parser.OFPActionVlanVid(vlan_vid))
elif action_type == 'SET_VLAN_PCP':
vlan_pcp = int(a.get('vlan_pcp', 0))
actions.append(dp.ofproto_parser.OFPActionVlanPcp(vlan_pcp))
elif action_type == 'STRIP_VLAN':
actions.append(dp.ofproto_parser.OFPActionStripVlan())
elif action_type == 'SET_DL_SRC':
dl_src = haddr_to_bin(a.get('dl_src'))
actions.append(dp.ofproto_parser.OFPActionSetDlSrc(dl_src))
elif action_type == 'SET_DL_DST':
dl_dst = haddr_to_bin(a.get('dl_dst'))
actions.append(dp.ofproto_parser.OFPActionSetDlDst(dl_dst))
elif action_type == 'SET_NW_SRC':
nw_src = ipv4_to_int(a.get('nw_src'))
actions.append(dp.ofproto_parser.OFPActionSetNwSrc(nw_src))
elif action_type == 'SET_NW_DST':
nw_dst = ipv4_to_int(a.get('nw_dst'))
actions.append(dp.ofproto_parser.OFPActionSetNwDst(nw_dst))
elif action_type == 'SET_NW_TOS':
nw_tos = int(a.get('nw_tos', 0))
actions.append(dp.ofproto_parser.OFPActionSetNwTos(nw_tos))
elif action_type == 'SET_TP_SRC':
tp_src = int(a.get('tp_src', 0))
actions.append(dp.ofproto_parser.OFPActionSetTpSrc(tp_src))
elif action_type == 'SET_TP_DST':
tp_dst = int(a.get('tp_dst', 0))
actions.append(dp.ofproto_parser.OFPActionSetTpDst(tp_dst))
elif action_type == 'ENQUEUE':
port = int(a.get('port', ofproto_v1_0.OFPP_NONE))
queue_id = int(a.get('queue_id', 0))
actions.append(dp.ofproto_parser.OFPActionEnqueue(port, queue_id))
else:
            LOG.error('Unknown action type: %s', action_type)
return actions
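# A hedged illustration, not part of the original module: example action dicts in the
# form to_actions() parses above; the concrete values are hypothetical.
_EXAMPLE_ACTIONS = [
    {'type': 'OUTPUT', 'port': 2},
    {'type': 'SET_VLAN_VID', 'vlan_vid': 10},
    {'type': 'SET_NW_SRC', 'nw_src': '10.0.0.1'},
]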
def actions_to_str(acts):
actions = []
for a in acts:
action_type = a.cls_action_type
if action_type == ofproto_v1_0.OFPAT_OUTPUT:
buf = 'OUTPUT:' + str(a.port)
elif action_type == ofproto_v1_0.OFPAT_SET_VLAN_VID:
buf = 'SET_VLAN_VID:' + str(a.vlan_vid)
elif action_type == ofproto_v1_0.OFPAT_SET_VLAN_PCP:
buf = 'SET_VLAN_PCP:' + str(a.vlan_pcp)
elif action_type == ofproto_v1_0.OFPAT_STRIP_VLAN:
buf = 'STRIP_VLAN'
elif action_type == ofproto_v1_0.OFPAT_SET_DL_SRC:
buf = 'SET_DL_SRC:' + haddr_to_str(a.dl_addr)
elif action_type == ofproto_v1_0.OFPAT_SET_DL_DST:
buf = 'SET_DL_DST:' + haddr_to_str(a.dl_addr)
elif action_type == ofproto_v1_0.OFPAT_SET_NW_SRC:
buf = 'SET_NW_SRC:' + \
socket.inet_ntoa(struct.pack('!I', a.nw_addr))
elif action_type == ofproto_v1_0.OFPAT_SET_NW_DST:
buf = 'SET_NW_DST:' + \
socket.inet_ntoa(struct.pack('!I', a.nw_addr))
elif action_type == ofproto_v1_0.OFPAT_SET_NW_TOS:
buf = 'SET_NW_TOS:' + str(a.tos)
elif action_type == ofproto_v1_0.OFPAT_SET_TP_SRC:
buf = 'SET_TP_SRC:' + str(a.tp)
elif action_type == ofproto_v1_0.OFPAT_SET_TP_DST:
buf = 'SET_TP_DST:' + str(a.tp)
elif action_type == ofproto_v1_0.OFPAT_ENQUEUE:
buf = 'ENQUEUE:' + str(a.queue_id)
elif action_type == ofproto_v1_0.OFPAT_VENDOR:
buf = 'VENDOR'
else:
buf = 'UNKNOWN'
actions.append(buf)
return actions
def ipv4_to_int(addr):
ip = addr.split('.')
assert len(ip) == 4
i = 0
for b in ip:
b = int(b)
i = (i << 8) | b
return i
def to_match(dp, attrs):
ofp = dp.ofproto
wildcards = ofp.OFPFW_ALL
in_port = 0
dl_src = 0
dl_dst = 0
dl_vlan = 0
dl_vlan_pcp = 0
dl_type = 0
nw_tos = 0
nw_proto = 0
nw_src = 0
nw_dst = 0
tp_src = 0
tp_dst = 0
for key, value in attrs.items():
if key == 'in_port':
in_port = int(value)
wildcards &= ~ofp.OFPFW_IN_PORT
elif key == 'dl_src':
dl_src = haddr_to_bin(value)
wildcards &= ~ofp.OFPFW_DL_SRC
elif key == 'dl_dst':
dl_dst = haddr_to_bin(value)
wildcards &= ~ofp.OFPFW_DL_DST
elif key == 'dl_vlan':
dl_vlan = int(value)
wildcards &= ~ofp.OFPFW_DL_VLAN
elif key == 'dl_vlan_pcp':
dl_vlan_pcp = int(value)
wildcards &= ~ofp.OFPFW_DL_VLAN_PCP
elif key == 'dl_type':
dl_type = int(value)
wildcards &= ~ofp.OFPFW_DL_TYPE
elif key == 'nw_tos':
nw_tos = int(value)
wildcards &= ~ofp.OFPFW_NW_TOS
elif key == 'nw_proto':
nw_proto = int(value)
wildcards &= ~ofp.OFPFW_NW_PROTO
elif key == 'nw_src':
ip = value.split('/')
nw_src = struct.unpack('!I', socket.inet_aton(ip[0]))[0]
mask = 32
if len(ip) == 2:
mask = int(ip[1])
assert 0 < mask <= 32
v = (32 - mask) << ofp.OFPFW_NW_SRC_SHIFT | \
~ofp.OFPFW_NW_SRC_MASK
wildcards &= v
elif key == 'nw_dst':
ip = value.split('/')
nw_dst = struct.unpack('!I', socket.inet_aton(ip[0]))[0]
mask = 32
if len(ip) == 2:
mask = int(ip[1])
assert 0 < mask <= 32
v = (32 - mask) << ofp.OFPFW_NW_DST_SHIFT | \
~ofp.OFPFW_NW_DST_MASK
wildcards &= v
elif key == 'tp_src':
tp_src = int(value)
wildcards &= ~ofp.OFPFW_TP_SRC
elif key == 'tp_dst':
tp_dst = int(value)
wildcards &= ~ofp.OFPFW_TP_DST
else:
LOG.error("unknown match name %s, %s, %d", key, value, len(key))
match = dp.ofproto_parser.OFPMatch(
wildcards, in_port, dl_src, dl_dst, dl_vlan, dl_vlan_pcp,
dl_type, nw_tos, nw_proto, nw_src, nw_dst, tp_src, tp_dst)
return match
def match_to_str(m):
match = {}
if ~m.wildcards & ofproto_v1_0.OFPFW_IN_PORT:
match['in_port'] = m.in_port
if ~m.wildcards & ofproto_v1_0.OFPFW_DL_SRC:
match['dl_src'] = haddr_to_str(m.dl_src)
if ~m.wildcards & ofproto_v1_0.OFPFW_DL_DST:
match['dl_dst'] = haddr_to_str(m.dl_dst)
if ~m.wildcards & ofproto_v1_0.OFPFW_DL_VLAN:
match['dl_vlan'] = m.dl_vlan
if ~m.wildcards & ofproto_v1_0.OFPFW_DL_VLAN_PCP:
match['dl_vlan_pcp'] = m.dl_vlan_pcp
if ~m.wildcards & ofproto_v1_0.OFPFW_DL_TYPE:
match['dl_type'] = m.dl_type
if ~m.wildcards & ofproto_v1_0.OFPFW_NW_TOS:
match['nw_tos'] = m.nw_tos
if ~m.wildcards & ofproto_v1_0.OFPFW_NW_PROTO:
match['nw_proto'] = m.nw_proto
if ~m.wildcards & ofproto_v1_0.OFPFW_NW_SRC_ALL:
match['nw_src'] = nw_src_to_str(m.wildcards, m.nw_src)
if ~m.wildcards & ofproto_v1_0.OFPFW_NW_DST_ALL:
match['nw_dst'] = nw_dst_to_str(m.wildcards, m.nw_dst)
if ~m.wildcards & ofproto_v1_0.OFPFW_TP_SRC:
match['tp_src'] = m.tp_src
if ~m.wildcards & ofproto_v1_0.OFPFW_TP_DST:
match['tp_dst'] = m.tp_dst
return match
def nw_src_to_str(wildcards, addr):
ip = socket.inet_ntoa(struct.pack('!I', addr))
mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_SRC_MASK)
>> ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
if mask == 32:
mask = 0
if mask:
ip += '/%d' % mask
return ip
def nw_dst_to_str(wildcards, addr):
ip = socket.inet_ntoa(struct.pack('!I', addr))
mask = 32 - ((wildcards & ofproto_v1_0.OFPFW_NW_DST_MASK)
>> ofproto_v1_0.OFPFW_NW_DST_SHIFT)
if mask == 32:
mask = 0
if mask:
ip += '/%d' % mask
return ip
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
for msg in msgs:
stats = msg.body
s = {'mfr_desc': stats.mfr_desc,
'hw_desc': stats.hw_desc,
'sw_desc': stats.sw_desc,
'serial_num': stats.serial_num,
'dp_desc': stats.dp_desc}
desc = {str(dp.id): s}
return desc
def get_queue_stats(dp, waiters):
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, dp.ofproto.OFPP_ALL,
dp.ofproto.OFPQ_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = []
for msg in msgs:
stats = msg.body
for stat in stats:
s.append({'port_no': stat.port_no,
'queue_id': stat.queue_id,
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
desc = {str(dp.id): s}
return desc
def get_flow_stats(dp, waiters, flow={}):
match = to_match(dp, flow.get('match', {}))
table_id = int(flow.get('table_id', 0xff))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_NONE))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, 0, match, table_id, out_port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
for stats in msg.body:
actions = actions_to_str(stats.actions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': stats.table_id}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_aggregate_flow_stats(dp, waiters, flow={}):
match = to_match(dp, flow.get('match', {}))
table_id = int(flow.get('table_id', 0xff))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_NONE))
stats = dp.ofproto_parser.OFPAggregateStatsRequest(
dp, 0, match, table_id, out_port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
stats = msg.body
for st in stats:
s = {'packet_count': st.packet_count,
'byte_count': st.byte_count,
'flow_count': st.flow_count}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_port_stats(dp, waiters):
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, 0, dp.ofproto.OFPP_NONE)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': stats.port_no,
'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
'rx_dropped': stats.rx_dropped,
'tx_dropped': stats.tx_dropped,
'rx_errors': stats.rx_errors,
'tx_errors': stats.tx_errors,
'rx_frame_err': stats.rx_frame_err,
'rx_over_err': stats.rx_over_err,
'rx_crc_err': stats.rx_crc_err,
'collisions': stats.collisions}
ports.append(s)
ports = {str(dp.id): ports}
return ports
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
stats = msg.ports
for stat in stats.values():
d = {'port_no': stat.port_no,
'hw_addr': stat.hw_addr,
'name': stat.name,
'config': stat.config,
'state': stat.state,
'curr': stat.curr,
'advertised': stat.advertised,
'supported': stat.supported,
'peer': stat.peer}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def mod_flow_entry(dp, flow, cmd):
cookie = int(flow.get('cookie', 0))
priority = int(flow.get('priority',
dp.ofproto.OFP_DEFAULT_PRIORITY))
buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_NONE))
flags = int(flow.get('flags', 0))
idle_timeout = int(flow.get('idle_timeout', 0))
hard_timeout = int(flow.get('hard_timeout', 0))
actions = to_actions(dp, flow.get('actions', []))
match = to_match(dp, flow.get('match', {}))
flow_mod = dp.ofproto_parser.OFPFlowMod(
datapath=dp, match=match, cookie=cookie,
command=cmd, idle_timeout=idle_timeout,
hard_timeout=hard_timeout, priority=priority,
buffer_id=buffer_id, out_port=out_port,
flags=flags,
actions=actions)
dp.send_msg(flow_mod)
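# A hedged illustration, not part of the original module: a flow dict in the shape
# mod_flow_entry() accepts above, combining the match and action formats used by
# to_match() and to_actions(); the concrete values are hypothetical.
_EXAMPLE_FLOW = {
    'priority': 100,
    'idle_timeout': 30,
    'match': {'in_port': 1, 'dl_type': 0x0800, 'nw_dst': '10.0.0.2/32'},
    'actions': [{'type': 'OUTPUT', 'port': 2}],
}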
def delete_flow_entry(dp):
match = dp.ofproto_parser.OFPMatch(
dp.ofproto.OFPFW_ALL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
flow_mod = dp.ofproto_parser.OFPFlowMod(
datapath=dp, match=match, cookie=0,
command=dp.ofproto.OFPFC_DELETE)
dp.send_msg(flow_mod)
def mod_port_behavior(dp, port_config):
port_no = int(port_config.get('port_no', 0))
hw_addr = port_config.get('hw_addr')
config = int(port_config.get('config', 0))
mask = int(port_config.get('mask', 0))
advertise = int(port_config.get('advertise'))
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
|
|
import socket, select, time, struct, random
from libs.dae.socks import network_socket
REALM_MESSAGE_KEEP_ALIVE = 1
REALM_MESSAGE_KEEP_ALIVE_OPEN = 1
REALM_MESSAGE_KEEP_ALIVE_CLOSE = 2
REALM_MESSAGE_USER_LOGIN = 2
GAME_MESSAGE_UPDATE_PLAYER_POSITION = 3
GAME_MESSAGE_CREATE_PLAYER = 5
GAME_MESSAGE_UPDATE_PLAYER = 6
GAME_MESSAGE_UPDATE_PLAYER_DIRECTION = 7
DEBUG1 = True
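# A hedged note, not part of the original module: the realm protocol below frames each
# message as a big-endian 8-byte header, struct.pack(">ii", message_id, payload_length),
# followed by payload_length bytes of payload, as read in realm_server.update().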
class vector2d(object):
def __init__(self):
self.x_ = 0
self.y_ = 0
self.changed = False
def get_x(self):
return self.x_
def set_x(self, value):
if self.x_ != value:
self.x_ = value
self.changed = True
def get_y(self):
return self.y_
def set_y(self, value):
if self.y_ != value:
self.y_ = value
self.changed = True
x = property(get_x, set_x)
y = property(get_y, set_y)
class player(object):
def __init__(self, sock):
self.socket = sock
self.data = ""
self.position = vector2d()
self.id = 0
self.name = "None"
self.rotation = vector2d()
self.position.x = 100
self.position.y = -100
class policy_server(object):
def __init__(self, port):
self.read = []
self.write = []
self.error = []
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(('', port))
self.server.listen(5)
self.read.append(self.server)
def update(self):
r, w, e = select.select(self.read, self.write, self.error, 0)
for sock in r:
if sock == self.server:
client, (host, port) = self.server.accept()
if DEBUG1:
print "[policy_server] Accepting client..."
#read.append(client)
self.write.append(client)
else:
                available = sock.recv(1, socket.MSG_PEEK)  # peek one byte without consuming it
if available:
data = sock.recv(1024)
if data.startswith("<policy-file-request/>"):
if DEBUG1:
print "[policy_server] Accepted client."
sock.send("<?xml version=\"1.0\"?><cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"6110-6112\" /></cross-domain-policy>\0")
else:
if DEBUG1:
print "[policy_server] Refused client."
for sock in w:
self.read.append(sock)
self.write.remove(sock)
class game_server(object):
def __init__(self, port):
self.read = []
self.write = []
self.error = []
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(('', port))
self.server.listen(5)
self.read.append(self.server)
def update(self):
r, w, e = select.select(self.read, self.write, self.error, 0)
for sock in r:
if sock == self.server:
client, (host, port) = self.server.accept()
if DEBUG1:
print "[game_server] Accepting client..."
#read.append(client)
self.write.append(client)
else:
pass
for sock in w:
self.read.append(sock)
self.write.remove(sock)
class realm_server(object):
def __init__(self, port):
self.read = []
self.write = []
self.error = []
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(('', port))
self.server.listen(5)
self.player_list = {}
self.timer = int(time.time() * 100)
self.read.append(self.server)
def update(self):
r, w, e = select.select(self.read, self.write, self.error, 0)
for sock in e:
print "player left"
            del self.player_list[sock]
self.write.remove(sock)
self.read.remove(sock)
time.sleep(0.01)
for sock in r:
if sock == self.server:
client, (host, port) = self.server.accept()
if DEBUG1:
print "[realm_server] Accepting client..."
self.write.append(client)
p = player(network_socket(client))
p.id = random.randint(10000, 99999)
self.player_list[client] = p
else:
p = self.player_list[sock]
if p.socket.is_connected():
available = p.socket.available()
if available >= 8:
data = p.socket.read(8)
print "receiving: " + data
print ''.join(["\\x%02x" % ord( x ) for x in data]).strip()
messageID = struct.unpack(">i", data[:4])[0]
messageLength = struct.unpack(">i", data[4:8])[0]
                        if available - 8 >= messageLength:  # the 8 header bytes were already consumed
messageData = p.socket.read(messageLength)
print "receiving: " + messageData
print ''.join(["\\x%02x" % ord( x ) for x in messageData]).strip()
if DEBUG1:
print "messageID: " + str(messageID)
if DEBUG1:
print "messageLength: " + str(messageLength)
if messageID == REALM_MESSAGE_KEEP_ALIVE:
if DEBUG1:
print "1) keepalive"
msg = struct.pack(">i", REALM_MESSAGE_KEEP_ALIVE_OPEN)
packet = struct.pack(">ii", REALM_MESSAGE_KEEP_ALIVE, len(msg))
packet += msg
p.socket.write(packet)
elif messageID == REALM_MESSAGE_USER_LOGIN:
if DEBUG1:
print "2) login"
# update the new player for old players
msg = struct.pack(">ii" + str(len(p.name) * 4) + "siiiiB", p.id, len(p.name) * 4, p.name, p.position.x, p.position.y, 0, 0, False)
new_player_packet = struct.pack(">ii", GAME_MESSAGE_CREATE_PLAYER, len(msg))
new_player_packet += msg
                                for other_sock in self.player_list:
                                    p2 = self.player_list[other_sock]
if p2 == p:
# update the new player for itself
msg = struct.pack(">ii" + str(len(p.name) * 4) + "siiiiB", p.id, len(p.name) * 4, p.name, p.position.x, p.position.y, 0, 0, True)
packet = struct.pack(">ii", GAME_MESSAGE_CREATE_PLAYER, len(msg))
packet += msg
p.socket.write(packet)
print "sending: " + packet
else:
p2.socket.write(new_player_packet)
# show the old players for the new player
msg = struct.pack(">ii" + str(len(p2.name) * 4) + "siiiiB", p2.id, len(p2.name) * 4, p2.name, p2.position.x, p2.position.y, 0, 0, False)
packet = struct.pack(">ii", GAME_MESSAGE_CREATE_PLAYER, len(msg))
packet += msg
p.socket.write(packet)
elif messageID == GAME_MESSAGE_UPDATE_PLAYER_DIRECTION:
if DEBUG1:
print "3) move"
p.rotation.x, p.rotation.y = struct.unpack(">ii", messageData[:8])
else:
print "player left"
del self.player_list[sock]
self.read.remove(sock)
time.sleep(0.01)
for sock in w:
self.read.append(sock)
self.write.remove(sock)
time.sleep(0.01)
timer = int(time.time() * 100)
step = timer - self.timer
self.timer = timer
for sock in self.player_list:
p = self.player_list[sock]
if p.rotation.x == 1:
p.position.x += step
elif p.rotation.x == -1:
p.position.x -= step
elif p.rotation.y == 1:
p.position.y += step
elif p.rotation.y == -1:
p.position.y -= step
if p.position.changed:
if DEBUG1:
print "player id: " + str(p.id)
if DEBUG1:
print "player dir: " + str(p.rotation.x) + "/" + str(p.rotation.y)
if DEBUG1:
print "player pos: " + str(p.position.x) + "/" + str(p.position.y)
if DEBUG1:
print "changed pos"
msg = struct.pack(">iii", p.id, p.position.x, p.position.y)
packet = struct.pack(">ii", GAME_MESSAGE_UPDATE_PLAYER_POSITION, len(msg))
packet += msg
for sock2 in self.player_list:
p2 = self.player_list[sock2]
if p2 != p: # dont need to update own character's position
p2.socket.write(packet)
p.position.changed = False
time.sleep(0.01)
class chat_server(object):
def __init__(self, port):
self.read = []
self.write = []
self.error = []
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(('', port))
self.server.listen(5)
self.read.append(self.server)
def update(self):
r, w, e = select.select(self.read, self.write, self.error, 0)
for sock in r:
if sock == self.server:
client, (host, port) = self.server.accept()
if DEBUG1:
print "[chat_server] Accepting client..."
#read.append(client)
self.write.append(client)
else:
pass
for sock in w:
self.read.append(sock)
self.write.remove(sock)
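# Framing sketch (not called by the servers above): every message on the wire
# is a big-endian ">ii" header (message id, payload length) followed by the
# payload, mirroring the reply packets built inside realm_server.update().
def _frame_message(message_id, payload):
    return struct.pack(">ii", message_id, len(payload)) + payload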
s1 = policy_server(843)
s2 = realm_server(6110)
s3 = chat_server(6112)
s4 = game_server(6111)
while True:
s1.update()
time.sleep(0.05)
s2.update()
time.sleep(0.05)
s3.update()
time.sleep(0.05)
s4.update()
time.sleep(0.05)
########################
|
|
#!/usr/bin/python
# (c) Shahar Gino, June-2017, sgino209@gmail.com
from sys import exit, argv
from getopt import getopt, GetoptError
from numpy import loadtxt, savetxt, float32, empty
from os import path, mkdir, chdir, system, listdir
from cv2 import imread, imwrite, cvtColor, COLOR_RGB2GRAY
__version__ = "1.0"
# Python structuring way:
class Struct:
def __init__(self, **kwds):
self.__dict__.update(kwds)
# ---------------------------------------------------------------------------------------------------------------
def main(_argv):
""" Converts standard images to flattened images, and vice versus """
# Default parameters:
args = Struct(
flattened_images="./flattened_images.txt",
classifications="./classifications.txt",
images_folder="./images",
mode="pack",
format="xml",
flattened_size=(20,30), # (width,height)
debugMode=False
)
# -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..
# User-Arguments parameters (overrides Defaults):
try:
opts, user_args = getopt(_argv, "h", ["flattened_images=", "classifications=", "images_folder=",
"mode=", "format=", "flattened_size=","debug"])
for opt, user_arg in opts:
if opt == '-h':
usage()
exit()
elif opt in "--flattened_images":
args.flattened_images = user_arg
elif opt in "--classifications":
args.classifications = user_arg
elif opt in "--images_folder":
args.images_folder = user_arg
elif opt in "--mode":
args.mode = user_arg
elif opt in "--format":
args.format = user_arg
elif opt in "--flattened_size":
args.flattened_size = user_arg
elif opt in "--debug":
args.debugMode = True
except GetoptError:
usage()
exit(2)
# -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- .. -- ..
# Convert:
if args.mode == "unpack":
unpack(args.flattened_images, args.classifications, args.images_folder, args.flattened_size, args.format)
elif args.mode == "pack":
pack(args.flattened_images, args.classifications, args.images_folder, args.flattened_size, args.format)
else:
usage()
exit(2)
# ---------------------------------------------------------------------------------------------------------------
def pack(flattened_img, classifications, images_folder, flattened_size, fmt):
""" Pack standard images into a flattened images + classifications pair format """
N = flattened_size[0] * flattened_size[1]
nImages = len([name for name in listdir(images_folder) if path.isfile(path.join(images_folder,name))])
npaClassifications = empty((nImages,1))
npaFlattenedImages = empty((nImages,N))
k = 0
for filename in listdir(images_folder):
if filename == ".DS_Store":
continue
        image = imread(path.join(images_folder, filename))
        if image is not None:
            image = cvtColor(image, COLOR_RGB2GRAY).reshape(1, N)
            npaFlattenedImages[k] = image
            npaClassifications[k] = ord(filename[0])
            k += 1
if fmt == "txt":
savetxt(classifications, npaClassifications, delimiter=' ', fmt='%1.18e')
savetxt(flattened_img, npaFlattenedImages, delimiter=' ', fmt='%1.18e')
elif fmt == "xml":
with open(classifications, 'w') as classifications_file:
classifications_file.write('<?xml version="1.0"?>\n')
classifications_file.write('<opencv_storage>\n')
classifications_file.write('<classifications type_id="opencv-matrix">\n')
classifications_file.write(' <rows>'+str(nImages)+'</rows>\n')
classifications_file.write(' <cols>1</cols>\n')
classifications_file.write(' <dt>i</dt>\n')
classifications_file.write(' <data>\n')
i = 0
for t in npaClassifications:
if i % 24 == 0:
classifications_file.write(" %d " % t)
elif i % 24 == 23:
classifications_file.write("%d " % t + '\n')
elif i == nImages - 1:
classifications_file.write("%d" % t + '\n')
else:
classifications_file.write("%d " % t)
i += 1
classifications_file.write(' </data>\n')
classifications_file.write('</classifications>\n')
classifications_file.write('</opencv_storage>\n')
with open(flattened_img, 'w') as images_file:
images_file.write('<?xml version="1.0"?>\n')
images_file.write('<opencv_storage>\n')
images_file.write('<images type_id="opencv-matrix">\n')
images_file.write(' <rows>' + str(nImages) + '</rows>\n')
images_file.write(' <cols>' + str(N) + '</cols>\n')
images_file.write(' <dt>f</dt>\n')
images_file.write(' <data>\n')
i = 0
for img in npaFlattenedImages:
for t in img:
if i % 14 == 0:
images_file.write(" %d. " % t)
elif i % 14 == 13:
images_file.write("%d. " % t + '\n')
elif i == nImages - 1:
images_file.write("%d." % t + '\n')
else:
images_file.write("%d. " % t)
i += 1
images_file.write(' </data>\n')
images_file.write('</images>\n')
images_file.write('</opencv_storage>\n')
else:
print("ERROR: UnSupported format %s" % fmt)
# ---------------------------------------------------------------------------------------------------------------
def unpack(flattened_images, classifications, images_folder, flattened_size, fmt):
""" Unpack flattened images and classifications pair into standard images format """
if fmt == "xml":
print("ERROR: UnSupported format %s" % format)
return
# Read in training classifications:
try:
npaClassifications = loadtxt(classifications, float32)
except IOError:
print("ERROR: Unable to open %s, exiting program" % classifications)
system("pause")
return
# Read in training images:
try:
npaFlattenedImages = loadtxt(flattened_images, float32)
except IOError:
print("ERROR: Unable to open %s, exiting program" % flattened_images)
system("pause")
return
# Create images folder:
if not path.exists(images_folder):
mkdir(images_folder, 0777)
chdir(images_folder)
# Convert:
counters = {}
for kClass in range(npaClassifications.size):
className = npaClassifications[kClass]
classNameStr = str(chr(int(className)))
if className in counters:
counters[className] += 1
else:
counters[className] = 0
image = npaFlattenedImages[kClass].reshape(flattened_size[1],flattened_size[0])
imwrite(classNameStr + "_" + str(counters[className]) + ".png", image)
# ---------------------------------------------------------------------------------------------------------------
def usage():
""" Usage printout """
script_name = path.basename(__file__)
print 'Usage examples:'
print '(1) %s --flattened_images=./flattened_images.txt --classifications=./classifications.txt --images_folder=./images --mode=pack' % script_name
print '(2) %s --flattened_images=./flattened_images.txt --classifications=./classifications.txt --images_folder=./images --mode=unpack' % script_name
print ''
print 'Optional flags: --flattened_size=(width,height) '
print ' --debug'
print ''
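# Programmatic sketch (paths and sizes below are placeholders): the same
# conversions driven by the command line can also be called directly.
def _example_roundtrip():
    pack("./flattened_images.txt", "./classifications.txt", "./images", (20, 30), "txt")
    unpack("./flattened_images.txt", "./classifications.txt", "./images_unpacked", (20, 30), "txt")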
# ---------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
main(argv[1:])
print 'Done!'
|
|
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
Command line interface to test db server utilities.
"""
import argparse
import os
import sys
import logging
import re
import dbutil
from EOtools.execute import execute
#
# Temporary test database pattern
#
# This is the regular expression used to identify a test database.
#
# The current pattern looks for a name containing 'test' and ending
# in an underscore followed by a 9 digit number.
#
TESTDB_PATTERN = r".*test.*_\d{9}$"
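#
# For example (the names below are purely illustrative), a database called
# 'hypercube_test_123456789' matches the pattern, while one with fewer than
# nine trailing digits does not:
#
def _testdb_pattern_examples():
    """Sanity checks for TESTDB_PATTERN; never called by the subcommands."""
    assert re.match(TESTDB_PATTERN, 'hypercube_test_123456789')
    assert not re.match(TESTDB_PATTERN, 'hypercube_test_12345')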
#
# Default database file
#
# This is the path to the empty hypercube dump used as a base for newly
# created databases.
#
DEFAULT_DBFILE = os.path.join(dbutil.TEST_RESOURCES_ROOT,
'databases/hypercube_empty.sql')
#
# Temporary directory.
#
# This is used for a temporary copy of the config file.
#
#TEMP_DIR = dbutil.temp_directory()
TEMP_DIR = './temp'
#
# Set up logging
#
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
LOGGER = logging.getLogger()
#
# Argument parser setup functions
#
def command_line_parser():
"""Return the top level parser."""
description = "Run utility commands on the test database server."
parser = argparse.ArgumentParser(description=description)
subparser_factory = parser.add_subparsers(title="subcommands")
add_create_subcommand(subparser_factory)
add_save_subcommand(subparser_factory)
add_drop_subcommand(subparser_factory)
add_list_subcommand(subparser_factory)
add_cleanup_subcommand(subparser_factory)
# add_dbupdate_subcommand(subparser_factory)
return parser
def add_create_subcommand(subparser_factory):
"""Add a subparser for the create subcommand."""
create_help = "Create and load a database from an sql dump file."
subparser = subparser_factory.add_parser('create', help=create_help,
description=create_help)
dbname_help = "The name of the database to be created."
subparser.add_argument('dbname', help=dbname_help)
dbfile_help = ("An sql database dump to be loaded into the new " +
"database. If not given, an empty hypercube database " +
"will be loaded.")
subparser.add_argument('dbfile', help=dbfile_help, nargs='?',
default=DEFAULT_DBFILE)
subparser.set_defaults(subcommand=run_create_subcommand)
def add_save_subcommand(subparser_factory):
"""Add a subparser for the save subcommand."""
save_help = "Save a database to an sql dump file."
subparser = subparser_factory.add_parser('save', help=save_help,
description=save_help)
dbname_help = "The name of the database to be saved."
subparser.add_argument('dbname', help=dbname_help)
dbfile_help = "The sql dump file to save to."
subparser.add_argument('dbfile', help=dbfile_help)
subparser.set_defaults(subcommand=run_save_subcommand)
def add_drop_subcommand(subparser_factory):
"""Add a subparser for the drop subcommand."""
drop_help = "Drop a database from the test server."
subparser = subparser_factory.add_parser('drop', help=drop_help,
description=drop_help)
dbname_help = "The name of the database to drop."
subparser.add_argument('dbname', help=dbname_help)
subparser.set_defaults(subcommand=run_drop_subcommand)
def add_list_subcommand(subparser_factory):
"""Add a subparser for the list subcommand."""
list_help = "List the databases on the test server."
subparser = subparser_factory.add_parser('list', help=list_help,
description=list_help)
subparser.set_defaults(subcommand=run_list_subcommand)
def add_cleanup_subcommand(subparser_factory):
"""Add a subparser for the cleanup subcommand."""
cleanup_help = "Drop all temporary test databases."
description = (cleanup_help + " Note that running " +
"this command may cause tests currently running to fail.")
subparser = subparser_factory.add_parser('cleanup', help=cleanup_help,
description=description)
subparser.set_defaults(subcommand=run_cleanup_subcommand)
def add_dbupdate_subcommand(subparser_factory):
"""Add a subparser for the dbupdate subcommand."""
dbupdate_help = "Run dbupdater.py to catalog a dataset or datasets."
description = (dbupdate_help + " This will create an acquisition_record " +
"and a dataset_record if they do not already exist.")
subparser = subparser_factory.add_parser('dbupdate', help=dbupdate_help,
description=description)
dbname_help = "The name of the database to update."
subparser.add_argument('dbname', help=dbname_help)
source_dir_help = "The source directory for the datasets."
subparser.add_argument('source_dir', help=source_dir_help)
subparser.set_defaults(subcommand=run_dbupdate_subcommand)
#
# Subcommand functions
#
def run_create_subcommand(args):
"""Run the create subcommand."""
LOGGER.debug("Running create subcommand:")
LOGGER.debug(" dbname = %s", args.dbname)
LOGGER.debug(" dbfile = %s", args.dbfile)
dbutil.TESTSERVER.create(args.dbname, "", args.dbfile)
def run_save_subcommand(args):
"""Run the save subcommand."""
LOGGER.debug("Running save subcommand:")
LOGGER.debug(" dbname = %s", args.dbname)
LOGGER.debug(" dbfile = %s", args.dbfile)
dbutil.TESTSERVER.save(args.dbname, "", args.dbfile)
def run_drop_subcommand(args):
"""Run the drop subcommand."""
LOGGER.debug("Running drop subcommand:")
LOGGER.debug(" dbname = %s", args.dbname)
dbutil.TESTSERVER.drop(args.dbname)
def run_list_subcommand(dummy_args):
"""Run the list subcommand."""
LOGGER.debug("Running list subcommand:")
dblist = dbutil.TESTSERVER.dblist()
for dbname in sorted(dblist):
print dbname
def run_cleanup_subcommand(dummy_args):
"""Run the cleanup subcommand."""
LOGGER.debug("Running cleanup subcommand:")
dblist = dbutil.TESTSERVER.dblist()
test_dblist = [db for db in dblist if re.match(TESTDB_PATTERN, db)]
print "Dropping temporary test databases:"
if test_dblist:
for dbname in test_dblist:
print " %s" % dbname
dbutil.TESTSERVER.drop(dbname)
else:
print " nothing to do."
def run_dbupdate_subcommand(args):
"""Run the dbupdate subcommand."""
raise NotImplementedError
# def run_dbupdate_subcommand(args):
# """Run the dbupdate subcommand."""
# LOGGER.debug("Running dbupdate subcommand:")
# LOGGER.debug(" dbname = %s", args.dbname)
# LOGGER.debug(" source_dir = %s", args.source_dir)
# config_file_name = dbutil.random_name("test_datacube") + ".conf"
# config_file_path = dbutil.make_config_file(args.dbname, TEMP_DIR,
# config_file_name)
# dbupdater_cmd = ["python",
# "dbupdater.py",
# "--debug",
# "--config=%s" % config_file_path,
# "--source=%s" % args.source_dir,
# "--removedblist",
# "--followsymlinks"]
#    result = execute(dbupdater_cmd, shell=False)
#
# Main program
#
if __name__ == '__main__':
ARGS = command_line_parser().parse_args()
ARGS.subcommand(ARGS)
|
|
""" List of all possible 'tasks', i.e. ci configurations.
WARNING : this is siconos specific!!
A task, see :class:`machinery.ci_tasks.CiTask` must be defined with at least:
* a name (ci_config)
* a distribution (name:version)
* a list of dependencies (pkgs)
"""
from machinery.ci_task import CiTask
import os
# not generic, to be moved somewhere else
#
class SiconosCiTask(CiTask):
def __init__(self, *args, **kwargs):
return super(SiconosCiTask, self).__init__(*args, **kwargs)
def template_maker(self):
unwanted_for_sitename = [
'build-base', 'gfortran', 'gnu-c++', 'lpsolve', 'wget', 'xz',
'asan', 'cppunit_clang', 'python-env', 'profiling', 'path',
'h5py3']
return '-'.join([p.replace('+', 'x')
for p in self._pkgs if p not in
unwanted_for_sitename])
# PLEASE KEEP CONFIGS AS WHAT THEY MEAN.
# DO NOT ADD PACKAGES IF THEY ARE NOT NECESSARY.
#
# 1. where the packages configurations are defined
# Used in driver.py.
database = os.path.join('config', 'siconos.yml')
empty = SiconosCiTask()
base = empty.copy()(
ci_config='default',
pkgs=['build-base', 'gcc', 'gfortran', 'gnu-c++', 'atlas-lapack',
'python-env'],
srcs=['.'],
targets={'.': ['all', 'test']})
#
# 2. the default task
#
default = SiconosCiTask(
docker=True,
ci_config='default',
distrib='ubuntu:16.04',
pkgs=['build-base', 'gcc', 'gfortran', 'gnu-c++', 'atlas-lapack',
'python-env'],
srcs=['.'],
targets={'.': ['docker-build', 'docker-ctest', 'docker-submit']})
minimal = SiconosCiTask(
docker=True,
ci_config='minimal',
distrib='ubuntu:16.10',
pkgs=['build-base', 'gcc', 'gfortran', 'gnu-c++',
'atlas-lapack', 'python-minimal'],
srcs=['.'],
targets={'.': ['docker-build', 'docker-ctest', 'docker-submit']})
minimal_with_python = SiconosCiTask(
docker=True,
ci_config='minimal_with_python',
distrib='ubuntu:16.10',
pkgs=['build-base', 'gcc', 'gfortran', 'gnu-c++',
'atlas-lapack', 'python-env'],
srcs=['.'],
targets={'.': ['docker-build', 'docker-ctest', 'docker-submit']})
#
# 3. all the tasks
#
siconos_default = default
print (default.template_maker())
siconos_default_nix = default.copy()(
ci_config='nix',
distrib='nixos/nix:latest')
siconos_debian_latest = siconos_default.copy()(
distrib='debian:latest')
siconos_ubuntu_14_04 = siconos_default.copy()(
distrib='ubuntu:14.04')
siconos_ubuntu_15_04 = siconos_default.copy()(
distrib='ubuntu:15.04')
siconos_ubuntu_15_10 = siconos_default.copy()(
distrib='ubuntu:15.10')
siconos_ubuntu_16_10 = siconos_default.copy()(
distrib='ubuntu:16.10')
siconos_ubuntu_17_10 = siconos_default.copy()(
distrib='ubuntu:17.10')
siconos_fedora_latest = siconos_default.copy()(
distrib='fedora:latest')
siconos_cxx_11_ubuntu_17_10 = siconos_default.copy()(
distrib='ubuntu:17.10',
ci_config='with_cxx11')
siconos_gazebo = siconos_default.copy()(
distrib='nvidia/opengl:1.0-glvnd-devel-ubuntu16.04',
ci_config=('with_cxx11', 'with_bullet', 'with_py3'),
add_pkgs=['bullet','gazebo'],
targets={'.': ['docker-build', 'docker-cmake', 'docker-make',
'docker-make-install', 'docker-cmd']})
siconos_with_lpsolve = siconos_default.copy()(
add_pkgs=['lpsolve'])
import os
from os.path import expanduser
home = expanduser("~")
siconos_documentation = siconos_default.copy()(
distrib='ubuntu:16.10',
ci_config='with_documentation',
add_pkgs=['documentation'],
add_directories=[os.path.join(home, '.ssh:/root/.ssh')],
targets={'.': ['docker-build', 'docker-cmake', 'docker-make',
'docker-make-install',
'docker-make-doc', 'docker-make-upload']})
siconos_ubuntu_15_10_with_mechanisms = siconos_default.copy()(
ci_config='with_mechanisms_conda_version',
add_pkgs=['pythonocc-conda', 'wget', 'bash', 'bzip2',
'pythonocc-conda-dep'],
cmake_cmd='Build/ci-scripts/conda.sh',
distrib='debian:stretch')
siconos_debian_mechanisms = siconos_default.copy()(
ci_config='with_mechanisms',
add_pkgs=['wget', 'bash', 'h5py', 'oce-pythonocc-deps',
'oce-pythonocc'],
distrib='debian:latest')
siconos_ubuntu_latest_mechanisms = siconos_default.copy()(
ci_config='with_mechanisms',
add_pkgs=['wget', 'bash', 'h5py', 'oce-pythonocc-deps',
'oce-pythonocc'],
distrib='ubuntu:latest')
siconos_numerics_only = siconos_ubuntu_17_10.copy()(
ci_config='no_cxx',
remove_pkgs=['gnu-c++'])
siconos_profiling = siconos_ubuntu_17_10.copy()(
build_configuration='Profiling',
add_pkgs=['profiling'])
# note fedora/atlas-lapack in siconos.yml -> cmake does not detect blas
siconos_fedora_latest_with_umfpack = siconos_default.copy()(
distrib='fedora:latest',
ci_config=('with_umfpack',),
remove_pkgs=['atlas-lapack', 'python-env'],
add_pkgs=['openblas-lapacke', 'python3-env', 'umfpack'])
siconos_openblas_lapacke = siconos_default.copy()(
remove_pkgs=['atlas-lapack'],
add_pkgs=['openblas-lapacke'])
siconos_clang = siconos_ubuntu_17_10.copy()(
ci_config=('with_bullet', 'with_py3'),
remove_pkgs=['python-env'],
add_pkgs=['clang-3.9', 'bullet', 'cppunit_clang-3.9', 'wget', 'xz', 'python3-env', 'path', 'h5py3']) # h5py-3 for mechanics.io
siconos_clang_asan = siconos_clang.copy()(
ci_config=('with_asan_clang', 'with_mumps', 'with_hdf5', 'with_serialization', 'with_py3'),
add_pkgs=['mumps', 'hdf5', 'serialization'],
build_configuration='Debug',)
# <clang-3.7.1 does not support linux 4.2
# This will likely hurt you
siconos_clang_msan = siconos_default.copy()(
distrib='debian:jessie',
ci_config='with_msan',
build_configuration='Debug',
add_pkgs=['clang-3.8', 'libcxx_msan', 'wget', 'xz', 'path'])
siconos_clang_cfi = siconos_default.copy()(
distrib='debian:jessie',
ci_config='with_cfi',
build_configuration='Debug',
add_pkgs=['mumps', 'hdf5', 'cfi'])
siconos_gcc_asan = siconos_fedora_latest.copy()(
ci_config=('with_asan', 'with_mumps', 'with_hdf5', 'with_serialization'),
# cmake_cmd='Build/ci-scripts/fedora-mpi.sh',
add_pkgs=['mumps', 'hdf5', 'asan', 'serialization', 'path', 'wget'], # wget for path
build_configuration='Debug')
siconos_gcc_asan_latest = siconos_fedora_latest.copy()(
ci_config=('with_asan', 'with_mumps', 'with_hdf5', 'with_serialization'),
distrib='fedora:rawhide',
# cmake_cmd='Build/ci-scripts/fedora-mpi.sh',
add_pkgs=['mumps', 'hdf5', 'asan', 'serialization', 'path', 'wget'], # wget for path
build_configuration='Debug',
fast=False)
# There is a bug in boost 1.58 distributed with Xenial (Ubuntu LTS 16.04).
# As long as it is not patched, we have to build on a newer ubuntu
siconos_serialization = siconos_ubuntu_17_10.copy()(
ci_config='with_serialization',
add_pkgs=['serialization'])
siconos_with_mumps = siconos_default.copy()(
ci_config='with_mumps',
add_pkgs=['mumps'])
siconos_with_umfpack = siconos_default.copy()(
ci_config='with_umfpack',
add_pkgs=['umfpack'])
siconos_dev_mode_strict = siconos_default.copy()(
ci_config='with_dev_mode_strict')
siconos_frama_c = siconos_default.copy()(
ci_config='with_frama_c',
add_pkgs=['opam', 'frama-c', 'libgtksourceview2.0-dev',
'libgnomecanvas2-dev', 'aspcud', 'm4',
'unzip', 'coq', 'ocaml', 'z3'])
#
# 4. dispatch based on hostname and distrib type (to min. disk requirement)
#
known_tasks = {'siconos---vm0':
(siconos_gcc_asan,
siconos_serialization,
siconos_profiling,
siconos_gcc_asan_latest,
siconos_debian_mechanisms),
'siconos---vm1':
(minimal,
minimal_with_python,
siconos_documentation,
siconos_dev_mode_strict,
siconos_clang,
siconos_clang_asan),
'siconos---vm2':
(siconos_clang_msan,
siconos_ubuntu_15_10_with_mechanisms),
'siconos---vm4':
               (siconos_numerics_only,)}
|
|
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import re
import sys
import threading
from collections import defaultdict
from functools import wraps
from flask import current_app
from flask.globals import _app_ctx_stack
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalMixin, PrincipalPermissionsMixin, PrincipalType
from indico.modules.groups import GroupProxy
from indico.util.caching import memoize_redis
def parallelize(func, entries, batch_size=200):
@wraps(func)
def wrapper(*args, **kwargs):
iterable_lock = threading.Lock()
result_lock = threading.Lock()
abort = threading.Event()
finished = threading.Event()
results = []
app = current_app._get_current_object()
main_app_context = _app_ctx_stack.top
worker_exc_info = None
def worker(iterator):
nonlocal worker_exc_info
while not abort.is_set() and not finished.is_set():
try:
with iterable_lock:
with main_app_context:
item = next(iterator)
except StopIteration:
finished.set()
break
with app.app_context():
try:
res = func(item, *args, **kwargs)
except BaseException:
worker_exc_info = sys.exc_info()
finished.set()
return
with result_lock:
results.append(res)
it = iter(entries)
        threads = [threading.Thread(target=worker, name=f'worker/{i}', args=(it,))
                   for i in range(batch_size)]
for t in threads:
t.start()
try:
finished.wait()
except KeyboardInterrupt:
print('\nFinishing pending jobs before aborting')
abort.set()
for t in threads:
t.join()
if worker_exc_info:
raise worker_exc_info[1].with_traceback(worker_exc_info[2])
return results, abort.is_set()
return wrapper
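# Usage sketch (the callable and entries below are hypothetical): `parallelize`
# binds a per-item function to an iterable; calling the returned wrapper runs
# the function for every entry across worker threads, each inside a Flask app
# context, and returns a (results, aborted) tuple. An active Flask application
# is assumed to exist.
def _example_parallelize_usage(entries):
    def uppercase(entry):
        return entry.upper()
    results, aborted = parallelize(uppercase, entries, batch_size=4)()
    return results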
def format_query(query, placeholders):
"""Format and split the query into keywords and placeholders.
https://cern-search.docs.cern.ch/usage/operations/#advanced-queries
:param query: search query
:param placeholders: placeholder whitelist
:return: escaped query
"""
patt = r'(?:^|\s)({}):([^:"\s]+|"[^"]+")(?:$|\s)'.format('|'.join(map(re.escape, placeholders)))
idx = 0
keys = []
for match in re.finditer(patt, query):
placeholder = f'{placeholders[match.group(1)]}:{escape(match.group(2))}'
if idx != match.start():
keys.append(escape(query[idx:match.start()]))
keys.append(placeholder)
idx = match.end()
if idx != len(query):
keys.append(escape(query[idx:len(query)]))
return ' '.join(keys).strip()
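# Example sketch (the placeholder whitelist below is hypothetical): with a
# mapping of user-facing keywords to search fields, a prefixed term is rewritten
# and the remaining free text is escaped for ElasticSearch.
def _example_format_query():
    placeholders = {'person': 'authors'}
    # Expected result: 'authors:"John Doe" higgs'
    return format_query('person:"John Doe" higgs', placeholders)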
def format_filters(params, filters, range_filters):
"""Extract any special placeholder filter, such as ranges, from the query params.
https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_ranges
:param params: The filter query params
:param filters: The filter whitelist
:param range_filters: The range filter whitelist
:return: filters, extracted placeholders
"""
_filters = {}
query = []
for k, v in params.items():
if k not in filters:
continue
if k in range_filters:
match = re.match(r'[[{].+ TO .+[]}]', v)
if match:
query.append(f'+{range_filters[k]}:{v}')
continue
_filters[k] = v
return _filters, ' '.join(query)
def escape(query):
"""Prepend all special ElasticSearch characters with a backslash."""
patt = r'([+\-=><!(){}[\]\^~?:\\\/]|&&|\|\|)'
return re.sub(patt, r'\\\1', query)
def remove_none_entries(obj):
"""Remove dict entries that are ``None``.
This is cascaded in case of nested dicts/collections.
"""
if isinstance(obj, dict):
return {k: remove_none_entries(v) for k, v in obj.items() if v is not None}
elif isinstance(obj, (list, tuple, set)):
return type(obj)(map(remove_none_entries, obj))
return obj
def format_aggregations(aggregations, filters):
"""Format aggregations into a bucket dictionary.
Besides transforming each aggregation, ensures each bucket key
contains the most common result as defined from Elastic Search.
:param aggregations: The raw aggregation object
:param filters: The filter whitelist
:return: key: {label, buckets: [{key, count}]}
"""
return {
key: {
'label': str(filters[key]),
'buckets': [{
'key': bucket['most_common']['buckets'][0]['key'] if 'most_common' in bucket else bucket['key'],
'count': bucket['doc_count'],
**{k: v for k, v in bucket.items() if k in ('from_as_string', 'to_as_string')}
} for bucket in value['buckets']]
}
for key, value in _flatten(aggregations)
if key in filters
}
def _flatten(obj, target_key='buckets', parent_key=''):
if not isinstance(obj, dict):
return
if target_key in obj:
yield parent_key, obj
for key, value in obj.items():
yield from _flatten(value, target_key, f'{parent_key}_{key}' if parent_key else key)
@memoize_redis(86400)
def _get_alternative_group_names():
"""Get non-lowercase versions of group names."""
classes = [sc for sc in [*PrincipalMixin.__subclasses__(), *PrincipalPermissionsMixin.__subclasses__()]
if hasattr(sc, 'query')]
alternatives = defaultdict(set)
for cls in classes:
res = (db.session.query(cls.multipass_group_provider, cls.multipass_group_name)
.distinct()
.filter(cls.type == PrincipalType.multipass_group,
cls.multipass_group_name != db.func.lower(cls.multipass_group_name))
.all())
for provider, name in res:
alternatives[(provider, name.lower())].add(name)
return dict(alternatives)
def _include_capitalized_groups(groups):
alternatives = _get_alternative_group_names()
for group in groups:
yield group.identifier
for alt_name in alternatives.get((group.provider, group.name.lower()), ()):
yield GroupProxy(alt_name, group.provider).identifier
@memoize_redis(3600)
def get_user_access(user, admin_override_enabled=False):
if not user:
return []
if admin_override_enabled and user.is_admin:
return ['IndicoAdmin']
access = [user.identifier] + [u.identifier for u in user.get_merged_from_users_recursive()]
access += [GroupProxy(x.id, _group=x).identifier for x in user.local_groups]
if user.can_get_all_multipass_groups:
multipass_groups = [GroupProxy(x.name, x.provider.name, x)
for x in user.iter_all_multipass_groups()]
access += _include_capitalized_groups(multipass_groups)
return access
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.build_graph.aliased_target import AliasTarget
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.resources import Resources
from pants.build_graph.target import Target
from pants.build_graph.target_scopes import Scopes
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import open_zip
from pants.util.memo import memoized_method, memoized_property
class JvmDependencyAnalyzer(object):
"""Helper class for tasks which need to analyze source dependencies.
Primary purpose is to provide a classfile --> target mapping, which subclasses can use in
determining which targets correspond to the actual source dependencies of any given target.
"""
def __init__(self, buildroot, runtime_classpath, product_deps_by_src):
self.buildroot = buildroot
self.runtime_classpath = runtime_classpath
self.product_deps_by_src = product_deps_by_src
@memoized_method
def files_for_target(self, target):
"""Yields a sequence of abs path of source, class or jar files provided by the target.
The runtime classpath for a target must already have been finalized for a target in order
to compute its provided files.
"""
def gen():
# Compute src -> target.
if isinstance(target, JvmTarget):
for src in target.sources_relative_to_buildroot():
yield os.path.join(self.buildroot, src)
# TODO(Tejal Desai): pantsbuild/pants/65: Remove java_sources attribute for ScalaLibrary
if isinstance(target, ScalaLibrary):
for java_source in target.java_sources:
for src in java_source.sources_relative_to_buildroot():
yield os.path.join(self.buildroot, src)
# Compute classfile -> target and jar -> target.
files = ClasspathUtil.classpath_contents((target,), self.runtime_classpath)
# And jars; for binary deps, zinc doesn't emit precise deps (yet).
cp_entries = ClasspathUtil.classpath((target,), self.runtime_classpath)
jars = [cpe for cpe in cp_entries if ClasspathUtil.is_jar(cpe)]
for coll in [files, jars]:
for f in coll:
yield f
return set(gen())
def targets_by_file(self, targets):
"""Returns a map from abs path of source, class or jar file to an OrderedSet of targets.
The value is usually a singleton, because a source or class file belongs to a single target.
However a single jar may be provided (transitively or intransitively) by multiple JarLibrary
targets. But if there is a JarLibrary target that depends on a jar directly, then that
"canonical" target will be the first one in the list of targets.
"""
targets_by_file = defaultdict(OrderedSet)
for target in targets:
for f in self.files_for_target(target):
targets_by_file[f].add(target)
return targets_by_file
def _jar_classfiles(self, jar_file):
"""Returns an iterator over the classfiles inside jar_file."""
with open_zip(jar_file, 'r') as jar:
for cls in jar.namelist():
if cls.endswith(b'.class'):
yield cls
def count_products(self, target):
contents = ClasspathUtil.classpath_contents((target,), self.runtime_classpath)
# Generators don't implement len.
return sum(1 for _ in contents)
@memoized_property
def bootstrap_jar_classfiles(self):
"""Returns a set of classfiles from the JVM bootstrap jars."""
bootstrap_jar_classfiles = set()
for jar_file in self._find_all_bootstrap_jars():
for cls in self._jar_classfiles(jar_file):
bootstrap_jar_classfiles.add(cls)
return bootstrap_jar_classfiles
def _find_all_bootstrap_jars(self):
def get_path(key):
return DistributionLocator.cached().system_properties.get(key, '').split(':')
def find_jars_in_dirs(dirs):
ret = []
for d in dirs:
if os.path.isdir(d):
ret.extend(filter(lambda s: s.endswith('.jar'), os.listdir(d)))
return ret
# Note: assumes HotSpot, or some JVM that supports sun.boot.class.path.
# TODO: Support other JVMs? Not clear if there's a standard way to do so.
# May include loose classes dirs.
boot_classpath = get_path('sun.boot.class.path')
# Note that per the specs, overrides and extensions must be in jars.
# Loose class files will not be found by the JVM.
override_jars = find_jars_in_dirs(get_path('java.endorsed.dirs'))
extension_jars = find_jars_in_dirs(get_path('java.ext.dirs'))
# Note that this order matters: it reflects the classloading order.
bootstrap_jars = filter(os.path.isfile, override_jars + boot_classpath + extension_jars)
return bootstrap_jars # Technically, may include loose class dirs from boot_classpath.
def compute_transitive_deps_by_target(self, targets):
"""Map from target to all the targets it depends on, transitively."""
# Sort from least to most dependent.
sorted_targets = reversed(sort_targets(targets))
transitive_deps_by_target = defaultdict(set)
# Iterate in dep order, to accumulate the transitive deps for each target.
for target in sorted_targets:
transitive_deps = set()
for dep in target.dependencies:
transitive_deps.update(transitive_deps_by_target.get(dep, []))
transitive_deps.add(dep)
# Need to handle the case where a java_sources target has dependencies.
# In particular if it depends back on the original target.
if hasattr(target, 'java_sources'):
for java_source_target in target.java_sources:
for transitive_dep in java_source_target.dependencies:
transitive_deps_by_target[java_source_target].add(transitive_dep)
transitive_deps_by_target[target] = transitive_deps
return transitive_deps_by_target
def resolve_aliases(self, target, scope=None):
"""Resolve aliases in the direct dependencies of the target.
:param target: The direct dependencies of this target are included.
:param scope: When specified, only deps with this scope are included. This is more
than a filter, because it prunes the subgraphs represented by aliases with
un-matched scopes.
:returns: An iterator of (resolved_dependency, resolved_from) tuples.
`resolved_from` is the top level target alias that depends on `resolved_dependency`,
and `None` if `resolved_dependency` is not a dependency of a target alias.
"""
for declared in target.dependencies:
if scope is not None and declared.scope != scope:
# Only `DEFAULT` scoped deps are eligible for the unused dep check.
continue
elif type(declared) in (AliasTarget, Target):
# Is an alias. Recurse to expand.
for r, _ in self.resolve_aliases(declared, scope=scope):
yield r, declared
else:
yield declared, None
def compute_unused_deps(self, target):
"""Computes unused deps for the given Target.
    :returns: A dict mapping each directly declared but unused dependency to a set of suggested replacement targets.
"""
# Flatten the product deps of this target.
product_deps = set()
for dep_entries in self.product_deps_by_src.get(target, {}).values():
product_deps.update(dep_entries)
# Determine which of the DEFAULT deps in the declared set of this target were used.
used = set()
unused = set()
for dep, _ in self.resolve_aliases(target, scope=Scopes.DEFAULT):
if dep in used or dep in unused:
continue
# TODO: What's a better way to accomplish this check? Filtering by `has_sources` would
# incorrectly skip "empty" `*_library` targets, which could then be used as a loophole.
if isinstance(dep, (Resources, UnpackedJars)):
continue
# If any of the target's jars or classfiles were used, consider it used.
if product_deps.isdisjoint(self.files_for_target(dep)):
unused.add(dep)
else:
used.add(dep)
# If there were no unused deps, break.
if not unused:
return {}
# For any deps that were used, count their derived-from targets used as well.
# TODO: Refactor to do some of this above once tests are in place.
for dep in list(used):
for derived_from in dep.derived_from_chain:
if derived_from in unused:
unused.remove(derived_from)
used.add(derived_from)
# Prune derived targets that would be in the set twice.
for dep in list(unused):
if set(dep.derived_from_chain) & unused:
unused.remove(dep)
if not unused:
return {}
# For any deps that were not used, determine whether their transitive deps were used, and
# recommend those as replacements.
replacements = {}
for dep in unused:
replacements[dep] = set()
for t in dep.closure():
if t in used or t in unused:
continue
if not product_deps.isdisjoint(self.files_for_target(t)):
replacements[dep].add(t.concrete_derived_from)
return replacements
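# Construction sketch (names and product keys below are approximate and shown
# for illustration only): a task would typically build the analyzer from its
# context products and then query it, roughly along these lines:
#   analyzer = JvmDependencyAnalyzer(buildroot,
#                                    context.products.get_data('runtime_classpath'),
#                                    product_deps_by_src)
#   unused_to_suggested = analyzer.compute_unused_deps(some_jvm_target)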
|
|
#!/usr/bin/env python
#
# Copyright (c) 2011, Andres Moreira <andres@andresmoreira.com>
# 2011, Felipe Cruz <felipecruz@loogica.net>
# 2012, JT Olds <jt@spacemonkey.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the authors nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL ANDRES MOREIRA BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""python-snappy
Python library for the snappy compression library from Google.
Expected usage like:
import snappy
compressed = snappy.compress("some data")
assert "some data" == snappy.uncompress(compressed)
"""
import sys
import struct
try:
from _snappy import UncompressError, compress, decompress, \
isValidCompressed, uncompress, _crc32c
except ImportError:
from snappy_cffi import UncompressError, compress, decompress, \
isValidCompressed, uncompress, _crc32c
_CHUNK_MAX = 65536
_STREAM_TO_STREAM_BLOCK_SIZE = _CHUNK_MAX
_STREAM_IDENTIFIER = "sNaPpY"
_COMPRESSED_CHUNK = 0x00
_UNCOMPRESSED_CHUNK = 0x01
_IDENTIFIER_CHUNK = 0xff
_RESERVED_UNSKIPPABLE = (0x02, 0x80) # chunk ranges are [inclusive, exclusive)
_RESERVED_SKIPPABLE = (0x80, 0xff)
# the minimum percent of bytes compression must save to be enabled in automatic
# mode
_COMPRESSION_THRESHOLD = .125
def _masked_crc32c(data):
# see the framing format specification
crc = _crc32c(data)
return (((crc >> 15) | (crc << 17)) + 0xa282ead8) & 0xffffffff
_compress = compress
_uncompress = uncompress
py3k = False
if sys.hexversion > 0x03000000:
unicode = str
py3k = True
def compress(data, encoding='utf-8'):
if isinstance(data, unicode):
data = data.encode(encoding)
return _compress(data)
def uncompress(data, decoding=None):
if isinstance(data, unicode):
raise UncompressError("It's only possible to uncompress bytes")
if decoding:
return _uncompress(data).decode(decoding)
return _uncompress(data)
decompress = uncompress
class StreamCompressor(object):
"""This class implements the compressor-side of the proposed Snappy framing
format, found at
http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
?spec=svn68&r=71
This class matches the interface found for the zlib module's compression
objects (see zlib.compressobj), but also provides some additions, such as
the snappy framing format's ability to intersperse uncompressed data.
Keep in mind that this compressor object does no buffering for you to
appropriately size chunks. Every call to StreamCompressor.compress results
in a unique call to the underlying snappy compression method.
"""
__slots__ = ["_header_chunk_written"]
def __init__(self):
self._header_chunk_written = False
def add_chunk(self, data, compress=None):
"""Add a chunk containing 'data', returning a string that is framed and
(optionally, default) compressed. This data should be concatenated to
the tail end of an existing Snappy stream. In the absence of any
internal buffering, no data is left in any internal buffers, and so
unlike zlib.compress, this method returns everything.
If compress is None, compression is determined automatically based on
snappy's performance. If compress == True, compression always happens,
and if compress == False, compression never happens.
"""
if not self._header_chunk_written:
self._header_chunk_written = True
out = [struct.pack("<L", _IDENTIFIER_CHUNK +
(len(_STREAM_IDENTIFIER) << 8)),
_STREAM_IDENTIFIER]
else:
out = []
for i in range(0, len(data), _CHUNK_MAX):
chunk = data[i:i + _CHUNK_MAX]
crc = _masked_crc32c(chunk)
if compress is None:
compressed_chunk = _compress(chunk)
if (len(compressed_chunk) <=
(1 - _COMPRESSION_THRESHOLD) * len(chunk)):
chunk = compressed_chunk
chunk_type = _COMPRESSED_CHUNK
else:
chunk_type = _UNCOMPRESSED_CHUNK
compressed_chunk = None
elif compress:
chunk = _compress(chunk)
chunk_type = _COMPRESSED_CHUNK
else:
chunk_type = _UNCOMPRESSED_CHUNK
out.append(struct.pack("<LL", chunk_type + ((len(chunk) + 4) << 8),
crc))
out.append(chunk)
return "".join(out)
def compress(self, data):
"""This method is simply an alias for compatibility with zlib
compressobj's compress method.
"""
return self.add_chunk(data)
def flush(self, mode=None):
"""This method does nothing and only exists for compatibility with
the zlib compressobj
"""
pass
def copy(self):
"""This method exists for compatibility with the zlib compressobj.
"""
copy = StreamCompressor()
copy._header_chunk_written = self._header_chunk_written
return copy
class StreamDecompressor(object):
"""This class implements the decompressor-side of the proposed Snappy
framing format, found at
http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
?spec=svn68&r=71
This class matches a subset of the interface found for the zlib module's
decompression objects (see zlib.decompressobj). Specifically, it currently
implements the decompress method without the max_length option, the flush
method without the length option, and the copy method.
"""
__slots__ = ["_buf", "_header_found"]
def __init__(self):
self._buf = ""
self._header_found = False
def decompress(self, data):
"""Decompress 'data', returning a string containing the uncompressed
data corresponding to at least part of the data in string. This data
should be concatenated to the output produced by any preceding calls to
the decompress() method. Some of the input data may be preserved in
internal buffers for later processing.
"""
self._buf += data
uncompressed = []
while True:
if len(self._buf) < 4:
return "".join(uncompressed)
chunk_type = struct.unpack("<L", self._buf[:4])[0]
size = (chunk_type >> 8)
chunk_type &= 0xff
if not self._header_found:
if (chunk_type != _IDENTIFIER_CHUNK or
size != len(_STREAM_IDENTIFIER)):
raise UncompressError("stream missing snappy identifier")
self._header_found = True
if (_RESERVED_UNSKIPPABLE[0] <= chunk_type and
chunk_type < _RESERVED_UNSKIPPABLE[1]):
raise UncompressError(
"stream received unskippable but unknown chunk")
if len(self._buf) < 4 + size:
return "".join(uncompressed)
chunk, self._buf = self._buf[4:4 + size], self._buf[4 + size:]
if chunk_type == _IDENTIFIER_CHUNK:
if chunk != _STREAM_IDENTIFIER:
raise UncompressError(
"stream has invalid snappy identifier")
continue
if (_RESERVED_SKIPPABLE[0] <= chunk_type and
chunk_type < _RESERVED_SKIPPABLE[1]):
continue
assert chunk_type in (_COMPRESSED_CHUNK, _UNCOMPRESSED_CHUNK)
crc, chunk = chunk[:4], chunk[4:]
if chunk_type == _COMPRESSED_CHUNK:
chunk = _uncompress(chunk)
if struct.pack("<L", _masked_crc32c(chunk)) != crc:
raise UncompressError("crc mismatch")
uncompressed.append(chunk)
def flush(self):
"""All pending input is processed, and a string containing the
remaining uncompressed output is returned. After calling flush(), the
decompress() method cannot be called again; the only realistic action
is to delete the object.
"""
if self._buf != "":
raise UncompressError("chunk truncated")
return ""
def copy(self):
"""Returns a copy of the decompression object. This can be used to save
the state of the decompressor midway through the data stream in order
to speed up random seeks into the stream at a future point.
"""
copy = StreamDecompressor()
copy._buf, copy._header_found = self._buf, self._header_found
return copy
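# Round-trip sketch for the streaming classes above: frame one chunk with
# StreamCompressor and feed the framed bytes back through StreamDecompressor.
def _example_stream_round_trip(data="some data"):
    framed = StreamCompressor().add_chunk(data)
    decompressor = StreamDecompressor()
    out = decompressor.decompress(framed)
    decompressor.flush()  # raises UncompressError if the stream was truncated
    return out == data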
def stream_compress(src, dst, blocksize=_STREAM_TO_STREAM_BLOCK_SIZE):
"""Takes an incoming file-like object and an outgoing file-like object,
reads data from src, compresses it, and writes it to dst. 'src' should
support the read method, and 'dst' should support the write method.
The default blocksize is good for almost every scenario.
"""
compressor = StreamCompressor()
while True:
buf = src.read(blocksize)
if not buf: break
buf = compressor.add_chunk(buf)
if buf: dst.write(buf)
def stream_decompress(src, dst, blocksize=_STREAM_TO_STREAM_BLOCK_SIZE):
"""Takes an incoming file-like object and an outgoing file-like object,
reads data from src, decompresses it, and writes it to dst. 'src' should
support the read method, and 'dst' should support the write method.
The default blocksize is good for almost every scenario.
"""
decompressor = StreamDecompressor()
while True:
buf = src.read(blocksize)
if not buf: break
buf = decompressor.decompress(buf)
if buf: dst.write(buf)
decompressor.flush() # makes sure the stream ended well
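# File-to-file sketch using in-memory buffers; StringIO stands in for real
# file objects here (this module, as written, targets Python 2 byte strings).
def _example_stream_files(data="some data"):
    from StringIO import StringIO
    src, framed, dst = StringIO(data), StringIO(), StringIO()
    stream_compress(src, framed)
    framed.seek(0)
    stream_decompress(framed, dst)
    return dst.getvalue() == data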
def cmdline_main():
"""This method is what is run when invoking snappy via the commandline.
Try python -m snappy --help
"""
import sys
if (len(sys.argv) < 2 or len(sys.argv) > 4 or "--help" in sys.argv or
"-h" in sys.argv or sys.argv[1] not in ("-c", "-d")):
print("Usage: python -m snappy <-c/-d> [src [dst]]")
print(" -c compress")
print(" -d decompress")
print("output is stdout if dst is omitted or '-'")
print("input is stdin if src and dst are omitted or src is '-'.")
sys.exit(1)
if len(sys.argv) >= 4 and sys.argv[3] != "-":
dst = open(sys.argv[3], "wb")
elif hasattr(sys.stdout, 'buffer'):
dst = sys.stdout.buffer
else:
dst = sys.stdout
if len(sys.argv) >= 3 and sys.argv[2] != "-":
src = open(sys.argv[2], "rb")
elif hasattr(sys.stdin, "buffer"):
src = sys.stdin.buffer
else:
src = sys.stdin
if sys.argv[1] == "-c":
method = stream_compress
else:
method = stream_decompress
method(src, dst)
if __name__ == "__main__":
cmdline_main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.callbacks.TensorBoard'])
class TensorBoard(callbacks.TensorBoard):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for histograms
computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding layers
will be saved. If set to 0, embeddings won't be computed. Data to be
visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
embeddings_layer_names: a list of names of layers to keep eye on. If None
or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved.
[Here are details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single input)
or list of Numpy arrays (if the model has multiple inputs). Learn more
about embeddings [in this guide](
https://www.tensorflow.org/programmers_guide/embedding).
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
"""
# pylint: enable=line-too-long
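  # Usage sketch (model and data are hypothetical): the callback is passed to
  # `model.fit` like any other Keras callback, for example:
  #   tb = TensorBoard(log_dir='./logs', histogram_freq=1, write_graph=True)
  #   model.fit(x_train, y_train, epochs=5, callbacks=[tb])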
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
profile_batch=2):
# Don't call super's init since it is an eager-only version.
callbacks.Callback.__init__(self)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and context.executing_eagerly():
logging.warning(
          UserWarning('Weight and gradient histograms not supported for eager '
'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# One profiler session is running if it is True.
self._is_profiling = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if context.executing_eagerly():
self.writer = summary_ops_v2.create_file_writer_v2(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
summary_ops_v2.graph(K.get_graph())
elif self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf_summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not context.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf_summary.merge_all()
    # If both embeddings_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from tensorflow.python.keras.engine import training_utils_v1 # pylint: disable=g-import-not-at-top
self.embeddings_data = training_utils_v1.standardize_input_data(
self.embeddings_data, model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
self.step = step = array_ops.placeholder(dtypes.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = array_ops.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = variables.Variable(
array_ops.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = state_ops.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = saver.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
        raise ImportError('Failed to import TensorBoard. Please make sure that '
                          'TensorBoard integration is complete.')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Arguments:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if context.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.record_if(True):
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary_ops_v2.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_train_batch_begin(self, batch, logs=None):
if (not self._is_profiling and
self._total_batches_seen == self._profile_batch - 1):
profiler.start(self.log_dir)
self._is_profiling = True
def on_train_batch_end(self, batch, logs=None):
return self.on_batch_end(batch, logs)
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_profiling:
profiler.stop()
self._is_profiling = False
def on_train_begin(self, logs=None):
pass
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._epoch = epoch
# pylint: disable=protected-access
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
# pylint: enable=protected-access
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
if self.update_freq == 'epoch':
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
# pylint: disable=protected-access
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
# pylint: enable=protected-access
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
# the `embeddings_data` explicitly. This design allows to pass
# arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = K.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(K.learning_phase(), int):
feed_dict[K.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
if self._is_profiling:
profiler.stop()
self._is_profiling = False
self.writer.close()
|
|
#
# Handler library for Linux IaaS
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import os
import os.path
import sys
import imp
import base64
import json
import time
from os.path import join
from Utils.WAAgentUtil import waagent
from waagent import LoggerInit
import logging
import logging.handlers
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
class HandlerContext:
def __init__(self,name):
self._name = name
self._version = '0.0'
return
class HandlerUtility:
def __init__(self, log, error, short_name):
self._log = log
self._error = error
self._short_name = short_name
self.syslogger = logging.getLogger(self._short_name)
self.syslogger.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address='/dev/log')
formatter = logging.Formatter('%(name)s: %(levelname)s %(message)s')
handler.setFormatter(formatter)
self.syslogger.addHandler(handler)
def _get_log_prefix(self):
return '[%s-%s]' %(self._context._name, self._context._version)
def _get_current_seq_no(self, config_folder):
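        # Returns the sequence number taken from the most recently modified (by mtime)
        # numbered file in config_folder, not necessarily the highest number; -1 if none.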
seq_no = -1
cur_seq_no = -1
freshest_time = None
for subdir, dirs, files in os.walk(config_folder):
for file in files:
try:
cur_seq_no = int(os.path.basename(file).split('.')[0])
if(freshest_time == None):
freshest_time = os.path.getmtime(join(config_folder,file))
seq_no = cur_seq_no
else:
current_file_m_time = os.path.getmtime(join(config_folder,file))
if(current_file_m_time > freshest_time):
freshest_time=current_file_m_time
seq_no = cur_seq_no
except ValueError:
continue
return seq_no
def log(self, message):
self._log(self._get_log_prefix() + message)
def error(self, message):
self._error(self._get_log_prefix() + message)
def syslog(self, level, message):
if level == logging.INFO:
self.syslogger.info(message)
elif level == logging.WARNING:
self.syslogger.warning(message)
elif level == logging.ERROR:
self.syslogger.error(message)
def log_and_syslog(self, level, message):
self.syslog(level, message)
if level == logging.INFO:
self.log(message)
elif level == logging.WARNING:
self.log(" ".join(["Warning:", message]))
elif level == logging.ERROR:
self.error(message)
def _parse_config(self, ctxt):
config = None
try:
config=json.loads(ctxt)
except:
self.error('JSON exception decoding ' + ctxt)
if config == None:
self.error("JSON error processing settings file:" + ctxt)
else:
handlerSettings = config['runtimeSettings'][0]['handlerSettings']
if handlerSettings.has_key('protectedSettings') and \
handlerSettings.has_key("protectedSettingsCertThumbprint") and \
handlerSettings['protectedSettings'] is not None and \
handlerSettings["protectedSettingsCertThumbprint"] is not None:
protectedSettings = handlerSettings['protectedSettings']
thumb=handlerSettings['protectedSettingsCertThumbprint']
cert=waagent.LibDir+'/'+thumb+'.crt'
pkey=waagent.LibDir+'/'+thumb+'.prv'
waagent.SetFileContents('/tmp/kk', protectedSettings)
cleartxt=None
cleartxt=waagent.RunGetOutput("base64 -d /tmp/kk | openssl smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey )[1]
os.remove("/tmp/kk")
if cleartxt == None:
self.error("OpenSSh decode error using thumbprint " + thumb )
do_exit(1,operation,'error','1', operation + ' Failed')
jctxt=''
try:
jctxt=json.loads(cleartxt)
except:
self.error('JSON exception decoding ' + cleartxt)
handlerSettings['protectedSettings']=jctxt
self.log('Config decoded correctly.')
return config
def do_parse_context(self,operation):
_context = self.try_parse_context()
if not _context:
self.do_exit(1,operation,'error','1', operation + ' Failed')
return _context
def try_parse_context(self):
self._context = HandlerContext(self._short_name)
handler_env=None
config=None
ctxt=None
code=0
# get the HandlerEnvironment.json. According to the extension handler spec, it is always in the ./ directory
self.log('cwd is ' + os.path.realpath(os.path.curdir))
handler_env_file='./HandlerEnvironment.json'
if not os.path.isfile(handler_env_file):
self.error("Unable to locate " + handler_env_file)
return None
ctxt=waagent.GetFileContents(handler_env_file)
if ctxt == None :
self.error("Unable to read " + handler_env_file)
try:
handler_env=json.loads(ctxt)
except:
pass
if handler_env == None :
self.log("JSON error processing " + handler_env_file)
return None
if type(handler_env) == list:
handler_env = handler_env[0]
self._context._name = handler_env['name']
self._context._version = str(handler_env['version'])
self._context._config_dir=handler_env['handlerEnvironment']['configFolder']
self._context._log_dir= handler_env['handlerEnvironment']['logFolder']
self._context._log_file= os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log')
self._change_log_file()
self._context._status_dir=handler_env['handlerEnvironment']['statusFolder']
self._context._heartbeat_file=handler_env['handlerEnvironment']['heartbeatFile']
self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
if self._context._seq_no < 0:
self.error("Unable to locate a .settings file!")
return None
self._context._seq_no = str(self._context._seq_no)
self.log('sequence number is ' + self._context._seq_no)
self._context._status_file= os.path.join(self._context._status_dir, self._context._seq_no +'.status')
self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
self.log("setting file path is" + self._context._settings_file)
ctxt=None
ctxt=waagent.GetFileContents(self._context._settings_file)
if ctxt == None :
error_msg = 'Unable to read ' + self._context._settings_file + '. '
self.error(error_msg)
return None
self.log("JSON config: " + ctxt)
self._context._config = self._parse_config(ctxt)
return self._context
def _change_log_file(self):
self.log("Change log file to " + self._context._log_file)
LoggerInit(self._context._log_file,'/dev/stdout')
self._log = waagent.Log
self._error = waagent.Error
def set_verbose_log(self, verbose):
if(verbose == "1" or verbose == 1):
self.log("Enable verbose log")
LoggerInit(self._context._log_file, '/dev/stdout', verbose=True)
else:
self.log("Disable verbose log")
LoggerInit(self._context._log_file, '/dev/stdout', verbose=False)
def is_seq_smaller(self):
return int(self._context._seq_no) <= self._get_most_recent_seq()
def save_seq(self):
self._set_most_recent_seq(self._context._seq_no)
self.log("set most recent sequence number to " + self._context._seq_no)
def exit_if_enabled(self):
self.exit_if_seq_smaller()
def exit_if_seq_smaller(self):
if(self.is_seq_smaller()):
self.log("Current sequence number, " + self._context._seq_no + ", is not greater than the sequnce number of the most recent executed configuration. Exiting...")
sys.exit(0)
self.save_seq()
def _get_most_recent_seq(self):
if(os.path.isfile('mrseq')):
seq = waagent.GetFileContents('mrseq')
if(seq):
return int(seq)
return -1
def is_current_config_seq_greater_inused(self):
return int(self._context._seq_no) > self._get_most_recent_seq()
def get_inused_config_seq(self):
return self._get_most_recent_seq()
def set_inused_config_seq(self,seq):
self._set_most_recent_seq(seq)
def _set_most_recent_seq(self,seq):
waagent.SetFileContents('mrseq', str(seq))
def do_status_report(self, operation, status, status_code, message):
self.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
tstamp=time.strftime(DateTimeFormat, time.gmtime())
stat = [{
"version" : self._context._version,
"timestampUTC" : tstamp,
"status" : {
"name" : self._context._name,
"operation" : operation,
"status" : status,
"code" : status_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
stat_rept = json.dumps(stat)
if self._context._status_file:
with open(self._context._status_file,'w+') as f:
f.write(stat_rept)
def do_heartbeat_report(self, heartbeat_file,status,code,message):
# heartbeat
health_report='[{"version":"1.0","heartbeat":{"status":"' + status+ '","code":"'+ code + '","Message":"' + message + '"}}]'
if waagent.SetFileContents(heartbeat_file,health_report) == None :
            self.error('Unable to write heartbeat info to ' + heartbeat_file)
def do_exit(self,exit_code,operation,status,code,message):
try:
self.do_status_report(operation, status,code,message)
except Exception as e:
self.log("Can't update status: "+str(e))
sys.exit(exit_code)
def get_name(self):
return self._context._name
def get_seq_no(self):
return self._context._seq_no
def get_log_dir(self):
return self._context._log_dir
def get_handler_settings(self):
return self._context._config['runtimeSettings'][0]['handlerSettings']
def get_protected_settings(self):
return self.get_handler_settings().get('protectedSettings')
def get_public_settings(self):
return self.get_handler_settings().get('publicSettings')
|
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name.
    # If it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
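# Example (sketch): get_nested_attribute(entity, ['endpoint', 'port']) resolves the
# 'endpoint' attribute via get_attribute and then returns endpoint.get('port').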
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute under the mapped attribute name.
    # If it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
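# Example (sketch): for a wrapped script whose standard output is
#   EXPECTED_OUTPUT_PORT=8080
#   done
# parse_output returns {'last_output': 'done', 'outputs': {'PORT': '8080'}}.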
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
new_script_process = {'env': env_map}
node_artifacts = {
"scripts": [
{
"relative_path": "configureSupp_hss.sh",
"absolute_path": "artifacts/support_hss_types/scripts/configureSupp_hss.sh"
}
,
{
"relative_path": "deleteSupp_hss.sh",
"absolute_path": "artifacts/support_hss_types/scripts/deleteSupp_hss.sh"
}
,
{
"relative_path": "relationships/supp_to_volume.sh",
"absolute_path": "artifacts/support_hss_types/scripts/relationships/supp_to_volume.sh"
}
,
{
"relative_path": "stopSupp_hss.sh",
"absolute_path": "artifacts/support_hss_types/scripts/stopSupp_hss.sh"
}
,
{
"relative_path": "createSupp_hss.sh",
"absolute_path": "artifacts/support_hss_types/scripts/createSupp_hss.sh"
}
,
{
"relative_path": "startSupp_hss.sh",
"absolute_path": "artifacts/support_hss_types/scripts/startSupp_hss.sh"
}
]
}
relationship_artifacts = {
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
new_script_process['env'].update(download_artifacts(artifacts, download_dir))
ctx.logger.info('Operation is executed with inputs {0}'.format(inputs))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
new_script_process['env'].update(inputs['process']['env'])
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('artifacts/support_hss_types/scripts/deleteSupp_hss.sh'), new_script_process, operationOutputNames)
for k,v in parsed_output['outputs'].items():
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:delete:{0}'.format(k)] = v
ctx.instance.update()
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
class HWaitingRuleLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HWaitingRuleLHS.
"""
# Create the himesis graph
EDGE_LIST = [(3, 0), (0, 5), (4, 1), (1, 5), (2, 3), (6, 2)]
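        # Node indices below correspond to the vs[i]["mm__"] types set further down:
        # 0=held_by, 1=blocked, 2=request, 3=Resource, 4=Resource, 5=Process, 6=Process.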
super(HWaitingRuleLHS, self).__init__(name='HWaitingRuleLHS', num_nodes=7, edges=EDGE_LIST)
self.is_compiled = True # now this instance has been compiled
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__Mutex'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = pickle.loads("""S"#===============================================================================@n# This code is executed after the nodes in the LHS have been matched.@n# You can access a matched node labelled n by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# The given constraint must evaluate to a boolean expression:@n# returning True enables the rule to be applied,@n# returning False forbids the rule from being applied.@n#===============================================================================@n@nreturn True@n"
p1
.""").replace("@n", "\n")
self["name"] = pickle.loads("""S''
.""")
self["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L3277550863639785529332917267335178559L
sb.""")
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[0]["MT_label__"] = pickle.loads("""S'6'
.""")
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[0]["mm__"] = pickle.loads("""S'MT_pre__held_by'
p1
.""")
self.vs[0]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[0]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L72490072080382963330180439758308723887L
sb.""")
self.vs[1]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[1]["MT_label__"] = pickle.loads("""S'7'
.""")
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["mm__"] = pickle.loads("""S'MT_pre__blocked'
p1
.""")
self.vs[1]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[1]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L81122773032648319620206128728610011363L
sb.""")
self.vs[2]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[2]["MT_label__"] = pickle.loads("""S'5'
.""")
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["mm__"] = pickle.loads("""S'MT_pre__request'
p1
.""")
self.vs[2]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[2]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L43628909232077438900949031236016664630L
sb.""")
self.vs[3]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[3]["MT_label__"] = pickle.loads("""S'3'
.""")
self.vs[3]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[3]["mm__"] = pickle.loads("""S'MT_pre__Resource'
p1
.""")
self.vs[3]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[3]["MT_pre__name"] = pickle.loads("""S"@n#===============================================================================@n# This code is executed when evaluating if a node shall be matched by this rule.@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node 'n' by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# The given constraint must evaluate to a boolean expression.@n#===============================================================================@n@nreturn True@n"
p1
.""").replace("@n", "\n")
self.vs[3]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L236893906032470449657487004851290020376L
sb.""")
self.vs[4]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[4]["MT_label__"] = pickle.loads("""S'4'
.""")
self.vs[4]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[4]["mm__"] = pickle.loads("""S'MT_pre__Resource'
p1
.""")
self.vs[4]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[4]["MT_pre__name"] = pickle.loads("""S"@n#===============================================================================@n# This code is executed when evaluating if a node shall be matched by this rule.@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node 'n' by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# The given constraint must evaluate to a boolean expression.@n#===============================================================================@n@nreturn True@n"
p1
.""").replace("@n", "\n")
self.vs[4]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L270342823131535439241940617825124864812L
sb.""")
self.vs[5]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[5]["MT_label__"] = pickle.loads("""S'1'
.""")
self.vs[5]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[5]["mm__"] = pickle.loads("""S'MT_pre__Process'
p1
.""")
self.vs[5]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[5]["MT_pre__name"] = pickle.loads("""S"@n#===============================================================================@n# This code is executed when evaluating if a node shall be matched by this rule.@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node 'n' by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# The given constraint must evaluate to a boolean expression.@n#===============================================================================@n@nreturn True@n"
p1
.""").replace("@n", "\n")
self.vs[5]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L177741816679941888370877730297682793919L
sb.""")
self.vs[6]["MT_subtypeMatching__"] = pickle.loads("""I00
.""")
self.vs[6]["MT_label__"] = pickle.loads("""S'2'
.""")
self.vs[6]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[6]["mm__"] = pickle.loads("""S'MT_pre__Process'
p1
.""")
self.vs[6]["MT_dirty__"] = pickle.loads("""I00
.""")
self.vs[6]["MT_pre__name"] = pickle.loads("""S"@n#===============================================================================@n# This code is executed when evaluating if a node shall be matched by this rule.@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node 'n' by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# The given constraint must evaluate to a boolean expression.@n#===============================================================================@n@nreturn True@n"
p1
.""").replace("@n", "\n")
self.vs[6]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L191977859277208521439236097331207801259L
sb.""")
def eval_name3(self, attr_value, PreNode, graph):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access a matched node 'n' by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name4(self, attr_value, PreNode, graph):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access a matched node 'n' by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, PreNode, graph):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access a matched node 'n' by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, PreNode, graph):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access a matched node 'n' by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
|
"""
An extension of the Python stdlib SimpleHTTPServer module, to
support the "Range" header in HTTP requests, as needed by iOS Safari
to support (some) MP3s.
Some methods are modifications to the original SimpleHTTPServer that is
part of the Python stdlib. This uses the versions that ship with Python
2.7 on Fedora 15.
Licensed under BSD 2-Clause License
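Typical usage (a minimal sketch; the port number is arbitrary):
    import BaseHTTPServer
    server = BaseHTTPServer.HTTPServer(('', 8000), HTTPRangeRequestHandler)
    server.serve_forever()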
"""
__version__ = "0.2"
__author__ = "John Smith <code@john-smith.me>"
import os
import BaseHTTPServer
import SimpleHTTPServer
# Additions for handling Range: header
import logging
import re
class InvalidRangeHeader(Exception):
pass
def parse_range_header(range_header, total_length):
"""
Return a 2-element tuple containing the requested range offsets
in bytes.
- range_header is the HTTP header sans the "Range:" prefix
- total_length is the length in bytes of the requested resource
      (needed to calculate offsets for an 'n bytes from the end' request)
If no Range explicitly requested, returns (None, None)
    If the Range header could not be parsed, raises InvalidRangeHeader
    (which can either be handled as a user request failure, or treated the
    same as if (None, None) had been returned).
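    Examples (sketch), assuming a 1000-byte resource:
        parse_range_header("bytes=0-99", 1000)  -> (0, 99)
        parse_range_header("bytes=500-", 1000)  -> (500, 999)
        parse_range_header("bytes=-200", 1000)  -> (800, 999)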
"""
# range_header = self.headers.getheader("Range")
if range_header is None or range_header == "":
return (None, None)
if not range_header.startswith("bytes="):
# logging.error("Don't know how to parse Range: %s [1]" %
# (range_header))
raise InvalidRangeHeader("Don't know how to parse non-bytes Range: %s" %
(range_header))
regex = re.compile(r"^bytes=(\d*)\-(\d*)$")
rangething = regex.search(range_header)
if rangething:
r1 = rangething.group(1)
r2 = rangething.group(2)
logging.debug("Requested range is [%s]-[%s]" % (r1, r2))
if r1 == "" and r2 == "":
# logging.warning("Requested range is meaningless")
raise InvalidRangeHeader("Requested range is meaningless")
if r1 == "":
# x bytes from the end of the file
try:
final_bytes = int(r2)
except ValueError:
raise InvalidRangeHeader("Invalid trailing range")
return (total_length-final_bytes, total_length - 1)
try:
from_val = int(r1)
except ValueError:
raise InvalidRangeHeader("Invalid starting range value")
if r2 != "":
try:
end_val = int(r2)
except ValueError:
raise InvalidRangeHeader("Invalid ending range value")
return (from_val, end_val)
else:
return (from_val, total_length - 1)
else:
raise InvalidRangeHeader("Don't know how to parse Range: %s" %
(range_header))
class HTTPRangeRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""
Extension of SimpleHTTPServer.SimpleHTTPRequestHandler to support
    the Range header in HTTP requests. (As needed for serving certain
    MP3 files to Mobile Safari.)
"""
server_version = "HTTPRangeServer/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
if self.range_from is not None and self.range_to is not None:
self.copy_chunk(f, self.wfile)
else:
self.copyfile(f, self.wfile)
f.close()
def copy_chunk(self, in_file, out_file):
"""
Copy a chunk of in_file as dictated by self.range_[from|to]
to out_file.
NB: range values are inclusive so 0-99 => 100 bytes
Neither of the file objects are closed when the
function returns. Assumes that in_file is open
for reading, out_file is open for writing.
If range_tuple specifies something bigger/outside
than the size of in_file, out_file will contain as
much content as matches. e.g. with a 1000 byte input,
(500, 2000) will create a 500 byte long file
(2000, 3000) will create a zero length output file
"""
in_file.seek(self.range_from)
# Add 1 because the range is inclusive
left_to_copy = 1 + self.range_to - self.range_from
bytes_copied = 0
while bytes_copied < left_to_copy:
read_buf = in_file.read(left_to_copy)
if len(read_buf) == 0:
break
out_file.write(read_buf)
bytes_copied += len(read_buf)
return bytes_copied
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
fs = os.fstat(f.fileno())
total_length = fs[6]
try:
self.range_from, self.range_to = parse_range_header(
self.headers.getheader("Range"), total_length)
except InvalidRangeHeader, e:
# Just serve them the whole file, although it's possibly
# more correct to return a 4xx error?
logging.warning("Range header parsing failed, "
"serving complete file")
self.range_from = self.range_to = None
if self.range_from is not None or self.range_to is not None:
self.send_response(206)
self.send_header("Accept-Ranges", "bytes")
else:
self.send_response(200)
self.send_header("Content-Type", ctype)
if self.range_from is not None or self.range_to is not None:
# TODO: Should also check that range is within the file size
self.send_header("Content-Range",
"bytes %d-%d/%d" % (self.range_from,
self.range_to,
total_length))
# Add 1 because ranges are inclusive
self.send_header("Content-Length",
(1 + self.range_to - self.range_from))
else:
self.send_header("Content-Length", str(total_length))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def test(HandlerClass = HTTPRangeRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Delta(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "indicator"
_path_str = "indicator.delta"
_valid_props = {
"decreasing",
"font",
"increasing",
"position",
"reference",
"relative",
"valueformat",
}
# decreasing
# ----------
@property
def decreasing(self):
"""
The 'decreasing' property is an instance of Decreasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.delta.Decreasing`
- A dict of string/value properties that will be passed
to the Decreasing constructor
Supported dict properties:
color
                Sets the color for decreasing value.
            symbol
                Sets the symbol to display for decreasing value
Returns
-------
plotly.graph_objs.indicator.delta.Decreasing
"""
return self["decreasing"]
@decreasing.setter
def decreasing(self, val):
self["decreasing"] = val
# font
# ----
@property
def font(self):
"""
Set the font used to display the delta
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.delta.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.indicator.delta.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# increasing
# ----------
@property
def increasing(self):
"""
The 'increasing' property is an instance of Increasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.delta.Increasing`
- A dict of string/value properties that will be passed
to the Increasing constructor
Supported dict properties:
color
Sets the color for increasing value.
symbol
Sets the symbol to display for increasing value
Returns
-------
plotly.graph_objs.indicator.delta.Increasing
"""
return self["increasing"]
@increasing.setter
def increasing(self, val):
self["increasing"] = val
# position
# --------
@property
def position(self):
"""
Sets the position of delta with respect to the number.
The 'position' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'bottom', 'left', 'right']
Returns
-------
Any
"""
return self["position"]
@position.setter
def position(self, val):
self["position"] = val
# reference
# ---------
@property
def reference(self):
"""
Sets the reference value to compute the delta. By default, it
is set to the current value.
The 'reference' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["reference"]
@reference.setter
def reference(self, val):
self["reference"] = val
# relative
# --------
@property
def relative(self):
"""
Show relative change
The 'relative' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["relative"]
@relative.setter
def relative(self, val):
self["relative"] = val
# valueformat
# -----------
@property
def valueformat(self):
"""
Sets the value formatting rule using d3 formatting mini-
language which is similar to those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
The 'valueformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["valueformat"]
@valueformat.setter
def valueformat(self, val):
self["valueformat"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
decreasing
:class:`plotly.graph_objects.indicator.delta.Decreasing
` instance or dict with compatible properties
font
Set the font used to display the delta
increasing
:class:`plotly.graph_objects.indicator.delta.Increasing
` instance or dict with compatible properties
position
Sets the position of delta with respect to the number.
reference
Sets the reference value to compute the delta. By
default, it is set to the current value.
relative
Show relative change
valueformat
Sets the value formatting rule using d3 formatting
mini-language which is similar to those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
"""
def __init__(
self,
arg=None,
decreasing=None,
font=None,
increasing=None,
position=None,
reference=None,
relative=None,
valueformat=None,
**kwargs
):
"""
Construct a new Delta object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.Delta`
decreasing
:class:`plotly.graph_objects.indicator.delta.Decreasing
` instance or dict with compatible properties
font
Set the font used to display the delta
increasing
:class:`plotly.graph_objects.indicator.delta.Increasing
` instance or dict with compatible properties
position
Sets the position of delta with respect to the number.
reference
Sets the reference value to compute the delta. By
default, it is set to the current value.
relative
Show relative change
valueformat
Sets the value formatting rule using d3 formatting
mini-language which is similar to those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
Returns
-------
Delta
"""
super(Delta, self).__init__("delta")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.Delta
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Delta`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("decreasing", None)
_v = decreasing if decreasing is not None else _v
if _v is not None:
self["decreasing"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("increasing", None)
_v = increasing if increasing is not None else _v
if _v is not None:
self["increasing"] = _v
_v = arg.pop("position", None)
_v = position if position is not None else _v
if _v is not None:
self["position"] = _v
_v = arg.pop("reference", None)
_v = reference if reference is not None else _v
if _v is not None:
self["reference"] = _v
_v = arg.pop("relative", None)
_v = relative if relative is not None else _v
if _v is not None:
self["relative"] = _v
_v = arg.pop("valueformat", None)
_v = valueformat if valueformat is not None else _v
if _v is not None:
self["valueformat"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
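# A minimal usage sketch, not part of the generated module above: it shows how
# the delta properties documented in this class (reference, relative, position,
# valueformat) are typically supplied through an Indicator trace. The figure
# values and the helper name are illustrative only.
def _example_indicator_delta():
    import plotly.graph_objects as go

    return go.Figure(
        go.Indicator(
            mode="number+delta",
            value=450,
            delta=dict(
                reference=400,      # value the delta is computed against
                relative=True,      # show the change as a fraction of reference
                position="top",     # one of 'top', 'bottom', 'left', 'right'
                valueformat=".1%",  # d3-format string for the displayed delta
            ),
        )
    )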
|
|
from __future__ import absolute_import
import logging
import datetime
import iso8601
import elasticsearch
from six import iteritems
from six.moves.urllib.parse import urlparse
log = logging.getLogger(__name__)
RESULTS_MAX_SIZE = 200
RESULTS_DEFAULT_SIZE = 20
class ElasticSearch(object):
"""
Thin wrapper around an ElasticSearch connection to make connection handling
more convenient.
Settings for the ES host and index name etcetera can still be changed in
the corresponding attributes before the connection (self.conn) is used.
"""
def __init__(self,
host='http://127.0.0.1:9200',
index='annotator',
authorization_enabled=False):
self.host = host
self.index = index
self.authorization_enabled = authorization_enabled
self.Model = make_model(self)
def _connect(self):
host = self.host
parsed = urlparse(host)
connargs = {
'host': parsed.hostname,
}
username = parsed.username
password = parsed.password
if username is not None or password is not None:
connargs['http_auth'] = ((username or ''), (password or ''))
if parsed.port is not None:
connargs['port'] = parsed.port
if parsed.path:
connargs['url_prefix'] = parsed.path
conn = elasticsearch.Elasticsearch(
hosts=[connargs],
connection_class=elasticsearch.Urllib3HttpConnection)
return conn
@property
def conn(self):
if not hasattr(self, '_connection'):
self._connection = self._connect()
return self._connection
class _Model(dict):
"""Base class that represents a document type in an ElasticSearch index.
A child class is expected to define these two attributes:
__type__ -- The name of the document type
__mapping__ -- A mapping of the document's fields
Mapping: Calling create_all() will create the mapping in the index.
One field, 'id', is treated specially. Its value will not be stored,
but be used as the _id identifier of the document in Elasticsearch. If
an item is indexed without providing an id, the _id is automatically
generated by ES.
Unmapped fields: Fields that are not defined in the mapping are analyzed
using the 'keyword' analyzer, which practically means no analysis is
performed: searching for these fields will be exact and case sensitive.
To make a field full-text searchable, its mapping should configure it
with 'analyzer':'standard'.
"""
@classmethod
def create_all(cls):
log.info("Creating index '%s'.", cls.es.index)
conn = cls.es.conn
conn.indices.create(cls.es.index, ignore=400)
mapping = cls.get_mapping()
conn.indices.put_mapping(index=cls.es.index,
doc_type=cls.__type__,
body=mapping)
@classmethod
def get_mapping(cls):
return {
cls.__type__: {
'_id': {
'path': 'id',
},
'_source': {
'excludes': ['id'],
},
'analyzer': 'keyword',
'properties': cls.__mapping__,
}
}
@classmethod
def drop_all(cls):
if cls.es.conn.indices.exists(cls.es.index):
cls.es.conn.indices.close(cls.es.index)
cls.es.conn.indices.delete(cls.es.index)
# It would be lovely if this were called 'get', but the dict semantics
# already define that method name.
@classmethod
def fetch(cls, docid):
doc = cls.es.conn.get(index=cls.es.index,
doc_type=cls.__type__,
ignore=404,
id=docid)
if doc.get('found', True):
return cls(doc['_source'], id=docid)
@classmethod
def _build_query(cls, query=None, offset=None, limit=None, sort=None, order=None):
if offset is None:
offset = 0
if limit is None:
limit = RESULTS_DEFAULT_SIZE
if query is None:
query = {}
if sort is None:
sort = 'updated'
if order is None:
order = 'desc'
return _build_query(query, offset, limit, sort, order)
@classmethod
def search(cls, query=None, offset=0, limit=RESULTS_DEFAULT_SIZE,
sort='updated', order='desc', **kwargs):
q = cls._build_query(query=query, offset=offset, limit=limit,
sort=sort, order=order)
if not q:
return []
return cls.search_raw(q, **kwargs)
@classmethod
def search_raw(cls, query=None, params=None, raw_result=False):
"""Perform a raw Elasticsearch query
Any ElasticsearchExceptions are to be caught by the caller.
Keyword arguments:
query -- Query to send to Elasticsearch
params -- Extra keyword arguments to pass to Elasticsearch.search
raw_result -- Return Elasticsearch's response as is
"""
if query is None:
query = {}
if params is None:
params = {}
res = cls.es.conn.search(index=cls.es.index,
doc_type=cls.__type__,
body=query,
**params)
if not raw_result:
docs = res['hits']['hits']
res = [cls(d['_source'], id=d['_id']) for d in docs]
return res
@classmethod
def count(cls, **kwargs):
"""Like search, but only count the number of matches."""
kwargs.setdefault('params', {})
kwargs['params'].update({'search_type': 'count'})
res = cls.search(raw_result=True, **kwargs)
return res['hits']['total']
def save(self, refresh=True):
_add_created(self)
_add_updated(self)
if 'id' not in self:
op_type = 'create'
else:
op_type = 'index'
res = self.es.conn.index(index=self.es.index,
doc_type=self.__type__,
body=self,
op_type=op_type,
refresh=refresh)
self['id'] = res['_id']
def delete(self):
if 'id' in self:
self.es.conn.delete(index=self.es.index,
doc_type=self.__type__,
id=self['id'])
def make_model(es):
return type('Model', (_Model,), {'es': es})
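# A minimal usage sketch, not part of the original module: it wires the pieces
# above together (ElasticSearch, make_model, create_all, save, fetch, search).
# The host, index name, the 'Annotation' model and its mapping are illustrative
# assumptions only.
def _example_model_usage():
    es = ElasticSearch(host='http://127.0.0.1:9200', index='annotator')

    class Annotation(es.Model):
        __type__ = 'annotation'
        __mapping__ = {
            'text': {'type': 'string', 'analyzer': 'standard'},
            'updated': {'type': 'date'},
        }

    Annotation.create_all()            # create the index and put the mapping
    ann = Annotation({'text': 'hello'})
    ann.save()                         # 'id' is filled in from the ES response
    fetched = Annotation.fetch(ann['id'])
    matches = Annotation.search(query={'text': 'hello'}, limit=10)
    return fetched, matches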
def _build_query(query, offset, limit, sort, order):
# Create a match query for each keyword
match_clauses = [{'match': {k: v}} for k, v in iteritems(query)]
if len(match_clauses) == 0:
        # Elasticsearch considers an empty conjunction to be false.
match_clauses.append({'match_all': {}})
return {
'sort': [{sort: {
# Sort most recent first
'order': order,
# While we do always provide a mapping for the field, elasticsearch
# will bomb if there are no documents in the index. Although this
# is an edge case, we don't want the API to return a 500 with an
# empty index, so ignore this sort instruction if the field appears
# unmapped due to an empty index.
'ignore_unmapped': True,
}}],
'from': max(0, offset),
'size': min(RESULTS_MAX_SIZE, max(0, limit)),
'query': {'bool': {'must': match_clauses}}
}
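# For example, the query body produced by the function above:
#   _build_query({'user': 'alice'}, 0, 20, 'updated', 'desc') ==
#   {'sort': [{'updated': {'order': 'desc', 'ignore_unmapped': True}}],
#    'from': 0,
#    'size': 20,
#    'query': {'bool': {'must': [{'match': {'user': 'alice'}}]}}}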
def _add_created(ann):
if 'created' not in ann:
ann['created'] = datetime.datetime.now(iso8601.iso8601.UTC).isoformat()
def _add_updated(ann):
ann['updated'] = datetime.datetime.now(iso8601.iso8601.UTC).isoformat()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os
#sys.path.append("".join([os.path.dirname(__file__), "/../"]))
from libnmap.parser import NmapParser
from libnmap.diff import NmapDiffException
class TestNmapParser(unittest.TestCase):
def setUp(self):
fdir = os.path.dirname(os.path.realpath(__file__))
self.flist_full = [{'file': "%s/%s" % (fdir, 'files/2_hosts.xml'),
'hosts': 2},
{'file': "%s/%s" % (fdir, 'files/1_hosts.xml'),
'hosts': 1},
{'file': "%s/%s" % (fdir,
'files/1_hosts_banner_ports_notsyn.xml'),
'hosts': 1},
{'file': "%s/%s" % (fdir,
'files/1_hosts_banner_ports.xml'),
'hosts': 1},
{'file': "%s/%s" % (fdir,
'files/1_hosts_banner.xml'),
'hosts': 1},
{'file': "%s/%s" % (fdir,
'files/2_hosts_version.xml'),
'hosts': 2},
{'file': "%s/%s" % (fdir,
'files/2_tcp_hosts.xml'),
'hosts': 2},
{'file': "%s/%s" % (fdir,
'files/1_hosts_nohostname.xml'),
'hosts': 1}]
self.flist_one = [{'file': "%s/%s" % (fdir,
'files/1_hosts_nohostname.xml'),
'hosts': 1}]
self.flist_two = [{'file': "%s/%s" % (fdir, 'files/2_hosts.xml'),
'hosts': 2,
'elapsed': '134.36', 'endtime': "1361738040",
'summary': ("Nmap done at Sun Feb 24 21:34:00 2013;"
" 2 IP addresses (2 hosts up) scanned"
" in 134.36 seconds")}]
self.hlist = [{'hostname': 'localhost', 'ports': 5, 'open': 5},
{'hostname': 'localhost2', 'ports': 4, 'open': 2},
{'hostname': 'scanme.nmap.org', 'ports': 4, 'open': 3},
{'hostname': '1.1.1.1', 'ports': 2, 'open': 0}]
self.flist_banner = [{'file': "%s/%s" % (fdir,
'files/1_hosts_banner.xml'),
'banner': {
'631': 'product: CUPS version: 1.4',
'3306':
'product: MySQL version: 5.1.61',
'22': ("product: OpenSSH version: 5.3"
" extrainfo: protocol 2.0"),
'25': ("product: Postfix smtpd"
" hostname: jambon.localdomain"),
'111': ''}}]
self.flist = self.flist_full
def test_report_constructor(self):
for testfile in self.flist:
fd = open(testfile['file'], 'r')
s = fd.read()
fd.close()
nr = NmapParser.parse(s)
nr2 = NmapParser.parse(s)
self.assertEqual(len(nr.hosts), testfile['hosts'])
self.assertEqual(len(nr2.hosts), testfile['hosts'])
self.assertEqual(sorted(nr2.get_raw_data()),
sorted(nr.get_raw_data()))
def test_get_ports(self):
for testfile in self.flist:
fd = open(testfile['file'], 'r')
s = fd.read()
fd.close()
nr = NmapParser.parse(s)
for h in nr.hosts:
for th in self.hlist:
continue
# TODO FIX THIS TEST
# if th['hostname'] == h.hostnames[0]:
# self.assertEqual(th['ports'], len(h.get_ports()))
# self.assertEqual(th['open'], len(h.get_open_ports()))
for np in h.get_open_ports():
sport = h.get_service(np[0], np[1])
self.assertEqual((sport.port, sport.protocol), np)
def test_runstats(self):
for testfile in self.flist_two:
fd = open(testfile['file'], 'r')
s = fd.read()
fd.close()
nr = NmapParser.parse(s)
self.assertEqual(getattr(nr, 'endtime'), int(testfile['endtime']))
self.assertEqual(getattr(nr, 'summary'), testfile['summary'])
self.assertEqual(getattr(nr, 'elapsed'), float(testfile['elapsed']))
def test_banner(self):
for testfile in self.flist_banner:
fd = open(testfile['file'], 'r')
nr = NmapParser.parse(fd.read())
fd.close()
for h in nr.hosts:
for service in h.services:
b = service.banner
self.assertEqual(b, testfile['banner'][str(service.port)])
def test_service_equal(self):
for testfile in self.flist:
fd = open(testfile['file'], 'r')
np1 = NmapParser.parse(fd.read())
fd.close()
fd = open(testfile['file'], 'r')
np2 = NmapParser.parse(fd.read())
fd.close()
host1 = np1.hosts.pop()
host2 = np2.hosts.pop()
"""All the service of the host must be compared and
the hash should be also the same"""
for i in range(len(host1.services)):
self.assertEqual(hash(host1.services[i]),
hash(host2.services[i]))
self.assertEqual(host1.services[i],
host2.services[i])
#print host1.serviceChanged(host2)
def test_service_not_equal(self):
for testfile in self.flist:
fd = open(testfile['file'], 'r')
np1 = NmapParser.parse(fd.read())
fd.close()
fd = open(testfile['file'], 'r')
np2 = NmapParser.parse(fd.read())
fd.close()
host1 = np1.hosts.pop()
host2 = np2.hosts.pop()
for i in range(len(host1.services)):
host1.services[i]._state['state'] = 'changed'
self.assertNotEqual(host1.services[i], host2.services[i])
#print "-----------"
#print host1.serviceChanged(host2)
#print "-----------"
def test_host_not_equal(self):
for testfile in self.flist:
fd = open(testfile['file'], 'r')
np1 = NmapParser.parse(fd.read())
fd.close()
fd = open(testfile['file'], 'r')
np2 = NmapParser.parse(fd.read())
fd.close()
host1 = np1.hosts.pop()
host2 = np2.hosts.pop()
host1.address = {'addr': '1.3.3.7', 'addrtype': 'ipv4'}
self.assertNotEqual(host1, host2)
def test_host_equal(self):
for testfile in self.flist:
fd = open(testfile['file'], 'r')
np1 = NmapParser.parse(fd.read())
fd.close()
fd = open(testfile['file'], 'r')
np2 = NmapParser.parse(fd.read())
fd.close()
host1 = np1.hosts.pop()
host2 = np2.hosts.pop()
host1.services[0]._portid = '23'
self.assertEqual(host1, host2)
def test_host_address_changed(self):
fdir = os.path.dirname(os.path.realpath(__file__))
fd1 = open("%s/%s" % (fdir, 'files/1_hosts_down.xml'), 'r')
fd2 = open("%s/%s" % (fdir, 'files/1_hosts.xml'), 'r')
nr1 = NmapParser.parse(fd1.read())
nr2 = NmapParser.parse(fd2.read())
h1 = nr1.hosts[0]
h2 = nr2.hosts[0]
self.assertRaises(NmapDiffException, h1.diff, h2)
def test_host_address_unchanged(self):
fdir = os.path.dirname(os.path.realpath(__file__))
fd1 = open("%s/%s" % (fdir, 'files/1_hosts_down.xml'), 'r')
fd2 = open("%s/%s" % (fdir, 'files/1_hosts.xml'), 'r')
fd3 = open("%s/%s" % (fdir, 'files/1_hosts.xml'), 'r')
nr1 = NmapParser.parse(fd1.read())
nr2 = NmapParser.parse(fd2.read())
nr3 = NmapParser.parse(fd3.read())
h1 = nr1.hosts.pop()
h2 = nr2.hosts.pop()
h3 = nr3.hosts.pop()
self.assertRaises(NmapDiffException, h1.diff, h2)
self.assertEqual(h2.diff(h3).changed(), set([]))
self.assertEqual(h2.diff(h3).added(), set([]))
self.assertEqual(h2.diff(h3).removed(), set([]))
self.assertEqual(h2.diff(h3).unchanged(),
set(['status',
"NmapService::tcp.22",
"NmapService::tcp.111",
"NmapService::tcp.631",
'hostnames',
"NmapService::tcp.3306",
'address',
"NmapService::tcp.25"]))
if __name__ == '__main__':
test_suite = ['test_report_constructor', 'test_get_ports',
'test_runstats', 'test_banner', 'test_service_equal',
'test_service_not_equal', 'test_host_not_equal',
'test_host_equal', 'test_host_address_changed',
'test_host_address_unchanged']
suite = unittest.TestSuite(map(TestNmapParser, test_suite))
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
programs
----------------------------------
The different programs feeding into the UGS
service. They handle seeding and updating the different
data sources.
"""
import ConfigParser
import csv
import cx_Oracle
import glob
import models
import resultmodels as resultmodel
import stationmodels as stationmodel
import os
from services import WebQuery, Normalizer, ChargeBalancer
class Balanceable(object):
"""
common balanceable things for programs
"""
#: the concentrations grouped with their sampleid
samples = None
def __init__(self):
super(Balanceable, self).__init__()
self.samples = {}
self.balancer = ChargeBalancer()
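    #: collect each sample's concentrations keyed by sample id so that
    #: write_balance_rows can later compute one charge balance per sample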
def track_concentration(self, etl):
etl.balance(etl.row)
if etl.sample_id in self.samples.keys():
self.samples[etl.sample_id].append(etl.concentration)
return
self.samples[etl.sample_id] = etl.concentration
def write_balance_rows(self, etl, location):
for sample_id in self.samples.keys():
concentration = self.samples[sample_id]
if not concentration.has_major_params:
continue
balance, cation, anion = (
self.balancer.calculate_charge_balance(concentration))
balance = {'balance': balance,
'cation': cation,
'anion': anion}
balance_rows = etl.create_rows_from_balance(sample_id, balance)
for row in balance_rows:
self._insert_row(row, etl.balance_fields, location)
class Program(object):
def __init__(self, location, InsertCursor):
super(Program, self).__init__()
self.location = location
self.InsertCursor = InsertCursor
self.normalizer = Normalizer()
def _get_default_fields(self, schema_map):
fields = []
for item in schema_map:
fields.append(item)
return fields
def _get_fields(self, schema_map):
return [schema_map[item].field_name for item in schema_map]
class GdbProgram(Program):
def __init__(self, location, InsertCursor):
super(GdbProgram, self).__init__(location, InsertCursor)
def _read_gdb(self, location, fields):
#: location - the path to the table data
#: fields - the fields form the data to pull
try:
with self.SearchCursor(location, fields) as cursor:
for row in cursor:
yield row
except RuntimeError as e:
#: the fields in the feature class
import arcpy
actual = set([str(x.name) for x in arcpy.ListFields(location)])
#: the fields you are trying to use
input_fields = set(fields)
missing = input_fields - actual
print 'the fouled up columns are {}'.format(missing)
raise e
def _insert_row(self, row, fields, location):
with self.InsertCursor(location, fields) as cursor:
cursor.insertRow(row)
class Wqp(Program):
def _insert_rows(self, data, feature_class):
location = os.path.join(self.location, feature_class)
print 'inserting into {} WQP type {}'.format(location, feature_class)
station_ids = {}
if feature_class == 'Results':
Type = resultmodel.WqpResult
elif feature_class == 'Stations':
Type = stationmodel.WqpStation
schema_map = Type.build_schema_map(feature_class)
fields = self._get_fields(schema_map)
if feature_class == 'Stations':
fields.append('SHAPE@XY')
        with self.InsertCursor(location, fields) as cursor:
for row in data:
etl = Type(row, self.normalizer)
insert_row = etl.row
station_id = etl.normalize_fields['stationid'][0]
if station_id:
if station_id in station_ids.keys():
#: station is already inserted skip it
continue
station_ids[station_id] = True
try:
                    cursor.insertRow(insert_row)
except Exception as e:
raise e
def _csvs_on_disk(self, parent_folder, type):
folder = os.path.join(parent_folder, type, '*.csv')
for file in glob.glob(folder):
yield file
def _query(self, url):
data = WebQuery().results(url)
return data
def _read_response(self, data):
reader = csv.DictReader(data, delimiter=',')
return reader
def _build_field_length_structure(self, schema):
"""turns the schema doc into a structure that can count fields lengths
dict[source column] = array[destination column, count]
"""
results = {}
for column in schema:
            #: only count text-like columns that map from a source field
            if column['source'] is None or (
                    column['type'].lower() != 'text' and
                    column['type'].lower() != 'string'):
                continue
results[column['source']] = [column['destination'], 0]
return results
def field_lengths(self, folder, program_type):
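        #: walk the program's csv files and record, per source column, the
        #: longest value seen: dict[source column] = [destination column, max]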
schema = models.Schema()
if program_type.lower() == 'stations':
maps = self._build_field_length_structure(schema.station)
elif program_type.lower() == 'results':
maps = self._build_field_length_structure(schema.result)
else:
raise Exception('flag must be stations or results')
for csv_file in self._csvs_on_disk(folder, program_type):
print 'processing {}'.format(csv_file)
with open(csv_file, 'r') as f:
data = csv.DictReader(f)
for row in data:
for key in maps.keys():
length = len(row[key])
if maps[key][1] < length:
maps[key][1] = length
return maps
def seed(self, folder, model_types):
for model_type in model_types:
for csv_file in self._csvs_on_disk(folder, model_type):
with open(csv_file, 'r') as f:
print 'processing {}'.format(csv_file)
self._insert_rows(csv.DictReader(f), model_type)
print 'processing {}: done'.format(csv_file)
def update(self, model_types):
for model_type in model_types:
response = self._query(model_type)
csv = self._read_response(response)
self._insert_rows(csv, model_type)
class Sdwis(Program):
#: testing variable to reduce query times
count = None
_result_query = """SELECT
UTV80.TSASAR.ANALYSIS_START_DT AS "AnalysisDate",
UTV80.TSALAB.LAB_ID_NUMBER AS "LabName",
UTV80.TSASAR.DETECTN_LIMIT_NUM AS "MDL",
UTV80.TSASAR.DETECTN_LIM_UOM_CD AS "MDLUnit",
UTV80.TINWSYS.TINWSYS_IS_NUMBER AS "OrgId",
UTV80.TINWSYS.NAME AS "OrgName",
UTV80.TSAANLYT.NAME AS "Param",
UTV80.TSASAR.CONCENTRATION_MSR AS "ResultValue",
UTV80.TSASAMPL.COLLLECTION_END_DT AS "SampleDate",
UTV80.TSASAMPL.COLLCTN_END_TIME AS "SampleTime",
UTV80.TSASAMPL.LAB_ASGND_ID_NUM AS "SampleId",
UTV80.TINWSF.TYPE_CODE AS "SampType",
UTV80.TINWSF.TINWSF_IS_NUMBER AS "StationId",
UTV80.TSASAR.UOM_CODE AS "Unit",
UTV80.TINLOC.LATITUDE_MEASURE AS "Lat_Y",
UTV80.TINLOC.LONGITUDE_MEASURE AS "Lon_X",
UTV80.TSAANLYT.CAS_REGISTRY_NUM AS "CAS_Reg",
UTV80.TSASAR.TSASAR_IS_NUMBER AS "IdNum"
FROM UTV80.TINWSF
JOIN UTV80.TINWSYS ON
UTV80.TINWSF.TINWSYS_IS_NUMBER = UTV80.TINWSYS.TINWSYS_IS_NUMBER
JOIN UTV80.TINLOC ON
UTV80.TINWSF.TINWSF_IS_NUMBER = UTV80.TINLOC.TINWSF_IS_NUMBER
JOIN UTV80.TSASMPPT ON
UTV80.TINWSF.TINWSF_IS_NUMBER = UTV80.TSASMPPT.TINWSF0IS_NUMBER
JOIN UTV80.TSASAMPL ON
UTV80.TSASMPPT.TSASMPPT_IS_NUMBER = UTV80.TSASAMPL.TSASMPPT_IS_NUMBER
JOIN UTV80.TSASAR ON
UTV80.TSASAMPL.TSASAMPL_IS_NUMBER = UTV80.TSASAR.TSASAMPL_IS_NUMBER
JOIN UTV80.TSAANLYT ON
UTV80.TSASAR.TSAANLYT_IS_NUMBER = UTV80.TSAANLYT.TSAANLYT_IS_NUMBER
JOIN UTV80.TSALAB ON
UTV80.TSASAMPL.TSALAB_IS_NUMBER = UTV80.TSALAB.TSALAB_IS_NUMBER
WHERE (UTV80.TINWSF.TYPE_CODE = 'SP' Or
UTV80.TINWSF.TYPE_CODE = 'WL') AND
UTV80.TSASAR.CONCENTRATION_MSR IS NOT NULL
ORDER BY UTV80.TINWSF.ST_ASGN_IDENT_CD"""
_station_query = """SELECT
UTV80.TINWSYS.TINWSYS_IS_NUMBER AS "OrgId",
UTV80.TINWSYS.NAME AS "OrgName",
UTV80.TINWSF.TINWSF_IS_NUMBER AS "StationId",
UTV80.TINWSF.NAME AS "StationName",
UTV80.TINWSF.TYPE_CODE AS "StationType",
UTV80.TINLOC.LATITUDE_MEASURE AS "Lat_Y",
UTV80.TINLOC.LONGITUDE_MEASURE AS "Lon_X",
UTV80.TINLOC.HORIZ_ACCURACY_MSR AS "HorAcc",
UTV80.TINLOC.HZ_COLLECT_METH_CD AS "HorCollMeth",
UTV80.TINLOC.HORIZ_REF_DATUM_CD AS "HorRef",
UTV80.TINLOC.VERTICAL_MEASURE AS "Elev",
UTV80.TINLOC.VERT_ACCURACY_MSR AS "ElevAcc",
UTV80.TINLOC.VER_COL_METH_CD AS "ElevMeth",
UTV80.TINLOC.VERT_REF_DATUM_CD AS "ElevRef",
MAX(UTV80.TINWLCAS.BOTTOM_DEPTH_MSR) AS "Depth",
UTV80.TINWLCAS.BOTTOM_DP_MSR_UOM AS "DepthUnit"
FROM UTV80.TINWSF
JOIN UTV80.TINWSYS ON
UTV80.TINWSF.TINWSYS_IS_NUMBER = UTV80.TINWSYS.TINWSYS_IS_NUMBER
JOIN UTV80.TINLOC ON
UTV80.TINWSF.TINWSF_IS_NUMBER = UTV80.TINLOC.TINWSF_IS_NUMBER
LEFT JOIN UTV80.TINWLCAS ON
UTV80.TINWSF.TINWSF_IS_NUMBER = UTV80.TINWLCAS.TINWSF_IS_NUMBER
WHERE (UTV80.TINWSF.TYPE_CODE = 'SP' OR
UTV80.TINWSF.TYPE_CODE = 'WL') AND
UTV80.TINLOC.LATITUDE_MEASURE != 0
GROUP BY UTV80.TINWSF.TINWSF_IS_NUMBER,
UTV80.TINWSF.NAME,
UTV80.TINWSF.TYPE_CODE,
UTV80.TINWSYS.TINWSYS_IS_NUMBER,
UTV80.TINWSYS.NAME,
UTV80.TINLOC.LATITUDE_MEASURE,
UTV80.TINLOC.LONGITUDE_MEASURE,
UTV80.TINLOC.SRC_MAP_SCALE_NUM,
UTV80.TINLOC.HORIZ_ACCURACY_MSR,
UTV80.TINLOC.HZ_COLLECT_METH_CD,
UTV80.TINLOC.HORIZ_REF_DATUM_CD,
UTV80.TINLOC.VERTICAL_MEASURE,
UTV80.TINLOC.VERT_ACCURACY_MSR,
UTV80.TINLOC.VER_COL_METH_CD,
UTV80.TINLOC.VERT_REF_DATUM_CD,
UTV80.TINWLCAS.BOTTOM_DEPTH_MSR,
UTV80.TINWLCAS.BOTTOM_DP_MSR_UOM"""
def __init__(self, location, InsertCursor):
super(Sdwis, self).__init__(location, InsertCursor)
config = ConfigParser.RawConfigParser()
file = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'secrets.cfg')
config.read(file)
user = config.get('sdwis', 'username')
password = config.get('sdwis', 'password')
server = config.get('sdwis', 'server')
instance = config.get('sdwis', 'instance')
self._connection_string = '{}/{}@{}/{}'.format(
user, password, server, instance)
def _query(self, query):
print 'querying SDWIS database'
conn = cx_Oracle.connect(self._connection_string)
cursor = conn.cursor()
results = cursor.execute(query)
if self.count is not None:
some = results.fetchmany(self.count)
cursor.close()
conn.close()
return some
return results
def _insert_rows(self, data, feature_class):
location = os.path.join(self.location, feature_class)
print 'inserting into {} SDWIS type {}'.format(location, feature_class)
if feature_class == 'Results':
Type = resultmodel.SdwisResult
elif feature_class == 'Stations':
Type = stationmodel.SdwisStation
fields = self._get_fields(Type.build_schema_map(feature_class))
if feature_class == 'Stations':
fields.append('SHAPE@XY')
        with self.InsertCursor(location, fields) as cursor:
for row in data:
etl = Type(row, self.normalizer)
insert_row = etl.row
                cursor.insertRow(insert_row)
def seed(self, model_types):
query_string = None
for model_type in model_types:
if model_type == 'Stations':
query_string = self._station_query
elif model_type == 'Results':
query_string = self._result_query
records = self._query(query_string)
self._insert_rows(records, model_type)
class Dogm(GdbProgram, Balanceable):
#: location to dogm gdb
gdb_name = 'DOGM\DOGM_AGRC.gdb'
#: results table name
results = 'DOGM_RESULT'
#: stations feature class name
stations = 'DOGM_STATION'
def __init__(self, location, SearchCursor, InsertCursor):
super(Dogm, self).__init__(location, InsertCursor)
self.SearchCursor = SearchCursor
def seed(self, folder, model_types):
#: folder - the parent folder to the data directory
#: model_types - [Stations, Results]
for model_type in model_types:
if model_type == 'Stations':
table = os.path.join(folder, self.gdb_name, self.stations)
Type = stationmodel.OgmStation
elif model_type == 'Results':
table = os.path.join(folder, self.gdb_name, self.results)
Type = resultmodel.OgmResult
location = os.path.join(self.location, model_type)
print 'inserting into {} DOGM type {}'.format(location, model_type)
fields_to_insert = None
for record in self._read_gdb(table, Type.fields):
etl = Type(record, self.normalizer)
if not fields_to_insert:
fields_to_insert = self._get_default_fields(etl.schema_map)
if model_type == 'Stations':
fields_to_insert.append('SHAPE@XY')
self._insert_row(etl.row, fields_to_insert, location)
if etl.balanceable and etl.sample_id is not None:
self.track_concentration(etl)
self.write_balance_rows(etl, location)
class Udwr(GdbProgram, Balanceable):
#: location to dogm gdb
gdb_name = 'UDWR\UDWR_AGRC.gdb'
#: results table name
results = 'UDWR_RESULTS'
#: stations feature class name
stations = 'UDWR_STATION'
def __init__(self, location, SearchCursor, InsertCursor):
super(Udwr, self).__init__(location, InsertCursor)
self.SearchCursor = SearchCursor
def seed(self, folder, model_types):
#: folder - the parent folder to the data directory
        #: model_types - [Stations, Results]
for model_type in model_types:
if model_type == 'Stations':
table = os.path.join(folder, self.gdb_name, self.stations)
Type = stationmodel.DwrStation
elif model_type == 'Results':
table = os.path.join(folder, self.gdb_name, self.results)
Type = resultmodel.DwrResult
location = os.path.join(self.location, model_type)
print 'inserting into {} UDWR type {}'.format(location, model_type)
fields_to_insert = None
for record in self._read_gdb(table, Type.fields):
etl = Type(record, self.normalizer)
if not fields_to_insert:
fields_to_insert = self._get_default_fields(etl.schema_map)
if model_type == 'Stations':
fields_to_insert.append('SHAPE@XY')
self._insert_row(etl.row, fields_to_insert, location)
if etl.balanceable and etl.sample_id is not None:
self.track_concentration(etl)
self.write_balance_rows(etl, location)
class Ugs(GdbProgram, Balanceable):
#: location to dogm gdb
gdb_name = 'UGS\UGS_AGRC.gdb'
#: results table name
results = 'RESULTS'
#: stations feature class name
stations = 'STATIONS'
def __init__(self, location, SearchCursor, InsertCursor):
super(Ugs, self).__init__(location, InsertCursor)
self.SearchCursor = SearchCursor
def seed(self, folder, model_types):
#: folder - the parent folder to the data directory
        #: model_types - [Stations, Results]
for model_type in model_types:
if model_type == 'Stations':
table = os.path.join(folder, self.gdb_name, self.stations)
Type = stationmodel.UgsStation
elif model_type == 'Results':
table = os.path.join(folder, self.gdb_name, self.results)
Type = resultmodel.UgsResult
location = os.path.join(self.location, model_type)
print 'inserting into {} UGS type {}'.format(location, model_type)
fields_to_insert = None
for record in self._read_gdb(table, Type.fields):
etl = Type(record, self.normalizer)
if not fields_to_insert:
fields_to_insert = self._get_default_fields(etl.schema_map)
if model_type == 'Stations':
fields_to_insert.append('SHAPE@XY')
self._insert_row(etl.row, fields_to_insert, location)
if etl.balanceable and etl.sample_id is not None:
self.track_concentration(etl)
self.write_balance_rows(etl, location)
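# A minimal seeding sketch, not part of the original module: it assumes the
# arcpy.da cursor classes are available and that secrets.cfg is configured for
# the SDWIS connection. The geodatabase and download-folder paths are
# illustrative placeholders.
def _example_seed_all():
    import arcpy

    gdb = 'C:\\data\\UGSWaterChemistry.gdb'
    downloads = 'C:\\data\\downloads'
    types = ['Stations', 'Results']

    Wqp(gdb, arcpy.da.InsertCursor).seed(downloads, types)
    Sdwis(gdb, arcpy.da.InsertCursor).seed(types)
    Dogm(gdb, arcpy.da.SearchCursor, arcpy.da.InsertCursor).seed(downloads, types)
    Udwr(gdb, arcpy.da.SearchCursor, arcpy.da.InsertCursor).seed(downloads, types)
    Ugs(gdb, arcpy.da.SearchCursor, arcpy.da.InsertCursor).seed(downloads, types)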
|
|
import hashlib
import logging
import os
import threading
import time
import types
import uuid
from collections import namedtuple
from kazoo import security
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError
from kazoo.exceptions import KazooException
from pykit import config
from pykit import net
from pykit import utfjson
from .exceptions import ZKWaitTimeout
logger = logging.getLogger(__name__)
PERM_TO_LONG = {
'c': 'create',
'd': 'delete',
'r': 'read',
'w': 'write',
'a': 'admin',
}
PERM_TO_SHORT = {
'create': 'c',
'delete': 'd',
'read': 'r',
'write': 'w',
'admin': 'a',
}
# We assume that the IP does not change while the process is running.
# Display the intranet IP if present, otherwise the public IP.
host_ip4 = net.ips_prefer(net.get_host_ip4(), net.INN)
class PermTypeError(Exception):
pass
class ZkPathError(Exception):
pass
def close_zk(zk):
if not isinstance(zk, KazooClient):
raise TypeError('expect KazooClient or KazooClientExt, but got {t}'.format(t=type(zk)))
try:
zk.stop()
except KazooException as e:
logger.exception(repr(e) + ' while stop zk client')
try:
zk.close()
except Exception as e:
logger.exception(repr(e) + ' while close zk client')
def lock_data(node_id=None):
# deprecated
return lock_id(node_id=node_id)
def lock_id(node_id=None):
"""
Embed lock holder information into the zk node data for the lock.
`node_id` is a user defined identifier of a host.
"""
if node_id is None:
node_id = config.zk_node_id
ip = (host_ip4 + ['unknownip'])[0]
seq = [node_id, ip, str(os.getpid()), str(uuid.uuid4()).replace('-', '')]
return '-'.join(seq)
def parse_lock_data(data_str):
# deprecated
return parse_lock_id(data_str)
def parse_lock_id(data_str):
"""
Parse string generated by lock_id()
"""
node_id, ip, process_id, _uuid = (
data_str.split('-', 3) + ([None] * 4))[:4]
if type(process_id) in types.StringTypes and process_id.isdigit():
process_id = int(process_id)
else:
process_id = None
rst = {
'node_id': node_id,
'ip': ip,
'process_id': process_id,
'uuid': _uuid,
'txid': None
}
if node_id.startswith('txid:'):
rst['txid'] = node_id.split(':', 1)[1]
return rst
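# Example of the generated lock id and its parsed form (values illustrative):
#   lock_id('web-01')    -> 'web-01-192.168.0.3-12345-<32 hex chars>'
#   parse_lock_id(above) -> {'node_id': 'web-01', 'ip': '192.168.0.3',
#                            'process_id': 12345, 'uuid': '<32 hex chars>',
#                            'txid': None}
# A node_id of the form 'txid:<id>' additionally fills in 'txid'.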
def make_digest(acc):
# acc = "username:password"
digest = hashlib.sha1(acc).digest().encode('base64').strip()
return digest
def make_acl_entry(username, password, permissions):
perms = ''
for c in permissions:
if c not in PERM_TO_LONG:
raise PermTypeError(c)
perms += c
return "digest:{username}:{digest}:{permissions}".format(
username=username,
digest=make_digest(username + ":" + password),
permissions=perms)
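# Example (the digest part stands for base64(sha1('xp:123'))):
#   make_acl_entry('xp', '123', 'cdrwa') -> 'digest:xp:<digest>:cdrwa'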
def perm_to_long(short, lower=True):
rst = []
for c in short:
c = c.lower()
if c not in PERM_TO_LONG:
raise PermTypeError(c)
rst.append(PERM_TO_LONG[c])
if not lower:
rst = [x.upper() for x in rst]
return rst
def perm_to_short(lst, lower=True):
rst = ''
for p in lst:
p = p.lower()
if p not in PERM_TO_SHORT:
raise PermTypeError(p)
rst += PERM_TO_SHORT[p]
if not lower:
rst = rst.upper()
return rst
def make_kazoo_digest_acl(acl):
# acl = (('xp', '123', 'cdrwa'),
# ('foo', 'passw', 'rw'),
# )
if acl is None:
return None
rst = []
for name, passw, perms in acl:
perm_dict = {p: True
for p in perm_to_long(perms)}
acl_entry = security.make_digest_acl(name, passw, **perm_dict)
rst.append(acl_entry)
return rst
def parse_kazoo_acl(acls):
# acls = [ACL(perms=31,
# acl_list=['ALL'],
# id=Id(scheme='digest', id=u'user:+Ir5sN1lGJEEs8xBZhZXK='))]
rst = []
for acl in acls:
if 'ALL' in acl.acl_list:
acl_list = 'cdrwa'
else:
acl_list = perm_to_short(acl.acl_list)
rst.append((acl.id.scheme, acl.id.id.split(':')[0], acl_list))
return rst
def is_backward_locking(locked_keys, key):
locked_keys = sorted(locked_keys)
assert key not in locked_keys, 'must not re-lock a key'
if len(locked_keys) == 0:
is_backward = False
else:
is_backward = key < locked_keys[-1]
return is_backward
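# Example: with locked_keys = ['meta/0001', 'meta/0003'] already held,
#   is_backward_locking(locked_keys, 'meta/0002') -> True   ('0002' < '0003')
#   is_backward_locking(locked_keys, 'meta/0004') -> False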
def _init_node(zkcli, parent_path, node, val, acl, users):
path = parent_path + '/' + node
if acl is None:
acls = zkcli.get_acls(parent_path)[0]
else:
acls = [(user, users[user], perms) for user, perms in acl.items()]
acls = make_kazoo_digest_acl(acls)
if zkcli.exists(path) is None:
zkcli.create(path, value=val, acl=acls)
else:
zkcli.set_acls(path, acls)
return path
def init_hierarchy(hosts, hierarchy, users, auth):
zkcli = KazooClient(hosts)
zkcli.start()
scheme, name, passw = auth
zkcli.add_auth(scheme, name + ':' + passw)
def _init_hierarchy(hierarchy, parent_path):
if len(hierarchy) == 0:
return
for node, attr_children in hierarchy.items():
val = attr_children.get('__val__', {})
val = utfjson.dump(val)
acl = attr_children.get('__acl__')
path = _init_node(zkcli, parent_path, node, val, acl, users)
children = {k: v
for k, v in attr_children.items()
if k not in ('__val__', '__acl__')
}
_init_hierarchy(children, path)
_init_hierarchy(hierarchy, '/')
close_zk(zkcli)
def _make_zk_path(*paths):
return '/' + '/'.join([x.strip('/') for x in paths])
def export_hierarchy(zkcli, zkpath):
if zkpath != '/':
zkpath = zkpath.rstrip('/')
if not zkpath.startswith('/'):
raise ZkPathError(
'zkpath: {0} Error, Should be absolute path'.format(zkpath))
zk_node = _export_hierarchy(zkcli, zkpath)
return zk_node
def _export_hierarchy(zkcli, zkpath):
acls = {}
value, stat = zkcli.get(zkpath)
_acls, stat = zkcli.get_acls(zkpath)
_acls = parse_kazoo_acl(_acls)
for schema, user, perm in _acls:
acls.update({user: perm})
_zk_node = {'__val__': value, '__acl__': acls}
for child in zkcli.get_children(zkpath):
_zkpath = _make_zk_path(zkpath, child)
_zk_node[child] = _export_hierarchy(zkcli, _zkpath)
return _zk_node
NeedWait = namedtuple('NeedWait', [])
def _conditioned_get_loop(zkclient, path, conditioned_get, timeout=None, **kwargs):
if timeout is None:
timeout = 86400 * 365
expire_at = time.time() + timeout
lck = threading.RLock()
maybe_available = threading.Event()
maybe_available.clear()
def set_available():
with lck:
maybe_available.set()
def on_connection_change(state):
        # wake the waiting loop so it re-gets; any connection-related error
        # then surfaces from the re-get
logger.info('connection state change: {0}'.format(state))
set_available()
zkclient.add_listener(on_connection_change)
try:
while True:
it = conditioned_get(zkclient, path, **kwargs)
it.next()
rst = it.send((NeedWait, set_available))
if rst is not NeedWait:
return rst
if maybe_available.wait(expire_at - time.time()):
with lck:
maybe_available.clear()
continue
raise ZKWaitTimeout("timeout({timeout} sec)"
" waiting for {path} to satisfy: {cond}".format(
timeout=timeout,
path=path,
cond=str(kwargs)))
finally:
try:
zkclient.remove_listener(on_connection_change)
except Exception as e:
logger.info(repr(e) + ' while removing on_connection_change')
def make_conditioned_getter(conditioned_get):
def _wrap(zkclient, path, timeout=None, **kwargs):
return _conditioned_get_loop(zkclient, path, conditioned_get, timeout=timeout, **kwargs)
_wrap.__name__ = conditioned_get.__name__
return _wrap
@make_conditioned_getter
def get_next(zkclient, path, version=-1):
NeedWait, set_available = yield
val, zstat = zkclient.get(path, watch=lambda watchevent: set_available())
if zstat.version > version:
yield val, zstat
yield NeedWait
@make_conditioned_getter
def wait_absent(zkclient, path):
NeedWait, set_available = yield
try:
zkclient.get(path, watch=lambda watchevent: set_available())
except NoNodeError as e:
logger.info(repr(e) + ' found, return')
yield None
yield NeedWait
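# A minimal usage sketch of the conditioned getters above, not part of the
# original module. It assumes a reachable ZooKeeper at 127.0.0.1:2181; the
# path '/demo/config' is an illustrative placeholder.
def _example_conditioned_get():
    zkcli = KazooClient(hosts='127.0.0.1:2181')
    zkcli.start()
    try:
        # Returns immediately if the node exists (any version > -1); with a
        # larger `version` it waits for the next change, up to `timeout`.
        val, zstat = get_next(zkcli, '/demo/config', version=-1, timeout=5)
        # Blocks until the node is deleted, or raises ZKWaitTimeout.
        wait_absent(zkcli, '/demo/config', timeout=5)
        return val, zstat
    finally:
        close_zk(zkcli)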
|
|
import os
import subprocess as sp
from tqdm import tqdm
from moviepy.config import get_setting
from moviepy.decorators import (requires_duration,use_clip_fps_by_default)
from moviepy.tools import verbose_print, subprocess_call
import numpy as np
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
try:
import imageio
IMAGEIO_FOUND = True
except ImportError:
IMAGEIO_FOUND = False
@requires_duration
@use_clip_fps_by_default
def write_gif_with_tempfiles(clip, filename, fps=None, program= 'ImageMagick',
opt="OptimizeTransparency", fuzz=1, verbose=True,
loop=0, dispose=True, colors=None, tempfiles=False):
""" Write the VideoClip to a GIF file.
Converts a VideoClip into an animated GIF using ImageMagick
    or ffmpeg. Does the same as write_gif (see that function for the full
    docstring), but writes every frame to a temporary file instead of
    keeping them all in RAM. Useful on computers with little RAM.
"""
fileName, fileExtension = os.path.splitext(filename)
    tt = np.arange(0, clip.duration, 1.0/fps)
tempfiles = []
verbose_print(verbose, "\n[MoviePy] Building file %s\n"%filename
+40*"-"+"\n")
verbose_print(verbose, "[MoviePy] Generating GIF frames...\n")
    total = int(clip.duration*fps)+1
for i, t in tqdm(enumerate(tt), total=total):
name = "%s_GIFTEMP%04d.png"%(fileName, i+1)
tempfiles.append(name)
clip.save_frame(name, t, withmask=True)
delay = int(100.0/fps)
if program == "ImageMagick":
verbose_print(verbose, "[MoviePy] Optimizing GIF with ImageMagick... ")
cmd = [get_setting("IMAGEMAGICK_BINARY"),
'-delay' , '%d'%delay,
"-dispose" ,"%d"%(2 if dispose else 1),
"-loop" , "%d"%loop,
"%s_GIFTEMP*.png"%fileName,
"-coalesce",
"-layers", "%s"%opt,
"-fuzz", "%02d"%fuzz + "%",
]+(["-colors", "%d"%colors] if colors is not None else [])+[
filename]
elif program == "ffmpeg":
cmd = [get_setting("FFMPEG_BINARY"), '-y',
'-f', 'image2', '-r',str(fps),
'-i', fileName+'_GIFTEMP%04d.png',
'-r',str(fps),
filename]
try:
subprocess_call( cmd, verbose = verbose )
verbose_print(verbose, "[MoviePy] GIF %s is ready."%filename)
except (IOError,OSError) as err:
error = ("MoviePy Error: creation of %s failed because "
"of the following error:\n\n%s.\n\n."%(filename, str(err)))
if program == "ImageMagick":
error = error + ("This error can be due to the fact that "
"ImageMagick is not installed on your computer, or "
"(for Windows users) that you didn't specify the "
"path to the ImageMagick binary in file conf.py." )
raise IOError(error)
for f in tempfiles:
os.remove(f)
@requires_duration
@use_clip_fps_by_default
def write_gif(clip, filename, fps=None, program= 'ImageMagick',
opt="OptimizeTransparency", fuzz=1, verbose=True, withmask=True,
loop=0, dispose=True, colors=None):
""" Write the VideoClip to a GIF file, without temporary files.
Converts a VideoClip into an animated GIF using ImageMagick
or ffmpeg.
Parameters
-----------
filename
Name of the resulting gif file.
fps
Number of frames per second (see note below). If it
isn't provided, then the function will look for the clip's
``fps`` attribute (VideoFileClip, for instance, have one).
program
Software to use for the conversion, either 'ImageMagick' or
'ffmpeg'.
opt
      (ImageMagick only) optimization to apply, either
'optimizeplus' or 'OptimizeTransparency'.
fuzz
(ImageMagick only) Compresses the GIF by considering that
the colors that are less than fuzz% different are in fact
the same.
Notes
-----
    The gif will play the clip in real time (you can
    only change the frame rate). If you want the gif to be played
    slower than the clip, you can use ::
>>> # slow down clip 50% and make it a gif
>>> myClip.speedx(0.5).write_gif('myClip.gif')
"""
#
# We use processes chained with pipes.
#
# if program == 'ffmpeg'
# frames --ffmpeg--> gif
#
# if program == 'ImageMagick' and optimize == (None, False)
# frames --ffmpeg--> bmp frames --ImageMagick--> gif
#
#
# if program == 'ImageMagick' and optimize != (None, False)
# frames -ffmpeg-> bmp frames -ImagMag-> gif -ImagMag-> better gif
#
delay= 100.0/fps
if clip.mask is None:
withmask = False
cmd1 = [get_setting("FFMPEG_BINARY"), '-y', '-loglevel', 'error',
'-f', 'rawvideo',
'-vcodec','rawvideo', '-r', "%.02f"%fps,
'-s', "%dx%d"%(clip.w, clip.h),
'-pix_fmt', ('rgba' if withmask else 'rgb24'),
'-i', '-']
popen_params = {"stdout": DEVNULL,
"stderr": DEVNULL,
"stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
if program == "ffmpeg":
popen_params["stdin"] = sp.PIPE
popen_params["stdout"] = DEVNULL
proc1 = sp.Popen(cmd1+[ '-pix_fmt', ('rgba' if withmask else 'rgb24'),
'-r', "%.02f"%fps, filename], **popen_params)
else:
popen_params["stdin"] = sp.PIPE
popen_params["stdout"] = sp.PIPE
proc1 = sp.Popen(cmd1+ ['-f', 'image2pipe', '-vcodec', 'bmp', '-'],
**popen_params)
if program == 'ImageMagick':
cmd2 = [get_setting("IMAGEMAGICK_BINARY"), '-delay', "%.02f"%(delay),
"-dispose" ,"%d"%(2 if dispose else 1),
'-loop', '%d'%loop, '-', '-coalesce']
if (opt in [False, None]):
popen_params["stdin"] = proc1.stdout
popen_params["stdout"] = DEVNULL
proc2 = sp.Popen(cmd2+[filename], **popen_params)
else:
popen_params["stdin"] = proc1.stdout
popen_params["stdout"] = sp.PIPE
proc2 = sp.Popen(cmd2+['gif:-'], **popen_params)
if opt:
cmd3 = [get_setting("IMAGEMAGICK_BINARY"), '-', '-layers', opt,
'-fuzz', '%d'%fuzz+'%'
]+(["-colors", "%d"%colors] if colors is not None else [])+[
filename]
popen_params["stdin"] = proc2.stdout
popen_params["stdout"] = DEVNULL
proc3 = sp.Popen(cmd3, **popen_params)
# We send all the frames to the first process
verbose_print(verbose, "\n[MoviePy] >>>> Building file %s\n"%filename)
verbose_print(verbose, "[MoviePy] Generating GIF frames...\n")
try:
for t,frame in clip.iter_frames(fps=fps, progress_bar=True,
with_times=True, dtype="uint8"):
if withmask:
mask = 255 * clip.mask.get_frame(t)
frame = np.dstack([frame, mask]).astype('uint8')
proc1.stdin.write(frame.tostring())
except IOError as err:
error = ("[MoviePy] Error: creation of %s failed because "
"of the following error:\n\n%s.\n\n."%(filename, str(err)))
if program == "ImageMagick":
error = error + ("This can be due to the fact that "
"ImageMagick is not installed on your computer, or "
"(for Windows users) that you didn't specify the "
"path to the ImageMagick binary in file conf.py." )
raise IOError(error)
if program == 'ImageMagick':
verbose_print(verbose, "[MoviePy] Optimizing the GIF with ImageMagick...\n")
proc1.stdin.close()
proc1.wait()
if program == 'ImageMagick':
proc2.wait()
if opt:
proc3.wait()
verbose_print(verbose, "[MoviePy] >>>> File %s is ready !"%filename)
def write_gif_with_image_io(clip, filename, fps=None, opt='wu', loop=0,
colors=None, verbose=True):
"""
Writes the gif with the Python library ImageIO (calls FreeImage).
For the moment ImageIO is not installed with MoviePy. You need to install
imageio (pip install imageio) to use this.
Parameters
-----------
opt
"""
if colors is None:
colors=256
if not IMAGEIO_FOUND:
raise ImportError("Writing a gif with imageio requires ImageIO installed,"
" with e.g. 'pip install imageio'")
if fps is None:
fps = clip.fps
    quantizer = 'wu' if opt != 'nq' else 'nq'
    writer = imageio.save(filename, duration=1.0/fps,
                          quantizer=quantizer, palettesize=colors)
verbose_print(verbose, "\n[MoviePy] Building file %s with imageio\n"%filename)
for frame in clip.iter_frames(fps=fps, progress_bar=True, dtype='uint8'):
writer.append_data(frame)
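# A minimal usage sketch, not part of the original module: it assumes an
# existing 'video.mp4' and a working ffmpeg / ImageMagick installation; the
# file names are illustrative placeholders.
def _example_write_gif():
    from moviepy.video.io.VideoFileClip import VideoFileClip

    clip = VideoFileClip("video.mp4").subclip(0, 3)
    # Pipe-based writer (no temporary files), ffmpeg only:
    write_gif(clip, "out_ffmpeg.gif", fps=10, program='ffmpeg')
    # Temp-file writer with ImageMagick optimization:
    write_gif_with_tempfiles(clip, "out_im.gif", fps=10,
                             program='ImageMagick', opt='OptimizeTransparency')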
|
|
from __future__ import unicode_literals
import os.path
import optparse
import shlex
import sys
from .compat import (
compat_expanduser,
compat_getenv,
)
from .utils import (
get_term_width,
write_string,
)
from .version import __version__
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
res = []
for l in optionf:
res += shlex.split(l, comments=True)
finally:
optionf.close()
return res
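    # Example configuration file contents (one or more options per line; text
    # after '#' is ignored by shlex.split(..., comments=True)). The options
    # shown are illustrative:
    #   -i
    #   --sub-lang en,pt
    #   -f bestvideo+bestaudio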
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --option METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return "".join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
def _hide_login_info(opts):
opts = list(opts)
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
try:
i = opts.index(private_opt)
opts[i + 1] = 'PRIVATE'
except ValueError:
pass
return opts
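    # For example:
    #   _hide_login_info(['-u', 'alice', '-p', 'hunter2', '-v'])
    #   -> ['-u', 'PRIVATE', '-p', 'PRIVATE', '-v']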
# No need to wrap help messages if we're on a wide console
columns = get_term_width()
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [options] url [url...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**kw)
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='print this help text and exit')
general.add_option(
'-v', '--version',
action='version',
help='print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors and the URLs they would handle')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
general.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None,
help='Time to wait before giving up, in seconds')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='playlist video to end at (default is last)')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='download only matching titles (regex or caseless sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='skip download for matching titles (regex or caseless sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
help='download only videos uploaded in this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views',)
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='download only the currently playing video')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='account password')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='two-factor auth code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='video password (vimeo, smotri)')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='video format code, specify the order of preference using'
' slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also'
' supported. You can also use the special names "best",'
' "bestvideo", "bestaudio", "worst", "worstvideo" and'
' "worstaudio". By default, youtube-dl will pick the best quality.'
' Use commas to download multiple audio formats, such as'
' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
' You can merge the video and audio of two formats into a single'
' file using -f <video-format>+<audio-format> (requires ffmpeg or'
' avconv), for example -f bestvideo+bestaudio.')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='prefer free video formats unless a specific one is requested')
video_format.add_option(
'--max-quality',
action='store', dest='format_limit', metavar='FORMAT',
help='highest quality format to download')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='list all available formats')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifest on YouTube videos')
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='write automatic subtitle file (youtube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='downloads all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='lists all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='srt',
help='subtitle format (default=srt) ([sbv/vtt] youtube only)')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--rate-limit',
dest='ratelimit', metavar='LIMIT',
help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='number of retries (default is %default)')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation.')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='activates quiet mode')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='do not download the video and do not write anything to disk',)
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='do not download the video',)
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='simulate, quiet but print JSON information. See --output for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='print various debugging information')
verbosity.add_option(
'--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='print downloaded pages to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help='file containing URLs to download (\'-\' for stdin)')
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='use only video ID in file name')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help='number downloaded files starting from 00000')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('output filename template. Use %(title)s to get the title, '
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
'%(autonumber)s to get an automatically incremented number, '
'%(ext)s for the filename extension, '
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
'%(upload_date)s for the upload date (YYYYMMDD), '
'%(extractor)s for the provider (youtube, metacafe, etc), '
'%(id)s for the video id, '
'%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist. '
'%(height)s and %(width)s for the width and height of the video format. '
'%(resolution)s for a textual description of the resolution of the video format. '
'%% for a literal percent. '
'Use - to output to stdout. Can also be used to download to a different directory, '
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help='[deprecated] use title in file name (default)')
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help='[deprecated] alias of --title')
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='write video annotations to a .annotation file')
filesystem.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='write thumbnail image to disk')
filesystem.add_option(
'--load-info',
dest='load_info_filename', metavar='FILE',
help='json file containing the video information (created with the "--write-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='file to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='keeps the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='embed subtitles in the video (only for mp4 videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='write metadata to the video file')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
parser.add_option_group(general)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
commandLineConf = sys.argv[1:]
if '--ignore-config' in commandLineConf:
systemConf = []
userConf = []
else:
systemConf = _readOptions('/etc/youtube-dl.conf')
if '--ignore-config' in systemConf:
userConf = []
else:
userConf = _readUserConf()
argv = systemConf + userConf + commandLineConf
opts, args = parser.parse_args(argv)
if opts.verbose:
write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
return parser, opts, args
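# Illustration added for this document, not part of the original module: a
# minimal sketch of how the parser assembled above is typically consumed. It
# assumes the enclosing function is youtube-dl's parseOpts(overrideArguments=None)
# and uses only options defined above; the helper name is hypothetical.
def _example_parse_opts_usage():
    parser, opts, args = parseOpts(['--rate-limit', '50K',
                                    '--prefer-free-formats',
                                    'https://example.com/watch?v=xyz'])
    # Values are stored exactly as optparse parsed them; e.g. the rate limit
    # stays a string here and is converted to bytes elsewhere.
    assert opts.ratelimit == '50K'
    assert opts.prefer_free_formats is True
    return args  # remaining positional arguments are the URLs to download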
|
|
from __future__ import unicode_literals
import re
import six
from six.moves.urllib.parse import parse_qs, urlparse
import xmltodict
from moto.core.responses import _TemplateEnvironmentMixin
from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys
from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl
from .utils import bucket_name_from_url, metadata_from_headers
from xml.dom import minidom
REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com'
DEFAULT_REGION_NAME = 'us-east-1'
def parse_key_name(pth):
return pth.lstrip("/")
def is_delete_keys(request, path, bucket_name):
return path == u'/?delete' or (
path == u'/' and
getattr(request, "query_string", "") == "delete"
)
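# Illustration added for this document, not part of moto: the helper above
# recognizes a multi-object delete either as the literal '/?delete' path or as
# '/' with a query string of 'delete'. FakeRequest is a stand-in defined only
# for this sketch.
def _example_is_delete_keys():
    from collections import namedtuple
    FakeRequest = namedtuple('FakeRequest', ['query_string'])
    assert is_delete_keys(FakeRequest(query_string='delete'), u'/', 'mybucket')
    assert is_delete_keys(FakeRequest(query_string=''), u'/?delete', 'mybucket')
    assert not is_delete_keys(FakeRequest(query_string=''), u'/', 'mybucket')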
class ResponseObject(_TemplateEnvironmentMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
def all_buckets(self):
# No bucket specified. Listing all buckets
all_buckets = self.backend.get_all_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
host = request.headers.get('host', request.headers.get('Host'))
if host.startswith("localhost"):
# For localhost, default to path-based buckets
return False
path_based = (host == 's3.amazonaws.com' or re.match(r"s3[\.\-]([^.]*)\.amazonaws\.com", host))
return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
def bucket_response(self, request, full_url, headers):
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, headers, s3error.description
if isinstance(response, six.string_types):
return 200, headers, response.encode("utf-8")
else:
status_code, headers, response_content = response
return status_code, headers, response_content.encode("utf-8")
def _bucket_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
region_name = DEFAULT_REGION_NAME
region_match = re.search(REGION_URL_REGEX, full_url)
if region_match:
region_name = region_match.groups()[0]
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
body = body.decode('utf-8')
if method == 'HEAD':
return self._bucket_response_head(bucket_name, headers)
elif method == 'GET':
return self._bucket_response_get(bucket_name, querystring, headers)
elif method == 'PUT':
return self._bucket_response_put(request, body, region_name, bucket_name, querystring, headers)
elif method == 'DELETE':
return self._bucket_response_delete(body, bucket_name, querystring, headers)
elif method == 'POST':
return self._bucket_response_post(request, body, bucket_name, headers)
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
self.backend.get_bucket(bucket_name)
return 200, headers, ""
def _bucket_response_get(self, bucket_name, querystring, headers):
if 'uploads' in querystring:
for unsup in ('delimiter', 'max-uploads'):
if unsup in querystring:
raise NotImplementedError("Listing multipart uploads with {} has not been implemented yet.".format(unsup))
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if 'prefix' in querystring:
prefix = querystring.get('prefix', [None])[0]
multiparts = [upload for upload in multiparts if upload.key_name.startswith(prefix)]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(
bucket_name=bucket_name,
uploads=multiparts)
elif 'location' in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
return template.render(location=bucket.location)
elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules:
return 404, headers, "NoSuchLifecycleConfiguration"
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=bucket.rules)
elif 'versioning' in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif 'policy' in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, headers, template.render(bucket_name=bucket_name)
return 200, headers, policy
elif 'website' in querystring:
website_configuration = self.backend.get_bucket_website_configuration(bucket_name)
return website_configuration
elif 'acl' in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
elif 'versions' in querystring:
delimiter = querystring.get('delimiter', [None])[0]
encoding_type = querystring.get('encoding-type', [None])[0]
key_marker = querystring.get('key-marker', [None])[0]
max_keys = querystring.get('max-keys', [None])[0]
prefix = querystring.get('prefix', [None])[0]
version_id_marker = querystring.get('version-id-marker', [None])[0]
bucket = self.backend.get_bucket(bucket_name)
versions = self.backend.get_bucket_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker
)
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return 200, headers, template.render(
key_list=versions,
bucket=bucket,
prefix='',
max_keys='',
delimiter='',
is_truncated='false',
)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get('prefix', [None])[0]
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return 200, headers, template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers):
if 'versioning' in querystring:
ver = re.search('<Status>([A-Za-z]+)</Status>', body)
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, headers, ""
elif 'lifecycle' in querystring:
rules = xmltodict.parse(body)['LifecycleConfiguration']['Rule']
if not isinstance(rules, list):
# If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.set_bucket_lifecycle(bucket_name, rules)
return ""
elif 'policy' in querystring:
self.backend.set_bucket_policy(bucket_name, body)
return 'True'
elif 'acl' in querystring:
acl = self._acl_from_headers(request.headers)
# TODO: Support the XML-based ACL format
self.backend.set_bucket_acl(bucket_name, acl)
return ""
elif 'website' in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
else:
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
if region_name == DEFAULT_REGION_NAME:
# us-east-1 has different behavior
new_bucket = self.backend.get_bucket(bucket_name)
else:
raise
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, headers, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring, headers):
if 'policy' in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, headers, ""
elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name)
bucket.delete_lifecycle()
return 204, headers, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, headers, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, headers, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name, headers):
if self.is_delete_keys(request, request.path, bucket_name):
return self._bucket_response_delete_keys(request, body, bucket_name, headers)
# POST to bucket-url should create file from form
if hasattr(request, 'form'):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
form = {}
for kv in request.body.decode('utf-8').split('&'):
k, v = kv.split('=')
form[k] = v
key = form['key']
if 'file' in form:
f = form['file']
else:
f = request.files['file'].stream.read()
new_key = self.backend.set_key(bucket_name, key, f)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return 200, headers, ""
def _bucket_response_delete_keys(self, request, body, bucket_name, headers):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
keys = minidom.parseString(body).getElementsByTagName('Key')
deleted_names = []
error_names = []
for k in keys:
key_name = k.firstChild.nodeValue
success = self.backend.delete_key(bucket_name, key_name)
if success:
deleted_names.append(key_name)
else:
error_names.append(key_name)
return 200, headers, template.render(deleted=deleted_names, delete_errors=error_names)
def _handle_range_header(self, request, headers, response_content):
length = len(response_content)
last = length - 1
_, rspec = request.headers.get('range').split('=')
if ',' in rspec:
raise NotImplementedError(
"Multiple range specifiers not supported")
toint = lambda i: int(i) if i else None
begin, end = map(toint, rspec.split('-'))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, headers, ""
if begin < 0 or end > last or begin > min(end, last):
return 416, headers, ""
headers['content-range'] = "bytes {0}-{1}/{2}".format(
begin, end, length)
return 206, headers, response_content[begin:end + 1]
def key_response(self, request, full_url, headers):
try:
response = self._key_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, headers, s3error.description
if isinstance(response, six.string_types):
status_code = 200
response_content = response
else:
status_code, headers, response_content = response
if status_code == 200 and 'range' in request.headers:
return self._handle_range_header(request, headers, response_content)
return status_code, headers, response_content
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
if method == 'GET':
return self._key_response_get(bucket_name, query, key_name, headers)
elif method == 'PUT':
return self._key_response_put(request, body, bucket_name, query, key_name, headers)
elif method == 'HEAD':
return self._key_response_head(bucket_name, query, key_name, headers)
elif method == 'DELETE':
return self._key_response_delete(bucket_name, query, key_name, headers)
elif method == 'POST':
return self._key_response_post(request, body, bucket_name, query, key_name, headers)
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _key_response_get(self, bucket_name, query, key_name, headers):
if query.get('uploadId'):
upload_id = query['uploadId'][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return 200, headers, template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts
)
version_id = query.get('versionId', [None])[0]
key = self.backend.get_key(
bucket_name, key_name, version_id=version_id)
if 'acl' in query:
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, headers, template.render(obj=key)
headers.update(key.metadata)
return 200, headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
if query.get('uploadId') and query.get('partNumber'):
upload_id = query['uploadId'][0]
part_number = int(query['partNumber'][0])
if 'x-amz-copy-source' in request.headers:
src = request.headers.get("x-amz-copy-source")
src_bucket, src_key = src.split("/", 1)
src_range = request.headers.get('x-amz-copy-source-range', '').split("bytes=")[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
key = self.backend.copy_part(
bucket_name, upload_id, part_number, src_bucket,
src_key, start_byte, end_byte)
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.set_part(
bucket_name, upload_id, part_number, body)
response = ""
headers.update(key.response_dict)
return 200, headers, response
storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
acl = self._acl_from_headers(request.headers)
if 'acl' in query:
key = self.backend.get_key(bucket_name, key_name)
# TODO: Support the XML-based ACL format
key.set_acl(acl)
return 200, headers, ""
if 'x-amz-copy-source' in request.headers:
# Copy key
src_key_parsed = urlparse(request.headers.get("x-amz-copy-source"))
src_bucket, src_key = src_key_parsed.path.split("/", 1)
src_version_id = parse_qs(src_key_parsed.query).get('versionId', [None])[0]
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
storage=storage_class, acl=acl, src_version_id=src_version_id)
new_key = self.backend.get_key(bucket_name, key_name)
mdirective = request.headers.get('x-amz-metadata-directive')
if mdirective is not None and mdirective == 'REPLACE':
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
headers.update(new_key.response_dict)
return 200, headers, template.render(key=new_key)
streaming_request = hasattr(request, 'streaming') and request.streaming
closing_connection = headers.get('connection') == 'close'
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_key(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.set_key(bucket_name, key_name, body,
storage=storage_class)
request.streaming = True
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata)
new_key.set_acl(acl)
template = self.response_template(S3_OBJECT_RESPONSE)
headers.update(new_key.response_dict)
return 200, headers, template.render(key=new_key)
def _key_response_head(self, bucket_name, query, key_name, headers):
version_id = query.get('versionId', [None])[0]
key = self.backend.get_key(bucket_name, key_name, version_id=version_id)
if key:
headers.update(key.metadata)
headers.update(key.response_dict)
return 200, headers, key.value
else:
return 404, headers, ""
def _acl_from_headers(self, headers):
canned_acl = headers.get('x-amz-acl', '')
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
if not header.startswith('x-amz-grant-'):
continue
permission = {
'read': 'READ',
'write': 'WRITE',
'read-acp': 'READ_ACP',
'write-acp': 'WRITE_ACP',
'full-control': 'FULL_CONTROL',
}[header[len('x-amz-grant-'):]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match('([^=]+)="([^"]+)"', key_and_value.strip()).groups()
if key.lower() == 'id':
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _key_response_delete(self, bucket_name, query, key_name, headers):
if query.get('uploadId'):
upload_id = query['uploadId'][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, headers, ""
self.backend.delete_key(bucket_name, key_name)
template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
return 204, headers, template.render()
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName('Part')
prev = 0
for p in ps:
pn = int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
def _key_response_post(self, request, body, bucket_name, query, key_name, headers):
if body == b'' and 'uploads' in query:
metadata = metadata_from_headers(request.headers)
multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=multipart.id,
)
return 200, headers, response
if query.get('uploadId'):
body = self._complete_multipart_body(body)
upload_id = query['uploadId'][0]
key = self.backend.complete_multipart(bucket_name, upload_id, body)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
return template.render(
bucket_name=bucket_name,
key_name=key.name,
etag=key.etag,
)
elif 'restore' in query:
es = minidom.parseString(body).getElementsByTagName('Days')
days = es[0].childNodes[0].wholeText
key = self.backend.get_key(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, headers, ""
else:
raise NotImplementedError("Method POST had only been implemented for multipart uploads and restore operations, so far")
S3ResponseInstance = ResponseObject(s3_backend)
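# Illustration added for this document, not part of moto: _handle_range_header
# above maps "bytes=begin-end" to a closed interval and "bytes=-N" to the last
# N bytes of the body. This standalone helper re-derives the same bounds so the
# arithmetic is easy to check in isolation; for a 10-byte body,
# _example_range_bounds('0-4', 10) == (0, 4) and
# _example_range_bounds('-3', 10) == (7, 9).
def _example_range_bounds(rspec, length):
    last = length - 1
    begin, end = [int(i) if i else None for i in rspec.split('-')]
    if begin is not None:        # byte range, e.g. "0-4" or "5-"
        end = last if end is None else min(end, last)
    elif end is not None:        # suffix byte range, e.g. "-3"
        begin, end = length - min(end, length), last
    return begin, end            # the body slice is [begin:end + 1]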
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>1000</MaxKeys>
<Delimiter>{{ delimiter }}</Delimiter>
<IsTruncated>false</IsTruncated>
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{{ location }}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
<Prefix>{{ rule.prefix if rule.prefix != None }}</Prefix>
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
</Expiration>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key._version_id }}</VersionId>
<IsLatest>false</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k in deleted %}
<Deleted>
<Key>{{k}}</Key>
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_OBJECT_SUCCESS = """<DeleteObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteObjectResponse>
<Code>200</Code>
<Description>OK</Description>
</DeleteObjectResponse>
</DeleteObjectResponse>"""
S3_OBJECT_RESPONSE = """<PutObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<PutObjectResponse>
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</PutObjectResponse>
</PutObjectResponse>"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }}</NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>
<IsTruncated>false</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = """<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>False</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::111122223333:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
import bootstrap
import time
import subprocess
import os
import logging
import tempfile
import pprint
from ambari_commons.os_check import OSCheck
from bootstrap import PBootstrap, Bootstrap, BootstrapDefault, SharedState, HostLog, SCP, SSH
from unittest import TestCase
from subprocess import Popen
from bootstrap import AMBARI_PASSPHRASE_VAR_NAME
from mock.mock import MagicMock, call
from mock.mock import patch
from mock.mock import create_autospec
from only_for_platform import not_for_platform, os_distro_value, PLATFORM_WINDOWS
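# Illustration added for this document, not part of the original tests: every
# test below builds a SharedState with the same long positional argument list.
# The annotations are inferred from how the tests use the values (params.user,
# params.sshPort, params.sshkey_file, shared_state.script_dir, params.bootdir,
# getAmbariPort()); the labels on the remaining positions are assumptions.
def _example_shared_state():
  return SharedState("root",            # user
                     "123",             # ssh port
                     "sshkey_file",     # ssh key file
                     "scriptDir",       # script directory
                     "bootdir",         # bootstrap output directory
                     "setupAgentFile",  # setupAgent script path
                     "ambariServer",    # Ambari server host
                     "centos6",         # cluster OS type
                     None,              # project version (None in most tests here)
                     "8440",            # Ambari server port
                     "root")            # user to run the agent setup as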
@not_for_platform(PLATFORM_WINDOWS)
class TestBootstrap(TestCase):
def setUp(self):
logging.basicConfig(level=logging.ERROR)
def test_getRemoteName(self):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6", None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
utime1 = 1234
utime2 = 12345
bootstrap_obj.getUtime = MagicMock(return_value=utime1)
remote1 = bootstrap_obj.getRemoteName("/tmp/setupAgent.sh")
self.assertEquals(remote1, "/tmp/setupAgent{0}.sh".format(utime1))
bootstrap_obj.getUtime.return_value=utime2
remote1 = bootstrap_obj.getRemoteName("/tmp/setupAgent.sh")
self.assertEquals(remote1, "/tmp/setupAgent{0}.sh".format(utime1))
remote2 = bootstrap_obj.getRemoteName("/tmp/host_pass")
self.assertEquals(remote2, "/tmp/host_pass{0}".format(utime2))
# TODO: Test bootstrap timeout
# TODO: test_return_error_message_for_missing_sudo_package
def test_getAmbariPort(self):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
self.assertEquals(bootstrap_obj.getAmbariPort(),"8440")
shared_state.server_port = None
bootstrap_obj = Bootstrap("hostname", shared_state)
self.assertEquals(bootstrap_obj.getAmbariPort(),"null")
@patch.object(subprocess, "Popen")
@patch("sys.stderr")
@patch("sys.exit")
@patch.object(PBootstrap, "run")
@patch("os.path.dirname")
@patch("os.path.realpath")
def test_bootstrap_main(self, dirname_mock, realpath_mock, run_mock, exit_mock, stderr_mock, subprocess_Popen_mock):
bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "123", "sshkey_file", "setupAgent.py", "ambariServer", \
"centos6", "1.1.1", "8440", "root", "passwordfile"])
self.assertTrue(run_mock.called)
run_mock.reset_mock()
bootstrap.main(["bootstrap.py", "hostname,hostname2", "/tmp/bootstrap", "root", "123", "sshkey_file", "setupAgent.py", "ambariServer", \
"centos6", "1.1.1", "8440", "root", None])
self.assertTrue(run_mock.called)
run_mock.reset_mock()
def side_effect(retcode):
raise Exception(retcode, "sys.exit")
exit_mock.side_effect = side_effect
try:
bootstrap.main(["bootstrap.py","hostname,hostname2", "/tmp/bootstrap"])
self.fail("sys.exit(2)")
except Exception:
# Expected
pass
self.assertTrue(exit_mock.called)
@patch("os.environ")
def test_getRunSetupWithPasswordCommand(self, environ_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
environ_mock.__getitem__.return_value = "TEST_PASSPHRASE"
bootstrap_obj = Bootstrap("hostname", shared_state)
utime = 1234
bootstrap_obj.getUtime = MagicMock(return_value=utime)
ret = bootstrap_obj.getRunSetupWithPasswordCommand("hostname")
expected = "/var/lib/ambari-agent/tmp/ambari-sudo.sh -S python /var/lib/ambari-agent/tmp/setupAgent{0}.py hostname TEST_PASSPHRASE " \
"ambariServer root 8440 < /var/lib/ambari-agent/tmp/host_pass{0}".format(utime)
self.assertEquals(ret, expected)
def test_generateRandomFileName(self):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
self.assertTrue(bootstrap_obj.generateRandomFileName(None) == bootstrap_obj.getUtime())
@patch.object(OSCheck, "is_redhat_family")
@patch.object(OSCheck, "is_suse_family")
def test_getRepoDir(self, is_suse_family, is_redhat_family):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
# Suse
is_redhat_family.return_value = False
is_suse_family.return_value = True
res = bootstrap_obj.getRepoDir()
self.assertEquals(res, "/etc/zypp/repos.d")
# non-Suse
is_suse_family.return_value = False
is_redhat_family.return_value = True
res = bootstrap_obj.getRepoDir()
self.assertEquals(res, "/etc/yum.repos.d")
def test_getSetupScript(self):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
self.assertEquals(bootstrap_obj.shared_state.script_dir, "scriptDir")
def test_run_setup_agent_command_ends_with_project_version(self):
os.environ[AMBARI_PASSPHRASE_VAR_NAME] = ""
version = "1.1.1"
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
version, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
runSetupCommand = bootstrap_obj.getRunSetupCommand("hostname")
self.assertTrue(runSetupCommand.endswith(version + " 8440"))
def test_agent_setup_command_without_project_version(self):
os.environ[AMBARI_PASSPHRASE_VAR_NAME] = ""
version = None
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
version, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
runSetupCommand = bootstrap_obj.getRunSetupCommand("hostname")
self.assertTrue(runSetupCommand.endswith(" 8440"))
# TODO: test_os_check_fail_fails_bootstrap_execution
def test_host_log(self):
tmp_file, tmp_filename = tempfile.mkstemp()
dummy_log = HostLog(tmp_filename)
# First write to log
dummy_log.write("a\nb\nc")
# Read it
with open(tmp_filename) as f:
s = f.read()
etalon = "a\nb\nc\n"
self.assertEquals(s, etalon)
# Next write
dummy_log.write("Yet another string")
# Read it
with open(tmp_filename) as f:
s = f.read()
etalon = "a\nb\nc\nYet another string\n"
self.assertEquals(s, etalon)
# Should not append line end if it already exists
dummy_log.write("line break->\n")
# Read it
with open(tmp_filename) as f:
s = f.read()
etalon = "a\nb\nc\nYet another string\nline break->\n"
self.assertEquals(s, etalon)
# Cleanup
os.unlink(tmp_filename)
@patch("subprocess.Popen")
def test_SCP(self, popenMock):
params = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
"1.2.1", "8440", "root")
host_log_mock = MagicMock()
log = {'text': ""}
def write_side_effect(text):
log['text'] = log['text'] + text
host_log_mock.write.side_effect = write_side_effect
scp = SCP(params.user, params.sshPort, params.sshkey_file, "dummy-host", "src/file",
"dst/file", params.bootdir, host_log_mock)
log_sample = "log_sample"
error_sample = "error_sample"
# Successful run
process = MagicMock()
popenMock.return_value = process
process.communicate.return_value = (log_sample, error_sample)
process.returncode = 0
retcode = scp.run()
self.assertTrue(popenMock.called)
self.assertTrue(log_sample in log['text'])
self.assertTrue(error_sample in log['text'])
command_str = str(popenMock.call_args[0][0])
self.assertEquals(command_str, "['scp', '-r', '-o', 'ConnectTimeout=60', '-o', "
"'BatchMode=yes', '-o', 'StrictHostKeyChecking=no', '-P', '123', '-i', 'sshkey_file',"
" 'src/file', 'root@dummy-host:dst/file']")
self.assertEqual(retcode["exitstatus"], 0)
log['text'] = ""
# Unsuccessful run
process.returncode = 1
retcode = scp.run()
self.assertTrue(log_sample in log['text'])
self.assertTrue(error_sample in log['text'])
self.assertEqual(retcode["exitstatus"], 1)
@patch("subprocess.Popen")
def test_SSH(self, popenMock):
params = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
"1.2.1", "8440", "root")
host_log_mock = MagicMock()
log = {'text': ""}
def write_side_effect(text):
log['text'] = log['text'] + text
host_log_mock.write.side_effect = write_side_effect
ssh = SSH(params.user, params.sshPort, params.sshkey_file, "dummy-host", "dummy-command",
params.bootdir, host_log_mock)
log_sample = "log_sample"
error_sample = "error_sample"
# Successful run
process = MagicMock()
popenMock.return_value = process
process.communicate.return_value = (log_sample, error_sample)
process.returncode = 0
retcode = ssh.run()
self.assertTrue(popenMock.called)
self.assertTrue(log_sample in log['text'])
self.assertTrue(error_sample in log['text'])
command_str = str(popenMock.call_args[0][0])
self.assertEquals(command_str, "['ssh', '-o', 'ConnectTimeOut=60', '-o', "
"'StrictHostKeyChecking=no', '-o', 'BatchMode=yes', '-tt', '-i', "
"'sshkey_file', '-p', '123', 'root@dummy-host', 'dummy-command']")
self.assertEqual(retcode["exitstatus"], 0)
log['text'] = ""
# Unsuccessful run
process.returncode = 1
retcode = ssh.run()
self.assertTrue(log_sample in log['text'])
self.assertTrue(error_sample in log['text'])
self.assertEqual(retcode["exitstatus"], 1)
log['text'] = ""
# unsuccessful run with error message
process.returncode = 1
dummy_error_message = "dummy_error_message"
ssh = SSH(params.user, params.sshPort, params.sshkey_file, "dummy-host", "dummy-command",
params.bootdir, host_log_mock, errorMessage= dummy_error_message)
retcode = ssh.run()
self.assertTrue(log_sample in log['text'])
self.assertTrue(error_sample in log['text'])
self.assertTrue(dummy_error_message in log['text'])
self.assertEqual(retcode["exitstatus"], 1)
def test_getOsCheckScript(self):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
ocs = bootstrap_obj.getOsCheckScript()
self.assertEquals(ocs, "scriptDir/os_check_type.py")
@patch.object(BootstrapDefault, "getRemoteName")
def test_getOsCheckScriptRemoteLocation(self, getRemoteName_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
v = "/tmp/os_check_type1374259902.py"
getRemoteName_mock.return_value = v
ocs = bootstrap_obj.getOsCheckScriptRemoteLocation()
self.assertEquals(ocs, v)
@patch.object(BootstrapDefault, "is_suse")
def test_getRepoFile(self, is_suse_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
is_suse_mock.return_value = False
rf = bootstrap_obj.getRepoFile()
self.assertEquals(rf, "/etc/yum.repos.d/ambari.repo")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_createTargetDir(self, write_mock, run_mock,
init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.createTargetDir()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command,
"SUDO=$([ \"$EUID\" -eq 0 ] && echo || echo sudo) ; $SUDO mkdir -p /var/lib/ambari-agent/tmp ; "
"$SUDO chown -R root /var/lib/ambari-agent/tmp ; "
"$SUDO chmod 755 /var/lib/ambari-agent ; "
"$SUDO chmod 755 /var/lib/ambari-agent/data ; "
"$SUDO chmod 1777 /var/lib/ambari-agent/tmp")
@patch.object(BootstrapDefault, "getOsCheckScript")
@patch.object(BootstrapDefault, "getOsCheckScriptRemoteLocation")
@patch.object(SCP, "__init__")
@patch.object(SCP, "run")
@patch.object(HostLog, "write")
def test_copyOsCheckScript(self, write_mock, run_mock, init_mock,
getOsCheckScriptRemoteLocation_mock, getOsCheckScript_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
getOsCheckScript_mock.return_value = "OsCheckScript"
getOsCheckScriptRemoteLocation_mock.return_value = "OsCheckScriptRemoteLocation"
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.copyOsCheckScript()
self.assertEquals(res, expected)
input_file = str(init_mock.call_args[0][4])
remote_file = str(init_mock.call_args[0][5])
self.assertEqual(input_file, "OsCheckScript")
self.assertEqual(remote_file, "OsCheckScriptRemoteLocation")
@patch.object(BootstrapDefault, "getRemoteName")
@patch.object(BootstrapDefault, "hasPassword")
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
def test_getMoveRepoFileCommand(self, is_redhat_family, is_ubuntu_family, is_suse_family, hasPassword_mock, getRemoteName_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
is_redhat_family.return_value = True
is_ubuntu_family.return_value = False
is_suse_family.return_value = False
bootstrap_obj = Bootstrap("hostname", shared_state)
# Without password
hasPassword_mock.return_value = False
getRemoteName_mock.return_value = "RemoteName"
rf = bootstrap_obj.getMoveRepoFileCommand("target")
self.assertEquals(rf, "/var/lib/ambari-agent/tmp/ambari-sudo.sh mv RemoteName target/ambari.repo")
# With password
hasPassword_mock.return_value = True
getRemoteName_mock.return_value = "RemoteName"
rf = bootstrap_obj.getMoveRepoFileCommand("target")
self.assertEquals(rf, "/var/lib/ambari-agent/tmp/ambari-sudo.sh -S mv RemoteName target/ambari.repo < RemoteName")
@patch("os.path.exists")
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
@patch.object(BootstrapDefault, "getMoveRepoFileCommand")
@patch.object(BootstrapDefault, "getRepoDir")
@patch.object(BootstrapDefault, "getRepoFile")
@patch.object(BootstrapDefault, "getRemoteName")
@patch.object(SCP, "__init__")
@patch.object(SCP, "run")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_copyNeededFiles(self, write_mock, ssh_run_mock, ssh_init_mock,
scp_run_mock, scp_init_mock,
getRemoteName_mock, getRepoFile_mock, getRepoDir,
getMoveRepoFileCommand, is_redhat_family, is_ubuntu_family, is_suse_family,
os_path_exists_mock):
#
# Ambari repo file exists
#
def os_path_exists_side_effect(*args, **kwargs):
if args[0] == getRepoFile_mock():
return True
else:
return False
os_path_exists_mock.side_effect = os_path_exists_side_effect
os_path_exists_mock.return_value = None
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
is_redhat_family.return_value = True
is_ubuntu_family.return_value = False
is_suse_family.return_value = False
bootstrap_obj = Bootstrap("hostname", shared_state)
getMoveRepoFileCommand.return_value = "MoveRepoFileCommand"
getRepoDir.return_value = "RepoDir"
getRemoteName_mock.return_value = "RemoteName"
getRepoFile_mock.return_value = "RepoFile"
expected1 = {"exitstatus": 42, "log": "log42", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
expected3 = {"exitstatus": 1, "log": "log1", "errormsg": "errorMsg"}
expected4 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_init_mock.return_value = None
ssh_init_mock.return_value = None
# Testing max retcode return
scp_run_mock.side_effect = [expected1, expected3]
ssh_run_mock.side_effect = [expected2, expected4]
res = bootstrap_obj.copyNeededFiles()
self.assertEquals(res, expected1["exitstatus"])
input_file = str(scp_init_mock.call_args[0][4])
remote_file = str(scp_init_mock.call_args[0][5])
self.assertEqual(input_file, "setupAgentFile")
self.assertEqual(remote_file, "RemoteName")
command = str(ssh_init_mock.call_args[0][4])
self.assertEqual(command, "/var/lib/ambari-agent/tmp/ambari-sudo.sh chmod 644 RepoFile")
# Another order
expected1 = {"exitstatus": 0, "log": "log0", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
expected3 = {"exitstatus": 1, "log": "log1", "errormsg": "errorMsg"}
expected4 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_run_mock.side_effect = [expected1, expected3]
ssh_run_mock.side_effect = [expected2, expected4]
res = bootstrap_obj.copyNeededFiles()
self.assertEquals(res, expected2["exitstatus"])
# yet another order
expected1 = {"exitstatus": 33, "log": "log33", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
expected3 = {"exitstatus": 42, "log": "log42", "errormsg": "errorMsg"}
expected4 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_run_mock.side_effect = [expected1, expected3]
ssh_run_mock.side_effect = [expected2, expected4]
res = bootstrap_obj.copyNeededFiles()
self.assertEquals(res, expected3["exitstatus"])
#
#Ambari repo file does not exist
#
os_path_exists_mock.side_effect = None
os_path_exists_mock.return_value = False
#Expectations:
# SSH will not be called at all
# SCP will be called once for copying the setup script file
scp_run_mock.reset_mock()
ssh_run_mock.reset_mock()
expectedResult = {"exitstatus": 33, "log": "log33", "errormsg": "errorMsg"}
scp_run_mock.side_effect = [expectedResult]
res = bootstrap_obj.copyNeededFiles()
self.assertFalse(ssh_run_mock.called)
self.assertEquals(res, expectedResult["exitstatus"])
@patch.object(BootstrapDefault, "getOsCheckScriptRemoteLocation")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_runOsCheckScript(self, write_mock, run_mock,
init_mock, getOsCheckScriptRemoteLocation_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
getOsCheckScriptRemoteLocation_mock.return_value = "OsCheckScriptRemoteLocation"
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.runOsCheckScript()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command,
"chmod a+x OsCheckScriptRemoteLocation && "
"env PYTHONPATH=$PYTHONPATH:/var/lib/ambari-agent/tmp OsCheckScriptRemoteLocation centos6")
@patch.object(SSH, "__init__")
@patch.object(BootstrapDefault, "getRunSetupCommand")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_runSetupAgent(self, write_mock, run_mock,
getRunSetupCommand_mock, init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
getRunSetupCommand_mock.return_value = "RunSetupCommand"
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.runSetupAgent()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "RunSetupCommand")
@patch.object(BootstrapDefault, "hasPassword")
@patch.object(BootstrapDefault, "getRunSetupWithPasswordCommand")
@patch.object(BootstrapDefault, "getRunSetupWithoutPasswordCommand")
def test_getRunSetupCommand(self, getRunSetupWithoutPasswordCommand_mock,
getRunSetupWithPasswordCommand_mock,
hasPassword_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
# With password
hasPassword_mock.return_value = True
getRunSetupWithPasswordCommand_mock.return_value = "RunSetupWithPasswordCommand"
getRunSetupWithoutPasswordCommand_mock.return_value = "RunSetupWithoutPasswordCommand"
res = bootstrap_obj.getRunSetupCommand("dummy-host")
self.assertEqual(res, "RunSetupWithPasswordCommand")
# Without password
hasPassword_mock.return_value = False
res = bootstrap_obj.getRunSetupCommand("dummy-host")
self.assertEqual(res, "RunSetupWithoutPasswordCommand")
@patch.object(HostLog, "write")
def test_createDoneFile(self, write_mock):
tmp_dir = tempfile.gettempdir()
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", tmp_dir,
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
done_file = os.path.join(tmp_dir, "hostname.done")
expected = 42
bootstrap_obj.createDoneFile(expected)
with open(done_file) as df:
res = df.read()
self.assertEqual(res, str(expected))
os.unlink(done_file)
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_checkSudoPackage(self, write_mock, run_mock, init_mock, is_redhat_family, is_ubuntu_family, is_suse_family):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
is_redhat_family.return_value = True
is_ubuntu_family.return_value = False
is_suse_family.return_value = False
res = bootstrap_obj.checkSudoPackage()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "[ \"$EUID\" -eq 0 ] || rpm -qa | grep -e '^sudo\-'")
@patch.object(OSCheck, "is_suse_family")
@patch.object(OSCheck, "is_ubuntu_family")
@patch.object(OSCheck, "is_redhat_family")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_checkSudoPackageUbuntu(self, write_mock, run_mock, init_mock,
is_redhat_family, is_ubuntu_family, is_suse_family):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "ubuntu12",
None, "8440", "root")
is_redhat_family.return_value = False
is_ubuntu_family.return_value = True
is_suse_family.return_value = False
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.checkSudoPackage()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "[ \"$EUID\" -eq 0 ] || dpkg --get-selections|grep -e '^sudo\s*install'")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
@patch.object(BootstrapDefault, "getPasswordFile")
def test_deletePasswordFile(self, getPasswordFile_mock, write_mock, run_mock,
init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
getPasswordFile_mock.return_value = "PasswordFile"
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.deletePasswordFile()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "rm PasswordFile")
@patch.object(BootstrapDefault, "getPasswordFile")
@patch.object(SCP, "__init__")
@patch.object(SCP, "run")
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
def test_copyPasswordFile(self, write_mock, ssh_run_mock,
ssh_init_mock, scp_run_mock,
scp_init_mock, getPasswordFile_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root", password_file="PasswordFile")
bootstrap_obj = Bootstrap("hostname", shared_state)
getPasswordFile_mock.return_value = "PasswordFile"
# Testing max retcode return
expected1 = {"exitstatus": 42, "log": "log42", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_init_mock.return_value = None
scp_run_mock.return_value = expected1
ssh_init_mock.return_value = None
ssh_run_mock.return_value = expected2
res = bootstrap_obj.copyPasswordFile()
self.assertEquals(res, expected1["exitstatus"])
input_file = str(scp_init_mock.call_args[0][4])
remote_file = str(scp_init_mock.call_args[0][5])
self.assertEqual(input_file, "PasswordFile")
self.assertEqual(remote_file, "PasswordFile")
command = str(ssh_init_mock.call_args[0][4])
self.assertEqual(command, "chmod 600 PasswordFile")
# Another order
expected1 = {"exitstatus": 0, "log": "log0", "errormsg": "errorMsg"}
expected2 = {"exitstatus": 17, "log": "log17", "errormsg": "errorMsg"}
scp_run_mock.return_value = expected1
ssh_run_mock.return_value = expected2
res = bootstrap_obj.copyPasswordFile()
self.assertEquals(res, expected2["exitstatus"])
@patch.object(SSH, "__init__")
@patch.object(SSH, "run")
@patch.object(HostLog, "write")
@patch.object(BootstrapDefault, "getPasswordFile")
def test_changePasswordFileModeOnHost(self, getPasswordFile_mock, write_mock,
run_mock, init_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
expected = 42
getPasswordFile_mock.return_value = "PasswordFile"
init_mock.return_value = None
run_mock.return_value = expected
res = bootstrap_obj.changePasswordFileModeOnHost()
self.assertEquals(res, expected)
command = str(init_mock.call_args[0][4])
self.assertEqual(command, "chmod 600 PasswordFile")
@patch.object(HostLog, "write")
def test_try_to_execute(self, write_mock):
expected = 43
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
# Normal case
def act_normal_return_int():
return 43
ret = bootstrap_obj.try_to_execute(act_normal_return_int)
self.assertEqual(ret["exitstatus"], expected)
self.assertFalse(write_mock.called)
write_mock.reset_mock()
def act_normal_return():
return {"exitstatus": 43}
ret = bootstrap_obj.try_to_execute(act_normal_return)
self.assertEqual(ret["exitstatus"], expected)
self.assertFalse(write_mock.called)
write_mock.reset_mock()
# Exception scenario
def act():
raise IOError()
ret = bootstrap_obj.try_to_execute(act)
self.assertEqual(ret["exitstatus"], 177)
self.assertTrue(write_mock.called)
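# A sketch of the behaviour the assertions above pin down (illustrative only;
# attribute names such as self.host_log are assumptions and the real
# BootstrapDefault.try_to_execute may differ in its logging details):
#
#   def try_to_execute(self, action):
#       try:
#           ret = action()
#           if not isinstance(ret, dict):
#               ret = {"exitstatus": ret, "log": "", "errormsg": ""}
#       except Exception as e:
#           # any exception maps to exit status 177 and is written to the host
#           # log, which is why write_mock.called is True only in this branch
#           ret = {"exitstatus": 177, "log": str(e), "errormsg": str(e)}
#           self.host_log.write(str(e))
#       return ret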
@patch.object(BootstrapDefault, "try_to_execute")
@patch.object(BootstrapDefault, "hasPassword")
@patch.object(BootstrapDefault, "createDoneFile")
@patch.object(HostLog, "write")
@patch("logging.warn")
@patch("logging.error")
def test_run(self, error_mock, warn_mock, write_mock, createDoneFile_mock,
hasPassword_mock, try_to_execute_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
# Testing workflow without password
bootstrap_obj.copied_password_file = False
hasPassword_mock.return_value = False
try_to_execute_mock.return_value = {"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}
bootstrap_obj.run()
self.assertEqual(try_to_execute_mock.call_count, 10) # <- Adjust if changed
self.assertTrue(createDoneFile_mock.called)
self.assertEqual(bootstrap_obj.getStatus()["return_code"], 0)
try_to_execute_mock.reset_mock()
createDoneFile_mock.reset_mock()
# Testing workflow with password
bootstrap_obj.copied_password_file = True
hasPassword_mock.return_value = True
try_to_execute_mock.return_value = {"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}
bootstrap_obj.run()
self.assertEqual(try_to_execute_mock.call_count, 13) # <- Adjust if changed
self.assertTrue(createDoneFile_mock.called)
self.assertEqual(bootstrap_obj.getStatus()["return_code"], 0)
error_mock.reset_mock()
write_mock.reset_mock()
try_to_execute_mock.reset_mock()
createDoneFile_mock.reset_mock()
# Testing workflow when some action failed before copying password
bootstrap_obj.copied_password_file = False
hasPassword_mock.return_value = False
try_to_execute_mock.side_effect = [{"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}, {"exitstatus": 1, "log":"log1", "errormsg":"errormsg1"}]
bootstrap_obj.run()
self.assertEqual(try_to_execute_mock.call_count, 2) # <- Adjust if changed
self.assertTrue("ERROR" in error_mock.call_args[0][0])
self.assertTrue("ERROR" in write_mock.call_args[0][0])
self.assertTrue(createDoneFile_mock.called)
self.assertEqual(bootstrap_obj.getStatus()["return_code"], 1)
try_to_execute_mock.reset_mock()
createDoneFile_mock.reset_mock()
# Testing workflow when some action failed after copying password
bootstrap_obj.copied_password_file = True
hasPassword_mock.return_value = True
try_to_execute_mock.side_effect = [{"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}, {"exitstatus": 42, "log":"log42", "errormsg":"errormsg42"}, {"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}]
bootstrap_obj.run()
self.assertEqual(try_to_execute_mock.call_count, 3) # <- Adjust if changed
self.assertTrue(createDoneFile_mock.called)
self.assertEqual(bootstrap_obj.getStatus()["return_code"], 42)
error_mock.reset_mock()
write_mock.reset_mock()
try_to_execute_mock.reset_mock()
createDoneFile_mock.reset_mock()
# Testing workflow when some action failed after copying password and
# removing password failed too
bootstrap_obj.copied_password_file = True
hasPassword_mock.return_value = True
try_to_execute_mock.side_effect = [{"exitstatus": 0, "log":"log0", "errormsg":"errormsg0"}, {"exitstatus": 17, "log":"log17", "errormsg":"errormsg17"}, {"exitstatus": 19, "log":"log19", "errormsg":"errormsg19"}]
bootstrap_obj.run()
self.assertEqual(try_to_execute_mock.call_count, 3) # <- Adjust if changed
self.assertTrue("ERROR" in write_mock.call_args_list[0][0][0])
self.assertTrue("ERROR" in error_mock.call_args[0][0])
self.assertTrue("WARNING" in write_mock.call_args_list[1][0][0])
self.assertTrue("WARNING" in warn_mock.call_args[0][0])
self.assertTrue(createDoneFile_mock.called)
self.assertEqual(bootstrap_obj.getStatus()["return_code"], 17)
@patch.object(BootstrapDefault, "createDoneFile")
@patch.object(HostLog, "write")
def test_interruptBootstrap(self, write_mock, createDoneFile_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
bootstrap_obj = Bootstrap("hostname", shared_state)
bootstrap_obj.interruptBootstrap()
self.assertTrue(createDoneFile_mock.called)
@patch("time.sleep")
@patch("time.time")
@patch("logging.warn")
@patch("logging.info")
@patch.object(BootstrapDefault, "start")
@patch.object(BootstrapDefault, "interruptBootstrap")
@patch.object(BootstrapDefault, "getStatus")
def test_PBootstrap(self, getStatus_mock, interruptBootstrap_mock, start_mock,
info_mock, warn_mock, time_mock, sleep_mock):
shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir",
"setupAgentFile", "ambariServer", "centos6",
None, "8440", "root")
n = 180
time = 100500
time_mock.return_value = time
hosts = []
for i in range(0, n):
hosts.append("host" + str(i))
# Testing normal case
getStatus_mock.return_value = {"return_code": 0,
"start_time": time + 999}
pbootstrap_obj = PBootstrap(hosts, shared_state)
pbootstrap_obj.run()
self.assertEqual(start_mock.call_count, n)
self.assertEqual(interruptBootstrap_mock.call_count, 0)
start_mock.reset_mock()
getStatus_mock.reset_mock()
# Testing case of timeout
def fake_return_code_generator():
call_number = 0
while True:
call_number += 1
if call_number % 5 != 0: # ~80% of hosts finish successfully
yield 0
else:
yield None
def fake_start_time_generator():
while True:
yield time - bootstrap.HOST_BOOTSTRAP_TIMEOUT - 1
return_code_generator = fake_return_code_generator()
start_time_generator = fake_start_time_generator()
def status_get_item_mock(item):
if item == "return_code":
return return_code_generator.next()
elif item == "start_time":
return start_time_generator.next()
dict_mock = MagicMock()
dict_mock.__getitem__.side_effect = status_get_item_mock
getStatus_mock.return_value = dict_mock
pbootstrap_obj.run()
self.assertEqual(start_mock.call_count, n)
self.assertEqual(interruptBootstrap_mock.call_count, n / 5)
|
|
#!/usr/bin/env python2.5
"""
test_tasks.py
"""
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from optparse import OptionParser
import sys, os
import os.path
import StringIO
# add self to search path for testing
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
if __name__ == '__main__':
module_name = os.path.split(sys.argv[0])[1]
module_name = os.path.splitext(module_name)[0];
else:
module_name = __name__
parser = OptionParser(version="%prog 1.0")
parser.add_option("-t", "--target_tasks", dest="target_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Target task(s) of pipeline.")
parser.add_option("-f", "--forced_tasks", dest="forced_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Pipeline task(s) which will be included even if they are up to date.")
parser.add_option("-j", "--jobs", dest="jobs",
default=5,
metavar="jobs",
type="int",
help="Specifies the number of jobs (commands) to run simultaneously.")
parser.add_option("-v", "--verbose", dest = "verbose",
action="store_true", default=False,
help="Do not echo to shell but only print to log.")
parser.add_option("-d", "--dependency", dest="dependency_file",
default="simple.svg",
metavar="FILE",
type="string",
help="Print a dependency graph of the pipeline that would be executed "
"to FILE, but do not execute it.")
parser.add_option("-F", "--dependency_graph_format", dest="dependency_graph_format",
metavar="FORMAT",
type="string",
default = 'svg',
help="format of dependency graph file. Can be 'ps' (PostScript), "+
"'svg' 'svgz' (Structured Vector Graphics), " +
"'png' 'gif' (bitmap graphics) etc ")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Print a description of the jobs that would be executed, "
"but do not execute them.")
parser.add_option("-M", "--minimal_rebuild_mode", dest="minimal_rebuild_mode",
action="store_true", default=False,
help="Rebuild a minimum of tasks necessary for the target. "
"Ignore upstream out of date tasks if intervening tasks are fine.")
parser.add_option("-K", "--no_key_legend_in_graph", dest="no_key_legend_in_graph",
action="store_true", default=False,
help="Do not print out legend and key for dependency graph.")
parser.add_option("-H", "--draw_graph_horizontally", dest="draw_horizontally",
action="store_true", default=False,
help="Draw horizontal dependency graph.")
parameters = [
]
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import StringIO
import re
import operator
import sys
from collections import defaultdict
sys.path.append(os.path.abspath(os.path.join(exe_path,"..", "..")))
from ruffus import *
import json
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def create_custom_file_func(params):
"""
creates function which can be used as input to @files_func
"""
def cust_func ():
for job_param in params:
yield job_param
return cust_func
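# Illustrative wiring (an assumption, not part of this script): the factory
# above is meant to hand a task decorator a generator of per-job parameters,
# one yielded tuple/list per job, e.g.
#
#   job_params = [["a.1", "a.2"], ["b.1", "b.2"]]
#   @files(create_custom_file_func(job_params))
#   def custom_param_task(infiles, outfiles):
#       test_job_io(infiles, outfiles, ())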
def is_job_uptodate (infiles, outfiles, *extra_params):
"""
assumes first two parameters are files, checks if they are up to date
"""
return task.needs_update_check_modify_time (infiles, outfiles, *extra_params)
def test_post_task_function ():
print "Hooray"
import time
def test_job_io(infiles, outfiles, extra_params):
"""
cat input files content to output files
after writing out job parameters
"""
# dump parameters
params = (infiles, outfiles) + extra_params
sys.stdout.write(' job = %s\n' % json.dumps(params))
if isinstance(infiles, str):
infiles = [infiles]
elif infiles is None:
infiles = []
if isinstance(outfiles, str):
outfiles = [outfiles]
output_text = list()
for f in infiles:
output_text.append(open(f).read())
output_text = "".join(sorted(output_text))
output_text += json.dumps(infiles) + " -> " + json.dumps(outfiles) + "\n"
for f in outfiles:
open(f, "w").write(output_text)
time.sleep(1)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# get help string
f = StringIO.StringIO()
parser.print_help(f)
helpstr = f.getvalue()
(options, remaining_args) = parser.parse_args()
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# task1
#
@files(None, 'a.1')
def task1(infiles, outfiles, *extra_params):
"""
First task
"""
test_job_io(infiles, outfiles, extra_params)
#
# task2
#
@files_re('*.1', '(.*).1', r'\1.1', r'\1.2')
@follows(task1)
def task2(infiles, outfiles, *extra_params):
"""
Second task
"""
test_job_io(infiles, outfiles, extra_params)
#
# task3
#
@files_re('*.1', '(.*).1', r'\1.2', r'\1.3')
@follows(task2)
def task3(infiles, outfiles, *extra_params):
"""
Third task
"""
test_job_io(infiles, outfiles, extra_params)
#
# task4
#
@files_re('*.1', '(.*).1', r'\1.3', r'\1.4')
@follows(task3)
def task4(infiles, outfiles, *extra_params):
"""
Fourth task
"""
test_job_io(infiles, outfiles, extra_params)
if options.just_print:
pipeline_printout(sys.stdout, options.target_tasks, options.forced_tasks,
long_winded=True,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode)
elif options.dependency_file:
pipeline_printout_graph ( open(options.dependency_file, "w"),
options.dependency_graph_format,
options.target_tasks,
options.forced_tasks,
draw_vertically = not options.draw_horizontally,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode,
no_key_legend = options.no_key_legend_in_graph)
else:
pipeline_run(options.target_tasks, options.forced_tasks, multiprocess = options.jobs,
gnu_make_maximal_rebuild_mode = not options.minimal_rebuild_mode)
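# Example invocations (illustrative), built from the options defined above:
#
#   python test_tasks.py -t task4 -n               # describe the jobs, run nothing
#   python test_tasks.py -t task4 -d pipeline.svg  # write only the dependency graph
#   python test_tasks.py -t task4 -d "" -j 5       # run the pipeline, 5 jobs at once
#
# Note that --dependency defaults to "simple.svg", so the pipeline_run() branch
# is reached only when that option is explicitly cleared.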
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from mock import Mock, patch
from calvin.tests import DummyNode
from calvin.runtime.north.actormanager import ActorManager
from calvin.runtime.north.plugins.port import queue
pytestmark = pytest.mark.unittest
class ActorManagerTests(unittest.TestCase):
def setUp(self):
n = DummyNode()
self.am = ActorManager(node=n)
n.am = self.am
n.pm.remove_ports_of_actor = Mock(return_value = [])
def tearDown(self):
pass
def _new_actor(self, a_type, a_args, **kwargs):
a_id = self.am.new(a_type, a_args, **kwargs)
a = self.am.actors.get(a_id, None)
self.assertTrue(a)
return a, a_id
def test_new_actor(self):
# Test basic actor creation
a_type = 'std.Constantify'
constant = 42
a, _ = self._new_actor(a_type, {'constant':constant})
self.assertEqual(a.constant, constant)
def test_actor_state_get(self):
# Test basic actor state retrieval
a_type = 'std.Constantify'
constant = 42
a, a_id = self._new_actor(a_type, {'constant':constant})
s = a.serialize()
self.assertEqual(s['managed']['constant'], constant)
self.assertEqual(s['private']['_id'], a_id)
def test_new_actor_from_state(self):
# Test basic actor state manipulation
a_type = 'std.Constantify'
constant = 42
a, a_id = self._new_actor(a_type, {'constant':constant})
a.constant = 43
s = a.serialize()
self.am.destroy(a_id)
self.assertEqual(len(self.am.actors), 0)
b, b_id = self._new_actor(a_type, None, state = s)
self.assertEqual(b.constant, 43)
# Assert id is preserved
self.assertEqual(b.id, a_id)
# Assert actor database is consistent
self.assertTrue(self.am.actors[a_id])
self.assertEqual(len(self.am.actors), 1)
@patch('calvin.runtime.north.storage.Storage.delete_actor')
def test_destroy_actor(self, delete_actor):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42})
self.am.destroy(actor_id)
assert actor_id not in self.am.actors
assert self.am.node.storage.delete_actor.call_args[0][0] == actor_id
self.am.node.control.log_actor_destroy.assert_called_with(actor_id)
def test_enable_actor(self):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42})
actor.enable = Mock()
self.am.enable(actor_id)
assert actor.enable.called
def test_disable_actor(self):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42})
actor.disable = Mock()
self.am.disable(actor_id)
assert actor.disable.called
def test_migrate_to_same_node_does_nothing(self):
callback_mock = Mock()
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42})
actor.will_migrate = Mock()
self.am.migrate(actor_id, self.am.node.id, callback_mock)
assert not actor.will_migrate.called
assert callback_mock.called
args, kwargs = callback_mock.call_args
self.assertEqual(kwargs['status'].status, 200)
def test_migrate_non_existing_actor_returns_false(self):
callback_mock = Mock()
self.am.migrate("123", self.am.node.id, callback_mock)
assert callback_mock.called
args, kwargs = callback_mock.call_args
self.assertEqual(kwargs['status'].status, 500)
def test_migrate(self):
callback_mock = Mock()
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42})
actor.outports['out'].set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "out"}, {}))
actor.inports['in'].set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {}))
peer_node = DummyNode()
actor.will_migrate = Mock()
self.am.migrate(actor_id, peer_node.id, callback_mock)
assert actor.will_migrate.called
assert self.am.node.pm.disconnect.called
args, kwargs = self.am.node.pm.disconnect.call_args
self.assertEqual(kwargs['actor_id'], actor_id)
cb = kwargs['callback']
self.assertEqual(cb.kwargs['actor'], actor)
self.assertEqual(cb.kwargs['actor_type'], actor._type)
self.assertEqual(cb.kwargs['callback'], callback_mock)
self.assertEqual(cb.kwargs['node_id'], peer_node.id)
self.assertEqual(cb.kwargs['ports'], actor.connections(self.am.node.id))
self.am.node.control.log_actor_migrate.assert_called_once_with(actor_id, peer_node.id)
def test_connect(self):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42})
connection_list = [['1', '2', '3', '4'], ['5', '6', '7', '8']]
callback_mock = Mock()
self.am.connect(actor_id, connection_list, callback_mock)
self.assertEqual(self.am.node.pm.connect.call_count, 2)
calls = self.am.node.pm.connect.call_args_list
for index, (args, kwargs) in enumerate(calls):
self.assertEqual(kwargs['port_id'], connection_list[index][1])
self.assertEqual(kwargs['peer_node_id'], connection_list[index][2])
self.assertEqual(kwargs['peer_port_id'], connection_list[index][3])
callback = kwargs['callback'].kwargs
self.assertEqual(callback['peer_port_id'], connection_list[index][3])
self.assertEqual(callback['actor_id'], actor_id)
self.assertEqual(callback['peer_port_ids'], ['4', '8'])
self.assertEqual(callback['_callback'], callback_mock)
def test_connections_returns_actor_connections_for_current_node(self):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42, 'name': 'actor'})
actor.outports['out'].set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "out"}, {}))
actor.inports['in'].set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {}))
expected = {
'actor_name': 'actor',
'actor_id': actor_id,
'inports': {actor.inports['in'].id: actor.inports['in'].get_peers()},
'outports': {actor.outports['out'].id: actor.outports['out'].get_peers()}
}
self.assertEqual(self.am.connections(actor_id), expected)
def test_missing_actor(self):
test_functions = [("report", ({},)), ("destroy", ()), ("enable", ()), ("disable", ()),
("connect", ([], None)), ("connections", ()), ("dump", ()),
("get_port_state", (None, ))]
for func, args in test_functions:
with pytest.raises(Exception) as excinfo:
print func
getattr(self.am, func)('123', *args)
assert "Actor '123' not found" in str(excinfo.value)
def test_actor_type(self):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42, 'name': 'actor'})
self.assertEqual(self.am.actor_type(actor_id), 'std.Constantify')
def test_actor_type_of_missing_actor(self):
self.assertEqual(self.am.actor_type("123"), 'BAD ACTOR')
def test_enabled_actors(self):
actor, actor_id = self._new_actor('std.Constantify', {'constant': 42, 'name': 'actor'})
enabled_actor, enabled_actor_id = self._new_actor('std.Constantify', {'constant': 42, 'name': 'actor'})
enabled_actor.enable()
self.assertEqual(self.am.enabled_actors(), [enabled_actor])
def test_list_actors(self):
actor_1, actor_1_id = self._new_actor('std.Constantify', {'constant': 42, 'name': 'actor'})
actor_2, actor_2_id = self._new_actor('std.Constantify', {'constant': 42, 'name': 'actor'})
actors = self.am.list_actors()
assert actor_1_id in actors
assert actor_2_id in actors
if __name__ == '__main__':
import unittest
suite = unittest.TestLoader().loadTestsFromTestCase(ActorManagerTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
from picosvg.geometric_types import Rect
from picosvg.svg_transform import Affine2D
from picosvg.svg_types import (
SVGPath,
SVGRect,
SVGLinearGradient,
SVGRadialGradient,
)
from svg_test_helpers import *
@pytest.mark.parametrize(
"path, expected_result",
[
# path explodes to show implicit commands & becomes absolute
("m1,1 2,0 1,3", "M1,1 L3,1 L4,4"),
# Vertical, Horizontal movement
("m2,2 h2 v2 h-1 v-1 H8 V8", "M2,2 H4 V4 H3 V3 H8 V8"),
# Quadratic bezier curve
("m2,2 q1,1 2,2 Q5,5 6,6", "M2,2 Q3,3 4,4 Q5,5 6,6"),
# Elliptic arc
("m2,2 a1,1 0 0 0 3,3 A2,2 1 1 1 4,4", "M2,2 A1 1 0 0 0 5,5 A2 2 1 1 1 4,4"),
# Cubic bezier
("m2,2 c1,-1 2,4 3,3 C4 4 5 5 6 6", "M2,2 C3,1 4,6 5,5 C4,4 5,5 6,6"),
# Elliptic arc that goes haywire when stroked
("M7,5 a3,1 0,0,0 0,-3 a3,3 0 0 1 -4,2", "M7,5 A3 1 0 0 0 7,2 A3 3 0 0 1 3,4"),
# clock hand's path in which the last point must be == start point when absolutized
(
"m63.8 63.98h0.4c2.1 0 3.8-1.7 3.8-3.8v-32.4c0-2.1-1.7-3.8-3.8-3.8h-0.4"
"c-2.1 0-3.8 1.7-3.8 3.8v32.4c0 2.1 1.7 3.8 3.8 3.8z",
"M63.8,63.98 H64.2 C66.3,63.98 68,62.28 68,60.18 V27.78 "
"C68,25.68 66.3,23.98 64.2,23.98 H63.8 C61.7,23.98 60,25.68 60,27.78 "
"V60.18 C60,62.28 61.7,63.98 63.8,63.98 Z",
),
# Relative 'm' in sub-path following a closed sub-path.
# Confirms z updates current position correctly.
# https://github.com/googlefonts/picosvg/issues/70
(
"m0,0 l0,10 l10,0 z m10,10 l0,10 l10,0 z",
"M0,0 L0,10 L10,10 Z M10,10 L10,20 L20,20 Z",
),
# Further adventures of z; it's a single backref not a stack
(
"M3,3 M1,1 l0,10 l4,0 z Z z l8,2 0,2 z m4,4 1,1 -2,0 z",
"M3,3 M1,1 L1,11 L5,11 Z Z Z L9,3 L9,5 Z M5,5 L6,6 L4,6 Z",
),
# Points very near subpath origin should collapse to that origin, test 1
# Make sure to test a command with only a single coordinate (h)
(
"M0,0 L0,5 L5,5 L1e-10,0 Z l5,-1 0,1 H-1e-9 z",
"M0,0 L0,5 L5,5 L0,0 Z L5,-1 L5,0 L0,0 Z",
),
],
)
def test_path_absolute(path: str, expected_result: str):
actual = SVGPath(d=path).absolute(inplace=True).round_floats(3, inplace=True).d
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"path, expected_result",
[
(
"m0,0 l1,1 l0,-1 m1,2 l1,1 l0,-1 z M3,3 l1,1 l0,-1 Z",
"M0,0 l1,1 l0,-1 M2,2 l1,1 l0,-1 z M3,3 l1,1 l0,-1 Z",
),
],
)
def test_path_absolute_moveto(path: str, expected_result: str):
actual = (
SVGPath(d=path).absolute_moveto(inplace=True).round_floats(3, inplace=True).d
)
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"path, move, expected_result",
[
# path with implicit relative lines
("m1,1 2,0 1,3", (2, 2), "M3,3 l2,0 l1,3"),
# path with implicit absolute lines
("M1,1 2,0 1,3", (2, 2), "M3,3 L4,2 L3,5"),
# Vertical, Horizontal movement
("m2,2 h2 v2 h-1 v-1 H8 V8", (-1, -2), "M1,0 h2 v2 h-1 v-1 H7 V6"),
# Quadratic bezier curve
("m2,2 q1,1 2,2 Q5,5 6,6", (3, 1), "M5,3 q1,1 2,2 Q8,6 9,7"),
# Elliptic arc
(
"m2,2 a1,1 0 0 0 3,3 A2,2 1 1 1 4,4",
(1, 3),
"M3,5 a1 1 0 0 0 3,3 A2 2 1 1 1 5,7",
),
# Cubic bezier
("m2,2 c1,-1 2,4 3,3 C4 4 5 5 6 6", (4, 2), "M6,4 c1,-1 2,4 3,3 C8,6 9,7 10,8"),
],
)
def test_path_move(path: str, move, expected_result: str):
actual = SVGPath(d=path).move(*move, inplace=True).d
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"path, expected_result",
[
# C/S
(
"M600,800 C625,700 725,700 750,800 S875,900 900,800",
"M600,800 C625,700 725,700 750,800 C775,900 875,900 900,800",
),
# Q/T
(
"M16,12 Q20,14 16,16 T16,20 L24,20 24,12",
"M16,12 Q20,14 16,16 Q12,18 16,20 L24,20 L24,12",
),
# S without preceding C
("S875,900 900,800", "C0,0 875,900 900,800"),
# T without preceding Q
("M16,12 T16,20", "M16,12 Q16,12 16,20"),
# C/s
(
"M600,800 C625,700 725,700 750,800 s55,55 200,100",
"M600,800 C625,700 725,700 750,800 C775,900 805,855 950,900",
),
# https://github.com/rsheeter/playground/issues/4
(
"m34 23.25c14.68 0 26.62 18.39 26.62 41s-11.94 41-26.62 41-26.62-18.39-26.62-41 11.94-41 26.62-41z",
"M34,23.25 c14.68,0 26.62,18.39 26.62,41 C60.62,86.86 48.68,105.25 34,105.25 C19.32,105.25 7.38,86.86 7.38,64.25 C7.38,41.64 19.32,23.25 34,23.25 z",
),
],
)
def test_expand_shorthand(path, expected_result):
actual = (
SVGPath(d=path).expand_shorthand(inplace=True).round_floats(3, inplace=True).d
)
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"shape, expected_bbox",
[
# plain rect
('<rect x="2" y="2" width="6" height="2" />', Rect(2, 2, 6, 2)),
# triangle
('<path d="m5,2 2.5,5 -5,0 z" />', Rect(2.5, 2, 5, 5)),
],
)
def test_bounding_box(shape, expected_bbox):
nsvg = svg(shape)
actual_bbox = nsvg.shapes()[0].bounding_box()
print(f"A: {actual_bbox}")
print(f"E: {expected_bbox}")
assert actual_bbox == expected_bbox
@pytest.mark.parametrize(
"path, expected_result",
[
(
"M-1,0 A1,1 0,0,0 1,0 z",
"M-1,0 C-1,0.552 -0.552,1 0,1 C0.552,1 1,0.552 1,0 z",
),
# relative coordinates
(
"M-1,0 a1,1 0,0,0 2,0 z",
"M-1,0 C-1,0.552 -0.552,1 0,1 C0.552,1 1,0.552 1,0 z",
),
# degenerate arcs as straight lines
("M-1,0 A0,1 0,0,0 0,1 A1,0 0,0,0 1,0 z", "M-1,0 L0,1 L1,0 z"),
# two arcs closing on each other in a circle; last point == first
(
"M4,64 A60 60 0 1 1 124,64 A60 60 0 1 1 4,64 z",
"M4,64 C4,30.863 30.863,4 64,4 C97.137,4 124,30.863 124,64 "
"C124,97.137 97.137,124 64,124 C30.863,124 4,97.137 4,64 z",
),
],
)
def test_arcs_to_cubics(path, expected_result):
actual = (
SVGPath(d=path).arcs_to_cubics(inplace=True).round_floats(3, inplace=True).d
)
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"path, transform, expected_result",
[
# translate
(
"M1,1 L2,1 L2,2 L1,2 Z",
Affine2D.identity().translate(2, 1),
"M3,2 L4,2 L4,3 L3,3 Z",
),
# same shape as above under a degenerate transform
("M1,1 L2,1 L2,2 L1,2 Z", Affine2D.degenerate(), "M0,0"),
],
)
def test_apply_basic_transform(path, transform, expected_result):
actual = SVGPath(d=path).apply_transform(transform).round_floats(3).d
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"path, expected_result",
[
(SVGRect(width=1, height=1), True),
# we see paths with move and nothing else in the wild
(SVGPath(d="M1,2"), False),
(SVGPath(d="M1,2 M3,4"), False),
# a straight line with only a fill (no stroke) and no area does not paint
(SVGPath(d="M1,2 L3,4 Z"), False),
# a straight line with a stroke does paint
(SVGPath(d="M1,2 L3,4", stroke="black"), True),
# a stroke with 0 width doesn't paint
(
SVGPath(d="M1,2 L3,4 L3,1 Z", fill="none", stroke="black", stroke_width=0),
False,
),
# a filled triangle does paint (no matter the invisible stroke)
(
SVGPath(d="M1,2 L3,4 L3,1 Z", fill="red", stroke="black", stroke_width=0),
True,
),
# we're explicitly told not to display, so we don't
(SVGPath(d="M1,1 L2,1 L2,2 L1,2 Z", display="none"), False),
(SVGPath(style="display:none;fill:#F5FAFC;", d="M1,1 L2,1 L2,2 L1,2 Z"), False),
],
)
def test_might_paint(path, expected_result):
assert path.might_paint() == expected_result, path
@pytest.mark.parametrize(
"shape, expected",
[
(
SVGRect(width=10, height=10, style="fill:red;opacity:0.5;"),
SVGRect(width=10, height=10, fill="red", opacity=0.5),
),
(
SVGPath(
d="M0,0 L10,0 L10,10 L0,10 Z",
style="stroke:blue;stroke-opacity:0.8;filter:url(#x);",
),
SVGPath(
d="M0,0 L10,0 L10,10 L0,10 Z",
stroke="blue",
stroke_opacity=0.8,
style="filter:url(#x);",
),
),
],
)
def test_apply_style_attribute(shape, expected):
actual = shape.apply_style_attribute()
assert actual == expected
assert shape.apply_style_attribute(inplace=True) == expected
@pytest.mark.parametrize(
"path, multiple_of, expected_result",
[
("m1,1 2,0 1,3", 0.1, "m1,1 2,0 1,3"),
# why a multiple that divides evenly into 1 is a good idea
("m1,1 2,0 1,3", 0.128, "m1.024,1.024 2.048,0 1.024,2.944"),
],
)
def test_round_multiple(path: str, multiple_of: float, expected_result: str):
actual = SVGPath(d=path).round_multiple(multiple_of, inplace=True).d
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
@pytest.mark.parametrize(
"shape, expected",
[
# neither fill nor stroke, unchanged
(
SVGPath(d="m1,1 2,0 1,3 z", fill="none", fill_opacity=0.0),
SVGPath(d="m1,1 2,0 1,3 z", fill="none", fill_opacity=0.0),
),
# both fill and stroke, also unchanged
(
SVGPath(
d="m1,1 2,0 1,3 z",
fill="red",
fill_opacity=0.5,
stroke="black",
stroke_opacity=0.8,
opacity=0.9,
),
SVGPath(
d="m1,1 2,0 1,3 z",
fill="red",
fill_opacity=0.5,
stroke="black",
stroke_opacity=0.8,
opacity=0.9,
),
),
# no stroke, only fill: merge and drop fill_opacity
(
SVGPath(
d="m1,1 2,0 1,3 z",
fill="red",
fill_opacity=0.5,
opacity=0.9,
),
SVGPath(
d="m1,1 2,0 1,3 z",
fill="red",
opacity=0.45, # 0.9 * 0.5
),
),
# no fill, only stroke: merge and drop stroke_opacity
(
SVGPath(
d="m1,1 2,0 1,3 z",
fill="none",
stroke="black",
stroke_opacity=0.3,
opacity=0.9,
),
SVGPath(
d="m1,1 2,0 1,3 z",
fill="none",
stroke="black",
opacity=0.27, # 0.9 * 0.3
),
),
],
)
def test_normalize_opacity(shape, expected):
assert shape.normalize_opacity() == expected
@pytest.mark.parametrize(
"el, view_box, expected",
[
# default linearGradient
(
etree.Element("linearGradient", {"id": "lg1"}),
Rect(0, 0, 10, 10),
SVGLinearGradient(
id="lg1",
x1=0.0,
y1=0.0,
x2=1.0,
y2=0.0,
gradientUnits="objectBoundingBox",
),
),
# default radialGradient
(
etree.Element("radialGradient", {"id": "rg2"}),
Rect(0, 0, 10, 10),
SVGRadialGradient(
id="rg2",
cx=0.5,
cy=0.5,
r=0.5,
fx=0.5,
fy=0.5,
fr=0.0,
gradientUnits="objectBoundingBox",
),
),
# radialGradient with gradientUnits="userSpaceOnUse" on square viewport
(
etree.Element(
"radialGradient", {"id": "rg3", "gradientUnits": "userSpaceOnUse"}
),
Rect(0, 0, 10, 10),
SVGRadialGradient(
id="rg3",
cx=5.0,
cy=5.0,
r=5.0,
fx=5.0,
fy=5.0,
fr=0.0,
gradientUnits="userSpaceOnUse",
),
),
# userSpaceOnUse & nonsquare viewport, default 'r' is 50% of normalized diagonal
(
etree.Element(
"radialGradient", {"id": "rg4", "gradientUnits": "userSpaceOnUse"}
),
Rect(0, 0, 10, 5),
SVGRadialGradient(
id="rg4",
cx=5.0,
cy=2.5,
r=0.5 * math.hypot(10, 5) / math.sqrt(2), # 3.952847
fx=5.0,
fy=2.5,
fr=0.0,
gradientUnits="userSpaceOnUse",
),
),
# fx/fy default to cx/cy when not explicitly set
(
etree.Element("radialGradient", {"id": "rg5", "cx": "20%", "cy": "40%"}),
Rect(0, 0, 10, 5),
SVGRadialGradient(
id="rg5",
cx=0.2,
cy=0.4,
r=0.5,
fx=0.2,
fy=0.4,
fr=0.0,
gradientUnits="objectBoundingBox",
),
),
# fx/fy explicitly set
(
etree.Element("radialGradient", {"id": "rg6", "fx": "20%", "fy": "40%"}),
Rect(0, 0, 10, 5),
SVGRadialGradient(
id="rg6",
cx=0.5,
cy=0.5,
r=0.5,
fx=0.2,
fy=0.4,
fr=0.0,
gradientUnits="objectBoundingBox",
),
),
# linearGradient with gradientTransform and spreadMethod
(
etree.Element(
"linearGradient",
{
"id": "lg7",
"gradientTransform": "matrix(1, 0.3, 0, 1, 0, 0)",
"spreadMethod": "reflect",
},
),
Rect(0, 0, 10, 10),
SVGLinearGradient(
id="lg7",
x1=0.0,
y1=0.0,
x2=1.0,
y2=0.0,
gradientTransform=Affine2D(1, 0.3, 0, 1, 0, 0),
spreadMethod="reflect",
),
),
],
)
def test_gradient_from_element(el, view_box, expected):
klass = type(expected)
assert klass.from_element(el, view_box) == expected
@pytest.mark.parametrize(
"path, expected_result",
[
("", ""),
("z", ""),
("M1,2", ""),
("M1,2 z", ""),
("m1,1 2,2 1,3 z M465,550 Z M1,2", "M1,1 l2,2 l1,3 z"),
("m1,1 2,2 1,3 z M465,550 Z M1,2", "M1,1 l2,2 l1,3 z"),
# the following path (from noto-emoji "foot" emoji_u1f9b6.svg) contains a
# subpath starting with relative 'm' that follows an empty subpath. Check
# the latter is removed and the subsequent 'M' is correctly absolutized.
# https://github.com/googlefonts/picosvg/issues/252#issuecomment-1026839746
(
"M0,0 c0.59,-0.63 1.39,-0.97 2.25,-0.97 m103.99,90.92 m-103.99,-93.92 c-3.52,0 -6.3,2.97 -6.07,6.48",
"M0,0 c0.59,-0.63 1.39,-0.97 2.25,-0.97 M2.25,-3.97 c-3.52,0 -6.3,2.97 -6.07,6.48",
),
],
)
def test_remove_empty_subpaths(path: str, expected_result: str):
actual = (
SVGPath(d=path)
.remove_empty_subpaths(inplace=True)
.round_floats(3, inplace=True)
.d
)
print(f"A: {actual}")
print(f"E: {expected_result}")
assert actual == expected_result
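# The path operations exercised above are chainable on a single SVGPath; a
# minimal sketch mirroring the first test_path_absolute case:
#
#   p = SVGPath(d="m1,1 2,0 1,3")
#   d = p.absolute(inplace=True).round_floats(3, inplace=True).d
#   assert d == "M1,1 L3,1 L4,4"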
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Colorscale(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sankey.link"
_path_str = "sankey.link.colorscale"
_valid_props = {"cmax", "cmin", "colorscale", "label", "name", "templateitemname"}
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# label
# -----
@property
def label(self):
"""
The label of the links to color based on their concentration
within a flow.
The 'label' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
cmax
Sets the upper bound of the color domain.
cmin
Sets the lower bound of the color domain.
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
label
The label of the links to color based on their
concentration within a flow.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
"""
def __init__(
self,
arg=None,
cmax=None,
cmin=None,
colorscale=None,
label=None,
name=None,
templateitemname=None,
**kwargs
):
"""
Construct a new Colorscale object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.link.Colorscale`
cmax
Sets the upper bound of the color domain.
cmin
Sets the lower bound of the color domain.
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
label
The label of the links to color based on their
concentration within a flow.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
Returns
-------
Colorscale
"""
super(Colorscale, self).__init__("colorscales")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.link.Colorscale
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.link.Colorscale`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("label", None)
_v = label if label is not None else _v
if _v is not None:
self["label"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
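# Minimal construction sketch (illustrative, not part of the generated module);
# properties can be given as keywords or bundled in a dict passed as `arg`:
#
#   from plotly.graph_objs.sankey.link import Colorscale
#   cs1 = Colorscale(label="flow-a", colorscale="Viridis", cmin=0, cmax=1)
#   cs2 = Colorscale({"label": "flow-b", "colorscale": "Blues"})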
|
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies its sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.8
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins, try running
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
# For testing a release from sonatype try this:
#
# python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/
#
import argparse
import tempfile
import os
from os.path import basename, dirname, isdir, join
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from http.client import HTTPConnection
def find_official_plugins():
plugins_dir = join(dirname(dirname(__file__)), 'plugins')
plugins = []
for plugin in os.listdir(plugins_dir):
if isdir(join(plugins_dir, plugin)):
plugins.append(plugin)
return plugins
DEFAULT_PLUGINS = find_official_plugins()
try:
JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
def run(command, env_vars=None):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
def sha1(file):
with open(file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def read_fully(file):
with open(file, encoding='utf-8') as f:
return f.read()
def wait_for_node_startup(es_dir, timeout=60, header={}):
print(' Waiting until node becomes available for at most %s seconds' % timeout)
for _ in range(timeout):
conn = None
try:
time.sleep(1)
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=1)
conn.request('GET', '/', headers=header)
res = conn.getresponse()
if res.status == 200:
return True
except IOError:
# that is ok, the node might not be there yet
pass
finally:
if conn:
conn.close()
return False
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
print('Downloading and verifying release %s from %s' % (version, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
print(' ' + '*' * 80)
# here we create a temp gpg home and download the release key into it as the only key;
# signature verification will then fail if the signing key is not in that keystore, and
# the executing host stays unmodified since we never import the key into the default keystore
gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
os.makedirs(gpg_home_dir, 0o700)
run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
url = '%s/%s' % (base_url, file)
print(' Downloading %s' % (url))
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
print(' Downloading %s' % (sha1_url))
urllib.request.urlretrieve(sha1_url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
expected = read_fully(checksum_file)
actual = sha1(artifact_path)
if expected != actual:
raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
gpg_url = ''.join([url, '.asc'])
gpg_file = artifact_path + ".asc"
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
print(' ' + '*' * 80)
print()
smoke_test_release(version, downloaded_files, hash, plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
def smoke_test_release(release, files, hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
print(' Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
plugin_names[plugin] = True
if 'x-pack' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
print(" Install dummy shield user")
run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
else:
headers = {}
print(' Starting elasticsearch daemon from [%s]' % es_dir)
try:
run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Escript.inline=true -Escript.stored=true -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true'
% (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
if not wait_for_node_startup(es_dir, header=headers):
print("elasticsearch logs:")
print('*' * 80)
logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
print(logs)
print('*' * 80)
raise RuntimeError('server didn\'t start up')
try: # we now GET / and /_nodes to fetch basic info like the build hash and the installed plugins
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=20)
conn.request('GET', '/', headers=headers)
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers)
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'].strip(), False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.close()
finally:
pid_path = os.path.join(es_dir, 'es-smoke.pid')
if os.path.exists(pid_path): # try reading the pid and kill the node
pid = int(read_fully(pid_path))
os.kill(pid, signal.SIGKILL)
shutil.rmtree(tmp_dir)
print(' ' + '*' * 80)
print()
def parse_list(string):
return [x.strip() for x in string.split(',')]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch Version to smoke-tests', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
help='The hash of the unified release')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--fetch_url', '-u', dest='url', default=None,
help='Fetched from the specified URL')
parser.set_defaults(hash=None)
parser.set_defaults(plugins=[])
parser.set_defaults(version=None)
parser.set_defaults(url=None)
args = parser.parse_args()
plugins = args.plugins
version = args.version
hash = args.hash
url = args.url
files = [ x % {'version': version} for x in [
'elasticsearch-%(version)s.tar.gz',
'elasticsearch-%(version)s.zip',
'elasticsearch-%(version)s.deb',
'elasticsearch-%(version)s.rpm'
]]
verify_java_version('1.8')
if url:
download_url = url
else:
download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
|
|
# -*- coding: utf-8 -*-
from EXOSIMS.Prototypes.OpticalSystem import OpticalSystem
import astropy.units as u
import numpy as np
import scipy.stats as st
import scipy.optimize as opt
from numpy import nan
class Nemati(OpticalSystem):
"""Nemati Optical System class
This class contains all variables and methods necessary to perform
Optical System Module calculations in exoplanet mission simulation using
the model from Nemati 2014.
Args:
specs:
user specified values
"""
def __init__(self, **specs):
OpticalSystem.__init__(self, **specs)
def calc_intTime(self, TL, sInds, fZ, fEZ, dMag, WA, mode, TK=None):
"""Finds integration times of target systems for a specific observing
mode (imaging or characterization), based on Nemati 2014 (SPIE).
Args:
TL (TargetList module):
TargetList class object
sInds (integer ndarray):
Integer indices of the stars of interest
fZ (astropy Quantity array):
Surface brightness of local zodiacal light in units of 1/arcsec2
fEZ (astropy Quantity array):
Surface brightness of exo-zodiacal light in units of 1/arcsec2
dMag (float ndarray):
Differences in magnitude between planets and their host star
WA (astropy Quantity array):
Working angles of the planets of interest in units of arcsec
mode (dict):
Selected observing mode
TK (TimeKeeping object):
Optional TimeKeeping object (default None), used to model detector
degradation effects where applicable.
Returns:
intTime (astropy Quantity array):
Integration times in units of day
"""
# electron counts
C_p, C_b, C_sp = self.Cp_Cb_Csp(TL, sInds, fZ, fEZ, dMag, WA, mode, TK=TK)
# get SNR threshold
SNR = mode['SNR']
# calculate integration time based on Nemati 2014
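# Nemati (2014) SNR relation solved for the integration time t (as implemented below):
#   t = SNR^2 * C_b / (C_p^2 - (SNR*C_sp)^2)   without an occulter (residual speckle floor)
#   t = SNR^2 * C_b / C_p^2                    with an occulter (C_sp term dropped)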
with np.errstate(divide='ignore', invalid='ignore'):
if mode['syst']['occulter'] is False:
intTime = np.true_divide(SNR**2.*C_b, (C_p**2. - (SNR*C_sp)**2.))
else:
intTime = np.true_divide(SNR**2.*C_b, (C_p**2.))
# infinite and NAN are set to zero
intTime[np.isinf(intTime) | np.isnan(intTime)] = 0.*u.d
# negative values are set to zero
intTime[intTime.value < 0.] = 0.*u.d
return intTime.to('day')
def calc_dMag_per_intTime(self, intTimes, TL, sInds, fZ, fEZ, WA, mode, C_b=None, C_sp=None, TK=None):
"""Finds achievable dMag for one integration time per star in the input
list at one working angle.
Args:
intTimes (astropy Quantity array):
Integration times
TL (TargetList module):
TargetList class object
sInds (integer ndarray):
Integer indices of the stars of interest
fZ (astropy Quantity array):
Surface brightness of local zodiacal light for each star in sInds
in units of 1/arcsec2
fEZ (astropy Quantity array):
Surface brightness of exo-zodiacal light for each star in sInds
in units of 1/arcsec2
WA (astropy Quantity array):
Working angle for each star in sInds in units of arcsec
mode (dict):
Selected observing mode
C_b (astropy Quantity array):
Background noise electron count rate in units of 1/s (optional)
C_sp (astropy Quantity array):
Residual speckle spatial structure (systematic error) in units of 1/s
(optional)
TK (TimeKeeping object):
Optional TimeKeeping object (default None), used to model detector
degradation effects where applicable.
Returns:
dMag (ndarray):
Achievable dMag for given integration time and working angle
"""
# cast sInds, WA, fZ, fEZ, and intTimes to arrays
sInds = np.array(sInds, ndmin=1, copy=False)
WA = np.array(WA.value, ndmin=1)*WA.unit
fZ = np.array(fZ.value, ndmin=1)*fZ.unit
fEZ = np.array(fEZ.value, ndmin=1)*fEZ.unit
intTimes = np.array(intTimes.value, ndmin=1)*intTimes.unit
assert len(intTimes) == len(sInds), "intTimes and sInds must be same length"
assert len(fEZ) == len(sInds), "fEZ must be an array of length len(sInds)"
assert len(fZ) == len(sInds), "fZ must be an array of length len(sInds)"
assert len(WA) == len(sInds), "WA must be an array of length len(sInds)"
# get scienceInstrument and starlightSuppressionSystem
inst = mode['inst']
syst = mode['syst']
# get mode wavelength
lam = mode['lam']
# get mode fractional bandwidth
BW = mode['BW']
# get mode bandwidth (including any IFS spectral resolving power)
deltaLam = lam/inst['Rs'] if 'spec' in inst['name'].lower() else mode['deltaLam']
# get star magnitude
mV = TL.starMag(sInds, lam)
# get signal to noise ratio
SNR = mode['SNR']
# spectral flux density = F0 * A * Dlam * QE * T (attenuation due to optics)
attenuation = inst['optics']*syst['optics']
F_0 = TL.starF0(sInds,mode)
C_F0 = F_0*self.pupilArea*deltaLam*inst['QE'](lam)*attenuation
# get core_thruput
core_thruput = syst['core_thruput'](lam, WA)
# calculate planet delta magnitude
dMagLim = np.zeros(len(sInds)) + TL.Completeness.dMagLim
if (C_b is None) or (C_sp is None):
_, C_b, C_sp = self.Cp_Cb_Csp(TL, sInds, fZ, fEZ, dMagLim, WA, mode, TK=TK)
intTimes[intTimes.value < 0.] = 0.
tmp = np.nan_to_num(C_b/intTimes)
assert all(tmp + C_sp**2. >= 0.), 'Invalid value in Nemati sqrt'
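# invert the Nemati SNR relation at fixed integration time t (computed below):
#   dMag = -2.5*log10( SNR*sqrt(C_b/t + C_sp^2) / (C_F0 * 10^(-0.4*mV) * core_thruput * PCeff) )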
dMag = -2.5*np.log10((SNR*np.sqrt(tmp + C_sp**2.)/(C_F0*10.0**(-0.4*mV)*core_thruput*inst['PCeff'])).decompose().value)
dMag[np.where(np.isnan(dMag))[0]] = 0. # this is an error catch. if intTimes = 0, the dMag becomes infinite
return dMag
def ddMag_dt(self, intTimes, TL, sInds, fZ, fEZ, WA, mode, C_b=None, C_sp=None, TK=None):
"""Finds derivative of achievable dMag with respect to integration time
Args:
intTimes (astropy Quantity array):
Integration times
TL (TargetList module):
TargetList class object
sInds (integer ndarray):
Integer indices of the stars of interest
fZ (astropy Quantity array):
Surface brightness of local zodiacal light for each star in sInds
in units of 1/arcsec2
fEZ (astropy Quantity array):
Surface brightness of exo-zodiacal light for each star in sInds
in units of 1/arcsec2
WA (astropy Quantity array):
Working angle for each star in sInds in units of arcsec
mode (dict):
Selected observing mode
C_b (astropy Quantity array):
Background noise electron count rate in units of 1/s (optional)
C_sp (astropy Quantity array):
Residual speckle spatial structure (systematic error) in units of 1/s
(optional)
TK (TimeKeeping object):
Optional TimeKeeping object (default None), used to model detector
degradation effects where applicable.
Returns:
ddMagdt (ndarray):
Derivative of achievable dMag with respect to integration time
"""
# cast sInds, WA, fZ, fEZ, and intTimes to arrays
sInds = np.array(sInds, ndmin=1, copy=False)
WA = np.array(WA.value, ndmin=1)*WA.unit
fZ = np.array(fZ.value, ndmin=1)*fZ.unit
fEZ = np.array(fEZ.value, ndmin=1)*fEZ.unit
intTimes = np.array(intTimes.value, ndmin=1)*intTimes.unit
assert len(intTimes) == len(sInds), "intTimes and sInds must be same length"
assert len(fEZ) == len(sInds), "fEZ must be an array of length len(sInds)"
assert len(fZ) == len(sInds), "fZ must be an array of length len(sInds)"
assert len(WA) == len(sInds), "WA must be an array of length len(sInds)"
dMagLim = np.zeros(len(sInds)) + 25.
if (C_b is None) or (C_sp is None):
_, C_b, C_sp = self.Cp_Cb_Csp(TL, sInds, fZ, fEZ, dMagLim, WA, mode, TK=TK)
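# differentiating dMag(t) = -2.5*log10(SNR*sqrt(C_b/t + C_sp^2)/K) with respect to t gives
#   d(dMag)/dt = 2.5/(2*ln(10)) * C_b / (C_b*t + (C_sp*t)^2)
# which is what is evaluated below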
ddMagdt = 2.5/(2.0*np.log(10.0))*(C_b/(C_b*intTimes + (C_sp*intTimes)**2.)).to('1/s').value
return ddMagdt/u.s
|
|
#www.stuffaboutcode.com
#Raspberry Pi, Minecraft - Create 3D Model from Obj file
# Version 2 - draws complete faces rather than wireframes and uses materials
#import the minecraft.py module from the minecraft directory
import sys
sys.path.append("../")
import mcpi.minecraft as minecraft
#import minecraft block module
import mcpi.block as block
#import time, so delays can be used
import time
#import datetime, to get the time!
import datetime
# class to create 3d filled polygons
class MinecraftDrawing:
def __init__(self, mc):
self.mc = mc
# draw point
def drawPoint3d(self, x, y, z, blockType, blockData=None):
self.mc.setBlock(x,y,z,blockType,blockData)
#print "x = " + str(x) + ", y = " + str(y) + ", z = " + str(z)
# draws a face, when passed a collection of vertices which make up a polygon
def drawFace(self, vertices, blockType, blockData=None):
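# fill strategy: trace the outline of the face from its edges, sort the outline
# points, then draw lines between consecutive points so the interior gets filled in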
# get the edges of the face
edgesVertices = []
# persist first vertex
firstVertex = vertices[0]
# loop through vertices and get edges
vertexCount = 0
for vertex in vertices:
vertexCount+=1
if vertexCount > 1:
# got 2 vertices, get the points for the edge
edgesVertices = edgesVertices + self.getLine(lastVertex.x, lastVertex.y, lastVertex.z, vertex.x, vertex.y, vertex.z)
#print "x = " + str(lastVertex.x) + ", y = " + str(lastVertex.y) + ", z = " + str(lastVertex.z) + " x2 = " + str(vertex.x) + ", y2 = " + str(vertex.y) + ", z2 = " + str(vertex.z)
# persist the last vertex found
lastVertex = vertex
# get edge between the last and first vertices
edgesVertices = edgesVertices + self.getLine(lastVertex.x, lastVertex.y, lastVertex.z, firstVertex.x, firstVertex.y, firstVertex.z)
# sort edges vertices
def keyX( point ): return point.x
def keyY( point ): return point.y
def keyZ( point ): return point.z
edgesVertices.sort( key=keyZ )
edgesVertices.sort( key=keyY )
edgesVertices.sort( key=keyX )
# not very performant but won't leave gaps in complex models
for vertex in edgesVertices:
vertexCount+=1
# got 2 vertices, draw lines between them
if (vertexCount > 1):
self.drawLine(lastVertex.x, lastVertex.y, lastVertex.z, vertex.x, vertex.y, vertex.z, blockType, blockData)
#print "x = " + str(lastVertex.x) + ", y = " + str(lastVertex.y) + ", z = " + str(lastVertex.z) + " x2 = " + str(vertex.x) + ", y2 = " + str(vertex.y) + ", z2 = " + str(vertex.z)
# persist the last vertex found
lastVertex = vertex
# draws all the points in a collection of vertices with a block
def drawVertices(self, vertices, blockType, blockData=None):
for vertex in vertices:
self.drawPoint3d(vertex.x, vertex.y, vertex.z, blockType, blockData)
# draw line
def drawLine(self, x1, y1, z1, x2, y2, z2, blockType, blockData):
self.drawVertices(self.getLine(x1, y1, z1, x2, y2, z2), blockType, blockData)
# returns all the points on a line between 2 points (Bresenham-style stepping along the dominant axis)
def getLine(self, x1, y1, z1, x2, y2, z2):
# return maximum of 2 values
def MAX(a,b):
if a > b: return a
else: return b
# return step
def ZSGN(a):
if a < 0: return -1
elif a > 0: return 1
elif a == 0: return 0
# list for vertices
vertices = []
# if the 2 points are the same, return a single vertex
if (x1 == x2 and y1 == y2 and z1 == z2):
vertices.append(minecraft.Vec3(x1, y1, z1))
# else get all points in edge
else:
dx = x2 - x1
dy = y2 - y1
dz = z2 - z1
ax = abs(dx) << 1
ay = abs(dy) << 1
az = abs(dz) << 1
sx = ZSGN(dx)
sy = ZSGN(dy)
sz = ZSGN(dz)
x = x1
y = y1
z = z1
# x dominant
if (ax >= MAX(ay, az)):
yd = ay - (ax >> 1)
zd = az - (ax >> 1)
loop = True
while(loop):
vertices.append(minecraft.Vec3(x, y, z))
if (x == x2):
loop = False
if (yd >= 0):
y += sy
yd -= ax
if (zd >= 0):
z += sz
zd -= ax
x += sx
yd += ay
zd += az
# y dominant
elif (ay >= MAX(ax, az)):
xd = ax - (ay >> 1)
zd = az - (ay >> 1)
loop = True
while(loop):
vertices.append(minecraft.Vec3(x, y, z))
if (y == y2):
loop=False
if (xd >= 0):
x += sx
xd -= ay
if (zd >= 0):
z += sz
zd -= ay
y += sy
xd += ax
zd += az
# z dominant
elif(az >= MAX(ax, ay)):
xd = ax - (az >> 1)
yd = ay - (az >> 1)
loop = True
while(loop):
vertices.append(minecraft.Vec3(x, y, z))
if (z == z2):
loop=False
if (xd >= 0):
x += sx
xd -= az
if (yd >= 0):
y += sy
yd -= az
z += sz
xd += ax
yd += ay
return vertices
def load_obj(filename, defaultBlock, materials) :
V = [] #vertex
T = [] #texcoords
N = [] #normals
F = [] #face indices
MF = [] #materials to faces
currentMaterial = defaultBlock
fh = open(filename)
for line in fh :
if line[0] == '#' : continue
line = line.strip().split(' ')
if line[0] == 'v' : #vertex
V.append(line[1:])
elif line[0] == 'vt' : #tex-coord
T.append(line[1:])
elif line[0] == 'vn' : #normal vector
N.append(line[1:])
elif line[0] == 'f' : #face
face = line[1:]
for i in range(0, len(face)) :
face[i] = face[i].split('/')
# OBJ indices are 1 based not 0 based, hence the -1
# convert indices to integers
for j in range(0, len(face[i])) :
if face[i][j] != "":
face[i][j] = int(face[i][j]) - 1
#append the material currently in use to the face
F.append(face)
MF.append(currentMaterial)
elif line[0] == 'usemtl': # material
usemtl = line[1]
if (usemtl in materials.keys()):
currentMaterial = materials[usemtl]
else:
currentMaterial = defaultBlock
print "Warning: Couldn't find '" + str(usemtl) + "' in materials using default"
return V, T, N, F, MF
# strips the x,y,z co-ords from a vertex line, scales appropriately, rounds and converts to int
def getVertexXYZ(vertexLine, scale, startCoord, swapYZ):
# convert, round and scale
x = int((float(vertexLine[0]) * scale) + 0.5)
y = int((float(vertexLine[1]) * scale) + 0.5)
z = int((float(vertexLine[2]) * scale) + 0.5)
# add startCoord to x,y,z
x = x + startCoord.x
y = y + startCoord.y
z = z + startCoord.z
# swap y and z coord if needed
if swapYZ == True:
swap = y
y = z
z = swap
return x, y, z
# main program
if __name__ == "__main__":
print datetime.datetime.now()
#Connect to minecraft by creating the minecraft object
# - minecraft needs to be running and in a game
mc = minecraft.Minecraft.create(address="199.96.85.3")
#Create minecraft drawing class
mcDrawing = MinecraftDrawing(mc)
"""
Load objfile and set constants
COORDSSCALE = factor to scale the co-ords by
STARTCOORD = where to start the model, the relative position 0
CLEARAREA1/2 = 2 points; the area between them is cleared to make room for the model
SWAPYZ = True to swap the Y and Z dimensions
MATERIALS = a dictionary object which maps materials in the obj file to blocks in minecraft
DEFAULTBLOCK = the default type of block to build the model in, used if a material can't be found
"""
# Shuttle
COORDSSCALE = 6
STARTCOORD = minecraft.Vec3(-60,0,20)
CLEARAREA1 = minecraft.Vec3(-30, 5, -30)
CLEARAREA2 = minecraft.Vec3(-90, 50, 30)
DEFAULTBLOCK = [block.WOOL.id,0]
MATERIALS = {"glass": [block.GLASS.id, 0],
"bone": [block.WOOL.id, 0],
"fldkdkgrey": [block.WOOL.id, 7],
"redbrick": [block.WOOL.id, 14],
"black": [block.WOOL.id, 15],
"brass": [block.WOOL.id, 1],
"dkdkgrey": [block.WOOL.id, 7]}
SWAPYZ = True
vertices,textures,normals,faces,materials = load_obj("shuttle.obj", DEFAULTBLOCK, MATERIALS)
# Skyscraper
#COORDSSCALE = 1.4
#STARTCOORD = minecraft.Vec3(0,10,15)
#CLEARAREA1 = minecraft.Vec3(-30, -3, -15)
#CLEARAREA2 = minecraft.Vec3(30, 65, 35)
#DEFAULTBLOCK = [block.IRON_BLOCK, 0]
#MATERIALS = {}
#SWAPYZ = False
#vertices,textures,normals,faces,materials = load_obj("skyscraper.obj", DEFAULTBLOCK, MATERIALS)
# Farmhouse
#COORDSSCALE = 1
#STARTCOORD = minecraft.Vec3(10,0,15)
#CLEARAREA1 = minecraft.Vec3(-30, -3, -15)
#CLEARAREA2 = minecraft.Vec3(30, 65, 35)
#DEFAULTBLOCK = [block.IRON_BLOCK, 0]
#MATERIALS = {}
#SWAPYZ = False
#vertices,textures,normals,faces,materials = load_obj("farmhouse.obj", DEFAULTBLOCK, MATERIALS)
print "obj file loaded"
#Post a message to the minecraft chat window
mc.postToChat("Started 3d render...")
# clear a suitably large area
mc.setBlocks(CLEARAREA1.x, CLEARAREA1.y, CLEARAREA1.z, CLEARAREA2.x, CLEARAREA2.y, CLEARAREA2.z, block.AIR)
time.sleep(10)
faceCount = 0
# loop through faces
for face in faces:
faceVertices = []
# loop through the vertices in the face, then call the drawFace function
for vertex in face:
#strip co-ords from vertex line
vertexX, vertexY, vertexZ = getVertexXYZ(vertices[vertex[0]], COORDSSCALE, STARTCOORD, SWAPYZ)
faceVertices.append(minecraft.Vec3(vertexX,vertexY,vertexZ))
# draw the face
mcDrawing.drawFace(faceVertices, materials[faceCount][0], materials[faceCount][1])
faceCount = faceCount + 1
mc.postToChat("Model complete.")
mc.player.setPos(STARTCOORD.x+20, STARTCOORD.y+30, STARTCOORD.z)
print datetime.datetime.now()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from contextlib import contextmanager
from distutils.version import LooseVersion
import re
import numpy as np
from numpy.testing import assert_array_equal
from xarray.core.duck_array_ops import allclose_or_equiv
import pytest
from xarray.core import utils
from xarray.core.pycompat import PY3
from xarray.testing import assert_equal, assert_identical, assert_allclose
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import scipy
has_scipy = True
except ImportError:
has_scipy = False
try:
import pydap.client
has_pydap = True
except ImportError:
has_pydap = False
try:
import netCDF4
has_netCDF4 = True
except ImportError:
has_netCDF4 = False
try:
import h5netcdf
has_h5netcdf = True
except ImportError:
has_h5netcdf = False
try:
import Nio
has_pynio = True
except ImportError:
has_pynio = False
try:
import dask.array
import dask
dask.set_options(get=dask.get)
has_dask = True
except ImportError:
has_dask = False
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
try:
import bottleneck
if LooseVersion(bottleneck.__version__) < LooseVersion('1.0'):
raise ImportError('Fall back to numpy')
has_bottleneck = True
except ImportError:
has_bottleneck = False
try:
import rasterio
has_rasterio = True
except ImportError:
has_rasterio = False
try:
import pathlib
has_pathlib = True
except ImportError:
try:
import pathlib2
has_pathlib = True
except ImportError:
has_pathlib = False
# slightly simpler construction than the full functions.
# Generally `pytest.importorskip('package')` inline is even easier
requires_matplotlib = pytest.mark.skipif(
not has_matplotlib, reason='requires matplotlib')
requires_scipy = pytest.mark.skipif(
not has_scipy, reason='requires scipy')
requires_pydap = pytest.mark.skipif(
not has_pydap, reason='requires pydap')
requires_netCDF4 = pytest.mark.skipif(
not has_netCDF4, reason='requires netCDF4')
requires_h5netcdf = pytest.mark.skipif(
not has_h5netcdf, reason='requires h5netcdf')
requires_pynio = pytest.mark.skipif(
not has_pynio, reason='requires pynio')
requires_scipy_or_netCDF4 = pytest.mark.skipif(
not has_scipy and not has_netCDF4, reason='requires scipy or netCDF4')
requires_dask = pytest.mark.skipif(
not has_dask, reason='requires dask')
requires_bottleneck = pytest.mark.skipif(
not has_bottleneck, reason='requires bottleneck')
requires_rasterio = pytest.mark.skipif(
not has_rasterio, reason='requires rasterio')
requires_pathlib = pytest.mark.skipif(
not has_pathlib, reason='requires pathlib / pathlib2'
)
try:
_SKIP_FLAKY = not pytest.config.getoption("--run-flaky")
_SKIP_NETWORK_TESTS = not pytest.config.getoption("--run-network-tests")
except ValueError:
# Can't get config from pytest, e.g., because xarray is installed instead
# of being run from a development version (and hence conftest.py is not
# available). Don't run flaky tests.
_SKIP_FLAKY = True
_SKIP_NETWORK_TESTS = True
flaky = pytest.mark.skipif(
_SKIP_FLAKY, reason="set --run-flaky option to run flaky tests")
network = pytest.mark.skipif(
_SKIP_NETWORK_TESTS,
reason="set --run-network-tests option to run tests requiring an "
"internet connection")
class TestCase(unittest.TestCase):
if PY3:
# Python 3 assertCountEqual is roughly equivalent to Python 2
# assertItemsEqual
def assertItemsEqual(self, first, second, msg=None):
return self.assertCountEqual(first, second, msg)
@contextmanager
def assertWarns(self, message):
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', message)
yield
assert len(w) > 0
assert any(message in str(wi.message) for wi in w)
def assertVariableEqual(self, v1, v2):
assert_equal(v1, v2)
def assertVariableIdentical(self, v1, v2):
assert_identical(v1, v2)
def assertVariableAllClose(self, v1, v2, rtol=1e-05, atol=1e-08):
assert_allclose(v1, v2, rtol=rtol, atol=atol)
def assertVariableNotEqual(self, v1, v2):
assert not v1.equals(v2)
def assertArrayEqual(self, a1, a2):
assert_array_equal(a1, a2)
def assertEqual(self, a1, a2):
assert a1 == a2 or (a1 != a1 and a2 != a2)
def assertAllClose(self, a1, a2, rtol=1e-05, atol=1e-8):
assert allclose_or_equiv(a1, a2, rtol=rtol, atol=atol)
def assertDatasetEqual(self, d1, d2):
assert_equal(d1, d2)
def assertDatasetIdentical(self, d1, d2):
assert_identical(d1, d2)
def assertDatasetAllClose(self, d1, d2, rtol=1e-05, atol=1e-08):
assert_allclose(d1, d2, rtol=rtol, atol=atol)
def assertCoordinatesEqual(self, d1, d2):
assert_equal(d1, d2)
def assertDataArrayEqual(self, ar1, ar2):
assert_equal(ar1, ar2)
def assertDataArrayIdentical(self, ar1, ar2):
assert_identical(ar1, ar2)
def assertDataArrayAllClose(self, ar1, ar2, rtol=1e-05, atol=1e-08):
assert_allclose(ar1, ar2, rtol=rtol, atol=atol)
@contextmanager
def raises_regex(error, pattern):
with pytest.raises(error) as excinfo:
yield
message = str(excinfo.value)
if not re.match(pattern, message):
raise AssertionError('exception %r did not match pattern %s'
% (excinfo.value, pattern))
class UnexpectedDataAccess(Exception):
pass
class InaccessibleArray(utils.NDArrayMixin):
def __init__(self, array):
self.array = array
def __getitem__(self, key):
raise UnexpectedDataAccess("Tried accessing data")
class ReturnItem(object):
def __getitem__(self, key):
return key
def source_ndarray(array):
"""Given an ndarray, return the base object which holds its memory, or the
object itself.
"""
base = getattr(array, 'base', np.asarray(array).base)
if base is None:
base = array
return base
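# minimal usage sketch (illustrative only, not part of the test helpers): a slice
# such as ``x[1:]`` is a view onto ``x``, so ``source_ndarray(x[1:]) is x``, while a
# freshly allocated array owns its memory and ``source_ndarray(x) is x``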
|
|
""" Python back-end. Generates python code from ir-code. """
import contextlib
import io
import logging
import time
from ... import ir
from ...graph import relooper
def literal_label(lit):
""" Invent a nice label name for the given literal """
return "{}_{}".format(lit.function.name, lit.name)
def ir_to_python(ir_modules, f, reporter=None):
""" Convert ir-code to python code """
if reporter:
f2 = f
f = io.StringIO()
generator = IrToPythonCompiler(f, reporter)
generator.header()
for ir_module in ir_modules:
if not isinstance(ir_module, ir.Module):
raise TypeError("ir_modules must be list of ir.Module")
generator.generate(ir_module)
if reporter:
source_code = f.getvalue()
f2.write(source_code)
reporter.dump_source("Python code", source_code)
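# Minimal usage sketch (``my_module`` is a placeholder for an already constructed
# ir.Module; the generated output is plain Python that can be written to a file or exec()'d):
#
#   import io
#   buf = io.StringIO()
#   ir_to_python([my_module], buf)
#   print(buf.getvalue())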
class IrToPythonCompiler:
""" Can generate python script from ir-code """
logger = logging.getLogger("ir2py")
def __init__(self, output_file, reporter):
self.output_file = output_file
self.reporter = reporter
self.stack_size = 0
self.func_ptr_map = {}
self._level = 0
def print(self, level, *args):
""" Print args to current file with level indents """
print(" " * level, end="", file=self.output_file)
print(*args, file=self.output_file)
def _indent(self):
self._level += 1
def _dedent(self):
self._level -= 1
@contextlib.contextmanager
def indented(self):
self._indent()
yield
self._dedent()
def emit(self, txt):
""" Emit python code at current indentation level """
self.print(self._level, txt)
def header(self):
""" Emit a header suitable for in a python file """
self.emit("# Automatically generated on {}".format(time.ctime()))
self.emit("# Generator {}".format(__file__))
self.emit("")
self.emit("import struct")
self.emit("import math")
self.emit("")
self.emit("_irpy_heap = bytearray()")
self.emit("_irpy_stack = bytearray()")
self.emit("HEAP_START = 0x10000000")
self.emit("_irpy_func_pointers = list()")
self.emit("_irpy_externals = {}")
self.emit("")
self.generate_builtins()
self.generate_memory_builtins()
def generate_memory_builtins(self):
self.emit("def read_mem(address, size):")
with self.indented():
self.emit("mem, address = _irpy_get_memory(address)")
self.emit("assert address+size <= len(mem), str(hex(address))")
self.emit("return mem[address:address+size]")
self.emit("")
self.emit("def write_mem(address, data):")
with self.indented():
self.emit("mem, address = _irpy_get_memory(address)")
self.emit("size = len(data)")
self.emit("assert address+size <= len(mem), str(hex(address))")
self.emit("mem[address:address+size] = data")
self.emit("")
self.emit("def _irpy_get_memory(v):")
self.print(1, "if v >= HEAP_START:")
self.print(2, "return _irpy_heap, v - HEAP_START")
self.print(1, "else:")
self.print(2, "return _irpy_stack, v")
self.emit("")
self.emit("def _irpy_heap_top():")
with self.indented():
self.emit("return len(_irpy_heap) + HEAP_START")
self.emit("")
# Generate load functions:
foo = [
(ir.f64, "d", 8),
(ir.f32, "f", 4),
(ir.i64, "q", 8),
(ir.u64, "Q", 8),
(ir.i32, "i", 4),
(ir.u32, "I", 4),
(ir.ptr, "i", 4),
(ir.i16, "h", 2),
(ir.u16, "H", 2),
(ir.i8, "b", 1),
(ir.u8, "B", 1),
]
for ty, fmt, size in foo:
# Generate load helpers:
self.emit("def load_{}(p):".format(ty.name))
self.print(
1,
'return struct.unpack("{0}", read_mem(p, {1}))[0]'.format(
fmt, size
),
)
self.emit("")
# Generate store helpers:
self.emit("def store_{}(v, p):".format(ty.name))
self.print(1, 'write_mem(p, struct.pack("{0}", v))'.format(fmt))
self.emit("")
def generate_builtins(self):
# Wrap type helper:
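# (the emitted helper wraps values to the target integer width, e.g. value=255 with
#  bits=8, signed=True comes back as -1, and value=256 with bits=8 wraps to 0)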
self.emit("def _irpy_correct(value, bits, signed):")
self.print(1, "base = 1 << bits")
self.print(1, "value %= base")
self.print(1, "if signed and value.bit_length() == bits:")
self.print(2, "return value - base")
self.print(1, "else:")
self.print(2, "return value")
self.emit("")
# More C like integer divide
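# (C semantics truncate toward zero: the emitted _irpy_idiv(-7, 2) returns -3,
#  whereas Python's floor division gives -7 // 2 == -4)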
self.emit("def _irpy_idiv(x, y):")
with self.indented():
self.emit("sign = False")
self.emit("if x < 0: x = -x; sign = not sign")
self.emit("if y < 0: y = -y; sign = not sign")
self.emit("v = x // y")
self.emit("return -v if sign else v")
self.emit("")
# More c like remainder:
# Note: sign of y is not relevant for result sign
self.emit("def _irpy_irem(x, y):")
self.print(1, "if x < 0:")
self.print(2, "x = -x")
self.print(2, "sign = True")
self.print(1, "else:")
self.print(2, "sign = False")
self.print(1, "if y < 0: y = -y")
self.print(1, "v = x % y")
self.print(1, "return -v if sign else v")
self.emit("")
# More c like shift left:
self.emit("def _irpy_ishl(x, amount, bits):")
with self.indented():
self.emit("amount = amount % bits")
self.emit("return x << amount")
self.emit("")
# More c like shift right:
self.emit("def _irpy_ishr(x, amount, bits):")
with self.indented():
self.emit("amount = amount % bits")
self.emit("return x >> amount")
self.emit("")
self.emit("def _irpy_alloca(amount):")
with self.indented():
self.emit("ptr = len(_irpy_stack)")
self.emit("_irpy_stack.extend(bytes(amount))")
self.emit("return (ptr, amount)")
self.emit("")
self.emit("def _irpy_free(amount):")
self.print(1, "for _ in range(amount):")
self.print(2, "_irpy_stack.pop()")
self.emit("")
def generate(self, ir_mod):
""" Write ir-code to file f """
self.mod_name = ir_mod.name
self.literals = []
self.emit("")
self.emit("# Module {}".format(ir_mod.name))
# Allocate room for global variables:
for var in ir_mod.variables:
self.emit("{} = _irpy_heap_top()".format(var.name))
if var.value:
for part in var.value:
if isinstance(part, bytes):
for byte in part:
self.emit("_irpy_heap.append({})".format(byte))
else: # pragma: no cover
raise NotImplementedError()
else:
self.emit("_irpy_heap.extend(bytes({}))".format(var.amount))
# Generate functions:
for function in ir_mod.functions:
self.generate_function(function)
# emit labeled literals:
for lit in self.literals:
self.emit("{} = _irpy_heap_top()".format(literal_label(lit)))
for val in lit.data:
self.emit("_irpy_heap.append({})".format(val))
self.emit("")
def generate_function(self, ir_function):
""" Generate a function to python code """
self.stack_size = 0
args = ",".join(a.name for a in ir_function.arguments)
self.emit("def {}({}):".format(ir_function.name, args))
with self.indented():
try:
# TODO: remove this to enable shape style:
raise ValueError
shape, _rmap = relooper.find_structure(ir_function)
src = io.StringIO()
relooper.print_shape(shape, file=src)
self.reporter.dump_source(ir_function.name, src.getvalue())
self._rmap = _rmap
self._shape_style = True
self.generate_shape(shape)
except ValueError:
self.logger.debug("Falling back to block-switch-style")
# Fall back to block switch stack!
self._shape_style = False
self.generate_function_fallback(ir_function)
# Register function for function pointers:
self.emit("_irpy_func_pointers.append({})".format(ir_function.name))
self.func_ptr_map[ir_function] = len(self.func_ptr_map)
self.emit("")
def generate_shape(self, shape):
""" Generate python code for a shape structured program """
if isinstance(shape, relooper.BasicShape):
self.generate_block(self._rmap[shape.content])
elif isinstance(shape, relooper.SequenceShape):
if shape.shapes:
for sub_shape in shape.shapes:
self.generate_shape(sub_shape)
else:
self.emit("pass")
elif isinstance(shape, relooper.IfShape):
blk = self._rmap[shape.content]
self.generate_block(blk)
with self.indented():
if shape.yes_shape:
self.generate_shape(shape.yes_shape)
else:
self.emit("pass")
if shape.no_shape:
self.emit("else:")
with self.indented():
self.generate_shape(shape.no_shape)
elif isinstance(shape, relooper.LoopShape):
self.emit("while True:")
with self.indented():
self.generate_shape(shape.body)
elif isinstance(shape, relooper.ContinueShape):
self.emit("continue")
elif isinstance(shape, relooper.BreakShape):
self.emit("break")
elif shape is None:
self.emit("pass")
else: # pragma: no cover
raise NotImplementedError(str(shape))
def generate_function_fallback(self, ir_function):
""" Generate a while-true with a switch-case on current block.
This is a non-optimal but always-working strategy.
"""
self.emit("_irpy_prev_block = None")
self.emit("_irpy_current_block = '{}'".format(ir_function.entry.name))
self.emit("while True:")
with self.indented():
for block in ir_function.blocks:
self.emit('if _irpy_current_block == "{}":'.format(block.name))
with self.indented():
self.generate_block(block)
self.emit("")
def generate_block(self, block):
""" Generate code for one block """
for ins in block:
self.generate_instruction(ins, block)
if not self._shape_style:
self.fill_phis(block)
def fill_phis(self, block):
# Generate eventual phi fill code:
phis = [p for s in block.successors for p in s.phis]
if phis:
phi_names = ", ".join(p.name for p in phis)
value_names = ", ".join(p.inputs[block].name for p in phis)
self.emit("{} = {}".format(phi_names, value_names))
def reset_stack(self):
self.emit("_irpy_free({})".format(self.stack_size))
self.stack_size = 0
def emit_jump(self, target: ir.Block):
""" Perform a jump in block mode. """
assert isinstance(target, ir.Block)
self.emit("_irpy_prev_block = _irpy_current_block")
self.emit('_irpy_current_block = "{}"'.format(target.name))
def generate_instruction(self, ins, block):
""" Generate python code for this instruction """
if isinstance(ins, ir.CJump):
a = self.fetch_value(ins.a)
b = self.fetch_value(ins.b)
if self._shape_style:
self.fill_phis(block)
self.emit("if {} {} {}:".format(a, ins.cond, b))
else:
self.emit("if {} {} {}:".format(a, ins.cond, b))
with self.indented():
self.emit_jump(ins.lab_yes)
self.emit("else:")
with self.indented():
self.emit_jump(ins.lab_no)
elif isinstance(ins, ir.Jump):
if self._shape_style:
self.fill_phis(block)
self.emit("pass")
else:
self.emit_jump(ins.target)
elif isinstance(ins, ir.Alloc):
self.emit("{} = _irpy_alloca({})".format(ins.name, ins.amount))
self.stack_size += ins.amount
elif isinstance(ins, ir.AddressOf):
src = self.fetch_value(ins.src)
self.emit("{} = {}[0]".format(ins.name, src))
elif isinstance(ins, ir.Const):
self.emit("{} = {}".format(ins.name, ins.value))
elif isinstance(ins, ir.LiteralData):
assert isinstance(ins.data, bytes)
self.literals.append(ins)
self.emit(
"{} = ({},{})".format(
ins.name, literal_label(ins), len(ins.data)
)
)
elif isinstance(ins, ir.Unop):
op = ins.operation
a = self.fetch_value(ins.a)
self.emit("{} = {}{}".format(ins.name, op, ins.a.name))
if ins.ty.is_integer:
self.emit(
"{0} = _irpy_correct({0}, {1}, {2})".format(
ins.name, ins.ty.bits, ins.ty.signed
)
)
elif isinstance(ins, ir.Binop):
self.gen_binop(ins)
elif isinstance(ins, ir.Cast):
if ins.ty.is_integer:
self.emit(
"{} = _irpy_correct(int(round({})), {}, {})".format(
ins.name, ins.src.name, ins.ty.bits, ins.ty.signed
)
)
elif ins.ty is ir.ptr:
self.emit("{} = int(round({}))".format(ins.name, ins.src.name))
elif ins.ty in [ir.f32, ir.f64]:
self.emit("{} = float({})".format(ins.name, ins.src.name))
else: # pragma: no cover
raise NotImplementedError(str(ins))
elif isinstance(ins, ir.Store):
self.gen_store(ins)
elif isinstance(ins, ir.Load):
self.gen_load(ins)
elif isinstance(ins, ir.FunctionCall):
args = ", ".join(self.fetch_value(a) for a in ins.arguments)
callee = self._fetch_callee(ins.callee)
self.emit("{} = {}({})".format(ins.name, callee, args))
elif isinstance(ins, ir.ProcedureCall):
args = ", ".join(self.fetch_value(a) for a in ins.arguments)
callee = self._fetch_callee(ins.callee)
self.emit("{}({})".format(callee, args))
elif isinstance(ins, ir.Phi):
pass # Phi is filled by predecessor
elif isinstance(ins, ir.Return):
self.reset_stack()
self.emit("return {}".format(self.fetch_value(ins.result)))
elif isinstance(ins, ir.Exit):
self.reset_stack()
self.emit("return")
else: # pragma: no cover
self.emit("not implemented: {}".format(ins))
raise NotImplementedError(str(type(ins)))
def gen_binop(self, ins):
a = self.fetch_value(ins.a)
b = self.fetch_value(ins.b)
# Assume int for now.
op = ins.operation
int_ops = {"/": "_irpy_idiv", "%": "_irpy_irem"}
shift_ops = {">>": "_irpy_ishr", "<<": "_irpy_ishl"}
if op in int_ops and ins.ty.is_integer:
fname = int_ops[op]
self.emit("{} = {}({}, {})".format(ins.name, fname, a, b))
elif op in shift_ops and ins.ty.is_integer:
fname = shift_ops[op]
self.emit(
"{} = {}({}, {}, {})".format(
ins.name, fname, a, b, ins.ty.bits
)
)
else:
self.emit("{} = {} {} {}".format(ins.name, a, op, b))
if ins.ty.is_integer:
self.emit(
"{0} = _irpy_correct({0}, {1}, {2})".format(
ins.name, ins.ty.bits, ins.ty.signed
)
)
def gen_load(self, ins):
address = self.fetch_value(ins.address)
if isinstance(ins.ty, ir.BlobDataTyp):
self.emit(
"{0} = read_mem({1}, {2})".format(
ins.name, address, ins.ty.size
)
)
else:
self.emit(
"{0} = load_{1}({2})".format(ins.name, ins.ty.name, address)
)
def gen_store(self, ins):
if isinstance(ins.value.ty, ir.BlobDataTyp):
self.emit(
"write_mem({0}, {1}, {2})".format(
ins.address.name, ins.value.ty.size, ins.value.name
)
)
else:
v = self.fetch_value(ins.value)
self.emit(
"store_{0}({2}, {1})".format(
ins.value.ty.name, ins.address.name, v
)
)
def _fetch_callee(self, callee):
""" Retrieves a callee and puts it into _fptr variable """
if isinstance(callee, ir.SubRoutine):
expr = "{}".format(callee.name)
elif isinstance(callee, ir.ExternalSubRoutine):
expr = "_irpy_externals['{}']".format(callee.name)
else:
expr = "_irpy_func_pointers[{}]".format(callee.name)
return expr
def fetch_value(self, value):
if isinstance(value, ir.SubRoutine):
# Function pointer!
fidx = self.func_ptr_map[value]
expr = str(fidx)
elif isinstance(value, ir.ExternalVariable):
expr = "_irpy_externals['{}']".format(value.name)
else:
expr = value.name
return expr
|
|
"""Tests for Philips Hue config flow."""
import asyncio
from unittest.mock import Mock, patch
import aiohue
import pytest
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.hue import config_flow, const
from tests.common import MockConfigEntry, mock_coro
async def test_flow_works(hass):
"""Test config flow ."""
mock_bridge = Mock()
mock_bridge.host = "1.2.3.4"
mock_bridge.username = None
mock_bridge.config.name = "Mock Bridge"
mock_bridge.id = "aabbccddeeff"
async def mock_create_user(username):
mock_bridge.username = username
mock_bridge.create_user = mock_create_user
mock_bridge.initialize.return_value = mock_coro()
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=mock_coro([mock_bridge]),
):
result = await flow.async_step_init()
assert result["type"] == "form"
assert result["step_id"] == "link"
assert flow.context["unique_id"] == "aabbccddeeff"
result = await flow.async_step_link(user_input={})
assert result["type"] == "create_entry"
assert result["title"] == "Mock Bridge"
assert result["data"] == {
"host": "1.2.3.4",
"username": "home-assistant#test-home",
}
assert len(mock_bridge.initialize.mock_calls) == 1
async def test_flow_no_discovered_bridges(hass, aioclient_mock):
"""Test config flow discovers no bridges."""
aioclient_mock.get(const.API_NUPNP, json=[])
flow = config_flow.HueFlowHandler()
flow.hass = hass
result = await flow.async_step_init()
assert result["type"] == "abort"
async def test_flow_all_discovered_bridges_exist(hass, aioclient_mock):
"""Test config flow discovers only already configured bridges."""
aioclient_mock.get(
const.API_NUPNP, json=[{"internalipaddress": "1.2.3.4", "id": "bla"}]
)
MockConfigEntry(
domain="hue", unique_id="bla", data={"host": "1.2.3.4"}
).add_to_hass(hass)
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_init()
assert result["type"] == "abort"
async def test_flow_one_bridge_discovered(hass, aioclient_mock):
"""Test config flow discovers one bridge."""
aioclient_mock.get(
const.API_NUPNP, json=[{"internalipaddress": "1.2.3.4", "id": "bla"}]
)
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_init()
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_flow_two_bridges_discovered(hass, aioclient_mock):
"""Test config flow discovers two bridges."""
# Add ignored config entry. Should still show up as option.
MockConfigEntry(
domain="hue", source=config_entries.SOURCE_IGNORE, unique_id="bla"
).add_to_hass(hass)
aioclient_mock.get(
const.API_NUPNP,
json=[
{"internalipaddress": "1.2.3.4", "id": "bla"},
{"internalipaddress": "5.6.7.8", "id": "beer"},
],
)
flow = config_flow.HueFlowHandler()
flow.hass = hass
result = await flow.async_step_init()
assert result["type"] == "form"
assert result["step_id"] == "init"
with pytest.raises(vol.Invalid):
assert result["data_schema"]({"id": "not-discovered"})
result["data_schema"]({"id": "bla"})
result["data_schema"]({"id": "beer"})
async def test_flow_two_bridges_discovered_one_new(hass, aioclient_mock):
"""Test config flow discovers two bridges."""
aioclient_mock.get(
const.API_NUPNP,
json=[
{"internalipaddress": "1.2.3.4", "id": "bla"},
{"internalipaddress": "5.6.7.8", "id": "beer"},
],
)
MockConfigEntry(
domain="hue", unique_id="bla", data={"host": "1.2.3.4"}
).add_to_hass(hass)
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_init()
assert result["type"] == "form"
assert result["step_id"] == "link"
assert flow.bridge.host == "5.6.7.8"
async def test_flow_timeout_discovery(hass):
"""Test config flow ."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
side_effect=asyncio.TimeoutError,
):
result = await flow.async_step_init()
assert result["type"] == "abort"
async def test_flow_link_timeout(hass):
"""Test config flow ."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.bridge = Mock()
with patch("aiohue.Bridge.create_user", side_effect=asyncio.TimeoutError):
result = await flow.async_step_link({})
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "linking"}
async def test_flow_link_button_not_pressed(hass):
"""Test config flow ."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.bridge = Mock(
username=None, create_user=Mock(side_effect=aiohue.LinkButtonNotPressed)
)
result = await flow.async_step_link({})
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "register_failed"}
async def test_flow_link_unknown_host(hass):
"""Test config flow ."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.bridge = Mock()
with patch("aiohue.Bridge.create_user", side_effect=aiohue.RequestError):
result = await flow.async_step_link({})
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "linking"}
async def test_bridge_ssdp(hass):
"""Test a bridge being discovered."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_ssdp(
{
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_bridge_ssdp_discover_other_bridge(hass):
"""Test that discovery ignores other bridges."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
result = await flow.async_step_ssdp(
{ssdp.ATTR_UPNP_MANUFACTURER_URL: "http://www.notphilips.com"}
)
assert result["type"] == "abort"
async def test_bridge_ssdp_emulated_hue(hass):
"""Test if discovery info is from an emulated hue instance."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_ssdp(
{
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "HASS Bridge",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
}
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_espalexa(hass):
"""Test if discovery info is from an Espalexa based device."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_ssdp(
{
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "Espalexa (0.0.0.0)",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
}
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_already_configured(hass):
"""Test if a discovered bridge has already been configured."""
MockConfigEntry(
domain="hue", unique_id="1234", data={"host": "0.0.0.0"}
).add_to_hass(hass)
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
with pytest.raises(data_entry_flow.AbortFlow):
await flow.async_step_ssdp(
{
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
}
)
async def test_import_with_no_config(hass):
"""Test importing a host without an existing config file."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_import({"host": "0.0.0.0"})
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_creating_entry_removes_entries_for_same_host_or_bridge(hass):
"""Test that we clean up entries for same host and bridge.
An IP can only hold a single bridge and a single bridge can only be
accessible via a single IP. So when we create a new entry, we'll remove
all existing entries that either have same IP or same bridge_id.
"""
orig_entry = MockConfigEntry(
domain="hue", data={"host": "0.0.0.0", "username": "aaaa"}, unique_id="id-1234",
)
orig_entry.add_to_hass(hass)
MockConfigEntry(
domain="hue", data={"host": "1.2.3.4", "username": "bbbb"}, unique_id="id-5678",
).add_to_hass(hass)
assert len(hass.config_entries.async_entries("hue")) == 2
bridge = Mock()
bridge.username = "username-abc"
bridge.config.name = "Mock Bridge"
bridge.host = "0.0.0.0"
bridge.id = "id-1234"
with patch(
"aiohue.Bridge", return_value=bridge,
):
result = await hass.config_entries.flow.async_init(
"hue", data={"host": "2.2.2.2"}, context={"source": "import"}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
with patch(
"homeassistant.components.hue.config_flow.authenticate_bridge",
return_value=mock_coro(),
), patch(
"homeassistant.components.hue.async_setup_entry",
side_effect=lambda _, _2: mock_coro(True),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "create_entry"
assert result["title"] == "Mock Bridge"
assert result["data"] == {
"host": "0.0.0.0",
"username": "username-abc",
}
entries = hass.config_entries.async_entries("hue")
assert len(entries) == 2
new_entry = entries[-1]
assert orig_entry.entry_id != new_entry.entry_id
assert new_entry.unique_id == "id-1234"
async def test_bridge_homekit(hass):
"""Test a bridge being discovered via HomeKit."""
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
result = await flow.async_step_homekit(
{
"host": "0.0.0.0",
"serial": "1234",
"manufacturerURL": config_flow.HUE_MANUFACTURERURL,
"properties": {"id": "aa:bb:cc:dd:ee:ff"},
}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_bridge_homekit_already_configured(hass):
"""Test if a HomeKit discovered bridge has already been configured."""
MockConfigEntry(
domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"}
).add_to_hass(hass)
flow = config_flow.HueFlowHandler()
flow.hass = hass
flow.context = {}
with pytest.raises(data_entry_flow.AbortFlow):
await flow.async_step_homekit(
{"host": "0.0.0.0", "properties": {"id": "aa:bb:cc:dd:ee:ff"}}
)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DiagnosticSettingsOperations(object):
"""DiagnosticSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2021_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_uri, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DiagnosticSettingsResource"
"""Gets the active diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_05_01_preview.models.DiagnosticSettingsResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
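# Example call (a minimal sketch; assumes this operation group is exposed as
# ``diagnostic_settings`` on the generated monitor management client):
#
#   setting = client.diagnostic_settings.get(
#       resource_uri="<full ARM resource ID>",
#       name="<diagnostic setting name>",
#   )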
def create_or_update(
self,
resource_uri, # type: str
name, # type: str
parameters, # type: "_models.DiagnosticSettingsResource"
**kwargs # type: Any
):
# type: (...) -> "_models.DiagnosticSettingsResource"
"""Creates or updates diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:param parameters: Parameters supplied to the operation.
:type parameters: ~$(python-base-namespace).v2021_05_01_preview.models.DiagnosticSettingsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_05_01_preview.models.DiagnosticSettingsResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DiagnosticSettingsResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
def delete(
self,
resource_uri, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes existing diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
def list(
self,
resource_uri, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DiagnosticSettingsResourceCollection"]
"""Gets the active diagnostic settings list for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiagnosticSettingsResourceCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2021_05_01_preview.models.DiagnosticSettingsResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DiagnosticSettingsResourceCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings'} # type: ignore
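# A minimal usage sketch (an assumption, not part of this generated module:
# the operations group is reached through the ``diagnostic_settings`` attribute
# of the monitor management client, and the resource URI / setting name below
# are placeholders):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.monitor import MonitorManagementClient
#
#     client = MonitorManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     setting = client.diagnostic_settings.get(
#         resource_uri="<full-arm-resource-id>", name="<setting-name>")
#     for item in client.diagnostic_settings.list(resource_uri="<full-arm-resource-id>"):
#         print(item.name)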
|
|
# coding:utf-8
"""
Spelling corrector library, used to correct common typos in domains like
gmal.com instead of gmail.com.
The spelling corrector uses difflib which in turn uses the
Ratcliff-Obershelp algorithm [1] to compute the similarity of two strings.
This is a very fast and accurate algorithm for domain spelling correction.
The (only) public method this module has is suggest(word), which given
a domain, suggests an alternative or returns the original domain
if no suggestion exists.
[1] http://xlinux.nist.gov/dads/HTML/ratcliffObershelp.html
"""
import difflib
def suggest(word, cutoff=0.77):
"""
Given a domain and a cutoff heuristic, suggest an alternative or return the
original domain if no suggestion exists.
"""
if word in LOOKUP_TABLE:
return LOOKUP_TABLE[word]
guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)
    if guess:
        return guess[0]
return word
MOST_COMMON_DOMAINS = [
# mailgun :)
'mailgun.net',
# The Top ~200 domains from 30mm open events gathered during the
# last 30 days prior to 10/31/2018
'163.com',
'abv.bg',
'account.tradeshift.com',
'aim.com',
'albany.edu',
'alice.it',
'amerihome.com',
'andrew.cmu.edu',
'aol.co.uk',
'aol.com',
'aol.de',
'aol.fr',
'arcor.de',
'artifacturestudios.com',
'asu.edu',
'atproperties.com',
'att.net',
'austin.utexas.edu',
'austincc.edu',
'baylor.edu',
'bellsouth.net',
'berkeley.edu',
'bigpond.com',
'bigpond.net.au',
'binghamton.edu',
'bk.ru',
'bluewin.ch',
'blueyonder.co.uk',
'bol.com.br',
'btinternet.com',
'bu.edu',
'byui.edu',
'calpoly.edu',
'charter.net',
'cloud.upwork.com',
'colemanrg.com',
'colorado.edu',
'columbia.edu',
'comcast.net',
'compass.com',
'cornell.edu',
'cox.net',
'coyote.csusb.edu',
'cpp.edu',
'crimson.ua.edu',
'cytonn.com',
'docomo.ne.jp',
'du.edu',
'earthlink.net',
'email.arizona.edu',
'email.sc.edu',
'embarqmail.com',
'emory.edu',
'ezweb.ne.jp',
'fiu.edu',
'free.fr',
'freenet.de',
'frontier.com',
'g.austincc.edu',
'gmail.com',
'gmx.at',
'gmx.de',
'gmx.net',
'google.com',
'googlemail.com',
'guest.booking.com',
'gwu.edu',
'hawk.iit.edu',
'home.nl',
'hotmail.ca',
'hotmail.co.uk',
'hotmail.com',
'hotmail.de',
'hotmail.es',
'hotmail.fr',
'hotmail.it',
'hotmail.se',
'i.softbank.jp',
'icloud.com',
'iinet.net.au',
'illinois.edu',
'inbox.ru',
'jhu.edu',
'juno.com',
'knights.ucf.edu',
'kw.com',
'laposte.net',
'libero.it',
'list.ru',
'live.ca',
'live.co.uk',
'live.com',
'live.com.au',
'live.fr',
'live.nl',
'live.se',
'lsu.edu',
'mac.com',
'mail.com',
'mail.ru',
'mail.usf.edu',
'marketplace.amazon.co.uk',
'marketplace.amazon.com',
'marketplace.amazon.de',
'masonlive.gmu.edu',
'mavs.uta.edu',
'me.com',
'miami.edu',
'msn.com',
'msu.edu',
'my.fsu.edu',
'naver.com',
'ntlworld.com',
'ohio.edu',
'online.no',
'optonline.net',
'optusnet.com.au',
'orange.fr',
'osu.edu',
'outlook.com',
'outlook.de',
'outlook.es',
'outlook.fr',
'pace.edu',
'pegipegi.com',
'pitt.edu',
'protonmail.com',
'q.com',
'qq.com',
'rambler.ru',
'rev.com',
'roadrunner.com',
'rocketmail.com',
'rogers.com',
'rollins.edu',
'rutgers.edu',
'savaari.com',
'sbcglobal.net',
'seznam.cz',
'sfr.fr',
'shaw.ca',
'sky.com',
'skynet.be',
'spartans.ut.edu',
'stanford.edu',
'stjohns.edu',
'stonybrook.edu',
'student.gsu.edu',
'suddenlink.net',
'sympatico.ca',
'syr.edu',
't-online.de',
'talktalk.net',
'telenet.be',
'telia.com',
'telus.net',
'temple.edu',
'topper.wku.edu',
'transfix.io',
'twc.com',
'txstate.edu',
'u.northwestern.edu',
'uci.edu',
'ucr.edu',
'ucsd.edu',
'udel.edu',
'uga.edu',
'umail.ucsb.edu',
'umich.edu',
'umn.edu',
'uol.com.br',
'utexas.edu',
'uw.edu',
'uwm.edu',
'vepl.com',
'verizon.net',
'videotron.ca',
'virginia.edu',
'vt.edu',
'wanadoo.fr',
'wayne.edu',
'web.de',
'wildcats.unh.edu',
'windstream.net',
'wisc.edu',
'wp.pl',
'xtra.co.nz',
'yahoo.ca',
'yahoo.co.in',
'yahoo.co.jp',
'yahoo.co.uk',
'yahoo.com',
'yahoo.com.ar',
'yahoo.com.au',
'yahoo.com.br',
'yahoo.com.hk',
'yahoo.com.mx',
'yahoo.com.sg',
'yahoo.com.tw',
'yahoo.de',
'yahoo.es',
'yahoo.fr',
'yahoo.it',
'yandex.ru',
'ymail.com',
'ziggo.nl'
]
# domains that difflib alone doesn't correct but that we still want to fix
LOOKUP_TABLE = {
u'yahoo': u'yahoo.com',
u'gmail': u'gmail.com',
u'hotmail': u'hotmail.com',
u'live': u'live.com',
u'outlook': u'outlook.com',
u'msn': u'msn.com',
u'googlemail': u'googlemail.com',
u'aol': u'aol.com',
u'aim': u'aim.com',
u'icloud': u'icloud.com',
u'me': u'me.com',
u'mac': u'mac.com',
u'facebook': u'facebook.com',
u'comcast': u'comcast.net',
u'sbcglobal': u'sbcglobal.net',
u'bellsouth': u'bellsouth.net',
u'verizon': u'verizon.net',
u'earthlink': u'earthlink.net',
u'cox': u'cox.net',
u'charter': u'charter.net',
u'shaw': u'shaw.ca',
u'bell': u'bell.net'
}
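# A small self-contained demo (runs only when this module is executed directly,
# so importing it stays side-effect free). The expected behaviour noted below is
# what the default 0.77 cutoff and LOOKUP_TABLE should produce:
if __name__ == '__main__':
    for candidate in ('gmal.com', 'hotmial.com', 'yahoo', 'example.com'):
        print('%s -> %s' % (candidate, suggest(candidate)))
    # 'gmal.com' and 'hotmial.com' should be corrected by difflib to
    # 'gmail.com' and 'hotmail.com', 'yahoo' is rewritten via LOOKUP_TABLE,
    # and 'example.com' should come back unchanged (no close match).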
|
|
""" Tests for engine queries """
import six
from flywheel import (Field, Composite, Model, NUMBER, STRING_SET, GlobalIndex,
DuplicateEntityException, EntityNotFoundException, Limit)
from flywheel.tests import DynamoSystemTest
# pylint: disable=C0121
class User(Model):
""" Model for testing queries """
__metadata__ = {
'global_indexes': [
GlobalIndex('name-index', 'name', 'score'),
],
}
id = Field(hash_key=True)
name = Field(range_key=True)
score = Field(data_type=NUMBER, index='score-index', default=0)
str_set = Field(data_type=STRING_SET)
bio = Field()
plan = Field()
alias = Field()
def score_merge(ts, upvotes):
""" Merge the ts and upvotes """
return ts + 1000 * upvotes
class Post(Model):
""" Model for testing composite queries """
__metadata__ = {
'global_indexes': [
GlobalIndex('name-index', 'username', 'score'),
GlobalIndex('ts-index', 'username', 'ts'),
GlobalIndex('hash-index', 'total_uid')
],
}
uid = Composite('type', 'id', hash_key=True)
type = Field()
id = Field()
score = Composite('ts', 'upvotes', range_key=True, data_type=NUMBER,
merge=score_merge)
username = Field()
ts = Field(data_type=NUMBER, default=0)
upvotes = Field(data_type=NUMBER, default=0)
total_uid = Composite('uid', 'username', merge=lambda x, y: x + ':' +
str(y))
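# A worked example of how the composite fields above combine (assuming the
# default Composite merge joins its pieces with ':', which is what the
# piecewise query tests below rely on):
#
#     p = Post(type='tweet', id='1234', username='abc', ts=10, upvotes=2)
#     p.uid       -> 'tweet:1234'
#     p.score     -> 10 + 1000 * 2 == 2010      (score_merge above)
#     p.total_uid -> 'tweet:1234:abc'           (explicit merge lambda)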
class TestQueries(DynamoSystemTest):
""" Tests for table queries """
models = [User]
def test_first(self):
""" Query can retrieve first element of results """
u = User(id='a', name='Adam')
self.engine.save(u)
result = self.engine(User).filter(id='a').first()
self.assertEquals(result, u)
def test_first_none(self):
""" If no results, first() returns None """
result = self.engine(User).filter(id='a').first()
self.assertIsNone(result)
def test_one(self):
""" Query can retrieve first element of results """
u = User(id='a', name='Adam')
self.engine.save(u)
result = self.engine(User).filter(id='a').one()
self.assertEquals(result, u)
def test_one_many(self):
""" If many results, one() raises DuplicateEntityException """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
# For legacy reasons, make sure it also is a ValueError.
with self.assertRaises(ValueError):
self.engine(User).filter(id='a').one()
with self.assertRaises(DuplicateEntityException):
self.engine(User).filter(id='a').one()
def test_one_none(self):
""" If no results, one() raises EntityNotFoundException """
# For legacy reasons, make sure it also is a ValueError.
with self.assertRaises(ValueError):
self.engine(User).filter(id='a').one()
with self.assertRaises(EntityNotFoundException):
self.engine(User).filter(id='a').one()
def test_count(self):
""" Can return a count instead of the models """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
count = self.engine(User).filter(id='a').count()
self.assertEqual(count, 2)
def test_iter(self):
""" Queries can iterate over items """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
users = [u, u2]
for item in self.engine.query(User).filter(id='a'):
self.assertTrue(item in users)
users.remove(item)
def test_force_hash_key(self):
""" Queries must specify hash key """
u = User(id='a', name='Adam')
self.engine.save(u)
with self.assertRaises(ValueError):
self.engine.query(User).all()
def test_filter_hash_key(self):
""" Queries can filter by hash key """
u = User(id='a', name='Adam')
u2 = User(id='b', name='Billy')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a').all()
self.assertEquals(results, [u])
def test_limit(self):
""" Queries can have a limit """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a').limit(1).all()
self.assertEquals(len(results), 1)
def test_delete(self):
""" Queries can selectively delete items """
u = User(id='a', name='Adam')
u2 = User(id='b', name='Billy')
self.engine.save([u, u2])
count = self.engine.query(User).filter(User.id == 'a').delete()
self.assertEquals(count, 1)
results = self.engine.scan(User).all()
self.assertEquals(results, [u2])
def test_filter_chain(self):
""" Queries can chain filters """
u = User(id='a', name='Adam')
u2 = User(id='b', name='Billy')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.name == 'Adam').all()
self.assertEquals(results, [u])
def test_filter_and(self):
""" Queries can and filters together """
u = User(id='a', name='Adam')
u2 = User(id='b', name='Billy')
self.engine.save([u, u2])
results = self.engine.query(User).filter((User.id == 'a') &
(User.name == 'Adam')).all()
self.assertEquals(results, [u])
def test_filter_lt(self):
""" Queries can filter lt """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.name < 'Adam').all()
self.assertEquals(results, [u2])
def test_filter_lte(self):
""" Queries can filter lte """
u = User(id='a', name='Aaron')
u2 = User(id='a', name='Adam')
u3 = User(id='a', name='Alison')
self.engine.save([u, u2, u3])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.name <= u2.name).all()
self.assertEquals(len(results), 2)
self.assertTrue(u in results)
self.assertTrue(u2 in results)
def test_filter_gt(self):
""" Queries can filter gt """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.name > 'Aaron').all()
self.assertEquals(results, [u])
def test_filter_gte(self):
""" Queries can filter gte """
u = User(id='a', name='Aaron')
u2 = User(id='a', name='Adam')
u3 = User(id='a', name='Alison')
self.engine.save([u, u2, u3])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.name >= u2.name).all()
self.assertEquals(len(results), 2)
self.assertTrue(u2 in results)
self.assertTrue(u3 in results)
def test_filter_between(self):
""" Queries can filter between """
u = User(id='a', name='Aaron', score=1)
u2 = User(id='a', name='Adam', score=2)
u3 = User(id='a', name='Alison', score=3)
u4 = User(id='a', name='Andrew', score=4)
self.engine.save([u, u2, u3, u4])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.score.between_(u2.score, u3.score)).all()
self.assertEquals(len(results), 2)
self.assertTrue(u2 in results)
self.assertTrue(u3 in results)
def test_filter_beginswith(self):
""" Queries can filter beginswith """
u = User(id='a', name='Adam')
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.name.beginswith_('Ad')).all()
self.assertEquals(results, [u])
def test_filter_ne(self):
""" Queries can filter ne """
u = User(id='a', name='Adam', alias="A")
u2 = User(id='a', name='Aaron', alias="a-aron")
self.engine.save([u, u2])
ret = self.engine.query(User).filter(User.id == 'a')\
.filter(User.alias != "A").one()
self.assertEqual(ret, u2)
def test_filter_in(self):
""" Queries can filter in """
u = User(id='a', name='Adam', alias='A')
u2 = User(id='a', name='Aaron', alias='a-aron')
self.engine.save([u, u2])
ret = self.engine.query(User).filter(User.id == 'a')\
.filter(User.alias.in_(set(['A', 'b']))).one()
self.assertEqual(ret, u)
def test_filter_contains(self):
""" Queries can filter contains """
u = User(id='a', name='Adam', str_set=set(['foo', 'bar']))
u2 = User(id='a', name='Aaron', str_set=set(['bar']))
self.engine.save([u, u2])
ret = self.engine.query(User).filter(User.id == 'a')\
.filter(User.str_set.contains_('foo')).one()
self.assertEqual(ret, u)
def test_filter_null(self):
""" Queries can filter null """
u = User(id='a', name='Adam', str_set=set(['foo']))
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
ret = self.engine.query(User).filter(User.id == 'a')\
.filter(User.str_set == None).one() # noqa
self.assertEqual(ret, u2)
def test_filter_not_null(self):
""" Queries can filter not null """
u = User(id='a', name='Adam', str_set=set(['foo']))
u2 = User(id='a', name='Aaron')
self.engine.save([u, u2])
ret = self.engine.query(User).filter(User.id == 'a')\
.filter(User.str_set != None).one() # noqa
self.assertEqual(ret, u)
def test_smart_local_index(self):
""" Queries auto-select local secondary index """
u = User(id='a', name='Adam', score=50)
u2 = User(id='a', name='Aaron', score=100)
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.score > 75).all()
self.assertEquals(results, [u2])
def test_smart_global_index(self):
""" Queries auto-select global secondary index """
u = User(id='a', name='Adam', score=50)
u2 = User(id='b', name='Adam', score=100)
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.name == 'Adam')\
.filter(User.score > 75).all()
self.assertEquals(results, [u2])
def test_double_limit(self):
""" Calling limit twice on the same query raises error """
with self.assertRaises(ValueError):
self.engine.query(User).filter(name='Adam').limit(10).limit(5).all()
def test_double_index(self):
""" Calling index twice on the same query raises error """
with self.assertRaises(ValueError):
self.engine.query(User).filter(name='Adam').index('name-index')\
.index('score-index').all()
def test_filter_non_indexed(self):
""" Queries can filter non-indexed fields """
u = User(id='a', name='Adam', bio='bar')
u2 = User(id='a', name='Billy', bio='baz')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(bio='bar').all()
self.assertEquals(results, [u])
def test_filter_or(self):
""" Queries can join filter constraints with OR """
u = User(id='a', name='Adam', bio='bar')
u2 = User(id='a', name='Billy', plan='baz')
u3 = User(id='a', name='Celine', bio='not', plan='this')
self.engine.save([u, u2, u3])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(bio='bar', plan='baz').all(filter_or=True)
self.assertEqual(len(results), 2)
self.assertTrue(u in results)
self.assertTrue(u2 in results)
def test_filter_inequality(self):
""" Queries can use inequality filters on non-indexed fields """
u = User(id='a', name='Adam', bio='aaa')
u2 = User(id='a', name='Billy', bio='zzz')
self.engine.save([u, u2])
results = self.engine.query(User).filter(User.id == 'a')\
.filter(User.bio < 'ddd').all()
self.assertEquals(results, [u])
def test_limit_and_resume(self):
""" Query can provide a limit and resume later """
users = [User('a', 'a', score=1), User('a', 'b', score=2), User('a', 'c', score=2)]
self.engine.save(users)
limit = Limit(item_limit=1, strict=True)
results = self.engine.query(User).filter(id='a') \
.filter(User.score > 0).limit(limit).all()
self.assertEqual(len(results), 1)
last_evaluated_key = results[-1].index_pk_dict_('score-index')
results.extend(self.engine.query(User).filter(id='a')
.filter(User.score > 0).limit(limit).all(
exclusive_start_key=last_evaluated_key))
self.assertEqual(len(results), 2)
last_evaluated_key = results[-1].index_pk_dict_('score-index')
results.extend(self.engine.query(User).filter(id='a')
.filter(User.score > 0).limit(limit).all(
exclusive_start_key=last_evaluated_key))
self.assertEqual(len(results), 3)
# We should have seen all the items by this point
last_evaluated_key = results[-1].index_pk_dict_('score-index')
results.extend(self.engine.query(User).filter(id='a')
.filter(User.score > 0).limit(limit).all(
exclusive_start_key=last_evaluated_key))
self.assertEqual(len(results), 3)
        # assertItemsEqual is missing in python 2.6 and was renamed to
        # assertCountEqual in python 3
        if six.PY3:
            self.assertCountEqual(results, users)
class TestCompositeQueries(DynamoSystemTest):
""" Tests for table queries """
models = [Post]
def test_composite_query(self):
""" Can query composite fields """
p = Post(type='tweet', id='1234')
self.engine.save(p)
results = self.engine(Post).filter(uid='tweet:1234').all()
self.assertEquals(results, [p])
def test_composite_query_piecewise(self):
""" Can query composite fields by individual pieces """
p = Post(type='tweet', id='1234')
self.engine.save(p)
results = self.engine(Post).filter(type='tweet', id='1234').all()
self.assertEquals(results, [p])
def test_composite_local_index(self):
""" Auto-select composite local secondary indexes """
p = Post(type='tweet', id='1234')
self.engine.save(p)
results = self.engine(Post).filter(type='tweet', id='1234',
score=0).all()
self.assertEquals(results, [p])
def test_composite_local_index_piecewise(self):
""" Auto-select composite local secondary indexes by pieces """
p = Post(type='tweet', id='1234')
self.engine.save(p)
results = self.engine(Post).filter(type='tweet', id='1234', ts=0,
upvotes=0).all()
self.assertEquals(results, [p])
def test_composite_global_index(self):
""" Auto-select composite global secondary indexes """
p = Post(type='tweet', id='1234', username='abc')
self.engine.save(p)
results = self.engine(Post).filter(username='abc', score=0) \
.index('name-index').all()
self.assertEquals(results, [p])
def test_composite_global_index_piecewise(self):
""" Use composite global secondary indexes by pieces """
p = Post(type='tweet', id='1234', username='abc')
self.engine.save(p)
results = self.engine(Post).filter(username='abc', ts=0,
upvotes=0).index('name-index').all()
self.assertEquals(results, [p])
def test_ambiguous_index(self):
""" Error raised if index name is ambiguous """
p = Post(type='tweet', id='1234', username='abc')
self.engine.save(p)
with self.assertRaises(ValueError):
self.engine(Post).filter(username='abc').all()
def test_select_index(self):
""" Index name can be specified """
p = Post(type='tweet', id='1234', username='abc')
self.engine.save(p)
results = self.engine(Post).filter(username='abc')\
.index('name-index').all()
self.assertEquals(results, [p])
def test_no_index(self):
""" If no index is found, error is raised """
p = Post(type='tweet', id='1234')
self.engine.save(p)
with self.assertRaises(ValueError):
self.engine(Post).filter(Post.username == 'a')\
.filter(Post.upvotes == 4).all()
def test_no_range(self):
""" Can query on an index even if there is no range key """
p = Post(type='tweet', id='1234', username='abc')
self.engine.save(p)
ret = self.engine.query(Post).filter(id='1234', type='tweet',
username='abc').all()
self.assertEqual(ret, [p])
class Widget(Model):
""" Test model for ordering """
id = Field(hash_key=True)
name = Field(range_key=True)
alpha = Field(data_type=int, index='alpha-index')
beta = Field(data_type=int, index='beta-index')
class TestOrder(DynamoSystemTest):
""" Test results ordering """
models = [Widget]
def _add_widgets(self):
""" Add a bunch of widgets with different alpha/beta values """
for i in range(10):
w = Widget('a', str(i), alpha=i)
w.beta = (i + 5) % 10
self.engine.save(w)
    def test_default_ascending(self):
""" By default results are in ascending order """
self._add_widgets()
items = self.engine(Widget).filter(id='a').index('alpha-index').all()
alpha = [item.alpha for item in items]
self.assertEquals(alpha, sorted(alpha))
def test_desc(self):
""" desc=True orders returns items in descending order """
self._add_widgets()
items = self.engine(Widget).filter(id='a')\
.index('alpha-index').all(desc=True)
alpha = [item.alpha for item in items]
alpha.reverse()
self.assertEquals(alpha, sorted(alpha))
def test_order_index(self):
""" Results are ordered by the index specified """
self._add_widgets()
items = self.engine(Widget).filter(id='a').index('beta-index').all()
beta = [item.beta for item in items]
self.assertEquals(beta, sorted(beta))
class SingleKeyModel(Model):
""" Model with a no range key """
id = Field(hash_key=True)
def __init__(self, id='a'):
super(SingleKeyModel, self).__init__(id)
class TestEngine(DynamoSystemTest):
""" Tests for misc engine functionality """
models = [Post, SingleKeyModel, Widget]
def test_get(self):
""" Fetch item directly by primary key """
p = Post(type='tweet', id='1234')
self.engine.save(p)
ret = self.engine.get(Post, uid='tweet:1234', score=0)
self.assertEqual(ret, p)
def test_get_many(self):
""" Fetch multiple items directly by primary key """
p = Post(type='tweet', id='1234')
p2 = Post(type='post', id='2345')
self.engine.save([p, p2])
ret = self.engine.get(Post,
[{'uid': 'tweet:1234', 'score': 0},
{'uid': 'post:2345', 'score': 0}])
self.assertEqual(len(ret), 2)
self.assertTrue(p in ret)
self.assertTrue(p2 in ret)
def test_query_no_range(self):
""" Can query a model that has no range key """
m = SingleKeyModel()
self.engine.save(m)
ret = self.engine(SingleKeyModel).filter(id='a').all()
self.assertEqual(ret, [m])
def test_get_composite_pieces(self):
""" Fetch item directly by pieces of composite primary key """
p = Post(type='tweet', id='1234')
self.engine.save(p)
ret = self.engine.get(Post, type='tweet', id='1234', ts=0, upvotes=0)
self.assertEqual(ret, p)
def test_get_empty(self):
""" Fetching empty list of keys returns empty list """
ret = self.engine.get(Post, [])
self.assertEqual(ret, [])
def test_get_missing(self):
""" Fetching a missing item returns None """
ret = self.engine.get(Post, uid='a', score=4)
self.assertIsNone(ret)
def test_get_smart_scope(self):
""" Models with no range key can fetch from string """
m = SingleKeyModel()
self.engine.save(m)
ret = self.engine.get(SingleKeyModel, [m.id])
self.assertEqual(ret, [m])
def test_delete_key(self):
""" Delete item directly by primary key """
m = SingleKeyModel()
self.engine.save(m)
self.engine.delete_key(SingleKeyModel, id=m.id)
self.assertIsNone(self.engine.scan(SingleKeyModel).first())
def test_delete_key_many(self):
""" Delete multiple keys directly by primary key """
def test_delete_key_composite_pieces(self):
""" Delete item directly by pieces of composite primary key """
def test_delete_smart_scope(self):
""" Models with no range key can delete from string """
m = SingleKeyModel()
self.engine.save(m)
self.engine.delete_key(SingleKeyModel, [m.id])
self.assertIsNone(self.engine.scan(SingleKeyModel).first())
def test_delete_key_empty(self):
""" No error if deleting no keys """
self.engine.delete_key(SingleKeyModel, [])
def test_model_save(self):
""" Save can overwrite item data """
p = Post(type='tweet', id='1234', username='foo')
self.engine.save(p)
p.username = 'bar'
p.save(overwrite=True)
ret = self.engine.get(Post, uid='tweet:1234', score=0)
self.assertEqual(ret.username, 'bar')
def test_exists_hkey(self):
""" engine.exists(hash_key) finds item """
m = SingleKeyModel('a')
self.engine.save(m)
self.assertTrue(self.engine.exists(SingleKeyModel, 'a'))
def test_not_exists_hkey(self):
""" engine.exists(hash_key) returns false if not found """
self.assertFalse(self.engine.exists(SingleKeyModel, 'a'))
def test_exists_hkey_rkey(self):
""" engine.exists(hash_key, range_key) finds item """
w = Widget('a', 'Aaron')
self.engine.save(w)
self.assertTrue(self.engine.exists(Widget, 'a', 'Aaron'))
def test_not_exists_hkey_rkey(self):
""" engine.exists(hash_key, range_key) returns false if not found """
self.assertFalse(self.engine.exists(Widget, 'a', 'Aaron'))
def test_exists_dict(self):
""" engine.exists(dict) finds item """
w = Widget('a', 'Aaron')
self.engine.save(w)
pkey = {
'id': 'a',
'name': 'Aaron',
}
self.assertTrue(self.engine.exists(Widget, pkey))
def test_not_exists_dict(self):
""" engine.exists(dict) returns false if not found """
pkey = {
'id': 'a',
'name': 'Aaron',
}
self.assertFalse(self.engine.exists(Widget, pkey))
def test_exists_model(self):
""" engine.exists(hash_key, range_key) finds item """
w = Widget('a', 'Aaron')
self.engine.save(w)
self.assertTrue(self.engine.exists(Widget, w))
def test_not_exists_model(self):
""" engine.exists(hash_key, range_key) returns false if not found """
w = Widget('a', 'Aaron')
self.assertFalse(self.engine.exists(Widget, w))
|
|
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_equal
import numpy as np
from numpy.testing import (assert_array_equal, assert_almost_equal,
assert_allclose, assert_array_almost_equal)
from mne.channels.montage import read_montage, _set_montage, read_dig_montage
from mne.utils import _TempDir
from mne import create_info, EvokedArray
from mne.coreg import fit_matched_points
from mne.transforms import apply_trans, get_ras_to_neuromag_trans
from mne.io.constants import FIFF
from mne.io.meas_info import _read_dig_points
from mne.io.kit import read_mrk
p_dir = op.dirname(__file__)
elp = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_elp.txt')
hsp = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_hsp.txt')
hpi = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_mrk.sqd')
def test_montage():
"""Test making montages"""
tempdir = _TempDir()
# no pep8
input_str = ["""FidNz 0.00000 10.56381 -2.05108
FidT9 -7.82694 0.45386 -3.76056
FidT10 7.82694 0.45386 -3.76056""",
"""// MatLab Sphere coordinates [degrees] Cartesian coordinates
// Label Theta Phi Radius X Y Z off sphere surface
E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011
E2 44.600 -0.880 1.000 0.7119 0.7021 -0.0154 0.00000000000000000
E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000""", # noqa
"""# ASA electrode file
ReferenceLabel avg
UnitPosition mm
NumberPositions= 68
Positions
-86.0761 -19.9897 -47.9860
85.7939 -20.0093 -48.0310
0.0083 86.8110 -39.9830
Labels
LPA
RPA
Nz
""",
"""Site Theta Phi
Fp1 -92 -72
Fp2 92 72
F3 -60 -51
""",
"""346
EEG F3 -62.027 -50.053 85
EEG Fz 45.608 90 85
EEG F4 62.01 50.103 85
""",
"""
eeg Fp1 -95.0 -31.0 -3.0
eeg AF7 -81 -59 -3
eeg AF3 -87 -41 28
"""]
kinds = ['test.sfp', 'test.csd', 'test.elc', 'test.txt', 'test.elp',
'test.hpts']
for kind, text in zip(kinds, input_str):
fname = op.join(tempdir, kind)
with open(fname, 'w') as fid:
fid.write(text)
montage = read_montage(fname)
assert_equal(len(montage.ch_names), 3)
assert_equal(len(montage.ch_names), len(montage.pos))
assert_equal(montage.pos.shape, (3, 3))
assert_equal(montage.kind, op.splitext(kind)[0])
if kind.endswith('csd'):
dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
('off_sph', 'f8')]
try:
table = np.loadtxt(fname, skip_header=2, dtype=dtype)
except TypeError:
table = np.loadtxt(fname, skiprows=2, dtype=dtype)
pos2 = np.c_[table['x'], table['y'], table['z']]
assert_array_almost_equal(pos2, montage.pos, 4)
# test transform
input_str = """
eeg Fp1 -95.0 -31.0 -3.0
eeg AF7 -81 -59 -3
eeg AF3 -87 -41 28
cardinal 2 -91 0 -42
cardinal 1 0 -91 -42
cardinal 3 0 91 -42
"""
kind = 'test_fid.hpts'
fname = op.join(tempdir, kind)
with open(fname, 'w') as fid:
fid.write(input_str)
montage = read_montage(op.join(tempdir, 'test_fid.hpts'), transform=True)
# check coordinate transformation
pos = np.array([-95.0, -31.0, -3.0])
nasion = np.array([-91, 0, -42])
lpa = np.array([0, -91, -42])
rpa = np.array([0, 91, -42])
fids = np.vstack((nasion, lpa, rpa))
trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
pos = apply_trans(trans, pos)
assert_array_equal(montage.pos[0], pos)
idx = montage.ch_names.index('2')
assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
idx = montage.ch_names.index('1')
assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
idx = montage.ch_names.index('3')
assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
pos = np.array([-95.0, -31.0, -3.0])
montage_fname = op.join(tempdir, 'test_fid.hpts')
montage = read_montage(montage_fname, unit='mm')
assert_array_equal(montage.pos[0], pos * 1e-3)
    # test applying the last montage read above to Info and Evoked
info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
_set_montage(info, montage)
pos2 = np.array([c['loc'][:3] for c in info['chs']])
pos3 = np.array([c['eeg_loc'][:, 0] for c in info['chs']])
assert_array_equal(pos2, montage.pos)
assert_array_equal(pos3, montage.pos)
assert_equal(montage.ch_names, info['ch_names'])
info = create_info(
montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
evoked = EvokedArray(
data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)
evoked.set_montage(montage)
pos4 = np.array([c['loc'][:3] for c in evoked.info['chs']])
pos5 = np.array([c['eeg_loc'][:, 0] for c in evoked.info['chs']])
assert_array_equal(pos4, montage.pos)
assert_array_equal(pos5, montage.pos)
assert_equal(montage.ch_names, evoked.info['ch_names'])
def test_read_dig_montage():
"""Test read_dig_montage"""
names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=False)
elp_points = _read_dig_points(elp)
hsp_points = _read_dig_points(hsp)
hpi_points = read_mrk(hpi)
assert_equal(montage.point_names, names)
assert_array_equal(montage.elp, elp_points)
assert_array_equal(montage.hsp, hsp_points)
assert_array_equal(montage.hpi, hpi_points)
assert_array_equal(montage.dev_head_t, np.identity(4))
montage = read_dig_montage(hsp, hpi, elp, names,
transform=True, dev_head_t=True)
# check coordinate transformation
# nasion
assert_almost_equal(montage.elp[0, 0], 0)
assert_almost_equal(montage.nasion[0], 0)
assert_almost_equal(montage.elp[0, 2], 0)
assert_almost_equal(montage.nasion[0], 0)
# lpa and rpa
assert_allclose(montage.elp[1:3, 1:], 0, atol=1e-16)
assert_allclose(montage.lpa[1:], 0, atol=1e-16)
assert_allclose(montage.rpa[1:], 0, atol=1e-16)
# device head transform
dev_head_t = fit_matched_points(tgt_pts=montage.elp[3:],
src_pts=montage.hpi, out='trans')
assert_array_equal(montage.dev_head_t, dev_head_t)
def test_set_dig_montage():
"""Test applying DigMontage to inst
Extensive testing of applying `dig` to info is done in test_meas_info
with `test_make_dig_points`.
"""
names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
hsp_points = _read_dig_points(hsp)
elp_points = _read_dig_points(elp)
hpi_points = read_mrk(hpi)
p0, p1, p2 = elp_points[:3]
nm_trans = get_ras_to_neuromag_trans(p0, p1, p2)
elp_points = apply_trans(nm_trans, elp_points)
nasion_point, lpa_point, rpa_point = elp_points[:3]
hsp_points = apply_trans(nm_trans, hsp_points)
montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=True)
info = create_info(['Test Ch'], 1e3, ['eeg'])
_set_montage(info, montage)
hs = np.array([p['r'] for i, p in enumerate(info['dig'])
if p['kind'] == FIFF.FIFFV_POINT_EXTRA])
nasion_dig = np.array([p['r'] for p in info['dig']
if all([p['ident'] == FIFF.FIFFV_POINT_NASION,
p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
lpa_dig = np.array([p['r'] for p in info['dig']
if all([p['ident'] == FIFF.FIFFV_POINT_LPA,
p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
rpa_dig = np.array([p['r'] for p in info['dig']
if all([p['ident'] == FIFF.FIFFV_POINT_RPA,
p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
hpi_dig = np.array([p['r'] for p in info['dig']
if p['kind'] == FIFF.FIFFV_POINT_HPI])
assert_array_equal(hs, hsp_points)
assert_array_equal(nasion_dig.ravel(), nasion_point)
assert_array_equal(lpa_dig.ravel(), lpa_point)
assert_array_equal(rpa_dig.ravel(), rpa_point)
assert_array_equal(hpi_dig, hpi_points)
assert_array_equal(montage.dev_head_t, info['dev_head_t']['trans'])
|
|
import target
import selectiondag
from vis import irvis
from vis import dagvis
from vis import interferencevis
from passes import jumpfix
from passes import blockmerge
from passes import unused
from passes import branchreplace
from passes import constantfold
from passes import copypropagation
from passes import mem2reg
import instructionselector
import interference
import registerallocator
import ir
import function
class Register(object):
def __init__(self,name):
self.name = name
def canContain(self,t):
if type(t) == type(self):
return True
if t in self.types:
return True
return False
def __repr__(self):
return self.name
def isPhysical(self):
return True
class StandardMachine(target.Target):
def translateModule(self,m,ofile):
        #merge data entries that can obviously be combined
m.packData()
for labels,sz in m.rwzdata:
for label in labels:
ofile.write(".comm %s,%d,32\n"%(label,sz))
ofile.write(".data\n")
for labels,data in m.rwdata:
for label in labels:
ofile.write("%s:\n"%label)
datastr = ''
for char in data:
datastr += '%d,'%ord(char)
ofile.write('.byte %s\n' % datastr[:-1])
ofile.write(".section .rodata\n")
for labels,data in m.rodata:
for label in labels:
ofile.write("%s:\n"%label)
datastr = ''
for char in data:
datastr += '%d,'%ord(char)
ofile.write('.byte %s\n' % datastr[:-1])
ofile.write(".text\n")
for f in m:
self.translateFunction(f,ofile)
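    # Sketch of the assembly layout translateModule emits (labels and byte
    # values are illustrative, not taken from a real module):
    #
    #     .comm zbuf,64,32        # zero-initialised read/write data
    #     .data
    #     msg:
    #     .byte 104,105,0         # initialised read/write data
    #     .section .rodata
    #     greeting:
    #     .byte 104,101,121,0     # read-only data
    #     .text
    #     ...function bodies emitted by translateFunction...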
def translateFunction(self,f,ofile):
if self.args.show_all or self.args.show_preopt_function:
irvis.showFunction(f)
if self.args.iropt:
self.doIROpt(f)
if self.args.show_all or self.args.show_postopt_function:
irvis.showFunction(f)
self.doInstructionSelection(f)
self.callingConventions(f)
#we are no longer in ssa after this point
for block in f:
self.blockFixups(block)
self.removePhiNodes(f)
if self.args.show_all or self.args.show_md_function_preallocation:
irvis.showFunction(f)
ig = interference.InterferenceGraph(f)
if self.args.show_all or self.args.show_interference:
interferencevis.showInterferenceGraph(ig)
ra = registerallocator.RegisterAllocator(self)
ra.allocate(f)
ig = interference.InterferenceGraph(f)
self.calleeSaveRegisters(f,ig)
f.resolveStack()
if self.args.show_all or self.args.show_md_function:
irvis.showFunction(f)
self.prologAndEpilog(f)
self.preEmitCleanup(f)
#linearize the function
linear = list(f)
        #clear (swap to None) branch targets that will fall through to the next block
for idx,b in enumerate(linear):
terminator = b[-1]
successors = terminator.getSuccessors()
for target in successors:
nextIdx = idx + 1
if nextIdx >= len(linear):
continue
if target == linear[nextIdx]:
terminator.swapSuccessor(target,None)
ofile.write(".globl %s\n" % f.name)
outasm = []
outasm.append("%s:" % f.name)
for block in linear:
outasm.append("." + block.name + ':')
for instr in block:
if instr.isMD():
asmstr = instr.asm()
else:
raise Exception("non native opcode %s:<%s>" % (type(instr),instr))
for line in asmstr.split("\n"):
outasm.append(line)
outasm = list(map(lambda x : x.strip(),outasm))
for asmstr in outasm:
prefix = ''
if asmstr.endswith(':'):
if asmstr.startswith('.'):
prefix = ' '
else:
prefix = ' '
ofile.write(prefix + asmstr + '\n')
def calleeSaveRegisters(self,func,ig):
#ig interference graph
for block in func:
idx = 0
while idx < len(block):
instr = block[idx]
if instr.isCall():
liveset = ig.instrToLiveness[instr] - set(instr.assigned)
before = []
after = []
for var in liveset:
#raise Exception(str(liveset))
#XXX this needs to be a proper size
#XXX also should reuse these slots
ss = func.createStackSlot(8)
before.append(self.getSaveRegisterInstruction(var,ss))
after.append(self.getLoadRegisterInstruction(var,ss))
for newInstr in before:
block.insert(idx,newInstr)
idx += 1
for newInstr in after:
block.insert(idx + 1,newInstr)
idx += 1
idx += 1
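    # Sketch of what calleeSaveRegisters inserts around a call (variable and
    # slot names are illustrative): if v2 and v3 are live across 'call f' and
    # not assigned by it, the block becomes
    #     save v2 -> slot0
    #     save v3 -> slot1
    #     call f
    #     load v2 <- slot0
    #     load v3 <- slot1
    # with one fresh 8-byte stack slot per live value (see the XXX notes above
    # about proper sizing and slot reuse).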
def dagFixups(self,dag):
raise Exception("unimplemented")
def blockFixups(self,block):
raise Exception("unimplemented")
def removePhiNodes(self,f):
blockCopies = {}
for block in f:
idx = 0
while idx < len(block):
instr = block[idx]
if type(instr) == ir.Phi:
del block[idx]
toAdd = []
for bidx,v in enumerate(instr.read):
curBlockToAddCopy = instr.blocks[bidx]
if curBlockToAddCopy not in blockCopies:
blockCopies[curBlockToAddCopy] = []
blockCopies[curBlockToAddCopy].append(self.getCopyInstruction(instr.assigned[0],v))
continue
idx += 1
#phi functions that depend on values overwritten by other phis
#need to be sorted. This sort here manages the dependencies
def cmpFunction(i1,i2):
for x in i2.read:
if x in i1.assigned:
#i2 should come before i1
return 1
for x in i1.read:
if x in i2.assigned:
#i1 should come before i2
return -1
return 0
for block in blockCopies:
copies = blockCopies[block]
copies.sort(cmp=cmpFunction)
for copyInstr in copies:
block.insert(-1,copyInstr)
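    # A worked example of the phi-copy ordering above (hypothetical copies,
    # not from a real function): suppose removing two phis in the same
    # predecessor block produces the parallel copies
    #     c1:  a <- b    (assigned=[a], read=[b])
    #     c2:  b <- c    (assigned=[b], read=[c])
    # cmpFunction(c1, c2) finds that c1 reads b, which c2 overwrites, and
    # returns -1, so c1 is placed before c2 and a still receives the old
    # value of b.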
def preEmitCleanup(self,f):
for block in f:
idx = 0
naiveMoves = [instr for instr in block if instr.isMove() and instr.read[0] == instr.assigned[0] ]
block.removeInstructions(naiveMoves)
def doIROpt(self,func):
mem2reg.Mem2Reg().runOnFunction(func)
#irvis.showFunction(func)
while True:
#irvis.showFunction(func)
if unused.UnusedVars().runOnFunction(func):
continue
if copypropagation.CopyPropagation().runOnFunction(func):
continue
if constantfold.ConstantFold().runOnFunction(func):
continue
#if jumpfix.JumpFix().runOnFunction(func):
# continue
if blockmerge.BlockMerge().runOnFunction(func):
continue
if branchreplace.BranchReplace().runOnFunction(func):
continue
break
def callingConventions(self,func):
#XXX need to shift pushes to the definition to
        #reduce register pressure
for block in func:
idx = 0
while idx < len(block):
instr = block[idx]
if instr.isCall():
newCall = self.getCallInstruction(instr)
block[idx] = newCall
pushInstructions = []
stackChange = 0
for var in reversed(instr.read):
stackChange += 4
#TODO ... must be the proper size...
pushInstructions += self.pushArgument(var)
for pushinstr in pushInstructions:
block.insert(idx,pushinstr)
idx += 1
if len(instr.assigned) != 0:
retReg = self.getReturnReg(instr.assigned[0])
copy = self.getCopyInstruction(instr.assigned[0],retReg)
newCall.assigned = [retReg]
idx += 1
block.insert(idx,copy)
if stackChange:
idx += 1
block.insert(idx,self.getStackClearingInstruction(stackChange))
elif type(instr) == ir.Ret:
if len(instr.read):
r = self.getReturnReg(instr.read[0])
mov = self.getCopyInstruction(r,instr.read[0])
block.insert(idx,mov)
idx += 1
idx += 1
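    # A rough sketch of the lowering callingConventions performs (illustrative
    # IR, not real syntax): for a call such as
    #     x = call f(a, b)
    # the block becomes
    #     push b              # arguments pushed in reverse order, 4 bytes each
    #     push a
    #     retReg = call f     # the call now assigns the return register
    #     x = copy retReg     # result moved out of the return register
    #     clear stack by 8    # stackChange = 4 * number of arguments
    # and a 'ret v' gets a copy of v into the return register inserted in
    # front of it.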
def doInstructionSelection(self,func):
for b in func:
assigned = set()
liveout = set()
            #XXX ugly code, move method somewhere? Find vars that are used
#outside the current block that need an edge to liveout
for instr in b:
assigned.update(instr.assigned)
for other in func:
if b != other:
for otherinstr in other:
for read in otherinstr.read:
if read in assigned:
liveout.add(read)
sd = selectiondag.SelectionDag(b,liveout)
isel = instructionselector.InstructionSelector()
if self.args.show_all or self.args.show_selection_dag:
dagvis.showSelDAG(sd)
self.dagFixups(sd)
if self.args.show_all or self.args.show_selection_dag:
dagvis.showSelDAG(sd)
isel.select(self,sd)
if self.args.show_all or self.args.show_md_selection_dag:
dagvis.showSelDAG(sd)
newblockops = [node.instr for node in sd.ordered() if type(node.instr) not in [selectiondag.Identity,selectiondag.LiveOut]]
b.opcodes = newblockops
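    # Example of the liveout computation above (hypothetical values): if this
    # block assigns v1 and v2 and some other block reads v2, then
    # liveout == {v2}; the selection DAG gets a LiveOut node for v2 so its
    # value survives past the end of the block, while v1 may die inside it.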
def prologAndEpilog(self,func):
stackSize = func.localsSize
entry = func.entry
prolog = self.getProlog(stackSize)
insertCounter = 0
for instr in prolog:
entry.insert(0 + insertCounter,instr)
insertCounter += 1
for b in func:
if type(b[-1]) == ir.Ret:
newRet = self.getRetInstruction(b[-1])
b[-1] = newRet
epilog = self.getEpilog(stackSize)
for instr in epilog:
b.insert(-1,instr)
def getEpilog(self,stackSize):
raise Exception("unimplemented")
def getProlog(self,stackSize):
raise Exception("unimplemented")
def getRegisters(self):
return []
def getMatchableInstructions(self):
raise Exception("unimplemented")
def getPossibleRegisters(self,v):
t = type(v)
return filter(lambda x : x.canContain(t),self.getRegisters())
|
|
# -*- coding: utf-8 -*-
"""Test tables"""
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
CREATE_STMT = "CREATE TABLE t1 (c1 integer, c2 text)"
COMMENT_STMT = "COMMENT ON TABLE t1 IS 'Test table t1'"
CREATE_STOR_PARAMS = CREATE_STMT + \
" WITH (fillfactor=90, autovacuum_enabled=false)"
class TableToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of created tables"""
def test_create_table(self):
"Map a table with two columns"
dbmap = self.to_map([CREATE_STMT])
assert dbmap['schema public']['table t1'] == {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}]}
def test_map_table_comment(self):
"Map a table comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema public']['table t1'] == {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
'description': 'Test table t1'}
def test_map_table_comment_quotes(self):
"Map a table comment with quotes"
stmts = [CREATE_STMT, "COMMENT ON TABLE t1 IS "
"'A \"special\" person''s table t1'"]
dbmap = self.to_map(stmts)
assert dbmap['schema public']['table t1'] == {
'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}],
'description': "A \"special\" person's table t1"}
def test_map_column_comments(self):
"Map two column comments"
stmts = [CREATE_STMT,
"COMMENT ON COLUMN t1.c1 IS 'Test column c1 of t1'",
"COMMENT ON COLUMN t1.c2 IS 'Test column c2 of t1'"]
dbmap = self.to_map(stmts)
assert dbmap['schema public']['table t1'] == {
'columns': [{'c1': {'type': 'integer',
'description': 'Test column c1 of t1'}},
{'c2': {'type': 'text',
'description': 'Test column c2 of t1'}}]}
def test_map_table_options(self):
"Map a table with options"
dbmap = self.to_map([CREATE_STOR_PARAMS])
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}],
'options': ["fillfactor=90", 'autovacuum_enabled=false']}
assert dbmap['schema public']['table t1'] == expmap
def test_map_inherit(self):
"Map a table that inherits from two other tables"
stmts = [CREATE_STMT, "CREATE TABLE t2 (c3 integer)",
"CREATE TABLE t3 (c4 text) INHERITS (t1, t2)"]
dbmap = self.to_map(stmts)
expmap = {'columns': [{'c1': {'type': 'integer', 'inherited': True}},
{'c2': {'type': 'text', 'inherited': True}},
{'c3': {'type': 'integer', 'inherited': True}},
{'c4': {'type': 'text'}}],
'inherits': ['t1', 't2']}
assert dbmap['schema public']['table t3'] == expmap
def test_unlogged_table(self):
"Map an unlogged table"
        if self.db.version < 90100:
            self.skipTest('Only available on PG 9.1 and later')
dbmap = self.to_map(["CREATE UNLOGGED TABLE t1 (c1 integer, c2 text)"])
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}],
'unlogged': True}
assert dbmap['schema public']['table t1'] == expmap
def test_map_table_within_schema(self):
"Map a schema and a table within it"
stmts = ["CREATE SCHEMA s1",
"CREATE TABLE s1.t1 (c1 INTEGER, c2 TEXT)"]
dbmap = self.to_map(stmts)
assert dbmap['schema s1'] == {
'table t1': {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}}
def test_map_table_quoted(self):
"Map a schema and a table both of which need to be quoted"
stmts = ['CREATE SCHEMA "a schema"',
'CREATE TABLE "a schema"."The.Table" ("column" SERIAL, '
'c2 TEXT)']
dbmap = self.to_map(stmts)
assert dbmap['schema a schema']['table The.Table'] == {
'columns': [{'column': {'type': 'integer', 'not_null': True,
'default':
'nextval(\'"a schema"."The.Table_column_seq"\'::regclass)'}},
{'c2': {'type': 'text'}}]}
def test_map_select_tables(self):
"Map two tables out of three present"
stmts = [CREATE_STMT, "CREATE TABLE t2 (c1 integer, c2 text)",
"CREATE TABLE t3 (c1 integer, c2 text)"]
dbmap = self.to_map(stmts, tables=['t2', 't1'])
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}
assert dbmap['schema public']['table t1'] == expmap
assert dbmap['schema public']['table t2'] == expmap
assert 'table t3' not in dbmap['schema public']
def test_map_table_sequence(self):
"Map sequence if owned by a table"
stmts = [CREATE_STMT, "CREATE TABLE t2 (c1 integer, c2 text)",
"CREATE SEQUENCE seq1", "ALTER SEQUENCE seq1 OWNED BY t2.c1",
"CREATE SEQUENCE seq2"]
dbmap = self.to_map(stmts, tables=['t2'])
self.db.execute_commit("DROP SEQUENCE seq1")
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}
assert 'table t1' not in dbmap['schema public']
assert dbmap['schema public']['table t2'] == expmap
assert 'sequence seq1' in dbmap['schema public']
        assert 'sequence seq2' not in dbmap['schema public']
class TableToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation of table statements from input schemas"""
def test_create_table(self):
"Create a two-column table"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_STMT
def test_create_table_quoted_idents(self):
"Create a table needing quoted identifiers"
inmap = self.std_map()
inmap['schema public'].update({'table order': {
'columns': [{'primary': {'type': 'integer'}},
{'two words': {'type': 'text'}}]}})
sql = self.to_sql(inmap, quote_reserved=True)
assert fix_indent(sql[0]) == 'CREATE TABLE "order" (' \
'"primary" integer, "two words" text)'
def test_bad_table_map(self):
"Error creating a table with a bad map"
inmap = self.std_map()
inmap['schema public'].update({'t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}})
with pytest.raises(KeyError):
self.to_sql(inmap)
def test_missing_columns(self):
"Error creating a table with no columns"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {'columns': []}})
with pytest.raises(ValueError):
self.to_sql(inmap)
def test_drop_table(self):
"Drop an existing table"
sql = self.to_sql(self.std_map(), [CREATE_STMT])
assert sql == ["DROP TABLE t1"]
def test_rename_table(self):
"Rename an existing table"
inmap = self.std_map()
inmap['schema public'].update({'table t2': {
'oldname': 't1',
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}})
sql = self.to_sql(inmap, [CREATE_STMT])
assert sql == ["ALTER TABLE t1 RENAME TO t2"]
def test_create_table_options(self):
"Create a table with options"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}],
'options': ["fillfactor=90", "autovacuum_enabled=false"]}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_STOR_PARAMS
def test_change_table_options(self):
"Change a table's storage parameters"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}],
'options': ["fillfactor=70"]}})
sql = self.to_sql(inmap, [CREATE_STOR_PARAMS])
assert fix_indent(sql[0]) == "ALTER TABLE t1 SET (fillfactor=70), " \
"RESET (autovacuum_enabled)"
def test_create_table_within_schema(self):
"Create a new schema and a table within it"
inmap = self.std_map()
inmap.update({'schema s1': {'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}}})
sql = self.to_sql(inmap)
expsql = ["CREATE SCHEMA s1",
"CREATE TABLE s1.t1 (c1 integer, c2 text)"]
for i in range(len(expsql)):
assert fix_indent(sql[i]) == expsql[i]
def test_unlogged_table(self):
"Create an unlogged table"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}], 'unlogged': True}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == \
"CREATE UNLOGGED TABLE t1 (c1 integer, c2 text)"
class TableCommentToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation of table and column COMMENT statements"""
def _tblmap(self):
"Return a table input map with a comment"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'description': 'Test table t1',
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}})
return inmap
def test_table_with_comment(self):
"Create a table with a comment"
sql = self.to_sql(self._tblmap())
assert fix_indent(sql[0]) == CREATE_STMT
assert sql[1] == COMMENT_STMT
def test_comment_on_table(self):
"Create a comment for an existing table"
sql = self.to_sql(self._tblmap(), [CREATE_STMT])
assert sql == [COMMENT_STMT]
def test_table_comment_quotes(self):
"Create a table comment with quotes"
inmap = self._tblmap()
inmap['schema public']['table t1']['description'] = \
"A \"special\" person's table t1"
sql = self.to_sql(inmap, [CREATE_STMT])
assert sql == ["COMMENT ON TABLE t1 IS "
"'A \"special\" person''s table t1'"]
def test_drop_table_comment(self):
"Drop a comment on an existing table"
inmap = self._tblmap()
del inmap['schema public']['table t1']['description']
sql = self.to_sql(inmap, [CREATE_STMT, COMMENT_STMT])
assert sql == ["COMMENT ON TABLE t1 IS NULL"]
def test_change_table_comment(self):
"Change existing comment on a table"
inmap = self._tblmap()
inmap['schema public']['table t1'].update(
{'description': 'Changed table t1'})
sql = self.to_sql(inmap, [CREATE_STMT, COMMENT_STMT])
assert sql == ["COMMENT ON TABLE t1 IS 'Changed table t1'"]
def test_create_column_comments(self):
"Create a table with column comments"
inmap = self._tblmap()
inmap['schema public']['table t1']['columns'][0]['c1'].update(
description='Test column c1')
inmap['schema public']['table t1']['columns'][1]['c2'].update(
description='Test column c2')
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_STMT
assert sql[1] == COMMENT_STMT
assert sql[2] == "COMMENT ON COLUMN t1.c1 IS 'Test column c1'"
assert sql[3] == "COMMENT ON COLUMN t1.c2 IS 'Test column c2'"
def test_add_column_comment(self):
"Add a column comment to an existing table"
inmap = self._tblmap()
inmap['schema public']['table t1']['columns'][0]['c1'].update(
description='Test column c1')
sql = self.to_sql(inmap, [CREATE_STMT, COMMENT_STMT])
assert sql[0] == "COMMENT ON COLUMN t1.c1 IS 'Test column c1'"
def test_add_column_with_comment(self):
"Add a commented column to an existing table"
inmap = self._tblmap()
inmap['schema public']['table t1']['columns'].append({'c3': {
'description': 'Test column c3', 'type': 'integer'}})
sql = self.to_sql(inmap, [CREATE_STMT, COMMENT_STMT])
assert fix_indent(sql[0]) == "ALTER TABLE t1 ADD COLUMN c3 integer"
assert sql[1] == "COMMENT ON COLUMN t1.c3 IS 'Test column c3'"
def test_drop_column_comment(self):
"Drop a column comment on an existing table"
stmts = [CREATE_STMT, COMMENT_STMT,
"COMMENT ON COLUMN t1.c1 IS 'Test column c1'"]
sql = self.to_sql(self._tblmap(), stmts)
assert sql[0] == "COMMENT ON COLUMN t1.c1 IS NULL"
def test_change_column_comment(self):
"Add a column comment to an existing table"
inmap = self._tblmap()
inmap['schema public']['table t1']['columns'][0]['c1'].update(
description='Changed column c1')
sql = self.to_sql(inmap, [CREATE_STMT, COMMENT_STMT])
assert sql[0] == "COMMENT ON COLUMN t1.c1 IS 'Changed column c1'"
class TableInheritToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation of table inheritance statements"""
def test_table_inheritance(self):
"Create a table that inherits from another"
inmap = self.std_map()
inmap['schema public'].update({'table t1': {
'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}]}})
inmap['schema public'].update({'table t2': {
'columns': [{'c1': {'type': 'integer', 'inherited': True}},
{'c2': {'type': 'text', 'inherited': True}},
{'c3': {'type': 'numeric'}}], 'inherits': ['t1']}})
sql = self.to_sql(inmap)
assert fix_indent(sql[0]) == CREATE_STMT
assert fix_indent(sql[1]) == "CREATE TABLE t2 (c3 numeric) " \
"INHERITS (t1)"
def test_drop_inherited(self):
"Drop tables that inherit from others"
stmts = [CREATE_STMT, "CREATE TABLE t2 (c3 numeric) INHERITS (t1)",
"CREATE TABLE t3 (c4 date) INHERITS (t2)"]
sql = self.to_sql(self.std_map(), stmts)
assert sql == ["DROP TABLE t3", "DROP TABLE t2", "DROP TABLE t1"]
|
|
import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
        Note that the synthetic feature weight is subject to l1/l2
        regularization like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
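    Examples
    --------
    A minimal usage sketch on a hand-made toy dataset (the data, the fixed
    ``random_state`` and the skipped prediction output are illustrative
    assumptions, not values verified against liblinear):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVC
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = LinearSVC(random_state=0).fit(X, y)
    >>> clf.coef_.shape
    (1, 2)
    >>> clf.predict([[-0.8, -1]])  # doctest: +SKIP
    array([1])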
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss (also known as the L1 loss), while
        'squared_epsilon_insensitive' is its square (the L2 loss).
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
        Note that the synthetic feature weight is subject to l1/l2
        regularization like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
    coef_ : array, shape = [n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
    intercept_ : array, shape = [1]
Constants in decision function.
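    Examples
    --------
    A minimal regression sketch (the random toy data is an illustrative
    assumption; only the deterministic coefficient shape is checked):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVR
    >>> np.random.seed(0)
    >>> X = np.random.randn(10, 5)
    >>> y = np.random.randn(10)
    >>> reg = LinearSVR(C=1.0, epsilon=0.0).fit(X, y)
    >>> reg.coef_.shape
    (5,)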
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
    to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
        LinearSVC for further comparison details.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
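    Examples
    --------
    A minimal novelty-detection sketch (the training data and query points
    are illustrative assumptions; predictions are +1 for inliers and -1 for
    outliers, so the exact output is left unchecked):
    >>> import numpy as np
    >>> from sklearn.svm import OneClassSVM
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 2)
    >>> clf = OneClassSVM(nu=0.1, gamma=0.1).fit(X)
    >>> clf.predict([[0, 0], [10, 10]])  # doctest: +SKIP
    array([ 1., -1.])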
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
|
|
"""
Extensible validation for Python dictionaries.
This module implements Cerberus Validator class
:copyright: 2012-2015 by Nicola Iarocci.
:license: ISC, see LICENSE for more details.
Full documentation is available at http://cerberus.readthedocs.org/
"""
import sys
import re
import copy
from datetime import datetime
from collections import Iterable, Mapping, Sequence
from . import errors
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring # noqa
_int_types = (int, long) # noqa
class ValidationError(ValueError):
""" Raised when the target dictionary is missing or has the wrong format
"""
pass
class SchemaError(ValueError):
""" Raised when the validation schema is missing, has the wrong format or
contains errors.
"""
pass
class Validator(object):
""" Validator class. Validates any Python dict against a validation schema,
which is provided as an argument at class instantiation, or upon calling
the :func:`validate` method.
:param schema: optional validation schema.
:param transparent_schema_rules: if ``True`` unknown schema rules will be
ignored (no SchemaError will be raised).
                                     Defaults to ``False``. Useful if you need to
extend the schema grammar beyond Cerberus'
domain.
:param ignore_none_values: If ``True`` it will ignore None values for type
checking. (no UnknownType error will be added).
Defaults to ``False``. Useful if your document
is composed from function kwargs with defaults.
:param allow_unknown: if ``True`` unknown key/value pairs (not present in
the schema) will be ignored, and validation will
pass. Defaults to ``False``, returning an 'unknown
                          field error' on validation.
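    A minimal usage sketch (the schemas and documents below are illustrative
    assumptions, not examples taken from the Cerberus documentation):
    >>> schema = {'name': {'type': 'string', 'required': True},
    ...           'age': {'type': 'integer', 'min': 0}}
    >>> v = Validator(schema)
    >>> v.validate({'name': 'john', 'age': 32})
    True
    >>> v.validate({'age': -1})
    False
    >>> v = Validator({'tags': {'type': 'list',
    ...                         'schema': {'type': 'string'}}},
    ...               allow_unknown=True)
    >>> v.validate({'tags': ['a', 'b'], 'nickname': 'j'})
    True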
.. versionadded:: 0.9
when 'items' is applied to a list, field name is used as key for
'validator.errors', and offending field indexes are used as keys for
field errors ({'a_list_of_strings': {1: 'not a string'}})
'type' can be a list of valid types.
'keyschema' is renamed to 'valueschema'. Closes #92.
'coerce' rule
'propertyschema' validation rule.
additional kwargs that are passed to the __init__-method of an
instance of Validator-(sub-)class are passed to child-validators.
:func:`validated` added
.. versionchanged:: 0.8.1
'dependencies' for sub-document fields. Closes #64.
'readonly' should be validated before any other validation. Closes #63.
'allow_unknown' does not apply to sub-dictionaries in a list.
Closes #67.
update mode does not ignore required fields in subdocuments. Closes #72.
'allow_unknown' does not respect custom rules. Closes #66.
.. versionadded:: 0.8
'dependencies' also support a dict of dependencies.
'allow_unknown' can be a schema used to validate unknown fields.
Support for function-based validation mode.
.. versionchanged:: 0.7.2
Successfully validate int as a float type.
.. versionchanged:: 0.7.1
Validator options like 'allow_unknown' and 'ignore_none_values' are now
taken into consideration when validating sub-dictionaries.
Make self.document always the root level document.
Up-front validation for schemas.
.. versionadded:: 0.7
'keyschema' validation rule.
'regex' validation rule.
'dependencies' validation rule.
        'min', 'max' now apply to floats and numbers too. Closes #30.
'set' data type.
.. versionadded:: 0.6
'number' (integer or float) validator.
.. versionchanged:: 0.5.0
``validator.errors`` returns a dict where keys are document fields and
values are validation errors.
.. versionchanged:: 0.4.0
:func:`validate_update` is deprecated. Use :func:`validate` with
``update=True`` instead.
Type validation is always performed first (only exception being
``nullable``). On failure, it blocks other rules on the same field.
Closes #18.
.. versionadded:: 0.2.0
`self.errors` returns an empty list when validate() has not been called.
        Option to allow nullable field values.
Option to allow unknown key/value pairs.
.. versionadded:: 0.1.0
Option to ignore None values for type checking.
.. versionadded:: 0.0.3
Support for transparent schema rules.
Added new 'empty' rule for string fields.
.. versionadded:: 0.0.2
Support for addition and validation of custom data types.
"""
special_rules = "required", "nullable", "type", "dependencies", \
"readonly", "allow_unknown", "schema", "coerce"
def __init__(self, schema=None, transparent_schema_rules=False,
ignore_none_values=False, allow_unknown=False, **kwargs):
self.schema = schema
self.transparent_schema_rules = transparent_schema_rules
self.ignore_none_values = ignore_none_values
self.allow_unknown = allow_unknown
self._additional_kwargs = kwargs
if schema:
self.validate_schema(schema)
self._errors = {}
def __call__(self, *args, **kwargs):
return self.validate(*args, **kwargs)
@property
def errors(self):
"""
        :rtype: a dict of validation errors, keyed by document field. Will
            be empty if no errors were found during validation. Resets after
            each call to :func:`validate`.
"""
return self._errors
def validate_update(self, document, schema=None, context=None):
""" Validates a Python dictionary against a validation schema. The
difference with :func:`validate` is that the ``required`` rule will be
ignored here.
:param schema: optional validation schema. Defaults to ``None``. If not
provided here, the schema must have been provided at
class instantiation.
:return: True if validation succeeds, False otherwise. Check the
:func:`errors` property for a list of validation errors.
.. deprecated:: 0.4.0
Use :func:`validate` with ``update=True`` instead.
"""
return self._validate(document, schema, update=True, context=context)
def validate(self, document, schema=None, update=False, context=None):
""" Validates a Python dictionary against a validation schema.
:param document: the dict to validate.
:param schema: the validation schema. Defaults to ``None``. If not
provided here, the schema must have been provided at
class instantiation.
:param update: If ``True`` validation of required fields won't be
performed.
:return: True if validation succeeds, False otherwise. Check the
:func:`errors` property for a list of validation errors.
.. versionchanged:: 0.4.0
Support for update mode.
"""
return self._validate(document, schema, update=update, context=context)
def validated(self, *args, **kwargs):
""" Wrapper around ``Validator.validate`` that returns the validated
document or ``None`` if validation failed.
"""
self.validate(*args, **kwargs)
if self.errors:
return None
else:
return self.document
def _validate(self, document, schema=None, update=False, context=None):
self._errors = {}
self.update = update
if schema is not None:
self.validate_schema(schema)
self.schema = schema
elif self.schema is None:
raise SchemaError(errors.ERROR_SCHEMA_MISSING)
if document is None:
raise ValidationError(errors.ERROR_DOCUMENT_MISSING)
if not isinstance(document, Mapping):
raise ValidationError(errors.ERROR_DOCUMENT_FORMAT % str(document))
# make root document available for validators (Cerberus #42, Eve #295)
target = context if context is not None else document
try:
# might fail when dealing with complex document values
self.document = copy.deepcopy(target)
except:
# fallback on a shallow copy
self.document = copy.copy(target)
for field, value in document.items():
if self.ignore_none_values and value is None:
continue
definition = self.schema.get(field)
if definition is not None:
if value is None:
if definition.get("nullable", False) is True:
continue
else:
self._error(field, errors.ERROR_NOT_NULLABLE)
if 'coerce' in definition:
value = self._validate_coerce(definition['coerce'], field,
value)
self.document[field] = value
if 'readonly' in definition:
self._validate_readonly(definition['readonly'], field,
value)
if self.errors.get(field):
continue
if 'type' in definition:
self._validate_type(definition['type'], field, value)
if self.errors.get(field):
continue
if 'dependencies' in definition:
self._validate_dependencies(
document=self.document,
dependencies=definition["dependencies"],
field=field
)
if self.errors.get(field):
continue
if 'schema' in definition:
self._validate_schema(definition['schema'],
field,
value,
definition.get('allow_unknown'))
definition_rules = [rule for rule in definition.keys()
if rule not in self.special_rules]
for rule in definition_rules:
validatorname = "_validate_" + rule.replace(" ", "_")
validator = getattr(self, validatorname, None)
if validator:
validator(definition[rule], field, value)
else:
if self.allow_unknown:
if isinstance(self.allow_unknown, Mapping):
# validate that unknown fields matches the schema
# for unknown_fields
unknown_validator = \
self.__get_child_validator(
schema={field: self.allow_unknown})
if not unknown_validator.validate({field: value}):
self._error(field, unknown_validator.errors[field])
else:
# allow unknown field to pass without any kind of
# validation
pass
else:
self._error(field, errors.ERROR_UNKNOWN_FIELD)
if not self.update:
self._validate_required_fields(self.document)
return len(self._errors) == 0
def _error(self, field, _error):
field_errors = self._errors.get(field, [])
if not isinstance(field_errors, list):
field_errors = [field_errors]
if isinstance(_error, (_str_type, dict)):
field_errors.append(_error)
else:
field_errors.extend(_error)
if len(field_errors) == 1:
field_errors = field_errors.pop()
self._errors[field] = field_errors
def validate_schema(self, schema):
""" Validates a schema against supported rules.
:param schema: the schema to be validated as a legal cerberus schema
according to the rules of this Validator object.
.. versionadded:: 0.7.1
"""
if not isinstance(schema, Mapping):
raise SchemaError(errors.ERROR_SCHEMA_FORMAT % str(schema))
# TODO remove on next major release
def update_to_valueschema(schema, warning_printed=False):
if 'keyschema' in schema:
schema['valueschema'] = schema['keyschema']
del schema['keyschema']
if not warning_printed:
print('WARNING cerberus: `keyschema` is deprecated, '
'use `valueschema` instead')
warning_printed = True
for key, value in schema.items():
if isinstance(value, Mapping):
schema[key] = update_to_valueschema(value, warning_printed)
return schema
schema = update_to_valueschema(schema)
for field, constraints in schema.items():
if not isinstance(constraints, Mapping):
raise SchemaError(errors.ERROR_DEFINITION_FORMAT % field)
for constraint, value in constraints.items():
if constraint == 'type':
values = value if isinstance(value, list) else [value]
for value in values:
if not hasattr(self, '_validate_type_' + value):
raise SchemaError(
errors.ERROR_UNKNOWN_TYPE % value)
if 'dict' in values and 'list' in values:
if 'valueschema' in constraints and \
'schema' not in constraints: # noqa
raise SchemaError('You must provide a compleme'
'ntary `schema`')
if 'schema' in constraints and \
'valueschema' not in constraints: # noqa
raise SchemaError('You must provide a compleme'
'ntary `valueschema`')
elif constraint == 'schema':
constraint_type = constraints.get('type')
if constraint_type is not None:
if constraint_type == 'list' or \
'list' in constraint_type:
self.validate_schema({'schema': value})
elif constraint_type == 'dict' or \
'dict' in constraint_type:
self.validate_schema(value)
else:
raise SchemaError(errors.ERROR_SCHEMA_TYPE % field)
elif constraint in self.special_rules:
pass
elif constraint == 'items':
if isinstance(value, Mapping):
# list of dicts, deprecated
self.validate_schema(value)
else:
for item_schema in value:
self.validate_schema({'schema': item_schema})
elif not hasattr(self, '_validate_' + constraint):
if not self.transparent_schema_rules:
raise SchemaError(errors.ERROR_UNKNOWN_RULE % (
constraint, field))
def _validate_coerce(self, coerce, field, value):
try:
value = coerce(value)
except (TypeError, ValueError):
self._error(field, errors.ERROR_COERCION_FAILED % field)
return value
def _validate_required_fields(self, document):
""" Validates that required fields are not missing. If dependencies
are precised then validate 'required' only if all dependencies
are validated.
:param document: the document being validated.
"""
required = list(field for field, definition in self.schema.items()
if definition.get('required') is True)
missing = set(required) - set(key for key in document.keys()
if document.get(key) is not None or
not self.ignore_none_values)
for field in missing:
dependencies = self.schema[field].get('dependencies')
dependencies_validated = self._validate_dependencies(
document, dependencies, field, break_on_error=True)
if dependencies_validated:
self._error(field, errors.ERROR_REQUIRED_FIELD)
def _validate_readonly(self, read_only, field, value):
if read_only:
self._error(field, errors.ERROR_READONLY_FIELD)
def _validate_regex(self, match, field, value):
"""
.. versionadded:: 0.7
"""
if not isinstance(value, _str_type):
return
pattern = re.compile(match)
if not pattern.match(value):
self._error(field, errors.ERROR_REGEX % match)
def _validate_type(self, data_type, field, value):
def call_type_validation(_type, value):
validator = getattr(self, "_validate_type_" + _type)
validator(field, value)
if isinstance(data_type, _str_type):
call_type_validation(data_type, value)
elif isinstance(data_type, Iterable):
prev_errors = self._errors.copy()
for _type in data_type:
call_type_validation(_type, value)
if len(self._errors) == len(prev_errors):
return
else:
self._errors = prev_errors.copy()
self._error(field, errors.ERROR_BAD_TYPE % ", ".
join(data_type[:-1]) + ' or ' + data_type[-1])
def _validate_type_string(self, field, value):
if not isinstance(value, _str_type):
self._error(field, errors.ERROR_BAD_TYPE % "string")
def _validate_type_integer(self, field, value):
if not isinstance(value, _int_types):
self._error(field, errors.ERROR_BAD_TYPE % "integer")
def _validate_type_float(self, field, value):
if not isinstance(value, float) and not isinstance(value, _int_types):
self._error(field, errors.ERROR_BAD_TYPE % "float")
def _validate_type_number(self, field, value):
"""
.. versionadded:: 0.6
"""
if not isinstance(value, float) and not isinstance(value, _int_types):
self._error(field, errors.ERROR_BAD_TYPE % "number")
def _validate_type_boolean(self, field, value):
if not isinstance(value, bool):
self._error(field, errors.ERROR_BAD_TYPE % "boolean")
def _validate_type_datetime(self, field, value):
if not isinstance(value, datetime):
self._error(field, errors.ERROR_BAD_TYPE % "datetime")
def _validate_type_dict(self, field, value):
if not isinstance(value, Mapping):
self._error(field, errors.ERROR_BAD_TYPE % "dict")
def _validate_type_list(self, field, value):
if not isinstance(value, Sequence) or isinstance(
value, _str_type):
self._error(field, errors.ERROR_BAD_TYPE % "list")
def _validate_type_set(self, field, value):
if not isinstance(value, set):
self._error(field, errors.ERROR_BAD_TYPE % "set")
def _validate_maxlength(self, max_length, field, value):
if isinstance(value, Sequence):
if len(value) > max_length:
self._error(field, errors.ERROR_MAX_LENGTH % max_length)
def _validate_minlength(self, min_length, field, value):
if isinstance(value, Sequence):
if len(value) < min_length:
self._error(field, errors.ERROR_MIN_LENGTH % min_length)
def _validate_max(self, max_value, field, value):
if isinstance(value, (_int_types, float)):
if value > max_value:
self._error(field, errors.ERROR_MAX_VALUE % max_value)
def _validate_min(self, min_value, field, value):
if isinstance(value, (_int_types, float)):
if value < min_value:
self._error(field, errors.ERROR_MIN_VALUE % min_value)
def _validate_allowed(self, allowed_values, field, value):
if isinstance(value, _str_type):
if value not in allowed_values:
self._error(field, errors.ERROR_UNALLOWED_VALUE % value)
elif isinstance(value, Sequence):
disallowed = set(value) - set(allowed_values)
if disallowed:
self._error(field,
errors.ERROR_UNALLOWED_VALUES % list(disallowed))
elif isinstance(value, int):
if value not in allowed_values:
self._error(field, errors.ERROR_UNALLOWED_VALUE % value)
def _validate_empty(self, empty, field, value):
if isinstance(value, _str_type) and len(value) == 0 and not empty:
self._error(field, errors.ERROR_EMPTY_NOT_ALLOWED)
def _validate_schema(self, schema, field, value, nested_allow_unknown):
if isinstance(value, Sequence) and not isinstance(value, _str_type):
list_errors = {}
for i in range(len(value)):
validator = self.__get_child_validator(
schema={i: schema}, allow_unknown=self.allow_unknown)
validator.validate({i: value[i]}, context=self.document)
list_errors.update(validator.errors)
if len(list_errors):
self._error(field, list_errors)
elif isinstance(value, Mapping):
if 'list' in self.schema[field]['type']:
return
validator = copy.copy(self)
validator.schema = schema
if not validator.allow_unknown:
validator.allow_unknown = nested_allow_unknown
validator.validate(value, context=self.document,
update=self.update)
if len(validator.errors):
self._error(field, validator.errors)
def _validate_valueschema(self, schema, field, value):
if isinstance(value, Mapping):
for key, document in value.items():
validator = self.__get_child_validator()
validator.validate(
{key: document}, {key: schema}, context=self.document)
if len(validator.errors):
self._error(field, validator.errors)
def _validate_propertyschema(self, schema, field, value):
if isinstance(value, Mapping):
validator = self.__get_child_validator(
schema={field: {'type': 'list', 'schema': schema}})
validator.validate({field: list(value.keys())},
context=self.document)
for error in validator.errors:
self._error(field, error)
def _validate_items(self, items, field, value):
if isinstance(items, Mapping):
self._validate_items_schema(items, field, value)
elif isinstance(items, Sequence):
self._validate_items_list(items, field, value)
def _validate_items_list(self, schema, field, values):
if len(schema) != len(values):
self._error(field, errors.ERROR_ITEMS_LIST % len(schema))
else:
for i in range(len(schema)):
validator = self.__get_child_validator(schema={i: schema[i]})
validator.validate({i: values[i]}, context=self.document)
for error in validator.errors:
self.errors.setdefault(field, {})
self.errors[field].update(validator.errors)
def _validate_items_schema(self, schema, field, value):
validator = self.__get_child_validator(schema=schema)
for item in value:
validator.validate(item, context=self.document)
for field, error in validator.errors.items():
self._error(field, error)
def _validate_dependencies(self, document, dependencies, field,
break_on_error=False):
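        # The 'dependencies' rule accepts a single field name, a sequence of
        # field names, or a mapping of field name -> allowed value(s); dotted
        # names such as 'a_dict.a_field' address sub-document fields. A
        # sketch of the accepted schema forms (illustrative assumption):
        #
        #   {'field': {'dependencies': 'other'}}
        #   {'field': {'dependencies': ['other', 'a_dict.a_field']}}
        #   {'field': {'dependencies': {'other': ['x', 'y']}}}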
if isinstance(dependencies, _str_type):
dependencies = [dependencies]
if isinstance(dependencies, Sequence):
for dependency in dependencies:
parts = dependency.split('.')
subdoc = copy.copy(document)
for part in parts:
if part not in subdoc:
if not break_on_error:
self._error(field,
errors.ERROR_DEPENDENCIES_FIELD %
dependency)
else:
return False
else:
subdoc = subdoc[part]
elif isinstance(dependencies, Mapping):
for dep_name, dep_values in dependencies.items():
if isinstance(dep_values, _str_type):
dep_values = [dep_values]
parts = dep_name.split('.')
subdoc = copy.copy(document)
for part in parts:
if part not in subdoc:
if not break_on_error:
self._error(field,
errors.ERROR_DEPENDENCIES_FIELD_VALUE
% (dep_name, dep_values))
break
else:
return False
else:
subdoc = subdoc[part]
if isinstance(subdoc, _str_type) and subdoc not in dep_values:
if not break_on_error:
self._error(field,
errors.ERROR_DEPENDENCIES_FIELD_VALUE
% (dep_name, dep_values))
else:
return False
return True
def _validate_validator(self, validator, field, value):
# call customized validator function
validator(field, value, self._error)
def __get_child_validator(self, **kwargs):
""" creates a new instance of Validator-(sub-)class """
cumulated_kwargs = self._additional_kwargs.copy()
cumulated_kwargs.update(kwargs)
return self.__class__(**cumulated_kwargs)
|
|
# Copyright (C) 2015 Artem Chepurnoy <artemchep@gmail.com>
#
# This script is published under the terms of the MIT license.
# See http://opensource.org/licenses/mit-license.php
# Python 3 is required
import itertools
from sudoku import Clue
class _Data:
pass
class _Ceil:
def __init__(self, data):
self.data = data
self.ghost = self.value = [-i - 1 for i in range(data.size)]
self.groups = []
def __repr__(self):
return "<Ceil value:%s>" % self.value
def set(self, value):
self.ghost = self.value
self.value = [value]
# Notify the groups.
for i in self.groups:
i.on_ceil_value_set_pre(self, value)
# Notify the groups.
for i in self.groups:
i.on_ceil_value_set(self, value)
# Clean-up the ghost.
self.ghost = None
def abandon(self, value):
for i, j in enumerate(self.value):
if j == value:
# Notify the groups.
for k in self.groups:
k.on_ceil_value_abandoned_pre(self, value)
del self.value[i]
break
else:
return False
# Notify the groups.
for i in self.groups:
i.on_ceil_value_abandoned(self, value)
return True
def issubset(self, ceil):
"""
Report whether another super-position contains this super-position.
"""
if ceil is self:
return True
for v in ceil.value:
if v not in self.value:
return False
return True
class _Group:
def __init__(self, cells, data):
self.data = data
self.cells = cells
self.queue = []
# Link this group to the cells.
for ceil in cells:
ceil.groups.append(self)
# Create depth map.
self.depth = [1] * data.size
def on_ceil_value_set_pre(self, ceil, value):
# Remove the ceil from list.
self.cells.remove(ceil)
# Hidden singles
self.depth[value - 1] = -self.data.size
for i in ceil.ghost:
self.depth[-i - 1] += 1
def on_ceil_value_set(self, ceil, value):
# Naked singles
for i in self.cells:
i.abandon(-value)
def on_ceil_value_abandoned_pre(self, ceil, value):
self.depth[-value - 1] += 1
def on_ceil_value_abandoned(self, ceil, value):
self._method_hidden_singles(ceil, value)
self._method_hidden_candidates(ceil, value)
self._method_naked_candidates(ceil, value)
self._method_intersection_removal(ceil, value)
# Handle queue.
for ceil, value in self.queue:
ceil.abandon(value)
self.queue.clear()
def _method_hidden_singles(self, ceil, value):
"""
Hidden Single means that for a given digit and house only one cell
is left to place that digit. The cell itself has more than one candidate
left, the correct digit is thus hidden amongst the rest.
This is the same as the Pointing Singles.
"""
if self.depth[-value - 1] == self.data.size:
for i in self.cells:
if i is not ceil and value in i.value:
# Simplify the superposition.
for k in i.value:
if k != value:
self._post_abandon(i, k)
break
def _method_hidden_candidates(self, ceil, value):
"""
Read more: http://www.sudokuwiki.org/Hidden_Candidates
"""
cells = []
for n in range(2, int(self.data.size ** 0.5) * 2):
# Get
base_elements = [-i - 1 for i, depth in enumerate(self.depth) if depth == self.data.size - n + 1]
for v in base_elements:
merge = [0] * self.data.size
for ceil in self.cells:
if v in ceil.value:
cells.append(ceil)
for k in ceil.value:
merge[-k - 1] += 1
if len(cells) == n:
break
size = 0
for i, m in enumerate(merge):
if m != 0 and self.depth[i] >= self.data.size - n + 1:
size += 1
merge[i] = 0
if self.depth[i] != self.data.size - m + 1:
break
else:
if size == n:
for i, k in enumerate(merge):
if k != 0:
for ceil in cells:
self._post_abandon(ceil, -i - 1)
cells.clear()
def _method_naked_candidates(self, ceil, value):
"""
Naked Pair.
A Naked Pair (also known as a Conjugate Pair) is a set of two candidate
numbers sited in two cells that belong to at least one unit in common.
That is they reside in the same row, box or column.
Naked Triples
A Naked Triple is slightly more complicated because it does not always
imply three numbers each in three cells.
Any group of three cells in the same unit that contain IN TOTAL three
candidates is a Naked Triple. Each cell can have two or three numbers,
as long as in combination all three cells have only three numbers.
When this happens, the three candidates can be removed from all other
cells in the same unit.
Naked Quads
A Naked Quad is rarer, especially in its full form, but still useful if it
can be spotted. The same logic from Naked Triples applies, but the reason
it is so rare is that if a Quad is present the remaining cells are more
likely to be a Triple or Pair, and the solver will highlight those first.
Read more: http://www.sudokuwiki.org/Naked_Candidates
"""
if len(ceil.value) == self.data.size - 1:
# 1. The length can't be equal to the size.
# 2. If the length is equal to a `size - 1`
# then it's up to a Naked Singles method to
# handle it
return
s = []
cells = []
# Find all the cells that are depending on the
# changed one.
for i in self.cells:
if i is not ceil and len(i.value) < self.data.size - 1: # Ignore Naked Singles
if ceil.issubset(i):
cells.append(i) # Have to re-check this one.
for i in cells:
for j in self.cells:
if i.issubset(j):
s.append(j)
length = len(s)
if length == len(i.value):
for v in i.value:
size = self.data.size - self.depth[-v - 1] + 1
if size == length:
# This value exists only in our naked candidates,
# so there's no need to try to remove it from others.
continue
# Remove the value from an un-linked cells.
p = 0
for j in self.cells:
if p == length or j is not s[p]:
size -= 1
self._post_abandon(j, v)
if not size:
# No cells left in which it's possible to
# abandon the value.
break
else:
p += 1
s.clear()
del s, cells
def _method_intersection_removal(self, ceil, value):
size = self.data.size
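# Intersection removal (a.k.a. pointing pairs / box-line reduction): when the
# few cells of this group that can still take `value` (fewer than sqrt(size),
# per the depth test below) all share another group, `value` can be abandoned
# in every other cell of that shared group.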
if size > self.depth[-value - 1] > size - size ** 0.5:
cells = groups = None
for i in self.cells:
if i is not ceil and value in i.value:
if cells is None:
cells = []
groups = list(i.groups)
groups.remove(self)
else:
groups = [j for j in groups if j in i.groups]
if not groups:  # True when the intersection of groups is empty
return None
cells.append(i)
if groups:
for i in groups:
for j in i.cells:
if j not in cells:
self._post_abandon(j, value)
def _post_abandon(self, ceil, value):
self.queue.append([ceil, value])
class Sudoku:
def __init__(self, sudoku):
# Parse the source of a sudoku.
sudoku = [[int(e) for e in row.split()] for row in sudoku.split('\n')]
self._data = _Data()
self._data.i = 0
self._data.log = []
self._data.size = len(sudoku)
line = list(range(self._data.size))
# Create the cells.
# noinspection PyUnusedLocal
self._cells = [[_Ceil(self._data) for j in line] for i in line]
self._cells_line = list(itertools.chain.from_iterable(self._cells))
# Init other parts.
self._init_groups()
self._init_sudoku(sudoku)
def _init_groups(self):
"""
Links all cells to groups. A group consists of the cells that must
hold unique values. This method defines the default rules of
the Sudoku game.
"""
line = range(self._data.size)
# Create the groups.
for j in line:
# Add a row. Creating group automatically links the
# cells, so no additional moves needed.
_Group([self._cells[e][j] for e in line], self._data)
# Add a column.
_Group([self._cells[j][e] for e in line], self._data)
# Add regions.
region_size = int(self._data.size ** .5)
region = [[i // region_size, i % region_size] for i in line]
for i, j in region:
r = []
for a, b in region:
x = a + i * region_size
y = b + j * region_size
r.append([x, y])
_Group([self._cells[x][y] for x, y in r], self._data)
def _init_sudoku(self, sudoku):
"""
Loads the initial state of the sudoku from the int matrix,
passed as an argument. This should not be called manually.
:param sudoku:
The int matrix that defines the Sudoku. Zero means 'unknown value'.
:raises Exception: if an initial value conflicts with the remaining candidates of its cell.
"""
line = range(self._data.size)
# Apply the initial values.
for i in line:
for j in line:
v = sudoku[i][j]
if v:
ceil = self._cells[i][j]
# Check for correct data.
if -v not in ceil.value:
raise Exception
# Set the value
ceil.set(v)
self._cells_line.remove(ceil)
# Sort the best positions.
self._cells_line.sort(key=lambda e: len(e.value))
def clue(self):
if not self._cells_line:
return None # The sudoku is solved.
ceil = self._cells_line[0]
# noinspection PyUnresolvedReferences
line = list(range(self._data.size))
try:
for a in line:
for b in line:
if self._cells[a][b] is ceil:
i, j = a, b
# Break both loops.
raise LookupError()
else:
# This should never happen.
raise ValueError
except LookupError:
pass
clue = Clue()
# noinspection PyUnboundLocalVariable
clue.x = i
# noinspection PyUnboundLocalVariable
clue.y = j
clue.possibilities = [-e for e in ceil.value]
return clue
def solve(self, guess=False):
while self._cells_line:
# Choose the best candidate.
ceil = self._cells_line[0]
size = len(ceil.value)
if not size or not guess and size != 1:
# Found a cell with no possible
# values left, or (when not guessing)
# a cell with multiple possible values.
return False
del self._cells_line[0]
value = ceil.value[0]
ceil.set(-value)
self._cells_line.sort(key=lambda e: len(e.value))
return True
@property
def solution(self):
return [[i.value for i in row] for row in self._cells]
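# Illustrative usage sketch (not executed here). The grid is a whitespace- and
# newline-separated string of ints, with 0 meaning "unknown"; the side length
# must be a perfect square so regions can be formed. The 4x4 puzzle below is
# made up for this sketch.
#
#     puzzle = ("1 0 3 0\n"
#               "0 4 0 2\n"
#               "2 0 4 0\n"
#               "0 3 0 1")
#     s = Sudoku(puzzle)
#     if s.solve():          # purely deductive unless guess=True is passed
#         print(s.solution)  # solved cells hold single-element lists, e.g. [2]
#     else:
#         hint = s.clue()    # None when solved; otherwise has .x, .y, .possibilities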
|
|
import itertools
import logging
import os
import shutil
from typing import List, Tuple, Dict, TypeVar, Generator
import numpy as np
import vcf
from .segment_quality_utils import HMMSegmentationQualityCalculator
from .. import types
from ..io import io_consts, io_commons, io_denoising_calling, io_intervals_and_counts, io_vcf_parsing
from ..models.model_denoising_calling import DenoisingModelConfig, CopyNumberCallingConfig, \
HHMMClassAndCopyNumberBasicCaller
from ..models.theano_hmm import TheanoForwardBackward, TheanoViterbi
from ..structs.interval import Interval
from ..structs.metadata import IntervalListMetadata
from ..structs.metadata import SampleMetadataCollection
from ..structs.segment import IntegerCopyNumberSegment
_logger = logging.getLogger(__name__)
class ViterbiSegmentationEngine:
"""This class runs the forward-backward and Viterbi algorithm on gCNV model/calls shards for a single sample,
obtains constant copy-number segments, calculates various quality metrics, and saves the result to disk.
Note:
It is assumed that the model and calls shards are provided in order according to the SAM sequence dictionary.
It is not checked or enforced here.
"""
def __init__(self,
model_shards_paths: List[str],
calls_shards_paths: List[str],
sample_metadata_collection: SampleMetadataCollection,
sample_index: int,
output_path: str,
intervals_vcf: str = None,
clustered_vcf: str = None):
"""Initializer.
Args:
model_shards_paths: list of paths to model shards
calls_shards_paths: list of paths to calls shards
sample_metadata_collection: sample metadata collection (must contain sample being analyzed)
sample_index: index of the sample in the callset
output_path: output path for writing segmentation results
intervals_vcf: file with single-sample copy number calls for all intervals
clustered_vcf: file with clustered breakpoints and calls for each sample
"""
try:
self._validate_args(model_shards_paths, calls_shards_paths, sample_metadata_collection, sample_index,
clustered_vcf)
except AssertionError as ex:
raise AssertionError("Inconsistency detected in the provided model and calls shards.") from ex
self.sample_index = sample_index
self.output_path = output_path
self.calls_shards_paths = calls_shards_paths
self.sample_metadata_collection = sample_metadata_collection
self.denoising_config = self._get_denoising_config(model_shards_paths[0])
self.calling_config = self._get_calling_config(model_shards_paths[0])
self.intervals_vcf = intervals_vcf
self.clustered_vcf = clustered_vcf
# assemble scattered global entities (interval list, log_q_tau_tk)
_logger.info("Assembling interval list and copy-number class posterior from model shards...")
self.interval_list: List[Interval] = []
log_q_tau_tk_shards: Tuple[np.ndarray] = ()
for model_path in model_shards_paths:
self.interval_list += self._get_interval_list_from_model_shard(model_path)
log_q_tau_tk_shards += (self._get_log_q_tau_tk_from_model_shard(model_path),)
self.log_q_tau_tk: np.ndarray = np.concatenate(log_q_tau_tk_shards, axis=0)
# extract SAM header lines from one of the interval lists
self.interval_list_sam_header_lines = io_intervals_and_counts.extract_sam_header_from_file(
os.path.join(model_shards_paths[0], io_consts.default_interval_list_filename))
# sample names
self.sample_name = self._get_sample_name_from_calls_shard(calls_shards_paths[0], sample_index)
# interval list metadata
interval_list_metadata: IntervalListMetadata = IntervalListMetadata(self.interval_list)
self.ordered_contig_list = interval_list_metadata.ordered_contig_list
self.contig_interval_indices = interval_list_metadata.contig_interval_indices
self.contig_interval_lists: Dict[str, List[Interval]] = {
contig: [self.interval_list[ti] for ti in self.contig_interval_indices[contig]]
for contig in self.ordered_contig_list}
# cnv stay probability for each contig
self.cnv_stay_prob_t_j: Dict[str, np.ndarray] = dict()
for contig in self.ordered_contig_list:
contig_interval_list = self.contig_interval_lists[contig]
dist_t = np.asarray([contig_interval_list[ti + 1].distance(contig_interval_list[ti])
for ti in range(len(contig_interval_list) - 1)], dtype=types.floatX)
self.cnv_stay_prob_t_j[contig] = np.exp(-dist_t / self.calling_config.cnv_coherence_length)
# forward-backward algorithm
_logger.info("Compiling theano forward-backward function...")
self.theano_forward_backward = TheanoForwardBackward(
log_posterior_probs_output_tc=None,
resolve_nans=False,
do_thermalization=False,
do_admixing=False,
include_update_size_output=False,
include_alpha_beta_output=True)
# viterbi algorithm
_logger.info("Compiling theano Viterbi function...")
self.theano_viterbi = TheanoViterbi()
# copy-number HMM specs generator
_logger.info("Compiling theano variational HHMM...")
self.get_copy_number_hmm_specs = HHMMClassAndCopyNumberBasicCaller\
.get_compiled_copy_number_hmm_specs_theano_func()
def _viterbi_segments_generator(self) -> Generator[IntegerCopyNumberSegment, None, None]:
"""Performs Viterbi segmentation and segment quality calculation for a single sample in
the call-set and returns a generator for segments.
Returns:
a generator for segments
"""
# load copy number log emission for the sample
copy_number_log_emission_tc_shards = ()
for calls_path in self.calls_shards_paths:
copy_number_log_emission_tc_shards += (self._get_log_copy_number_emission_tc_from_calls_shard(
calls_path, self.sample_index),)
copy_number_log_emission_tc = np.concatenate(copy_number_log_emission_tc_shards, axis=0)
# iterate over contigs and perform segmentation
sample_name = self.sample_name
for contig_index, contig in enumerate(self.ordered_contig_list):
_logger.info("Segmenting contig ({0}/{1}) (contig name: {2})...".format(
contig_index + 1, len(self.ordered_contig_list), contig))
# copy-number prior probabilities for each class
contig_baseline_copy_number = self.sample_metadata_collection\
.get_sample_ploidy_metadata(sample_name)\
.get_contig_ploidy(contig)
pi_jkc = HHMMClassAndCopyNumberBasicCaller.get_copy_number_prior_for_sample_jkc(
self.calling_config.num_copy_number_states,
self.calling_config.p_alt,
np.asarray([contig_baseline_copy_number], dtype=types.med_uint))
# contig interval list and indices
contig_interval_list = self.contig_interval_lists[contig]
contig_interval_indices = self.contig_interval_indices[contig]
# mapping from intervals to contig index (since we have a single contig, all intervals map to index=0)
t_to_j_map = np.zeros((len(contig_interval_list),), dtype=types.med_uint)
# copy-number class log probability
log_q_tau_tk = self.log_q_tau_tk[contig_interval_indices, :]
# copy-number log emission probability for contig intervals
copy_number_log_emission_contig_tc = copy_number_log_emission_tc[contig_interval_indices, :]
# get HMM specs
hmm_specs = self.get_copy_number_hmm_specs(
pi_jkc, self.cnv_stay_prob_t_j[contig], log_q_tau_tk, t_to_j_map)
log_prior_c = hmm_specs[0]
log_trans_contig_tcc = hmm_specs[1]
# run forward-backward algorithm
fb_result = self.theano_forward_backward.perform_forward_backward(
log_prior_c, log_trans_contig_tcc, copy_number_log_emission_contig_tc)
log_posterior_prob_tc = fb_result.log_posterior_probs_tc
log_data_likelihood = fb_result.log_data_likelihood
alpha_tc = fb_result.alpha_tc
beta_tc = fb_result.beta_tc
# initialize the segment quality calculator
segment_quality_calculator: HMMSegmentationQualityCalculator = HMMSegmentationQualityCalculator(
copy_number_log_emission_contig_tc, log_trans_contig_tcc,
alpha_tc, beta_tc, log_posterior_prob_tc, log_data_likelihood)
if self.clustered_vcf is None or self.intervals_vcf is None:
# validate args -- the two VCFs must be provided together (both or neither)
if bool(self.clustered_vcf is None) != bool(self.intervals_vcf is None):
raise Exception("clustered_vcf and intervals_vcf must be provided together (both or neither).")
# run viterbi algorithm
viterbi_path_t_contig = self.theano_viterbi.get_viterbi_path(
log_prior_c, log_trans_contig_tcc, copy_number_log_emission_contig_tc)
# coalesce into piecewise constant copy-number segments
segments = self._coalesce_seq_into_segments(viterbi_path_t_contig)
else:
# use events from clustered_vcf
segments = io_vcf_parsing.read_sample_segments_and_calls(self.intervals_vcf, self.clustered_vcf, self.sample_name, contig)
# calculate qualities
for call_copy_number, start_index, end_index in segments:
num_points = end_index - start_index + 1
try:
segment = IntegerCopyNumberSegment(contig,
contig_interval_list[start_index].start,
contig_interval_list[end_index].end,
num_points,
call_copy_number,
contig_baseline_copy_number)
except IndexError:
print("end index out of bounds: {0} requested, max is {1}".format(end_index, len(contig_interval_list) - 1))
continue  # skip this segment instead of falling through with `segment` undefined
if num_points > 1:
segment.quality_some_called = segment_quality_calculator.get_segment_quality_some_called(
start_index, end_index, call_copy_number)
segment.quality_all_called = segment_quality_calculator.get_segment_quality_all_called(
start_index, end_index, call_copy_number)
segment.quality_start = segment_quality_calculator.get_segment_quality_start(
start_index, call_copy_number)
segment.quality_end = segment_quality_calculator.get_segment_quality_end(
end_index, call_copy_number)
else: # for single-interval segments, all qualities must be the same
segment.quality_some_called = segment_quality_calculator.get_segment_quality_some_called(
start_index, end_index, call_copy_number)
segment.quality_all_called = segment.quality_some_called
segment.quality_start = segment.quality_some_called
segment.quality_end = segment.quality_some_called
yield segment
def write_copy_number_segments(self):
"""Performs Viterbi segmentation and segment quality calculation for a single sample in
the call-set and saves the results to disk.
"""
sample_name = self.sample_name
_logger.info("Processing sample index: {0}, sample name: {1}...".format(self.sample_index, sample_name))
sample_output_path = os.path.join(self.output_path, io_consts.sample_folder_prefix + repr(self.sample_index))
io_commons.assert_output_path_writable(sample_output_path, try_creating_output_path=True)
# write configs, gcnvkernel version and sample name to output path
shutil.copy(os.path.join(self.calls_shards_paths[0], io_consts.default_denoising_config_json_filename),
sample_output_path)
shutil.copy(os.path.join(self.calls_shards_paths[0], io_consts.default_calling_config_json_filename),
sample_output_path)
io_commons.write_gcnvkernel_version(sample_output_path)
io_commons.write_sample_name_to_txt_file(sample_output_path, sample_name)
seg_file = os.path.join(sample_output_path, io_consts.default_copy_number_segments_tsv_filename)
with open(seg_file, 'w') as of:
# copy SAM header lines from model/calls interval list
for sam_header_line in self.interval_list_sam_header_lines:
of.write(sam_header_line + '\n')
# add sample name header
of.write('@' + io_consts.sample_name_sam_header_prefix + sample_name + '\n')
# add table column headers
of.write(IntegerCopyNumberSegment.get_header_column_string() + '\n')
# add segments
for segment in self._viterbi_segments_generator():
of.write(repr(segment) + '\n')
@staticmethod
def _validate_args(model_shards_paths: List[str],
calls_shards_paths: List[str],
sample_metadata_collection: SampleMetadataCollection,
sample_index: int,
clustered_vcf: str):
assert len(model_shards_paths) > 0, "At least one model shard must be provided."
assert len(calls_shards_paths) == len(model_shards_paths),\
"The number of model shards ({0}) and calls shards ({1}) must match.".format(
len(model_shards_paths), len(calls_shards_paths))
assert sample_index >= 0, "Sample index must be a non-negative integer."
scattered_sample_names: List[str] = []
for model_path, calls_path in zip(model_shards_paths, calls_shards_paths):
# assert interval lists are identical
model_interval_list_file = os.path.join(model_path, io_consts.default_interval_list_filename)
calls_interval_list_file = os.path.join(calls_path, io_consts.default_interval_list_filename)
io_commons.assert_files_are_identical(model_interval_list_file, calls_interval_list_file)
# assert gcnvkernel versions are identical
model_gcnvkernel_version_file = os.path.join(model_path, io_consts.default_gcnvkernel_version_json_filename)
calls_gcnvkernel_version_file = os.path.join(calls_path, io_consts.default_gcnvkernel_version_json_filename)
try:
io_commons.assert_files_are_identical(model_gcnvkernel_version_file, calls_gcnvkernel_version_file)
except AssertionError:
_logger.warning("Different gcnvkernel versions between model and calls -- proceeding at your own risk!")
# assert denoising configs are identical
model_denoising_config_file = os.path.join(model_path, io_consts.default_denoising_config_json_filename)
calls_denoising_config_file = os.path.join(calls_path, io_consts.default_denoising_config_json_filename)
try:
io_commons.assert_files_are_identical(model_denoising_config_file, calls_denoising_config_file)
except AssertionError:
_logger.warning("Different denoising configuration between model and calls -- "
"proceeding at your own risk!")
# assert callings configs are identical
model_calling_config_file = os.path.join(model_path, io_consts.default_calling_config_json_filename)
calls_calling_config_file = os.path.join(calls_path, io_consts.default_calling_config_json_filename)
try:
io_commons.assert_files_are_identical(model_calling_config_file, calls_calling_config_file)
except AssertionError:
_logger.warning("Different calling configuration between model and calls -- "
"proceeding at your own risk!")
# extract and store sample names for the current shard
scattered_sample_names.append(
ViterbiSegmentationEngine._get_sample_name_from_calls_shard(calls_path, sample_index))
# all calls shards must agree on the sample name at the requested sample index
assert len(set(scattered_sample_names)) == 1,\
"The calls shards contain different sample names and/or different number of samples."
if clustered_vcf is not None:
clustered_reader = vcf.Reader(filename=clustered_vcf)
assert set(clustered_reader.samples).issuperset(set(scattered_sample_names)), \
"The clustered VCF does not contain all samples in the calls shard."
# the sample must have ploidy calls in the metadata collection
sample_names = [scattered_sample_names[0]]
sample_metadata_collection.all_samples_have_ploidy_metadata(sample_names)
@staticmethod
def _get_sample_name_from_calls_shard(calls_path: str, sample_index: int) -> str:
sample_posteriors_path = io_denoising_calling.get_sample_posterior_path(calls_path, sample_index)
if not os.path.isdir(sample_posteriors_path):
raise Exception("Could not find any sample posterior calls in {0} for sample with index {1}.".
format(calls_path, sample_index))
sample_name = io_commons.get_sample_name_from_txt_file(sample_posteriors_path)
return sample_name
@staticmethod
def _get_denoising_config(input_path: str) -> DenoisingModelConfig:
return DenoisingModelConfig.from_json_file(os.path.join(
input_path, io_consts.default_denoising_config_json_filename))
@staticmethod
def _get_calling_config(input_path: str) -> CopyNumberCallingConfig:
return CopyNumberCallingConfig.from_json_file(os.path.join(
input_path, io_consts.default_calling_config_json_filename))
@staticmethod
def _get_interval_list_from_model_shard(model_path: str) -> List[Interval]:
interval_list_file = os.path.join(model_path, io_consts.default_interval_list_filename)
return io_intervals_and_counts.load_interval_list_tsv_file(interval_list_file)
@staticmethod
def _get_log_q_tau_tk_from_model_shard(model_path: str) -> np.ndarray:
return io_commons.read_ndarray_from_tsv(os.path.join(
model_path, io_consts.default_class_log_posterior_tsv_filename))
@staticmethod
def _get_log_copy_number_emission_tc_from_calls_shard(calls_path: str, sample_index: int):
return io_denoising_calling.SampleDenoisingAndCallingPosteriorsReader.\
read_ndarray_tc_with_copy_number_header(
io_denoising_calling.get_sample_posterior_path(calls_path, sample_index),
io_consts.default_copy_number_log_emission_tsv_filename)
@staticmethod
def _coalesce_seq_into_segments(seq: List[TypeVar('_T')]) -> Generator[Tuple[TypeVar('_T'), int, int], None, None]:
"""Coalesces a sequence of objects into piecewise constant segments, along with start and end indices
for each constant segment.
Example:
seq = ['a', 'a', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'a']
result = [('a', 0, 3), ('b', 4, 4), ('c', 5, 6), ('a', 7, 9)]
Args:
seq: a sequence of objects that implement __equals__
Returns:
a generator for (object, start_index, end_index)
"""
for seg in itertools.groupby(enumerate(seq), key=lambda elem: elem[1]):
seg_const = seg[0]
grouper = seg[1]
start_index = grouper.__next__()[0]
end_index = start_index
try:
while True:
end_index = grouper.__next__()[0]
except StopIteration:
pass
yield (seg_const, start_index, end_index)
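# A minimal, dependency-free sketch of the coalescing step above, kept at module
# level purely for illustration (the name `_coalesce_demo` is not part of the
# gcnvkernel API). It mirrors the docstring example of
# `_coalesce_seq_into_segments` without relying on the class.
def _coalesce_demo(seq):
    """Yield (value, start_index, end_index) for each constant run in `seq`.

    >>> list(_coalesce_demo(['a', 'a', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'a']))
    [('a', 0, 3), ('b', 4, 4), ('c', 5, 6), ('a', 7, 9)]
    """
    for value, grouper in itertools.groupby(enumerate(seq), key=lambda elem: elem[1]):
        indices = [index for index, _ in grouper]
        yield (value, indices[0], indices[-1])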
|
|
#! /usr/bin/env python
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-w: verbose2 -- re-run failed tests in verbose mode
-d: debug -- print traceback for failed tests
-q: quiet -- don't print anything except if a test fails
-x: exclude -- arguments are tests to *exclude*
-s: single -- run only a single test (see below)
-S: slow -- print the slowest 10 tests
-r: random -- randomize test execution order
-f: fromfile -- read names of tests to run from a file (see below)
-l: findleaks -- if GC is available detect tests that leak memory
-u: use -- specify which special resource intensive tests to run
-h: help -- print this text and exit
-t: threshold -- call gc.set_threshold(N)
-T: coverage -- turn on code coverage using the trace module
-D: coverdir -- Directory where coverage files are put
-N: nocoverdir -- Put coverage files alongside modules
-L: runleaks -- run the leaks(1) command just before exit
-R: huntrleaks -- search for reference leaks (needs debug build, v. slow)
-M: memlimit -- run very large memory-consuming tests
-n: nowindows -- suppress error message boxes on Windows
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-v is incompatible with -g and does not compare test output files.
-T turns on code coverage tracing with the trace module.
-D specifies the directory where coverage files are put.
-N Put coverage files alongside modules.
-s means to run only a single test and exit. This is useful when
doing memory analysis on the Python interpreter (which tend to consume
too many resources to run the full regression test non-stop). The
file /tmp/pynexttest is read to find the next test to run. If this
file is missing, the first test_*.py file in testdir or on the command
line is used. (actually tempfile.gettempdir() is used instead of
/tmp).
--start is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when holding more
than 2 billion objects, which only works on 64-bit systems. There are also
some tests that try to exhaust the address space of the process, which only
makes sense on 32-bit systems with at least 2Gb of memory. The passed-in
memlimit, which is a string in the form of '2.5Gb', determines how much
memory the tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
lib2to3 - Run the tests for 2to3 (They take a while.)
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
compiler - Allow test_tokenize to verify round-trip lexing on
every file in the test library.
subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
"""
import getopt
import os
import random
import re
import io
import sys
import time
import traceback
import warnings
from inspect import isabstract
# I see no other way to suppress these warnings;
# putting them in test_grammar.py has no effect:
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
".*test.test_grammar$")
if sys.maxsize > 0x7fffffff:
# Also suppress them in <string>, because for 64-bit platforms,
# that's where test_grammar.py hides them.
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
"<string>")
# Ignore ImportWarnings that only occur in the source tree,
# (because of modules with the same name as source-directories in Modules/)
for mod in ("ctypes", "gzip", "zipfile", "tarfile", "encodings.zlib_codec",
"test.test_zipimport", "test.test_zlib", "test.test_zipfile",
"test.test_codecs", "test.string_tests"):
warnings.filterwarnings(module=".*%s$" % (mod,),
action="ignore", category=ImportWarning)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to raise the soft stack limit to 2048 KiB (the 1024*2048 bytes below).
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'compiler', 'subprocess', 'urlfetch')
def usage(msg):
print(msg, file=sys.stderr)
print("Use --help for usage", file=sys.stderr)
sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, generate, exclude,
single, randomize, findleaks, use_resources, trace, coverdir, and
print_slow) allow programmers calling main() directly to set the
values that would normally be set by flags on the command line.
"""
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsSrf:lu:t:TD:NLR:wM:n',
['help', 'verbose', 'quiet', 'exclude',
'single', 'slow', 'random', 'fromfile',
'findleaks', 'use=', 'threshold=', 'trace',
'coverdir=', 'nocoverdir', 'runleaks',
'huntrleaks=', 'verbose2', 'memlimit=',
'debug', 'start=', "nowindows"
])
except getopt.error as msg:
usage(msg)
# Defaults
if use_resources is None:
use_resources = []
debug = False
start = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-q', '--quiet'):
quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o == '--start':
start = a
elif o in ('-s', '--single'):
single = True
elif o in ('-S', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
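# e.g. '-R 6:3:leaklog.txt' -> ['6', '3', 'leaklog.txt'] (values here are
# illustrative); with the minimal invocation '-R :' the empty fields fall
# back to the defaults (5, 4, "reflog.txt") filled in below.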
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to the character cache in
# stringobject.c filling slowly with random data
warm_char_cache()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
if generate and verbose:
usage("-g and -v don't go together!")
if single and fromfile:
usage("-s and -f don't go together!")
good = []
bad = []
skipped = []
resource_denieds = []
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
from tempfile import gettempdir
filename = os.path.join(gettempdir(), 'pynexttest')
try:
fp = open(filename, 'r')
next = fp.read().strip()
tests = [next]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(fromfile)
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
if args:
args = list(map(removepy, args))
if tests:
tests = list(map(removepy, tests))
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
tests = tests or args or findtests(testdir, stdtests, nottests)
if single:
tests = tests[:1]
# Remove all the tests that precede start if it's set.
if start:
try:
del tests[:tests.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.shuffle(tests)
if trace:
import trace
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
save_modules = set(sys.modules)  # snapshot, not a live view, so later imports are detected
for test in tests:
if not quiet:
print(test)
sys.stdout.flush()
if trace:
# If we're tracing code coverage, the tracer swallows runtest()'s
# return value, so failures in traced runs don't affect the exit status.
tracer.runctx('runtest(test, generate, verbose, quiet,'
' test_times, testdir)',
globals=globals(), locals=vars())
else:
try:
ok = runtest(test, generate, verbose, quiet, test_times,
testdir, huntrleaks)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if ok > 0:
good.append(test)
elif ok == 0:
bad.append(test)
else:
skipped.append(test)
if ok == -2:
resource_denieds.append(test)
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in list(sys.modules):  # copy: unloading mutates sys.modules
if module not in save_modules and module.startswith("test."):
support.unload(module)
# The lists won't be sorted if running with -r
good.sort()
bad.sort()
skipped.sort()
if good and not quiet:
if not bad and not skipped and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if verbose:
print("CAUTION: stdout isn't compared in verbose mode:")
print("a test that passes in verbose mode may fail without it.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
support.verbose = True
ok = runtest(test, generate, True, quiet, test_times, testdir,
huntrleaks, debug)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
alltests = findtests(testdir, stdtests, nottests)
for i in range(len(alltests)):
if tests[0] == alltests[i]:
if i == len(alltests) - 1:
os.unlink(filename)
else:
fp = open(filename, 'w')
fp.write(alltests[i+1] + '\n')
fp.close()
break
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
]
NOTTESTS = {
'test_future1',
'test_future2',
}
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
if not testdir: testdir = findtestdir()
names = os.listdir(testdir)
tests = []
for name in names:
if name[:5] == "test_" and name[-3:] == ".py":
modname = name[:-3]
if modname not in stdtests and modname not in nottests:
tests.append(modname)
tests.sort()
return stdtests + tests
def runtest(test, generate, verbose, quiet, test_times,
testdir=None, huntrleaks=False, debug=False):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
test_times -- a list of (time, test_name) pairs
testdir -- test directory
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
debug -- if true, print tracebacks for failed tests regardless of
verbose setting
Return:
-2 test skipped because resource denied
-1 test skipped for some other reason
0 test failed
1 test passed
"""
try:
return runtest_inner(test, generate, verbose, quiet, test_times,
testdir, huntrleaks, debug)
finally:
cleanup_test_droppings(test, verbose)
def runtest_inner(test, generate, verbose, quiet, test_times,
testdir=None, huntrleaks=False, debug=False):
support.unload(test)
if not testdir:
testdir = findtestdir()
if verbose:
cfp = None
else:
cfp = io.StringIO()  # capture the test's stdout for comparison below
try:
save_stdout = sys.stdout
try:
if cfp:
sys.stdout = cfp
print(test) # Output file starts with test name
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# Old tests run to completion simply as a side-effect of
# being imported. For tests based on unittest or doctest,
# explicitly invoke their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
dash_R(the_module, test, indirect_test, huntrleaks)
test_time = time.time() - start_time
test_times.append((test_time, test))
finally:
sys.stdout = save_stdout
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return -2
except (ImportError, support.TestSkipped) as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return -1
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
print("test", test, "failed --", msg)
sys.stdout.flush()
return 0
except:
type, value = sys.exc_info()[:2]
print("test", test, "crashed --", str(type) + ":", value)
sys.stdout.flush()
if verbose or debug:
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
return 0
else:
if not cfp:
return 1
output = cfp.getvalue()
expected = test + "\n"
if output == expected or huntrleaks:
return 1
print("test", test, "produced unexpected output:")
sys.stdout.flush()
reportdiff(expected, output)
sys.stdout.flush()
return 0
def cleanup_test_droppings(testname, verbose):
import shutil
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, it can be tedious for a failing test to
# arrange that. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
# This code is hackish and inelegant, but it seems to do the job.
import copyreg, _abcoll
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
abcs = {}
for abc in [getattr(_abcoll, a) for a in _abcoll.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
del sys.modules[the_module.__name__]
exec('import ' + the_module.__name__)
deltas = []
nwarmup, ntracked, fname = huntrleaks
repcount = nwarmup + ntracked
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
dash_R_cleanup(fs, ps, pic, abcs)
for i in range(repcount):
rc = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, abcs)
if i >= nwarmup:
deltas.append(sys.gettotalrefcount() - rc - 2)
print(file=sys.stderr)
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print(msg, file=sys.stderr)
refrep = open(fname, "a")
print(msg, file=refrep)
refrep.close()
def dash_R_cleanup(fs, ps, pic, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, _abcoll
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(_abcoll, a) for a in _abcoll.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
# Collect cyclic trash.
gc.collect()
def warm_char_cache():
s = bytes(range(256))
for i in range(256):
s[i:i+1]
def reportdiff(expected, output):
import difflib
print("*" * 70)
a = expected.splitlines(1)
b = output.splitlines(1)
sm = difflib.SequenceMatcher(a=a, b=b)
tuples = sm.get_opcodes()
def pair(x0, x1):
# x0:x1 are 0-based slice indices; convert to 1-based line indices.
x0 += 1
if x0 >= x1:
return "line " + str(x0)
else:
return "lines %d-%d" % (x0, x1)
for op, a0, a1, b0, b1 in tuples:
if op == 'equal':
pass
elif op == 'delete':
print("***", pair(a0, a1), "of expected output missing:")
for line in a[a0:a1]:
print("-", line, end='')
elif op == 'replace':
print("*** mismatch between", pair(a0, a1), "of expected", \
"output and", pair(b0, b1), "of actual output:")
for line in difflib.ndiff(a[a0:a1], b[b0:b1]):
print(line, end='')
elif op == 'insert':
print("***", pair(b0, b1), "of actual output doesn't appear", \
"in expected output after line", str(a1)+":")
for line in b[b0:b1]:
print("+", line, end='')
else:
print("get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1))
print("*" * 70)
def findtestdir():
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
return testdir
def removepy(name):
if name.endswith(".py"):
name = name[:-3]
return name
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
print(fill(' '.join(map(str, x)), width,
initial_indent=blanks, subsequent_indent=blanks))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = {
'win32':
"""
test__locale
test_bsddb3
test_crypt
test_curses
test_dbm
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
""",
'linux2':
"""
test_curses
test_largefile
test_kqueue
test_ossaudiodev
""",
'mac':
"""
test_atexit
test_bsddb
test_bsddb3
test_bz2
test_crypt
test_curses
test_dbm
test_fcntl
test_fork1
test_epoll
test_grp
test_ioctl
test_largefile
test_locale
test_kqueue
test_mmap
test_openpty
test_ossaudiodev
test_poll
test_popen
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sundry
test_tarfile
""",
'unixware7':
"""
test_bsddb
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'openunix8':
"""
test_bsddb
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
""",
'sco_sv3':
"""
test_asynchat
test_bsddb
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'darwin':
"""
test__locale
test_bsddb
test_bsddb3
test_curses
test_epoll
test_dbm_gnu
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
""",
'sunos5':
"""
test_bsddb
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_bsddb
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
""",
'atheos':
"""
test_curses
test_dbm_gnu
test_epoll
test_largefile
test_locale
test_kqueue
test_mhlib
test_mmap
test_poll
test_resource
""",
'cygwin':
"""
test_bsddb3
test_curses
test_dbm
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
""",
'os2emx':
"""
test_audioop
test_bsddb3
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
""",
'freebsd4':
"""
test_bsddb
test_bsddb3
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_timeout
test_urllibnet
test_multiprocessing
""",
'aix5':
"""
test_bsddb
test_bsddb3
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_zipimport
test_zlib
""",
'openbsd3':
"""
test_bsddb
test_bsddb3
test_ctypes
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_multiprocessing
""",
'netbsd3':
"""
test_bsddb
test_bsddb3
test_ctypes
test_curses
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_multiprocessing
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
_expectations['freebsd7'] = _expectations['freebsd4']
_expectations['freebsd8'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
# These are broken tests, for now skipped on every platform.
# XXX Fix these!
self.expected.add('test_nis')
# expected to be skipped on every platform, even Linux
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
# doctest, profile and cProfile tests fail when the codec for the
# fs encoding isn't built in because PyUnicode_Decode() adds two
# calls into Python.
encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
if sys.getfilesystemencoding().lower() not in encs:
self.expected.add('test_profile')
self.expected.add('test_cProfile')
self.expected.add('test_doctest')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite"]
for skip in WIN_ONLY:
self.expected.add(skip)
if sys.platform != 'sunos5':
self.expected.add('test_nis')
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. This
# prevents relative imports from working, and relative imports will screw
# up the testing framework. E.g. if both test.support and
# support are imported, they will not contain the same globals, and
# much of the testing framework relies on the globals in the
# test.support module.
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = pathlen = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
if len(sys.path) == pathlen:
print('Could not find %r in sys.path to remove it' % mydir)
main()
|
|
from __future__ import print_function, unicode_literals, absolute_import
import glob
import io
import itertools
import os
import compdb
from compdb.backend.json import (JSONCompilationDatabase,
compile_commands_to_json)
from compdb.backend.memory import InMemoryCompilationDatabase
from compdb.models import (CompilationDatabaseInterface, ProbeError)
from compdb.utils import (suppress, re_fullmatch, empty_iterator_wrap)
class ComplementerError(compdb.CompdbError):
'''Base exception for complementer-related errors'''
def __init__(self, complementer, message):
super(ComplementerError, self).__init__(message)
self.complementer = complementer
class ComplementerCacheNotFound(ComplementerError):
def __init__(self, complementer, directory):
super(ComplementerCacheNotFound, self).__init__(
complementer, "Could not find '{}' complementer in '{}'".format(
complementer.name, directory))
self.directory = directory
class ComplementerNameError(ComplementerError):
def __init__(self, complementer):
super(ComplementerNameError, self).__init__(
complementer, "Invalid complementer name: '{}'".format(
complementer.name))
def _chain_get_compile_commands(databases, filepath):
return itertools.chain.from_iterable((db.get_compile_commands(filepath)
for db in databases))
def _chain_get_all_files(databases):
return itertools.chain.from_iterable((db.get_all_files()
for db in databases))
def _chain_get_all_compile_commands(databases):
return itertools.chain.from_iterable((db.get_all_compile_commands()
for db in databases))
class _ComplementerWrapper(object):
def __init__(self, name, complementer):
if not self._valid_name(name):
raise ComplementerNameError(complementer)
self.name = name
self.complementer = complementer
@staticmethod
def _valid_name(name):
return re_fullmatch('[a-z][a-z0-9]*(?:_[a-z0-9]+)*', name)
@property
def cache_filename(self):
return self.name + '.json'
def complement(self, databases):
return self.complementer.complement(databases)
class CompilationDatabase(object):
def __init__(self):
self._registry = []
self._complementers = []
self._layers = [[]]
self._directories = []
self.raise_on_missing_cache = True
def register_backend(self, db_cls):
if db_cls not in self._registry:
self._registry.append(db_cls)
def add_complementer(self, name, complementer):
complementer = _ComplementerWrapper(name, complementer)
self._complementers.append(complementer)
self._layers.append([])
def _add_databases(self, probe_results):
for complemented_database, directory in probe_results:
for i, db in enumerate(complemented_database):
self._layers[i].append(db)
self._directories.append(directory)
def _add_database(self, probe_result):
self._add_databases([probe_result])
def _probe_dir1(self, directory):
for compdb_cls in self._registry:
with suppress(ProbeError):
yield compdb_cls.probe_directory(directory)
break
else:
# no compilation database found,
# calling the interface's probe_directory() function
# should raise a good probe error
CompilationDatabaseInterface.probe_directory(directory)
# make sure to raise something,
# in case probe_directory() no longer asserts
raise AssertionError
for complementer in self._complementers:
cache_path = os.path.join(directory, complementer.cache_filename)
if os.path.exists(cache_path):
yield JSONCompilationDatabase(cache_path)
elif self.raise_on_missing_cache:
raise ComplementerCacheNotFound(complementer, directory)
else:
yield InMemoryCompilationDatabase()
def _probe_dir(self, directory):
return (list(self._probe_dir1(directory)), directory)
def add_directory(self, directory):
self._add_database(self._probe_dir(directory))
def add_directories(self, directories):
"""Either all directories are added successfuly
or none if an exception is raised."""
databases = []
for directory in directories:
databases.append(self._probe_dir(directory))
self._add_databases(databases)
def _add_directory_pattern1(self, path_pattern):
# we are interested only in directories,
# glob() will list only directories if the pattern ends with os.sep
dir_pattern = os.path.join(path_pattern, '')
databases = []
        # sorting makes the order predictable, reproducible
for directory in sorted(glob.glob(dir_pattern)):
with suppress(ProbeError):
databases.append(self._probe_dir(directory))
if not databases:
raise ProbeError(
"{}: no compilation databases found".format(path_pattern))
return databases
def add_directory_pattern(self, path_pattern):
"""If no compilation database is found, a ProbeError is raised."""
self._add_databases(self._add_directory_pattern1(path_pattern))
def add_directory_patterns(self, path_patterns):
databases = []
for path_pattern in path_patterns:
databases.extend(self._add_directory_pattern1(path_pattern))
self._add_databases(databases)
def update_complements(self):
# clear all complementary databases but keep the initial database
del self._layers[1:]
# incrementally compute the complements,
        # each complement depends on its predecessors
for complementer in self._complementers:
yield ('begin', {'complementer': complementer.name})
layer = complementer.complement(self._layers)
self._layers.append(layer)
for db, directory in zip(layer, self._directories):
cache_path = os.path.join(directory,
complementer.cache_filename)
yield ('saving', {'file': cache_path})
with io.open(cache_path, 'w', encoding='utf8') as f:
compile_commands_to_json(db.get_all_compile_commands(), f)
yield ('end', {'complementer': complementer.name})
def get_compile_commands(self, filepath, **kwargs):
def uniquify(compile_commands):
for compile_command in compile_commands:
yield compile_command
break
for key in kwargs:
assert key in ['unique'], "invalid named argument: {}".format(key)
ret = iter(())
for layer in self._layers:
is_empty, compile_commands = empty_iterator_wrap(
_chain_get_compile_commands(layer, filepath))
            # The complementary databases aren't supposed to contain files
            # from the main or preceding databases.
            # This allows us to exit early as soon as a match is found.
if not is_empty:
ret = compile_commands
break
if kwargs.get('unique', False):
ret = uniquify(ret)
return ret
def get_all_files(self):
return itertools.chain.from_iterable((_chain_get_all_files(layer)
for layer in self._layers))
def get_all_compile_commands(self, **kwargs):
def uniquify(compile_commands):
serialized_files = set()
for compile_command in compile_commands:
normpath = compile_command.normfile
if normpath in serialized_files:
continue
serialized_files.add(normpath)
yield compile_command
for key in kwargs:
assert key in ['unique'], "invalid named argument: {}".format(key)
ret = itertools.chain.from_iterable(
(_chain_get_all_compile_commands(layer) for layer in self._layers))
if kwargs.get('unique', False):
ret = uniquify(ret)
return ret
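# Illustrative usage sketch (not part of the original module): drive the
# aggregator above by registering the JSON backend, probing a build directory
# and querying the commands for one file. The 'build/' directory and the
# 'src/main.cpp' path are hypothetical placeholders; probing assumes the JSON
# backend finds a compile_commands.json in that directory.
def _example_usage(build_dir='build/', filepath='src/main.cpp'):
    database = CompilationDatabase()
    database.register_backend(JSONCompilationDatabase)
    # Raises a ProbeError if no registered backend recognises the directory.
    database.add_directory(build_dir)
    # unique=True keeps only the first command found for the file.
    return list(database.get_compile_commands(filepath, unique=True))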
|
|
# Copyright (c) 2013 Michael Bitzi
# Licensed under the MIT license http://opensource.org/licenses/MIT
import logging
from pwm.config import config
from pwm.ffi.xcb import xcb
import pwm.bar
import pwm.windows
import pwm.layout
import pwm.events
workspaces = []
current_workspace_index = 0
class Workspace:
def __init__(self):
self.windows = []
self.x = 0
self.y = pwm.bar.calculate_height()
self.width = xcb.screen.width_in_pixels
self.height = xcb.screen.height_in_pixels - self.y
self.tiling = pwm.layout.Tiling(self)
self.floating = pwm.layout.Floating(self)
self.fullscreen = pwm.layout.Fullscreen(self)
self.layouts = (self.tiling, self.floating, self.fullscreen)
def hide(self):
for w in self.windows:
# The next UnmapNotifyEvent for this window has to be ignored
pwm.windows.managed[w].ignore_unmaps += 1
xcb.core.unmap_window(w)
def show(self):
for w in self.windows:
xcb.core.map_window(w)
def add_window(self, wid):
with pwm.windows.no_enter_notify_event():
if pwm.windows.managed[wid].fullscreen:
self.fullscreen.add_window(wid)
elif pwm.windows.managed[wid].floating:
self.floating.add_window(wid)
else:
# Place new window below the one with the highest priority
column = 0
row = -1
for priority in reversed(self.windows):
if priority in self.tiling.windows:
column, row = self.tiling.path(priority)
row += 1
break
self.tiling.add_window(wid, column, row)
self.windows.append(wid)
if current() == self:
xcb.core.map_window(wid)
def _proxy_layout(self, attr, wid, *args, **kwargs):
for layout in self.layouts:
if wid in layout.windows and hasattr(layout, attr):
return getattr(layout, attr)(wid, *args, **kwargs)
def remove_window(self, wid):
with pwm.windows.no_enter_notify_event():
self._proxy_layout("remove_window", wid)
self.windows.remove(wid)
def move_window(self, wid, direction):
with pwm.windows.no_enter_notify_event():
self._proxy_layout("move", wid, direction)
def resize_window(self, wid, delta):
with pwm.windows.no_enter_notify_event():
self._proxy_layout("resize", wid, delta)
def focus_relative(self, wid, pos):
"""Focus the neighbour of a window."""
rel = self._proxy_layout("relative", wid, pos)
if rel:
pwm.windows.focus(rel)
def top_focus_priority(self):
"""Return the window which is on top of the focus priority list.
If there are no windows, return None.
"""
if self.windows:
return self.windows[-1]
return None
def handle_focus(self, wid):
"""Handle focus and rearrange the focus priority list accordingly."""
if wid not in self.windows:
return
# Simply move the window to the end of the list.
# This way all windows will be sorted by how recently they were
# focused.
self.windows.remove(wid)
self.windows.append(wid)
def toggle_floating(self, wid):
with pwm.windows.no_enter_notify_event():
self._proxy_layout("remove_window", wid)
floating = not pwm.windows.managed[wid].floating
pwm.windows.managed[wid].floating = floating
if floating:
self.floating.add_window(wid)
else:
self.tiling.add_window(wid)
def toggle_focus_layer(self):
target = not pwm.windows.managed[pwm.windows.focused].floating
for win in reversed(self.windows):
if pwm.windows.managed[win].floating == target:
pwm.windows.focus(win)
return
def toggle_fullscreen(self, wid):
info = pwm.windows.managed[wid]
if info.fullscreen:
self.remove_fullscreen(wid)
else:
self.add_fullscreen(wid)
def add_fullscreen(self, wid):
self._proxy_layout("remove_window", wid)
self.fullscreen.add_window(wid)
def remove_fullscreen(self, wid):
info = pwm.windows.managed[wid]
self.fullscreen.remove_window(wid)
if info.floating:
self.floating.add_window(wid)
else:
self.tiling.add_window(wid)
def is_urgent(self):
for wid in self.windows:
if pwm.windows.managed[wid].urgent:
return True
return False
def setup():
"""
Set up all workspaces.
"""
global workspaces
workspaces = [Workspace() for i in range(config.workspaces)]
global current_workspace_index
current_workspace_index = 0
current().show()
def destroy():
"""
Destroy all workspaces.
"""
global workspaces
workspaces = []
def current():
"""
Return the currently active workspace.
"""
return workspaces[current_workspace_index]
def switch(index):
"""
Switch to workspace at given index.
"""
global current_workspace_index
if current_workspace_index == index:
return
logging.debug("Switching to workspace {}".format(index))
with pwm.windows.no_enter_notify_event():
new_ws = workspaces[index]
new_ws.show()
current().hide()
current_workspace_index = index
pwm.windows.focus(current().top_focus_priority())
pwm.events.workspace_switched(index)
def opened():
"""
Return a generator which yields all open workspaces.
yield (index, workspace)
A workspace is considered open if it has any windows on it or if it's
the current workspace.
"""
for i in range(config.workspaces):
if i == current_workspace_index or workspaces[i].windows:
yield i, workspaces[i]
def send_window_to(wid, workspace):
"""Send the window to another workspace."""
old_ws = pwm.windows.managed[wid].workspace
old_ws.remove_window(wid)
    # Prevent this window from sending an UnmapNotifyEvent, then unmap it
pwm.windows.managed[wid].ignore_unmaps += 1
xcb.core.unmap_window(wid)
new_ws = workspaces[workspace]
new_ws.add_window(wid)
pwm.windows.managed[wid].workspace = new_ws
if current() == old_ws:
pwm.windows.focus(old_ws.top_focus_priority())
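# Illustrative sketch (not part of the original module): the intended calling
# order of the module-level API above. It assumes a running pwm instance in
# which xcb, the bar and config have already been initialised, and at least
# two configured workspaces; it is not runnable on its own.
def _example_workspace_cycle():
    setup()                                   # create config.workspaces workspaces
    switch(1)                                 # show workspace 1 and hide the old one
    focused = current().top_focus_priority()  # most recently focused window, or None
    if focused is not None:
        send_window_to(focused, 0)            # move that window back to workspace 0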
|
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GSoC project student survey.
"""
from soc.views import forms
from soc.views import survey
from soc.views.helper import lists
from django.utils.translation import ugettext
from soc.logic.exceptions import AccessViolation
from soc.logic.exceptions import RedirectRequest
from soc.views.helper.access_checker import isSet
from soc.views.readonly_template import SurveyRecordReadOnlyTemplate
from soc.modules.gsoc.models.project_survey import ProjectSurvey
from soc.modules.gsoc.models.project_survey_record import \
GSoCProjectSurveyRecord
from soc.modules.gsoc.views import forms as gsoc_forms
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.base_templates import LoggedInMsg
from soc.modules.gsoc.views.helper import url_patterns
DEF_CANNOT_ACCESS_EVALUATION = ugettext(
'Organization Administrators can view this evaluation submitted by the '
'student only after the evaluation deadline. Please visit this page '
'after the evaluation deadline has passed.')
class GSoCStudentEvaluationEditForm(gsoc_forms.SurveyEditForm):
"""Form to create/edit GSoC project survey for students.
"""
class Meta:
model = ProjectSurvey
css_prefix = 'gsoc-student-eval-edit'
exclude = ['scope', 'author', 'modified_by', 'survey_content',
'scope_path', 'link_id', 'prefix', 'read_access',
'write_access', 'taking_access', 'is_featured']
class GSoCStudentEvaluationTakeForm(gsoc_forms.SurveyTakeForm):
"""Form for students to respond to the survey during evaluations.
"""
class Meta:
model = GSoCProjectSurveyRecord
css_prefix = 'gsoc-student-eval-record'
exclude = ['project', 'org', 'user', 'survey', 'created', 'modified']
class GSoCStudentEvaluationEditPage(RequestHandler):
"""View for creating/editing student evalution.
"""
def djangoURLPatterns(self):
return [
url_patterns.url(r'eval/student/edit/%s$' % url_patterns.SURVEY,
self, name='gsoc_edit_student_evaluation'),
]
def checkAccess(self):
self.check.isHost()
self.mutator.studentEvaluationFromKwargs(raise_not_found=False)
def templatePath(self):
return 'v2/modules/gsoc/_evaluation.html'
def context(self):
if self.data.student_evaluation:
form = GSoCStudentEvaluationEditForm(
self.data.POST or None, instance=self.data.student_evaluation)
else:
form = GSoCStudentEvaluationEditForm(self.data.POST or None)
page_name = ugettext('Edit - %s' % (self.data.student_evaluation.title)) \
if self.data.student_evaluation else 'Create new student evaluation'
context = {
'page_name': page_name,
'post_url': self.redirect.survey().urlOf(
'gsoc_edit_student_evaluation'),
'forms': [form],
'error': bool(form.errors),
}
return context
def evaluationFromForm(self):
"""Create/edit the student evaluation entity from form.
Returns:
a newly created or updated student evaluation entity or None.
"""
if self.data.student_evaluation:
form = GSoCStudentEvaluationEditForm(
self.data.POST, instance=self.data.student_evaluation)
else:
form = GSoCStudentEvaluationEditForm(self.data.POST)
if not form.is_valid():
return None
form.cleaned_data['modified_by'] = self.data.user
if not self.data.student_evaluation:
form.cleaned_data['link_id'] = self.data.kwargs.get('survey')
form.cleaned_data['prefix'] = 'gsoc_program'
form.cleaned_data['author'] = self.data.user
form.cleaned_data['scope'] = self.data.program
# kwargs which defines an evaluation
fields = ['sponsor', 'program', 'survey']
key_name = '/'.join(['gsoc_program'] +
[self.data.kwargs[field] for field in fields])
entity = form.create(commit=True, key_name=key_name)
else:
entity = form.save(commit=True)
return entity
def post(self):
evaluation = self.evaluationFromForm()
if evaluation:
r = self.redirect.survey()
r.to('gsoc_edit_student_evaluation', validated=True)
else:
self.get()
class GSoCStudentEvaluationTakePage(RequestHandler):
"""View for students to submit their evaluation.
"""
def djangoURLPatterns(self):
return [
url_patterns.url(r'eval/student/%s$' % url_patterns.SURVEY_RECORD,
self, name='gsoc_take_student_evaluation'),
]
def checkAccess(self):
self.mutator.projectFromKwargs()
self.mutator.studentEvaluationFromKwargs()
self.mutator.studentEvaluationRecordFromKwargs()
assert isSet(self.data.student_evaluation)
if self.data.is_host:
return
show_url = self.data.redirect.survey_record(
self.data.student_evaluation.link_id).urlOf(
'gsoc_show_student_evaluation')
self.check.isSurveyActive(self.data.student_evaluation, show_url)
self.check.isProfileActive()
if self.data.orgAdminFor(self.data.project.org):
raise RedirectRequest(show_url)
self.check.canUserTakeSurvey(self.data.student_evaluation, 'student')
self.check.isStudentForSurvey()
def templatePath(self):
return 'v2/modules/gsoc/_evaluation_take.html'
def context(self):
if self.data.student_evaluation_record:
form = GSoCStudentEvaluationTakeForm(
self.data.student_evaluation,
self.data.POST or None, instance=self.data.student_evaluation_record)
else:
form = GSoCStudentEvaluationTakeForm(
self.data.student_evaluation, self.data.POST or None)
context = {
'page_name': '%s' % (self.data.student_evaluation.title),
'description': self.data.student_evaluation.content,
'form_top_msg': LoggedInMsg(self.data, apply_link=False,
div_name='user-login'),
'project': self.data.project.title,
'forms': [form],
'error': bool(form.errors),
}
return context
def recordEvaluationFromForm(self):
"""Create/edit a new student evaluation record based on the form input.
Returns:
a newly created or updated evaluation record entity or None
"""
if self.data.student_evaluation_record:
form = GSoCStudentEvaluationTakeForm(
self.data.student_evaluation,
self.data.POST, instance=self.data.student_evaluation_record)
else:
form = GSoCStudentEvaluationTakeForm(
self.data.student_evaluation, self.data.POST)
if not form.is_valid():
return None
if not self.data.student_evaluation_record:
form.cleaned_data['project'] = self.data.project
form.cleaned_data['org'] = self.data.project.org
form.cleaned_data['user'] = self.data.user
form.cleaned_data['survey'] = self.data.student_evaluation
entity = form.create(commit=True)
else:
entity = form.save(commit=True)
return entity
def post(self):
student_evaluation_record = self.recordEvaluationFromForm()
if student_evaluation_record:
r = self.redirect.survey_record(
self.data.student_evaluation.link_id)
r.to('gsoc_take_student_evaluation', validated=True)
else:
self.get()
class GSoCStudentEvaluationPreviewPage(RequestHandler):
"""View for the host to preview the evaluation.
"""
def djangoURLPatterns(self):
return [
url_patterns.url(
r'eval/student/preview/%s$' % url_patterns.SURVEY,
self, name='gsoc_preview_student_evaluation'),
]
def checkAccess(self):
self.check.isHost()
self.mutator.studentEvaluationFromKwargs(raise_not_found=False)
def templatePath(self):
return 'v2/modules/gsoc/_evaluation_take.html'
def context(self):
form = GSoCStudentEvaluationTakeForm(self.data.student_evaluation)
context = {
'page_name': '%s' % (self.data.student_evaluation.title),
'description': self.data.student_evaluation.content,
'form_top_msg': LoggedInMsg(self.data, apply_link=False,
div_name='user-login'),
'project': "The Project Title",
'forms': [form],
'error': bool(form.errors),
}
return context
class GSoCStudentEvaluationRecordsList(RequestHandler):
"""View for listing all records of a GSoCGProjectSurveyRecord.
"""
def djangoURLPatterns(self):
return [
url_patterns.url(
r'eval/student/records/%s$' % url_patterns.SURVEY,
self, name='gsoc_list_student_eval_records')
]
def checkAccess(self):
"""Defines access checks for this list, all hosts should be able to see it.
"""
self.check.isHost()
self.mutator.studentEvaluationFromKwargs()
def context(self):
"""Returns the context of the page to render.
"""
record_list = self._createSurveyRecordList()
page_name = ugettext('Records - %s' % (self.data.student_evaluation.title))
context = {
'page_name': page_name,
'record_list': record_list,
}
return context
def jsonContext(self):
"""Handler for JSON requests.
"""
idx = lists.getListIndex(self.request)
if idx == 0:
record_list = self._createSurveyRecordList()
return record_list.listContentResponse(
self.request, prefetch=['org', 'project']).content()
else:
super(GSoCStudentEvaluationRecordsList, self).jsonContext()
def _createSurveyRecordList(self):
"""Creates a SurveyRecordList for the requested survey.
"""
record_list = survey.SurveyRecordList(
self.data, self.data.student_evaluation, GSoCProjectSurveyRecord, idx=0)
record_list.list_config.addColumn(
'project', 'Project', lambda ent, *args: ent.project.title)
record_list.list_config.addColumn(
'org', 'Organization', lambda ent, *args: ent.org.name)
return record_list
def templatePath(self):
return 'v2/modules/gsoc/student_eval/record_list.html'
class GSoCStudentEvaluationReadOnlyTemplate(SurveyRecordReadOnlyTemplate):
"""Template to construct readonly student evaluation record.
"""
class Meta:
model = GSoCProjectSurveyRecord
css_prefix = 'gsoc-student-eval-show'
survey_name = 'Student Evaluation'
class GSoCStudentEvaluationShowPage(RequestHandler):
"""View to display the readonly page for student evaluation.
"""
def djangoURLPatterns(self):
return [
url_patterns.url(r'eval/student/show/%s$' % url_patterns.SURVEY_RECORD,
self, name='gsoc_show_student_evaluation'),
]
def checkAccess(self):
self.mutator.projectFromKwargs()
self.mutator.studentEvaluationFromKwargs()
self.mutator.studentEvaluationRecordFromKwargs()
assert isSet(self.data.project)
assert isSet(self.data.student_evaluation)
self.check.isProfileActive()
if self.data.orgAdminFor(self.data.project.org):
self.data.role = 'org_admin'
if self.data.timeline.afterSurveyEnd(self.data.student_evaluation):
return
else:
raise AccessViolation(DEF_CANNOT_ACCESS_EVALUATION)
self.check.isStudentForSurvey()
self.data.role = 'student'
def templatePath(self):
return 'v2/modules/gsoc/_survey/show.html'
def context(self):
assert isSet(self.data.program)
assert isSet(self.data.timeline)
assert isSet(self.data.student_evaluation_record)
record = self.data.student_evaluation_record
student = self.data.url_profile
context = {
'page_name': 'Student evaluation - %s' % (student.name()),
'student': student.name(),
'organization': self.data.project.org.name,
'project': self.data.project.title,
'top_msg': LoggedInMsg(self.data, apply_link=False),
'css_prefix': GSoCStudentEvaluationReadOnlyTemplate.Meta.css_prefix,
}
if record:
context['record'] = GSoCStudentEvaluationReadOnlyTemplate(record)
if self.data.timeline.surveyPeriod(self.data.student_evaluation):
if self.data.role == 'student':
context['update_link'] = self.data.redirect.survey_record(
self.data.student_evaluation.link_id).urlOf(
'gsoc_take_student_evaluation')
else:
context['submission_msg'] = ugettext(
'Bug your student to submit the evaluation.')
return context
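# Illustrative note (not part of the original module): the key_name built in
# GSoCStudentEvaluationEditPage.evaluationFromForm() joins the 'gsoc_program'
# prefix with the sponsor, program and survey values taken from the URL kwargs.
# With hypothetical values sponsor='google', program='gsoc2011' and
# survey='midterm', the resulting key_name would be
# 'gsoc_program/google/gsoc2011/midterm'.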
|
|
import unittest
import os
import sys
from unittest import mock
from tempfile import TemporaryDirectory
from mkdocs.structure.pages import Page
from mkdocs.structure.files import File, Files
from mkdocs.tests.base import load_config, dedent
class PageTests(unittest.TestCase):
DOCS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../integration/subpages/docs')
def test_homepage(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
self.assertIsNone(fl.page)
pg = Page('Foo', fl, cfg)
self.assertEqual(fl.page, pg)
self.assertEqual(pg.url, '')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertTrue(pg.is_homepage)
self.assertTrue(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_nested_index_page(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('sub1/index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
pg.parent = 'foo'
self.assertEqual(pg.url, 'sub1/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertTrue(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertFalse(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, 'foo')
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_nested_index_page_no_parent(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('sub1/index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
pg.parent = None # non-homepage at nav root level; see #1919.
self.assertEqual(pg.url, 'sub1/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertTrue(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_nested_index_page_no_parent_no_directory_urls(self):
cfg = load_config(docs_dir=self.DOCS_DIR, use_directory_urls=False)
fl = File('sub1/index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
pg.parent = None # non-homepage at nav root level; see #1919.
self.assertEqual(pg.url, 'sub1/index.html')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertTrue(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_nested_nonindex_page(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('sub1/non-index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
pg.parent = 'foo'
self.assertEqual(pg.url, 'sub1/non-index/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertFalse(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, 'foo')
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_page_defaults(self):
cfg = load_config()
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertRegex(pg.update_date, r'\d{4}-\d{2}-\d{2}')
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_page_no_directory_url(self):
cfg = load_config(use_directory_urls=False)
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'testing.html')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_page_canonical_url(self):
cfg = load_config(site_url='http://example.com')
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.abs_url, '/testing/')
self.assertEqual(pg.canonical_url, 'http://example.com/testing/')
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_page_canonical_url_nested(self):
cfg = load_config(site_url='http://example.com/foo/')
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.abs_url, '/foo/testing/')
self.assertEqual(pg.canonical_url, 'http://example.com/foo/testing/')
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_page_canonical_url_nested_no_slash(self):
cfg = load_config(site_url='http://example.com/foo')
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.abs_url, '/foo/testing/')
self.assertEqual(pg.canonical_url, 'http://example.com/foo/testing/')
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertEqual(pg.markdown, None)
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Foo')
self.assertEqual(pg.toc, [])
def test_predefined_page_title(self):
cfg = load_config()
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Page Title', fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertTrue(pg.markdown.startswith('# Welcome to MkDocs\n'))
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Page Title')
self.assertEqual(pg.toc, [])
def test_page_title_from_markdown(self):
cfg = load_config()
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page(None, fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertTrue(pg.markdown.startswith('# Welcome to MkDocs\n'))
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Welcome to MkDocs')
self.assertEqual(pg.toc, [])
def test_page_title_from_meta(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('metadata.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page(None, fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.url, 'metadata/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertTrue(pg.markdown.startswith('# Welcome to MkDocs\n'))
self.assertEqual(pg.meta, {'title': 'A Page Title'})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'A Page Title')
self.assertEqual(pg.toc, [])
def test_page_title_from_filename(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('page-title.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page(None, fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.url, 'page-title/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertTrue(pg.markdown.startswith('Page content.\n'))
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Page title')
self.assertEqual(pg.toc, [])
def test_page_title_from_capitalized_filename(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('pageTitle.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page(None, fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.url, 'pageTitle/')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertFalse(pg.is_homepage)
self.assertFalse(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertTrue(pg.markdown.startswith('Page content.\n'))
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'pageTitle')
self.assertEqual(pg.toc, [])
def test_page_title_from_homepage_filename(self):
cfg = load_config(docs_dir=self.DOCS_DIR)
fl = File('index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page(None, fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.url, '')
self.assertEqual(pg.abs_url, None)
self.assertEqual(pg.canonical_url, None)
self.assertEqual(pg.edit_url, None)
self.assertEqual(pg.file, fl)
self.assertEqual(pg.content, None)
self.assertTrue(pg.is_homepage)
self.assertTrue(pg.is_index)
self.assertTrue(pg.is_page)
self.assertFalse(pg.is_section)
self.assertTrue(pg.is_top_level)
self.assertTrue(pg.markdown.startswith('## Test'))
self.assertEqual(pg.meta, {})
self.assertEqual(pg.next_page, None)
self.assertEqual(pg.parent, None)
self.assertEqual(pg.previous_page, None)
self.assertEqual(pg.title, 'Home')
self.assertEqual(pg.toc, [])
def test_page_eq(self):
cfg = load_config()
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertTrue(pg == Page('Foo', fl, cfg))
def test_page_ne(self):
cfg = load_config()
f1 = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
f2 = File('index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', f1, cfg)
# Different Title
self.assertTrue(pg != Page('Bar', f1, cfg))
# Different File
self.assertTrue(pg != Page('Foo', f2, cfg))
def test_BOM(self):
md_src = '# An UTF-8 encoded file with a BOM'
with TemporaryDirectory() as docs_dir:
            # We don't use the mkdocs.tests.base.tempdir decorator here due to the uniqueness of this test.
cfg = load_config(docs_dir=docs_dir)
fl = File('index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page(None, fl, cfg)
            # Create a UTF-8 encoded file with a BOM (as Microsoft editors do). See #1186
with open(fl.abs_src_path, 'w', encoding='utf-8-sig') as f:
f.write(md_src)
# Now read the file.
pg.read_source(cfg)
# Ensure the BOM (`\ufeff`) is removed
self.assertNotIn('\ufeff', pg.markdown)
self.assertEqual(pg.markdown, md_src)
self.assertEqual(pg.meta, {})
def test_page_edit_url(self):
configs = [
{
'repo_url': 'http://github.com/mkdocs/mkdocs'
},
{
'repo_url': 'https://github.com/mkdocs/mkdocs/'
}, {
'repo_url': 'http://example.com'
}, {
'repo_url': 'http://example.com',
'edit_uri': 'edit/master'
}, {
'repo_url': 'http://example.com',
'edit_uri': '/edit/master'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': '/edit/master/'
}, {
'repo_url': 'http://example.com/foo',
'edit_uri': '/edit/master/'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': '/edit/master'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': 'edit/master/'
}, {
'repo_url': 'http://example.com/foo',
'edit_uri': 'edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '?query=edit/master'
}, {
'repo_url': 'http://example.com/',
'edit_uri': '?query=edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '#edit/master'
}, {
'repo_url': 'http://example.com/',
'edit_uri': '#edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '' # Set to blank value
}, {
# Nothing defined
}
]
expected = [
'http://github.com/mkdocs/mkdocs/edit/master/docs/testing.md',
'https://github.com/mkdocs/mkdocs/edit/master/docs/testing.md',
None,
'http://example.com/edit/master/testing.md',
'http://example.com/edit/master/testing.md',
'http://example.com/edit/master/testing.md',
'http://example.com/edit/master/testing.md',
'http://example.com/edit/master/testing.md',
'http://example.com/foo/edit/master/testing.md',
'http://example.com/foo/edit/master/testing.md',
'http://example.com?query=edit/master/testing.md',
'http://example.com/?query=edit/master/testing.md',
'http://example.com#edit/master/testing.md',
'http://example.com/#edit/master/testing.md',
None,
None
]
for i, c in enumerate(configs):
cfg = load_config(**c)
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'testing/')
self.assertEqual(pg.edit_url, expected[i])
def test_nested_page_edit_url(self):
configs = [
{
'repo_url': 'http://github.com/mkdocs/mkdocs'
},
{
'repo_url': 'https://github.com/mkdocs/mkdocs/'
}, {
'repo_url': 'http://example.com'
}, {
'repo_url': 'http://example.com',
'edit_uri': 'edit/master'
}, {
'repo_url': 'http://example.com',
'edit_uri': '/edit/master'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': '/edit/master/'
}, {
'repo_url': 'http://example.com/foo',
'edit_uri': '/edit/master/'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': '/edit/master'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': 'edit/master/'
}, {
'repo_url': 'http://example.com/foo',
'edit_uri': 'edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '?query=edit/master'
}, {
'repo_url': 'http://example.com/',
'edit_uri': '?query=edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '#edit/master'
}, {
'repo_url': 'http://example.com/',
'edit_uri': '#edit/master/'
}
]
expected = [
'http://github.com/mkdocs/mkdocs/edit/master/docs/sub1/non-index.md',
'https://github.com/mkdocs/mkdocs/edit/master/docs/sub1/non-index.md',
None,
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/foo/edit/master/sub1/non-index.md',
'http://example.com/foo/edit/master/sub1/non-index.md',
'http://example.com?query=edit/master/sub1/non-index.md',
'http://example.com/?query=edit/master/sub1/non-index.md',
'http://example.com#edit/master/sub1/non-index.md',
'http://example.com/#edit/master/sub1/non-index.md'
]
for i, c in enumerate(configs):
c['docs_dir'] = self.DOCS_DIR
cfg = load_config(**c)
fl = File('sub1/non-index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'sub1/non-index/')
self.assertEqual(pg.edit_url, expected[i])
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_nested_page_edit_url_windows(self):
configs = [
{
'repo_url': 'http://github.com/mkdocs/mkdocs'
},
{
'repo_url': 'https://github.com/mkdocs/mkdocs/'
}, {
'repo_url': 'http://example.com'
}, {
'repo_url': 'http://example.com',
'edit_uri': 'edit/master'
}, {
'repo_url': 'http://example.com',
'edit_uri': '/edit/master'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': '/edit/master/'
}, {
'repo_url': 'http://example.com/foo',
'edit_uri': '/edit/master/'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': '/edit/master'
}, {
'repo_url': 'http://example.com/foo/',
'edit_uri': 'edit/master/'
}, {
'repo_url': 'http://example.com/foo',
'edit_uri': 'edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '?query=edit/master'
}, {
'repo_url': 'http://example.com/',
'edit_uri': '?query=edit/master/'
}, {
'repo_url': 'http://example.com',
'edit_uri': '#edit/master'
}, {
'repo_url': 'http://example.com/',
'edit_uri': '#edit/master/'
}
]
expected = [
'http://github.com/mkdocs/mkdocs/edit/master/docs/sub1/non-index.md',
'https://github.com/mkdocs/mkdocs/edit/master/docs/sub1/non-index.md',
None,
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/edit/master/sub1/non-index.md',
'http://example.com/foo/edit/master/sub1/non-index.md',
'http://example.com/foo/edit/master/sub1/non-index.md',
'http://example.com?query=edit/master/sub1/non-index.md',
'http://example.com/?query=edit/master/sub1/non-index.md',
'http://example.com#edit/master/sub1/non-index.md',
'http://example.com/#edit/master/sub1/non-index.md'
]
for i, c in enumerate(configs):
c['docs_dir'] = self.DOCS_DIR
cfg = load_config(**c)
fl = File('sub1\\non-index.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.url, 'sub1/non-index/')
self.assertEqual(pg.edit_url, expected[i])
def test_page_render(self):
cfg = load_config()
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
pg.read_source(cfg)
self.assertEqual(pg.content, None)
self.assertEqual(pg.toc, [])
pg.render(cfg, [fl])
self.assertTrue(pg.content.startswith(
'<h1 id="welcome-to-mkdocs">Welcome to MkDocs</h1>\n'
))
self.assertEqual(str(pg.toc).strip(), dedent("""
Welcome to MkDocs - #welcome-to-mkdocs
Commands - #commands
Project layout - #project-layout
"""))
def test_missing_page(self):
cfg = load_config()
fl = File('missing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertRaises(OSError, pg.read_source, cfg)
class SourceDateEpochTests(unittest.TestCase):
def setUp(self):
self.default = os.environ.get('SOURCE_DATE_EPOCH', None)
os.environ['SOURCE_DATE_EPOCH'] = '0'
def test_source_date_epoch(self):
cfg = load_config()
fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
pg = Page('Foo', fl, cfg)
self.assertEqual(pg.update_date, '1970-01-01')
def tearDown(self):
if self.default is not None:
os.environ['SOURCE_DATE_EPOCH'] = self.default
else:
del os.environ['SOURCE_DATE_EPOCH']
class RelativePathExtensionTests(unittest.TestCase):
DOCS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../integration/subpages/docs')
def get_rendered_result(self, files):
cfg = load_config(docs_dir=self.DOCS_DIR)
fs = []
for f in files:
fs.append(File(f.replace('/', os.sep), cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls']))
pg = Page('Foo', fs[0], cfg)
pg.read_source(cfg)
pg.render(cfg, Files(fs))
return pg.content
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](non-index.md)'))
def test_relative_html_link(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'non-index.md']),
'<p><a href="non-index/">link</a></p>' # No trailing /
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](index.md)'))
def test_relative_html_link_index(self):
self.assertEqual(
self.get_rendered_result(['non-index.md', 'index.md']),
'<p><a href="..">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](sub2/index.md)'))
def test_relative_html_link_sub_index(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'sub2/index.md']),
'<p><a href="sub2/">link</a></p>' # No trailing /
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](sub2/non-index.md)'))
def test_relative_html_link_sub_page(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'sub2/non-index.md']),
'<p><a href="sub2/non-index/">link</a></p>' # No trailing /
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](file%20name.md)'))
def test_relative_html_link_with_encoded_space(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'file name.md']),
'<p><a href="file%20name/">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](file name.md)'))
def test_relative_html_link_with_unencoded_space(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'file name.md']),
'<p><a href="file%20name/">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](../index.md)'))
def test_relative_html_link_parent_index(self):
self.assertEqual(
self.get_rendered_result(['sub2/non-index.md', 'index.md']),
'<p><a href="../..">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](non-index.md#hash)'))
def test_relative_html_link_hash(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'non-index.md']),
'<p><a href="non-index/#hash">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](sub2/index.md#hash)'))
def test_relative_html_link_sub_index_hash(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'sub2/index.md']),
'<p><a href="sub2/#hash">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](sub2/non-index.md#hash)'))
def test_relative_html_link_sub_page_hash(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'sub2/non-index.md']),
'<p><a href="sub2/non-index/#hash">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](#hash)'))
def test_relative_html_link_hash_only(self):
self.assertEqual(
self.get_rendered_result(['index.md']),
'<p><a href="#hash">link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data=''))
def test_relative_image_link_from_homepage(self):
self.assertEqual(
self.get_rendered_result(['index.md', 'image.png']),
'<p><img alt="image" src="image.png" /></p>' # no opening ./
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data=''))
def test_relative_image_link_from_subpage(self):
self.assertEqual(
self.get_rendered_result(['sub2/non-index.md', 'image.png']),
'<p><img alt="image" src="../../image.png" /></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data=''))
def test_relative_image_link_from_sibling(self):
self.assertEqual(
self.get_rendered_result(['non-index.md', 'image.png']),
'<p><img alt="image" src="../image.png" /></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='*__not__ a link*.'))
def test_no_links(self):
self.assertEqual(
self.get_rendered_result(['index.md']),
'<p><em><strong>not</strong> a link</em>.</p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[link](non-existant.md)'))
def test_bad_relative_html_link(self):
with self.assertLogs('mkdocs', level='WARNING') as cm:
self.assertEqual(
self.get_rendered_result(['index.md']),
'<p><a href="non-existant.md">link</a></p>'
)
self.assertEqual(
cm.output,
["WARNING:mkdocs.structure.pages:Documentation file 'index.md' contains a link "
"to 'non-existant.md' which is not found in the documentation files."]
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[external](http://example.com/index.md)'))
def test_external_link(self):
self.assertEqual(
self.get_rendered_result(['index.md']),
'<p><a href="http://example.com/index.md">external</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='[absolute link](/path/to/file.md)'))
def test_absolute_link(self):
self.assertEqual(
self.get_rendered_result(['index.md']),
'<p><a href="/path/to/file.md">absolute link</a></p>'
)
@mock.patch('mkdocs.structure.pages.open', mock.mock_open(read_data='<mail@example.com>'))
def test_email_link(self):
self.assertEqual(
self.get_rendered_result(['index.md']),
# Markdown's default behavior is to obscure email addresses by entity-encoding them.
# The following is equivalent to: '<p><a href="mailto:mail@example.com">mail@example.com</a></p>'
'<p><a href="mailto:mail@e'
'xample.com">mail@'
'example.com</a></p>'
)
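# Illustrative sketch (not part of the original test module): the minimal Page
# lifecycle exercised by the tests above. 'testing.md' is the page provided by
# the default configuration returned from load_config() in these tests.
def _example_page_lifecycle():
    cfg = load_config()
    fl = File('testing.md', cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])
    pg = Page('Foo', fl, cfg)
    pg.read_source(cfg)   # populates pg.markdown and pg.meta
    pg.render(cfg, [fl])  # populates pg.content and pg.toc
    return pg.content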
|
|
# Copyright 2015-2016 Florian Lehner. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import requests
import simplejson as json
import time
def __execute_request__(kind=None, url=None, headers=None, payload=None):
    # The most specific requests exceptions are caught first;
    # requests.exceptions.RequestException is their common base class and
    # would otherwise swallow them all.
    if kind == "post":
        try:
            response = requests.post(url, data=payload, headers=headers)
        except requests.exceptions.ConnectTimeout:
            print "ConnectTimeout"
            return None
        except requests.exceptions.ReadTimeout:
            print "ReadTimeout"
            return None
        except requests.exceptions.Timeout:
            print "Timeout"
            return None
        except requests.exceptions.TooManyRedirects:
            print "TooManyRedirects"
            return None
        except requests.exceptions.ConnectionError:
            print "ConnectionError"
            return None
        except requests.exceptions.RequestException:
            print "RequestException"
            return None
    elif kind == "get":
        try:
            response = requests.get(url, data=payload, headers=headers)
        except requests.exceptions.ConnectTimeout:
            print "ConnectTimeout"
            return None
        except requests.exceptions.ReadTimeout:
            print "ReadTimeout"
            return None
        except requests.exceptions.Timeout:
            print "Timeout"
            return None
        except requests.exceptions.TooManyRedirects:
            print "TooManyRedirects"
            return None
        except requests.exceptions.ConnectionError:
            print "ConnectionError"
            return None
        except requests.exceptions.RequestException:
            print "RequestException"
            return None
    else:
        print "Unknown type of http-request"
        return None
    return response
# currently supported options from the OSC API
_options=( "captureMode", "captureModeSupport",
"exposureProgram", "exposureProgramSupport",
"iso", "isoSupport",
"shutterSpeed", "shutterSpeedSupport",
"aperture", "apertureSupport",
"whiteBalance", "whiteBalanceSupport",
"exposureCompensation", "exposureCompensationSupport",
"fileFormat", "fileFormatSupport",
"exposureDelay", "exposureDelay",
"sleepDelay", "sleepDelaySupport",
"offDelay", "offDelaySupport",
"totalSpace", "remainingSpace",
"gpsInfo", "dateTimeZone",
"hdr", "hdrSupport",
"exposureBracket", "exposureBracketSupport",
"gyro", "gyroSupport",
"imageStabilization", "imageStabilizationSupport",
"wifiPassword"
)
class OSCAPI:
def __init__(self, ip, port):
"""
A device supporting the OSC-API
:param ip: IP of the device you want to connect to
:param port: Port you want to connect to
"""
self.ip = ip
self.port = port
self.sid = None
self.header = { "User-Agent":"pyOSCapi",
"X-XSRF-Protected":"1"}
self.options = {}
self.cmds = []
def connect(self):
"""
Opens a connection
"""
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.startSession"})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
if rep["state"]:
if rep["state"] == "done":
self.sid = (rep["results"]["sessionId"])
return rep
def update(self):
"""
Updates the session
"""
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.updateSession", "parameters":{"sessionId":self.sid}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
return rep
def disconnect(self):
"""
Close the connection
"""
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.closeSession", "parameters":{"sessionId":self.sid}})
self.header["Content-Type"] = "application/json; charset=utf-8"
self.header["Connection"] = "close"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
return rep
def _checkProgress(self):
time.sleep(1)
rep = self.state()
        # This is a workaround that works only for Bublcams:
        # the OSC API does not define how the inProgress status
        # of a command is exposed in /osc/state.
if "_bublCommands" in rep["state"]:
cmdStatus = rep["state"]["_bublCommands"]
for tmp in cmdStatus:
if tmp["state"]:
if tmp["state"] == "inProgress":
rep = self._checkProgress()
return rep
def takePicture(self, wait=True):
"""
Take a picture via the API
        :param wait: If True, the method returns only after the picture has
                     been taken. Otherwise you will have to wait yourself.
"""
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.takePicture", "parameters":{"sessionId":self.sid}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
if wait == True:
if rep["state"]:
if rep["state"] == "inProgress":
rep = self._checkProgress()
return rep
def listImages(self, count, size, thumbs):
"""
        List the images stored on the device
        :param count: Desired number of entries
        :param size: Maximum size of the returned thumbnails
        :param thumbs: If True, thumbnails are included in the response.
"""
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.listImages", "parameters":{"entryCount":count, "maxSize":size, "includeThumb":bool(thumbs)}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
return rep
def deleteImage(self, fileUri=None):
"""
Delete image on the device
:param fileUri: URI of the image
"""
if fileUri == None:
return
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.delete", "parameters":{"fileUri":fileUri}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
return rep
def getImage(self, fileUri=None):
"""
Get image from the device
:param fileUri: URI of the image
"""
if fileUri == None:
return
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.getImage", "parameters":{"fileUri":fileUri}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
return req
def getImageMetadata(self, fileUri=None):
"""
        Get the metadata of an image from the device
:param fileUri: URI of the image
"""
if fileUri == None:
return
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.getMetadata", "parameters":{"fileUri":fileUri}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
return rep
def getOptions(self, optionlist=_options):
"""
Check which options the device supports
        :param optionlist: The options you want to check
"""
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.getOptions", "parameters":{"sessionId":self.sid, "optionNames":optionlist}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
for key in rep:
if key == "results":
for option in rep[key]["options"]:
if option in _options:
self.options[option] = rep[key]["options"][option]
return rep
def setOption(self, settings=None):
"""
Change settings of the device
        :param settings: dict of option names and the values you want to set
"""
if settings == None:
return
if not self.options:
self.getOptions()
for opt in settings:
            if opt not in self.options:
return
url = "http://" + self.ip + ":" + self.port +"/osc/commands/execute"
data = json.dumps({"name":"camera.setOptions", "parameters":{"sessionId":self.sid, "options":settings}})
self.header["Content-Type"] = "application/json; charset=utf-8"
req = __execute_request__("post", url, self.header, data)
if req == None:
return None
rep = req.json()
return rep
def info(self):
"""
Returns basic information about the device and functionality it supports
"""
url = "http://" + self.ip + ":" + self.port +"/osc/info"
req = __execute_request__("get", url, self.header)
if req == None:
return None
rep = req.json()
for key in rep:
if key == "api":
self.cmds += (rep[key])
return rep
def state(self):
"""
Returns the state attribute of the device
"""
url = "http://" + self.ip + ":" + self.port +"/osc/state"
req = __execute_request__("post", url, self.header)
if req == None:
return None
rep = req.json()
return rep
def getCmds(self):
"""
Returns the list of commands the device supports
"""
if not self.cmds:
self.info()
return self.cmds
def getSid(self):
"""
Returns the current session id
"""
return self.sid
def execCustomCmd(self, cmd=None, payload=None, contentType=None):
"""
Execute your own command
:param cmd: Command for the request
:param payload: Additional data for the command
        :param contentType: Additional header information
"""
if cmd == None:
return
url = "http://" + self.ip + ":" + self.port + cmd
req = __execute_request__("post", url, contentType, payload)
if req == None:
return None
rep = req.json()
return rep
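# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of driving the OSC client above. The class
# name "OSCCamera" and its constructor arguments are assumptions; substitute
# the real class exported by this module and its session-handling helpers.
#
# cam = OSCCamera(ip="192.168.1.1", port="80")   # hypothetical constructor
# cam.info()                          # discover the supported commands
# cam.getOptions()                    # cache the supported options locally
# rep = cam.takePicture(wait=True)    # blocks until the capture finishes
# print(cam.listImages(count=1, size=640, thumbs=False))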
|
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa", "ur")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails. Deprecated, must be removed in 1.8.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
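# Example override in a project settings module (illustrative values; the
# default here stays empty):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'db.sqlite3',
#     }
# }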
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where the user will place custom format definitions.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be grouped together when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
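# Example for a proxy that sets X-Forwarded-Proto (only enable this when the
# proxy is guaranteed to overwrite that header on every request):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')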
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_CACHE_ALIAS = 'default' # Cache to store session data if using the cache session backend.
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # class to serialize session data
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
|
|
#!/usr/bin/python
# Time-stamp: <2015-12-13 20:06:19 marine>
# Project : Snow in the F - layer
# Subproject : seismic observations
# Author : Marine Lasbleis
import math
import scipy.io as io
import numpy as np
import json # for writing in files
import os.path
import matplotlib.pyplot as plt
import param
# Compute the bulk modulus K for PREM + load the PREM and AK135 profiles
def Load_PREM_AK135_PREM2():
# Load the profiles PREM and AK135. radius in m, alpha in m/s, Ks in Pa.
Observations = io.loadmat(
'/Users/marine/ownCloud/Research/PREM/AK_PREM.mat')
Ks_PREM, alpha_AK, alpha_PREM, r_AK, r_PREM, rho_AK, rho_PREM = \
Observations['Ks_PREM'] * 1e9, Observations['alpha_AK'] * 1000., Observations['alpha_PREM'], Observations['r_AK'] * 1000., \
Observations['r_PREM'] * 1000., Observations['rho_AK'] * \
1000., Observations['rho_PREM']
    # Load the F-layer only (thickness = d, defined in param.py)
# for PREM
hminP = (r_PREM >= 1221e3).argmax()
hmaxP = (r_PREM >= 1221e3 + param.d).argmax()
Vp_PREM = alpha_PREM[hminP + 1:hmaxP + 1]
radius_PREM = r_PREM[hminP + 1:hmaxP + 1]
# for AK135
hmaxA = (r_AK <= 1200.5e3).argmax()
hminA = (r_AK <= 1220e3 + param.d).argmax()
# Vp_AK=alpha_AK[hminA-1:hmaxA-1]
# radius_AK=r_AK[hminA-1:hmaxA-1]
K_AK = np.zeros(r_AK.size)
# define the profile for PREM2 (as given in the paper)
r_PREM2_1 = np.linspace(0., 1010.0e3, 10)
alpha_PREM2_1 = 11.2622 - 6.364 * (r_PREM2_1 / 6371.e3)**2
r_PREM2_2 = np.linspace(1010.0e3, 1221.5e3, 10)
alpha_PREM2_2 = 11.3041 - 1.2730 * (r_PREM2_2 / 6371.e3)
r_PREM2_3 = np.linspace(1221.5e3, 1621.5e3, 10)
alpha_PREM2_3 = 4.0354 + 82.008 * (r_PREM2_3 / 6371.e3) - 347.769 * (
r_PREM2_3 / 6371.e3)**2 + 468.786 * (r_PREM2_3 / 6371.e3)**3.
r_PREM2 = np.concatenate((r_PREM2_1, r_PREM2_2, r_PREM2_3))
alpha_PREM2 = np.concatenate(
(alpha_PREM2_1, alpha_PREM2_2, alpha_PREM2_3)) * 1000.
# radius_PREM2=r_PREM2[20:30]
# Vp_PREM2=alpha_PREM2[20:30]
K_PREM2 = np.zeros(30)
rho_PREM2 = np.zeros(30)
# Bulk modulus
# Bulk modulus K: from PREM
KPREM = Ks_PREM
# from Labrosse 2003/2015
ric = np.linspace(0, 3500e3, 30)
Kprem_labrosse2015 = (param.K0 - param.K0 * param.Kprim0 *
(ric**2. / param.Lrho**2 + 4. / 5. * ric**4. / param.Lrho**4))
rho_Labrosse2015 = param.rho0 * \
(1 - ric**2 / param.Lrho**2 - param.Arho * ric**4 / param.Lrho**4)
K_labrosse2003 = 1777e9 * rho_Labrosse2015 / \
7.5e3 * (np.log(rho_Labrosse2015 / 7.5e3) + 1)
K_approx = KPREM[0] - (KPREM[1] - KPREM[0]) / (radius_PREM[1] - radius_PREM[0]) * \
radius_PREM[0] + (KPREM[1] - KPREM[0]) / \
(radius_PREM[1] - radius_PREM[0]) * ric
K_approx = K_approx * 1.e8
DATA_PREM = {'r': r_PREM.tolist(), 'Vp': alpha_PREM.tolist(), 'K': KPREM.tolist(
), 'hmin': hminP + 1, 'hmax': hmaxP + 1, 'rho': rho_PREM.tolist()}
f = open("F_layer_PREM.dat", 'w')
json.dump(DATA_PREM, f)
f.close()
DATA_PREM2 = {'r': r_PREM2.tolist(), 'Vp': alpha_PREM2.tolist(
), 'K': K_PREM2.tolist(), 'hmin': 20, 'hmax': 30, 'rho': rho_PREM2.tolist()}
f = open("F_layer_PREM2.dat", 'w')
json.dump(DATA_PREM2, f)
f.close()
DATA_AK135 = {'r': r_AK.tolist(), 'Vp': alpha_AK.tolist(
), 'hmin': hminA - 1, 'K': K_AK.tolist(), 'hmax': hmaxA - 1, 'rho': rho_AK.tolist()}
f = open("F_layer_AK135.dat", 'w')
json.dump(DATA_AK135, f)
f.close()
Vp_Labrosse = np.sqrt(Kprem_labrosse2015 / rho_Labrosse2015)
DATA_Labrosse = {'r': ric.tolist(), 'Vp': Vp_Labrosse.tolist(
), 'hmin': 1, 'K': Kprem_labrosse2015.tolist(), 'hmax': 30, 'rho': rho_Labrosse2015.tolist()}
f = open("F_layer_Labrosse.dat", 'w')
json.dump(DATA_Labrosse, f)
f.close()
def calcK(hicb, rICB=param.rICp):
"""Return the value of the bulk modulus with a linear approximation of PREM at ICB
hicb in m, K in Pa.
"""
    if not os.path.isfile('F_layer_PREM.dat'):
        print "Loading seismic information."
        print ".dat files containing the seismic information are being created."
Load_PREM_AK135_PREM2()
try:
with open('F_layer_PREM.dat', 'r') as file:
data = json.load(file)
hmin = data['hmin']
hmax = data['hmax']
radius = np.array(data['r'])
radius = radius[hmin:hmax]
K = np.array(data['K'])
K = K[hmin:hmax]
except IOError as e:
print "Unable to open file DATA_PREM.dat. Please check the function Load_PREM_AK1135_PREM2"
ric = rICB + hicb
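    # Linear extrapolation of K(r) from the first two F-layer PREM samples:
    #   K(ric) = K[0] + (K[1] - K[0]) / (radius[1] - radius[0]) * (ric - radius[0])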
return (K[0] - (K[1] - K[0]) / (radius[1] - radius[0]) * radius[0] + (K[1] - K[0]) / (radius[1] - radius[0]) * ric)
def Figures_seism():
""" plot seismic observations for the F-layer: PREM, PREM2, AK135 and fit by Labrosse 2015.
Use files : 'F_layer_PREM.dat', 'F_layer_PREM2.dat', 'F_layer_AK135.dat', 'F_layer_Labrosse.dat'
Data are json files with: r, Vp, K, rho, hmax, hmin
If a file does not exist, please run Load_PREM_AK135_PREM2() to obtain data from the Matlab file.
6 figures :
0 1 2
3 4 5
0: Vp in the whole Earth
1: Vp zoom in the F-layer
2: K in the F-layer
3: density profile
"""
f, axa = plt.subplots(2, 3)
# 0 1 2
# 3 4 5
# 0: Vp in the whole Earth
# 1: Vp zoom in the F-layer
# 2: K in the F-layer
# 3: density profile
for fichier in ['F_layer_PREM.dat', 'F_layer_PREM2.dat', 'F_layer_AK135.dat', 'F_layer_Labrosse.dat']:
try:
with open(fichier, 'r') as file:
print "=========", fichier, "========="
data = json.load(file)
r = np.array(data['r']) / param.rICp
Vp = np.array(data['Vp'])
K = np.array(data['K'])
rho = np.array(data['rho'])
hmax = data['hmax']
hmin = data['hmin']
axa[0, 0].plot(r, Vp, label=fichier)
axa[0, 1].plot(r[hmin:hmax], Vp[hmin:hmax], label=fichier)
axa[0, 2].plot(r, K, label=fichier)
axa[1, 0].plot(r, rho, label=fichier)
axa[1, 1].plot(r, rho, label=fichier)
# axa[1,0].plot(r,rho)
except IOError as e:
print "The data file does not exist. Please check the function Load_PREM_AK1135_PREM2 and particularly for file ", files
axa[0, 0].set_title('Vp (m/s)')
axa[0, 0].axis([0, 2, 9000., 12000.])
axa[0, 0].set_xlabel('r/r_icb')
axa[0, 1].set_title('ZOOM - Vp (m/s)')
axa[0, 1].set_xlabel('r/r_icb')
axa[0, 1].axis([0.95, 1.35, 10200, 10400])
axa[0, 2].set_title('Ks (Pa)')
axa[0, 2].scatter(1., 1.3047e12)
axa[0, 2].axis([0., 2., 0.9e12, 1.8e12])
axa[1, 0].set_title('Rho (kg/m^3)')
axa[1, 0].axis([0, 2, 11000., 13500.])
axa[1, 0].set_xlabel('r/r_icb')
axa[1, 1].set_title('ZOOM - Rho (kg/m^3)')
axa[1, 1].axis([0.95, 1.35, 11800., 12200.])
axa[1, 1].set_xlabel('r/r_icb')
axa[0, 0].legend(prop={'size': 10})
axa[0, 1].legend(prop={'size': 10})
axa[0, 2].legend(prop={'size': 8})
axa[1, 0].legend(prop={'size': 8})
axa[1, 1].legend(prop={'size': 8})
# plt.show()
def plotVp(z_, Vp_, save=0, name="Vp.pdf"):
fig, ax = plt.subplots()
for fichier in ['F_layer_PREM.dat', 'F_layer_PREM2.dat', 'F_layer_AK135.dat']:
try:
with open(fichier, 'r') as file:
data = json.load(file)
hmin = data['hmin']
hmax = data['hmax']
radius = np.array(data['r'])[hmin:hmax]
Vp = np.array(data['Vp'])[hmin:hmax]
print file, radius[1], radius[-1]
except IOError as e:
print "Unable to open file", file, ". Please check the function Load_PREM_AK1135_PREM2"
ax.plot(Vp / 1e3, radius / param.rICp)
ax.plot(Vp_ / 1e3, z_ / param.rICp)
if save == 1:
plt.savefig(name)
if __name__ == '__main__':
Load_PREM_AK135_PREM2()
Figures_seism()
plt.show()
|
|
from __future__ import absolute_import
from unet_collection.layer_utils import *
from tensorflow.keras.layers import Input, Conv3D
from tensorflow.keras.layers import BatchNormalization, Activation, concatenate, multiply
from tensorflow.keras.layers import ReLU, LeakyReLU, PReLU, ELU
from tensorflow.keras.models import Model
def UNET_left3D(X, channel, kernel_size=3, stack_num=2, activation='ReLU',
pool=True, batch_norm=False, name='left0'):
'''
The encoder block of U-net.
    UNET_left3D(X, channel, kernel_size=3, stack_num=2, activation='ReLU',
                pool=True, batch_norm=False, name='left0')
Input
----------
X: input tensor.
channel: number of convolution filters.
    kernel_size: size of 3-d convolution kernels.
stack_num: number of convolutional layers.
activation: one of the `tensorflow.keras.layers` interface, e.g., 'ReLU'.
    pool: True or 'max' for MaxPooling3D.
          'ave' for AveragePooling3D.
False for strided conv + batch norm + activation.
batch_norm: True for batch normalization, False otherwise.
name: prefix of the created keras layers.
Output
----------
X: output tensor.
'''
pool_size = 2
X = encode_layer3D(X, channel, pool_size, pool, activation=activation,
batch_norm=batch_norm, name='{}_encode'.format(name))
X = CONV_stack3D(X, channel, kernel_size, stack_num=stack_num, activation=activation,
batch_norm=batch_norm, name='{}_conv'.format(name))
return X
def UNET_right3D(X, X_list, channel, kernel_size=3,
stack_num=2, activation='ReLU',
unpool=True, batch_norm=False, concat=True, name='right0'):
'''
The decoder block of U-net.
Input
----------
X: input tensor.
X_list: a list of other tensors that connected to the input tensor.
channel: number of convolution filters.
    kernel_size: size of 3-d convolution kernels.
stack_num: number of convolutional layers.
activation: one of the `tensorflow.keras.layers` interface, e.g., 'ReLU'.
    unpool: True or 'bilinear' for UpSampling3D with bilinear interpolation.
            'nearest' for UpSampling3D with nearest interpolation.
            False for Conv3DTranspose + batch norm + activation.
batch_norm: True for batch normalization, False otherwise.
concat: True for concatenating the corresponded X_list elements.
name: prefix of the created keras layers.
Output
----------
X: output tensor.
'''
pool_size = 2
X = decode_layer3D(X, channel, pool_size, unpool,
activation=activation, batch_norm=batch_norm, name='{}_decode'.format(name))
# linear convolutional layers before concatenation
X = CONV_stack3D(X, channel, kernel_size, stack_num=1, activation=activation,
batch_norm=batch_norm, name='{}_conv_before_concat'.format(name))
if concat:
        # <--- *stacked convolutions can be applied here
X = concatenate([X,]+X_list, axis=-1, name=name+'_concat')
# Stacked convolutions after concatenation
X = CONV_stack3D(X, channel, kernel_size, stack_num=stack_num, activation=activation,
batch_norm=batch_norm, name=name+'_conv_after_concat')
return X
def unet_plus_3d(input_size, filter_num, n_labels,
stack_num_down=2, stack_num_up=2,
activation='ReLU', output_activation='Softmax',
batch_norm=False, pool=True, unpool=True, deep_supervision=False, name='xnet'):
'''
U-net++ or nested U-net
    unet_plus_3d(input_size, filter_num, n_labels,
                 stack_num_down=2, stack_num_up=2,
                 activation='ReLU', output_activation='Softmax',
                 batch_norm=False, pool=True, unpool=True, deep_supervision=False, name='xnet')
----------
Zhou, Z., Siddiquee, M.M.R., Tajbakhsh, N. and Liang, J., 2018. Unet++: A nested u-net architecture
for medical image segmentation. In Deep Learning in Medical Image Analysis and Multimodal Learning
for Clinical Decision Support (pp. 3-11). Springer, Cham.
Input
----------
    input_size: a tuple that defines the shape of the 3-d input volume, e.g., (None, None, None, 1)
filter_num: an iterable that defines number of filters for each \
down- and upsampling level. E.g., [64, 128, 256, 512]
the depth is expected as `len(filter_num)`
n_labels: number of output labels.
stack_num_down: number of convolutional layers per downsampling level/block.
stack_num_up: number of convolutional layers (after concatenation) per upsampling level/block.
activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interfaces, e.g., ReLU
output_activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interfaces or 'Sigmoid'.
Default option is Softmax
if None is received, then linear activation is applied.
batch_norm: True for batch normalization.
pool: True for maxpooling, False for strided convolutional layers.
unpool: True for unpooling (i.e., reflective padding), False for transpose convolutional layers.
deep_supervision: True for a model that supports deep supervision. Details see Zhou et al. (2018).
name: prefix of the created keras layers.
Output
----------
X: a keras model
'''
depth_ = len(filter_num)
IN = Input(input_size)
X = IN
# allocate nested lists for collecting output tensors
X_nest_skip = [[] for _ in range(depth_)]
# downsampling blocks (same as in 'unet_2d')
X = CONV_stack3D(X, filter_num[0],
stack_num=stack_num_down,
activation=activation,
batch_norm=batch_norm, name='{}_down0'.format(name))
X_nest_skip[0].append(X)
for i, f in enumerate(filter_num[1:]):
X = UNET_left3D(X, f, stack_num=stack_num_down,
activation=activation,
pool=pool, batch_norm=batch_norm,
name='{}_down{}'.format(name, i+1))
X_nest_skip[0].append(X)
for nest_lev in range(1, depth_):
# subset filter numbers to the current upsampling level
filter_num_sub = filter_num[:(depth_-nest_lev)]
        # loop over individual upsampling levels
for i, f in enumerate(filter_num_sub[::-1]):
# collecting previous downsampling outputs
previous_skip = []
for previous_lev in range(nest_lev):
previous_skip.append(X_nest_skip[previous_lev][i])
            # upsampling block that concatenates all available (same feature-map size) down-/upsampling outputs
X_nest_skip[nest_lev].append(
UNET_right3D(X_nest_skip[nest_lev-1][i+1],
previous_skip, filter_num[i],
stack_num=stack_num_up,
activation=activation, name='xnet_{}{}'.format(nest_lev, i)))
# output
if deep_supervision:
OUT_list = []
print('----------\ndeep_supervision = True\nnames of output tensors are listed as follows (the last one is the final output):')
for i in range(1, depth_):
print('\t{}_output_sup{}'.format(name, i))
OUT_list.append(CONV_output3D(X_nest_skip[i][0], n_labels, kernel_size=1,
activation=output_activation,
name='{}_output_sup{}'.format(name, i)))
print('\t{}_output_final'.format(name))
OUT_list.append(CONV_output3D(X_nest_skip[-1][0],
n_labels, kernel_size=1,
activation=output_activation,
name='{}_output'.format(name)))
else:
OUT = CONV_output3D(X_nest_skip[-1][0],
n_labels, kernel_size=1,
activation=output_activation,
name='{}_output'.format(name))
OUT_list = [OUT,]
# model
model = Model(inputs=[IN], outputs=OUT_list, name='{}_model'.format(name))
return model
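# --- Usage sketch (illustrative only) ---
# Builds a small 3-D U-net++ for 64^3 single-channel volumes with two output
# labels. The input shape, filter sizes, and label count are assumptions, not
# part of the original module; adjust them to your data.
if __name__ == '__main__':
    demo_model = unet_plus_3d(input_size=(64, 64, 64, 1),
                              filter_num=[16, 32, 64],
                              n_labels=2,
                              stack_num_down=2, stack_num_up=2,
                              activation='ReLU', output_activation='Softmax',
                              batch_norm=True, pool=True, unpool=True,
                              deep_supervision=False, name='xnet3d_demo')
    demo_model.summary()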
|
|
""" Tests internal math routines
"""
import random
import itertools
import numpy as np
import pytest
import moldesign
from moldesign import units as u
from moldesign.mathutils import spherical_harmonics as harmonics
from . import helpers
from .molecule_fixtures import *
registered_types = {}
__PYTEST_MARK__ = ['math', 'gaussians']
def typedfixture(*types, **kwargs):
"""This is a decorator that lets us associate fixtures with one or more arbitrary types.
We'll later use this type to determine what tests to run on the result"""
def fixture_wrapper(func):
for t in types:
registered_types.setdefault(t, []).append(func.__name__)
return pytest.fixture(**kwargs)(func)
return fixture_wrapper
@pytest.fixture
def std_1d_gaussian():
g = moldesign.orbitals.gaussians.Gaussian([0.0]*u.angstrom,
1.0/u.angstrom ** 2)
return g
@typedfixture('basis_fn')
def std_3d_gaussian():
g = moldesign.orbitals.gaussians.Gaussian([0.0, 0.0, 0.0]*u.angstrom,
1.0/u.angstrom ** 2)
return g
@typedfixture('basis_fn')
def cartesian_3d_gaussian():
g = moldesign.orbitals.CartesianGaussian(
center=[random.random() for i in range(3)]*u.angstrom,
powers=[1, 3, 0],
alpha=1.1/u.angstrom ** 2,
coeff=1.0)
return g
@typedfixture('basis_fn')
def spherical_3d_gaussian():
g = moldesign.orbitals.SphericalGaussian(
center=[random.random() for i in range(3)]*u.angstrom,
l=3, m=-2,
alpha=1.1/u.angstrom ** 2,
coeff=1.0)
return g
@pytest.mark.parametrize('objkey', ['std_1d_gaussian','std_3d_gaussian'])
@pytest.mark.screening
def test_gaussian_integral_and_dimensionality(objkey, request):
g = request.getfixturevalue(objkey)
assert g.ndim == len(g.center)
intval = g.integral
expectval = g.coeff*(np.pi/g.alpha) ** (g.ndim/2.0)
_assert_almost_equal(intval,
expectval,
decimal=10)
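# The analytical reference above is the standard n-dimensional Gaussian
# integral: the integral of c * exp(-alpha * |r|^2) over R^n equals
# c * (pi / alpha) ** (n / 2), which is exactly what `expectval` encodes.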
@pytest.fixture
def linear_combination():
return _make_rando_linear_combination(True)
def _make_rando_gaussian(withunits=True):
if withunits:
length = u.angstrom
else:
length = 1.0
return moldesign.orbitals.Gaussian((np.random.rand(3)-0.5)*1.0 * length,
(random.random()*5)/(length ** 2),
coeff=random.random())
def _make_rando_cartesian_gaussian(powers, withunits=True):
if withunits:
length = u.angstrom
else:
length = 1.0
return moldesign.orbitals.CartesianGaussian((np.random.rand(3)-0.5)*1.0 * length,
(random.random()*5)/(length ** 2),
powers=powers,
coeff=random.random())
def _make_rando_spherical_gaussian(l,m, withunits=True):
if withunits:
length = u.angstrom
else:
length = 1.0
return moldesign.orbitals.SphericalGaussian((np.random.rand(3)-0.5)*1.0 * length,
(random.random()*5)/(length ** 2),
l,m,
coeff=random.random())
def _make_rando_linear_combination(withunits=True):
gaussians = []
if withunits:
length = u.angstrom
else:
length = 1.0
center = (np.random.rand(3)-0.5)*1.0 * length
for pwr in [(0,0,0), (1,1,1), (3,2,1)]:
gaussians.append(
moldesign.orbitals.CartesianGaussian(
center=center,
powers=pwr,
alpha=(10.0 * (random.random()+3))/(length**2),
coeff=1/(np.sqrt(3.0))))
lc = moldesign.orbitals.PrimitiveSum(gaussians)
lc.ndims = 3 # so it works with the test suite
return lc
@pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number'])
def test_numerical_vs_analytical_overlap_gauss(withunits):
p1 = _make_rando_gaussian(withunits)
p2 = _make_rando_gaussian(withunits)
_assert_numerical_analytical_overlaps_match(p1, p2)
@pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number'])
def test_numerical_vs_analytical_overlap_cartesian(withunits):
p1 = _make_rando_cartesian_gaussian((1,2,3), withunits)
p2 = _make_rando_cartesian_gaussian((1,0,1), withunits)
_assert_numerical_analytical_overlaps_match(p1, p2)
@pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number'])
def test_numerical_vs_analytical_overlap_spherical(withunits):
p1 = _make_rando_spherical_gaussian(1,-1, withunits)
p2 = _make_rando_spherical_gaussian(2,0, withunits)
_assert_numerical_analytical_overlaps_match(p1, p2)
@pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number'])
def test_numerical_vs_analytical_overlap_linear_combination(withunits):
p1 = _make_rando_linear_combination(withunits)
p2 = _make_rando_linear_combination(withunits)
_assert_numerical_analytical_overlaps_match(p1, p2)
def _assert_numerical_analytical_overlaps_match(g1, g2):
olap = g1.overlap(g2)
try:
prod = g1*g2
except NotImplementedError:
assert isinstance(g1, moldesign.orbitals.SphericalGaussian)
assert isinstance(g2, moldesign.orbitals.SphericalGaussian)
else:
helpers.assert_almost_equal(prod.integral, olap)
def assert_with_resolution(npoints):
allpoints, grid = helpers.generate_grid(g1, g2, npoints)
with np.errstate(under='ignore'):
prodvals = g1(allpoints) * g2(allpoints)
numsum = prodvals.sum() * grid.dx * grid.dy * grid.dz
helpers.assert_almost_equal(numsum, olap, decimal=4)
# If numerical isn't equal to analytical, try again with higher resolution
# to make sure the failure isn't due to a sparse grid:
try:
assert_with_resolution(64)
except AssertionError:
pass
else:
return
try:
assert_with_resolution(128)
except AssertionError:
pass
else:
return
assert_with_resolution(256)
@pytest.mark.parametrize('withunits', [False, True])
def test_gaussian_multiplication_amplitudes(withunits):
g1 = _make_rando_gaussian(withunits)
g2 = _make_rando_gaussian(withunits)
_assert_same_function_values(g1, g2, withunits)
# parameterizations across a sample of cartesian gaussians
test_powers = ((0,0,0), (1,0,0), (0,1,0), (0,0,1), (2,0,0), (1,1,1), (2,0,2), (4,1,1))
cartesian_test_suite = list(itertools.product(test_powers, test_powers, [True, False]))
cartesian_test_ids = ['[%d%d%d]*[%d%d%d]/%s' % (p[0] + p[1] + ('units' if p[2] else 'c-num',))
for p in cartesian_test_suite]
@pytest.mark.parametrize('p1,p2,withunits',
cartesian_test_suite,
ids=cartesian_test_ids)
def test_cartesian_gaussian_multiplication_amplitudes(p1, p2, withunits):
""" Tests that ``g1(x) * g2(x) == (g1 * g2)(x)``
"""
g1 = _make_rando_cartesian_gaussian(p1, withunits)
g2 = _make_rando_cartesian_gaussian(p2, withunits)
_assert_same_function_values(g1, g2, withunits)
def _assert_same_function_values(g1, g2, withunits):
testcoords = 6.0*(np.random.rand(50, 3)-0.5)
if withunits:
testcoords = testcoords*u.angstrom
g1g2 = g1*g2
gvals = g1g2(testcoords)
g1vals = g1(testcoords)
g2vals = g2(testcoords)
prodvals = g1vals*g2vals
helpers.assert_almost_equal(prodvals, gvals)
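# The pointwise identity g1(x) * g2(x) == (g1 * g2)(x) checked above relies on
# the Gaussian product rule: the product of two Gaussians with exponents
# alpha1, alpha2 and centers A, B is itself a Gaussian with exponent
# alpha1 + alpha2, centered at the exponent-weighted average of A and B,
# scaled by a constant prefactor.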
def test_initial_gaussian_normalization_gaussian():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
g2 = moldesign.orbitals.Gaussian(center, exp, normalized=True)
helpers.assert_almost_equal(1.0, _numerical_norm(g2), decimal=3)
helpers.assert_almost_equal(1.0, g2.norm)
def test_initial_gaussian_normalization_with_prefactor():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
g1 = moldesign.orbitals.Gaussian(center, exp, coeff=3.0*u.angstrom, normalized=True)
helpers.assert_almost_equal(3.0*u.angstrom, _numerical_norm(g1), decimal=3)
helpers.assert_almost_equal(3.0*u.angstrom, g1.norm)
def test_initial_normalization_cartesian():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
for powers in itertools.product(range(4), range(4), range(4)):
g2 = moldesign.orbitals.CartesianGaussian(center, exp, powers, normalized=True)
helpers.assert_almost_equal(1.0, _numerical_norm(g2), decimal=3)
helpers.assert_almost_equal(1.0, g2.norm)
def test_initial_normalization_cartesian_with_prefactor():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
for powers in itertools.product(range(4), range(4), range(4)):
g1 = moldesign.orbitals.CartesianGaussian(center, exp, powers, coeff=3.0, normalized=True)
helpers.assert_almost_equal(3.0, _numerical_norm(g1), decimal=3)
helpers.assert_almost_equal(3.0, g1.norm)
def test_initial_normalization_spherical():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
for l in range(5):
for m in range(-l, l+1):
g2 = moldesign.orbitals.SphericalGaussian(center, exp, l, m, normalized=True)
helpers.assert_almost_equal(1.0, _numerical_norm(g2), decimal=3)
helpers.assert_almost_equal(1.0, g2.norm)
def test_initial_normalization_spherical_with_prefactor():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
for l in range(5):
for m in range(-l, l+1):
g1 = moldesign.orbitals.SphericalGaussian(center, exp, l, m,
coeff=3.0 * u.angstrom, normalized=True)
helpers.assert_almost_equal(3.0 * u.angstrom, _numerical_norm(g1), decimal=3)
helpers.assert_almost_equal(3.0 * u.angstrom, g1.norm)
def _numerical_norm(g):
allpoints, grid = helpers.generate_grid(g)
with np.errstate(under='ignore'):
vals = g(allpoints)
numnorm = np.sqrt(grid.dx * grid.dy * grid.dz * (vals**2).sum())
return numnorm
@pytest.mark.parametrize('objkey', registered_types['basis_fn'])
def test_gaussian_function_values(objkey, request):
g = request.getfixturevalue(objkey)
for idim in range(g.ndims):
coord = g.center.copy()
randoffset = 4.0 * (random.random() - 0.5) * g.alpha**-0.5
coord[idim] += randoffset
funcval = _gfuncval(g, coord)
retval = g(coord)
_assert_almost_equal(funcval, retval)
@pytest.mark.parametrize('objkey', registered_types['basis_fn'])
def test_vectorized_gaussian_function_evaluations(objkey, request):
g = request.getfixturevalue(objkey)
coords = np.zeros((5, g.ndims)) * g.center.units
for i in range(5):
coords[i] = g.center
randoffset = 4.0 * (random.random() - 0.5) * g.alpha**-0.5
idim = random.randrange(g.ndims)
coords[i, idim] += randoffset
vector_results = g(coords)
expected = u.array([g(c) for c in coords])
if vector_results.dimensionless:
vector_results = vector_results._magnitude
_assert_almost_equal(vector_results, expected, decimal=8)
@pytest.mark.parametrize('objkey', registered_types['basis_fn'] + ['linear_combination'])
def test_gaussian_str_and_repr_works(objkey, request):
g1 = request.getfixturevalue(objkey)
str(g1)
repr(g1)
@pytest.mark.parametrize('objkey', registered_types['basis_fn'])
def test_normalized_gaussian_self_overlap_is_unity(objkey, request):
g1 = request.getfixturevalue(objkey)
g2 = g1.copy()
g1.coeff = -10.0
g2.coeff = 12341.1832
olap = g1.overlap(g2, normalized=True)
assert abs(-1.0 - olap) < 1e-12
g1.coeff = 10.0
olap = g1.overlap(g2, normalized=True)
assert abs(1.0 - olap) < 1e-12
@pytest.mark.parametrize('objkey', registered_types['basis_fn'])
def test_normalization(objkey, request):
g1 = request.getfixturevalue(objkey)
oldnorm = g1.norm
g1.coeff = (random.random() - 0.5) * 428.23
try:
assert g1.norm != oldnorm
except u.DimensionalityError:
pass # this is a reasonable thing to happen too
g1.normalize()
assert abs(g1.norm - 1.0) < 1e-12
def test_linear_combination_normalization(linear_combination):
g1 = linear_combination
oldnorm = g1.norm
prefactor = (random.random() - 0.5) * 428.23
for prim in g1:
prim.coeff *= prefactor
try:
assert g1.norm != oldnorm
except u.DimensionalityError:
pass # this is a reasonable thing to happen too
g1.normalize()
assert abs(g1.norm - 1.0) < 1e-12
def _gfuncval(g, coord):
r = g.center - coord
if len(coord.shape) > 1:
r2 = np.sum(r**2, axis=1)
else:
r2 = np.sum(r**2)
fv = g.coeff * np.exp(-g.alpha * r2)
if isinstance(g, moldesign.orbitals.SphericalGaussian):
fv *= r2**(g.l/2.0) * harmonics.Y(g.l, g.m)(coord - g.center)
elif isinstance(g, moldesign.orbitals.CartesianGaussian): # assume cartesian
for r, r0, pow in zip(coord, g.center, g.powers):
if pow != 0:
fv *= (r-r0)**pow
return fv
def _assert_almost_equal(a, b, *args, **kwargs):
a_is_quantity = hasattr(a,'units')
b_is_quantity = hasattr(b,'units')
if not (a_is_quantity or b_is_quantity):
return np.testing.assert_almost_equal(a, b,
*args, **kwargs)
else:
assert a_is_quantity and b_is_quantity
units = a.units
return np.testing.assert_almost_equal(a.value_in(units),
b.value_in(units),
*args, **kwargs)
def test_convert_cartesian_label_to_array_of_integer_powers():
from moldesign.orbitals.gaussians import cart_to_powers
assert cart_to_powers('y') == [0, 1, 0]
assert cart_to_powers('xxyz') == [2, 1, 1]
assert cart_to_powers('zx^3') == [3,0,1]
@pytest.mark.parametrize('key', registered_types['basis_fn'] + ['linear_combination'])
def test_numerical_vs_analytical_norm(key, request):
g = request.getfixturevalue(key)
numnorm = _numerical_norm(g)
helpers.assert_almost_equal(g.norm, numnorm)
@pytest.mark.screening
def test_s_orbitals_equivalent_among_gaussian_types():
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
g_bare = moldesign.orbitals.Gaussian(center, exp)
g_cart = moldesign.orbitals.CartesianGaussian(center, exp, [0,0,0])
g_sphr = moldesign.orbitals.SphericalGaussian(center, exp, 0, 0)
for gauss in (g_bare, g_cart, g_sphr):
# normalize to amplitude of 1.0 at center
gauss.coeff = gauss.coeff / gauss(center)
assert gauss(center) == 1.0
_assert_orbitals_equivalent(g_bare, g_cart)
_assert_orbitals_equivalent(g_bare, g_sphr)
# real spherical harmonics that can be represented as a single cartesian term:
LM_TO_CART = {(1,-1): (0,1,0),
(1,0): (0,0,1),
(1,1): (1,0,0),
(2,-2): (1,1,0),
(2,-1): (0,1,1),
(2,1): (1,0,1),
(3,-2): (1,1,1)}
@pytest.mark.parametrize('lm,powers',
LM_TO_CART.items(),
ids=['lm:%d,%d, xyz:%d%d%d' % (args[0] + args[1])
for args in LM_TO_CART.items()])
def test_orbitals_same_in_cartesian_and_spherical(lm, powers):
center = np.random.rand(3) * u.angstrom
exp = 5.12 / u.angstrom**2
g_cart = moldesign.orbitals.CartesianGaussian(center, exp, powers)
g_sphr = moldesign.orbitals.SphericalGaussian(center, exp, *lm)
_assert_orbitals_equivalent(g_cart, g_sphr)
@pytest.mark.parametrize('l', range(4), ids=lambda x:'l=%s' % x)
def test_spherical_to_cartesian(l):
for m in range(-l,l+1):
center = np.random.rand(3)*u.angstrom
exp = random.random()*2.0/u.angstrom ** 2
bf = moldesign.orbitals.SphericalGaussian(center, exp, l, m, normalized=True)
_assert_orbitals_equivalent(bf, bf.to_cart())
def _assert_orbitals_equivalent(g1, g2):
helpers.assert_almost_equal(g1.norm,
g2.norm)
testcoords = 6.0*(np.random.rand(50, 3)-0.5)*u.angstrom
helpers.assert_almost_equal(g1(testcoords),
g2(testcoords))
def test_pyscf_and_mdt_norms_are_the_same(h2_rhf_augccpvdz):
mol = h2_rhf_augccpvdz
basis = mol.wfn.aobasis
for bf in basis:
assert abs(bf.norm - 1.0) < 1e-12
def test_pyscf_and_mdt_overlaps_are_the_same(h2_rhf_augccpvdz):
mol = h2_rhf_augccpvdz
basis = mol.wfn.aobasis
calc_overlap_mat = []
for i in range(len(basis)):
calc_overlap_mat.append(
[basis[i].overlap(basis[j]) for j in range(len(basis))]
)
overlaps = u.array(calc_overlap_mat)
assert isinstance(overlaps, np.ndarray) or overlaps.units == u.dimensionless
np.testing.assert_allclose(mol.wfn.aobasis.overlaps,
overlaps,
atol=5.0e-7)
|
|
from __future__ import division
from math import ceil
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404
from django.shortcuts import redirect, render
from redis.exceptions import ResponseError
from rq import requeue_job, Worker
from rq.exceptions import NoSuchJobError
from rq.job import Job
from rq.registry import (DeferredJobRegistry, FinishedJobRegistry,
StartedJobRegistry)
from django_rq.queues import get_connection, get_queue_by_index
from django_rq.settings import QUEUES_LIST
from . import settings
from .serializer import default_secure_serializer as secure_serializer
def use_actual_name(job):
if 'secure_redis.secure_rq.secure_job_proxy' != job.func_name:
return
encrypted_func_name = job.args[0]
actual_func_name = secure_serializer.loads(encrypted_func_name)
job.func_name = actual_func_name
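# Note: jobs enqueued through secure_redis are wrapped in secure_job_proxy,
# which carries the encrypted name of the real callable as args[0]. The helper
# above decrypts that name and swaps it into job.func_name so the admin views
# below display the actual target function instead of the proxy.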
@staff_member_required
def stats(request):
queues = []
for index, config in enumerate(QUEUES_LIST):
queue = get_queue_by_index(index)
connection = queue.connection
queue_data = {
'name': queue.name,
'jobs': queue.count,
'index': index,
'connection_kwargs': connection.connection_pool.connection_kwargs
}
if queue.name == 'failed':
queue_data['workers'] = '-'
queue_data['finished_jobs'] = '-'
queue_data['started_jobs'] = '-'
queue_data['deferred_jobs'] = '-'
else:
connection = get_connection(queue.name)
all_workers = Worker.all(connection=connection)
queue_workers = [worker for worker in all_workers if queue in worker.queues]
queue_data['workers'] = len(queue_workers)
finished_job_registry = FinishedJobRegistry(queue.name, connection)
started_job_registry = StartedJobRegistry(queue.name, connection)
deferred_job_registry = DeferredJobRegistry(queue.name, connection)
queue_data['finished_jobs'] = len(finished_job_registry)
queue_data['started_jobs'] = len(started_job_registry)
queue_data['deferred_jobs'] = len(deferred_job_registry)
queues.append(queue_data)
context_data = {'queues': queues}
return render(request, 'django_rq/stats.html', context_data)
@staff_member_required
def jobs(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
items_per_page = 100
num_jobs = queue.count
page = int(request.GET.get('page', 1))
if num_jobs > 0:
last_page = int(ceil(num_jobs / items_per_page))
page_range = range(1, last_page + 1)
offset = items_per_page * (page - 1)
jobs = queue.get_jobs(offset, items_per_page)
else:
jobs = []
page_range = []
###
# Custom logic here
for job in jobs:
use_actual_name(job)
##
context_data = {
'queue': queue,
'queue_index': queue_index,
'jobs': jobs,
'num_jobs': num_jobs,
'page': page,
'page_range': page_range,
'job_status': 'Queued',
}
return render(request, 'django_rq/jobs.html', context_data)
@staff_member_required
def finished_jobs(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
registry = FinishedJobRegistry(queue.name, queue.connection)
items_per_page = 100
num_jobs = len(registry)
page = int(request.GET.get('page', 1))
jobs = []
if num_jobs > 0:
last_page = int(ceil(num_jobs / items_per_page))
page_range = range(1, last_page + 1)
offset = items_per_page * (page - 1)
job_ids = registry.get_job_ids(offset, items_per_page)
for job_id in job_ids:
try:
jobs.append(Job.fetch(job_id, connection=queue.connection))
except NoSuchJobError:
pass
else:
page_range = []
###
# Custom logic here
for job in jobs:
use_actual_name(job)
##
context_data = {
'queue': queue,
'queue_index': queue_index,
'jobs': jobs,
'num_jobs': num_jobs,
'page': page,
'page_range': page_range,
'job_status': 'Finished',
}
return render(request, 'django_rq/jobs.html', context_data)
@staff_member_required
def started_jobs(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
registry = StartedJobRegistry(queue.name, queue.connection)
items_per_page = 100
num_jobs = len(registry)
page = int(request.GET.get('page', 1))
jobs = []
if num_jobs > 0:
last_page = int(ceil(num_jobs / items_per_page))
page_range = range(1, last_page + 1)
offset = items_per_page * (page - 1)
job_ids = registry.get_job_ids(offset, items_per_page)
for job_id in job_ids:
try:
jobs.append(Job.fetch(job_id, connection=queue.connection))
except NoSuchJobError:
pass
else:
page_range = []
###
# Custom logic here
for job in jobs:
use_actual_name(job)
##
context_data = {
'queue': queue,
'queue_index': queue_index,
'jobs': jobs,
'num_jobs': num_jobs,
'page': page,
'page_range': page_range,
'job_status': 'Started',
}
return render(request, 'django_rq/jobs.html', context_data)
@staff_member_required
def deferred_jobs(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
registry = DeferredJobRegistry(queue.name, queue.connection)
items_per_page = 100
num_jobs = len(registry)
page = int(request.GET.get('page', 1))
jobs = []
if num_jobs > 0:
last_page = int(ceil(num_jobs / items_per_page))
page_range = range(1, last_page + 1)
offset = items_per_page * (page - 1)
job_ids = registry.get_job_ids(offset, items_per_page)
for job_id in job_ids:
try:
jobs.append(Job.fetch(job_id, connection=queue.connection))
except NoSuchJobError:
pass
else:
page_range = []
###
# Custom logic here
for job in jobs:
use_actual_name(job)
##
context_data = {
'queue': queue,
'queue_index': queue_index,
'jobs': jobs,
'num_jobs': num_jobs,
'page': page,
'page_range': page_range,
'job_status': 'Deferred',
}
return render(request, 'django_rq/jobs.html', context_data)
@staff_member_required
def job_detail(request, queue_index, job_id):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
try:
job = Job.fetch(job_id, connection=queue.connection)
except NoSuchJobError:
raise Http404("Couldn't find job with this ID: %s" % job_id)
###
# Custom logic here
use_actual_name(job)
##
context_data = {
'queue_index': queue_index,
'job': job,
'queue': queue,
}
return render(request, 'django_rq/job_detail.html', context_data)
@staff_member_required
def delete_job(request, queue_index, job_id):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
job = Job.fetch(job_id, connection=queue.connection)
if request.method == 'POST':
# Remove job id from queue and delete the actual job
queue.connection._lrem(queue.key, 0, job.id)
job.delete()
messages.info(request, 'You have successfully deleted %s' % job.id)
return redirect('rq_jobs', queue_index)
###
# Custom logic here
use_actual_name(job)
##
context_data = {
'queue_index': queue_index,
'job': job,
'queue': queue,
}
return render(request, 'django_rq/delete_job.html', context_data)
@staff_member_required
def requeue_job_view(request, queue_index, job_id):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
job = Job.fetch(job_id, connection=queue.connection)
if request.method == 'POST':
requeue_job(job_id, connection=queue.connection)
messages.info(request, 'You have successfully requeued %s' % job.id)
return redirect('rq_job_detail', queue_index, job_id)
###
# Custom logic here
use_actual_name(job)
##
context_data = {
'queue_index': queue_index,
'job': job,
'queue': queue,
}
return render(request, 'django_rq/delete_job.html', context_data)
@staff_member_required
def clear_queue(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
if request.method == 'POST':
try:
queue.empty()
messages.info(request, 'You have successfully cleared the queue %s' % queue.name)
except ResponseError as e:
if 'EVALSHA' in e.message:
messages.error(request, 'This action is not supported on Redis versions < 2.6.0, please use the bulk delete command instead')
else:
raise e
return redirect('rq_jobs', queue_index)
context_data = {
'queue_index': queue_index,
'queue': queue,
}
return render(request, 'django_rq/clear_queue.html', context_data)
@staff_member_required
def requeue_all(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
jobs = queue.get_jobs()
if request.method == 'POST':
# Confirmation received
for job in jobs:
requeue_job(job.id, connection=queue.connection)
messages.info(request, 'You have successfully requeued all %d jobs!' % len(jobs))
return redirect('rq_jobs', queue_index)
context_data = {
'queue_index': queue_index,
'queue': queue,
        'total_jobs': len(jobs),
}
return render(request, 'django_rq/requeue_all.html', context_data)
@staff_member_required
def actions(request, queue_index):
queue_index = int(queue_index)
queue = get_queue_by_index(queue_index)
if request.method == 'POST' and request.POST.get('action', False):
# confirm action
if request.POST.get('_selected_action', False):
context_data = {
'queue_index': queue_index,
'action': request.POST['action'],
'job_ids': request.POST.getlist('_selected_action'),
'queue': queue,
}
return render(request, 'django_rq/confirm_action.html', context_data)
# do confirmed action
elif request.POST.get('job_ids', False):
job_ids = request.POST.getlist('job_ids')
if request.POST['action'] == 'delete':
for job_id in job_ids:
job = Job.fetch(job_id, connection=queue.connection)
# Remove job id from queue and delete the actual job
queue.connection._lrem(queue.key, 0, job.id)
job.delete()
messages.info(request, 'You have successfully deleted %s jobs!' % len(job_ids))
elif request.POST['action'] == 'requeue':
for job_id in job_ids:
requeue_job(job_id, connection=queue.connection)
messages.info(request, 'You have successfully requeued %d jobs!' % len(job_ids))
return redirect('rq_jobs', queue_index)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Utilities for sampling and creating different types of Tasks."""
import copy
import functools
import json
from typing import Callable, Dict, Tuple, Text, Any, Optional
import numpy as np
import sonnet as snt
from task_set import datasets
import tensorflow.compat.v1 as tf
def sample_log_int(rng, low, high):
"""Sample an integer logrithmically between `low` and `high`."""
sample = rng.uniform(np.log(float(low)), np.log(float(high)))
return int(np.round(np.exp(sample)))
def sample_linear_int(rng, low, high):
"""Sample an integer linearly between `low` and `high`."""
sample = rng.uniform(float(low), float(high))
return int(np.round(sample))
def sample_log_float(rng, low,
high):
"""Sample a float value logrithmically between `low` and `high`."""
return float(np.exp(rng.uniform(np.log(float(low)), np.log(float(high)))))
def sample_bool(rng, p):
"""Sample a boolean that is True `p` percent of time."""
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between 0 and 1.")
return rng.uniform(0.0, 1.0) < p
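# Minimal usage sketch (not part of the original module): the sampling helpers
# above all draw from a seeded np.random.RandomState, so a fixed seed gives a
# reproducible hyperparameter draw. The helper below is illustrative only.
def _example_sample_hparams(seed=0):
  """Illustrative only: draws a batch size, a hidden size and a dropout flag."""
  rng = np.random.RandomState(seed)
  batch_size = sample_log_int(rng, 8, 512)  # log-uniform integer in [8, 512]
  hidden_size = sample_linear_int(rng, 32, 256)  # uniform integer in [32, 256]
  use_dropout = sample_bool(rng, 0.5)  # True with probability 0.5
  return batch_size, hidden_size, use_dropout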
def maybe_center(do_center, image):
"""Possibly center image data from [0,1] -> [-1, 1].
This assumes the image tensor is scaled between [0, 1].
Args:
do_center: To do the centering or not.
image: [0, 1] Scaled image to be centered.
Returns:
A possibly centered image.
"""
if do_center:
return image * 2.0 - 1.0
else:
return image
### Activations
_activation_fn_map = {
"relu": (6, tf.nn.relu),
"tanh": (3, tf.tanh),
"cos": (1, tf.cos),
"elu": (1, tf.nn.elu),
"sigmoid": (1, tf.nn.sigmoid),
"swish": (1, lambda x: x * tf.nn.sigmoid(x)),
"leaky_relu4": (1, lambda x: tf.nn.leaky_relu(x, alpha=0.4)),
"leaky_relu2": (1, lambda x: tf.nn.leaky_relu(x, alpha=0.2)),
"leaky_relu1": (1, lambda x: tf.nn.leaky_relu(x, alpha=0.1)),
}
def sample_activation(rng):
"""Sample an activation function name."""
names, values = zip(*sorted(_activation_fn_map.items()))
weights, _ = zip(*values)
probs = weights / np.sum(weights)
return rng.choice(names, p=probs)
def get_activation(name):
return _activation_fn_map[name][1]
### Initializers
# Dictionary with keys containing string names of the initializer
# and values containing a tuple: (weight, sample_fn, fn) where
# weight: is the weight used for sampling elements.
# sample_fn: is a callable going from a np.RandomState to some extra
#   information, or None representing no extra information.
# fn: the actual function that does the initialization, called with the
#   content returned by sample_fn.
_initializer_name_map = {
"he_normal": (2, None, tf.initializers.he_normal),
"he_uniform": (2, None, tf.initializers.he_uniform),
"glorot_normal": (2, None, tf.initializers.glorot_normal),
"glorot_uniform": (2, None, tf.initializers.glorot_uniform),
"orthogonal": (1, lambda rng: sample_log_float(rng, 0.1, 10),
tf.initializers.orthogonal),
"random_uniform": (1, lambda rng: sample_log_float(rng, 0.1, 10),
lambda s: tf.initializers.random_uniform(-s, s)),
"random_normal": (1, lambda rng: sample_log_float(rng, 0.1, 10),
lambda s: tf.initializers.random_normal(stddev=s)),
"truncated_normal": (1, lambda rng: sample_log_float(rng, 0.1, 10),
lambda s: tf.initializers.random_normal(stddev=s)),
"variance_scaling": (1, lambda rng: sample_log_float(rng, 0.1, 10),
tf.initializers.variance_scaling),
}
# An initializer config stores the name of the initializer used and an
# optional extra parameter needed by the initializer constructor.
InitializerConfig = Tuple[Text, Optional[float]]
def sample_initializer(rng):
"""Sample a config for a random TensorFlow initializer."""
names, values = zip(*sorted(_initializer_name_map.items()))
weights, _, _ = zip(*values)
probs = weights / np.sum(weights)
name = rng.choice(names, p=probs)
_, sample_fn, _ = _initializer_name_map[name]
return name, sample_fn(rng) if sample_fn else None
def get_initializer(cfg):
"""Get an initializer from the given config.
Args:
cfg: config generated by `sample_initializer`.
Returns:
A tensorflow initializer.
"""
name, arg = cfg
_, _, make_fn = _initializer_name_map[name]
return make_fn(arg) if arg else make_fn()
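# Minimal usage sketch (not part of the original module): round-trips a sampled
# InitializerConfig through `get_initializer`. Whether the config carries an
# extra scale argument depends on which initializer name was sampled.
def _example_initializer_roundtrip(seed=0):
  """Illustrative only: samples a config and materializes the initializer."""
  rng = np.random.RandomState(seed)
  cfg = sample_initializer(rng)  # e.g. ("he_normal", None) or ("orthogonal", 1.3)
  return get_initializer(cfg)  # a tf.initializers.* instance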
### Architecture Components
RNNCoreConfig = Tuple[Text, Dict[Text, Any]]
def sample_rnn_core(rng):
"""Sample a RNN core.
This core is a small (at most 128 hidden units) RNN cell used for recurrent
problems.
Args:
rng: np.random.RandomState
Returns:
cfg (nested python objects) representing the rnn core.
"""
core = rng.choice(["vrnn", "gru", "lstm"])
  # Whether the distributions used for the initializers should be linked across
  # the different weight matrices of the core. Typically people use the same
  # distribution, so we weight the linked case 4x more likely than unlinked.
linked = rng.choice([True, True, True, True, False])
args = {}
if core == "vrnn":
if linked:
init = sample_initializer(rng)
args["hidden_to_hidden"] = init
args["in_to_hidden"] = init
else:
args["hidden_to_hidden"] = sample_initializer(rng)
args["in_to_hidden"] = sample_initializer(rng)
args["act_fn"] = sample_activation(rng)
args["core_dim"] = sample_log_int(rng, 32, 128)
elif core == "gru":
args["core_dim"] = sample_log_int(rng, 32, 128)
if linked:
init = sample_initializer(rng)
for init_key in ["wh", "wz", "wr", "uh", "uz", "ur"]:
args[init_key] = init
else:
for init_key in ["wh", "wz", "wr", "uh", "uz", "ur"]:
args[init_key] = sample_initializer(rng)
elif core == "lstm":
args["w_gates"] = sample_initializer(rng)
args["core_dim"] = sample_log_int(rng, 32, 128)
return core, args
def get_rnn_core(cfg):
"""Get the Sonnet rnn cell from the given config.
Args:
cfg: config generated from `sample_rnn_core`.
Returns:
A Sonnet module with the given config.
"""
name, args = cfg
if name == "lstm":
    init = {"w_gates": get_initializer(args["w_gates"])}
return snt.LSTM(args["core_dim"], initializers=init)
elif name == "gru":
init = {}
for init_key in ["wh", "wz", "wr", "uh", "uz", "ur"]:
init[init_key] = get_initializer(args[init_key])
return snt.GRU(args["core_dim"], initializers=init)
elif name == "vrnn":
init = {
"in_to_hidden": {
"w": get_initializer(args["in_to_hidden"])
},
"hidden_to_hidden": {
"w": get_initializer(args["hidden_to_hidden"])
},
}
act_fn = get_activation(args["act_fn"])
return snt.VanillaRNN(
args["core_dim"], initializers=init, activation=act_fn)
else:
raise ValueError("No core for name [%s] found." % name)
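# Minimal usage sketch (not part of the original module): samples an RNN core
# config and builds the corresponding Sonnet cell. Like any Sonnet v1 module,
# the returned cell is connected to inputs later inside a TensorFlow graph.
def _example_rnn_core(seed=0):
  """Illustrative only: returns an snt.LSTM, snt.GRU or snt.VanillaRNN cell."""
  rng = np.random.RandomState(seed)
  cfg = sample_rnn_core(rng)  # e.g. ("gru", {"core_dim": 64, ...})
  return get_rnn_core(cfg)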
### Datasets
AugmentationConfig = Dict[Text, Any]
def sample_augmentation(rng):
return {
"crop_amount": int(rng.choice([0, 0, 0, 1, 2])),
"flip_left_right": bool(rng.choice([False, True])),
"flip_up_down": bool(rng.choice([False, True])),
"do_color_aug": bool(rng.choice([False, True])),
"brightness": sample_log_float(rng, 0.001, 64. / 255.),
"saturation": sample_log_float(rng, 0.01, 1.0),
"hue": sample_log_float(rng, 0.01, 0.5),
"contrast": sample_log_float(rng, 0.01, 1.0),
}
Example = Any
def get_augmentation(cfg):
"""Get augmentation function from the given augmentation config."""
def augment(example):
"""Augment the image in given example."""
img = example["image"]
channels = img.shape.as_list()[2]
if cfg["crop_amount"]:
height = img.shape.as_list()[0]
width = img.shape.as_list()[1]
img = tf.image.random_crop(
img,
(height - cfg["crop_amount"], width - cfg["crop_amount"], channels))
if cfg["flip_left_right"]:
img = tf.image.random_flip_left_right(img)
if cfg["flip_up_down"]:
img = tf.image.random_flip_up_down(img)
if cfg["do_color_aug"] and channels == 3:
img = tf.image.random_brightness(img, max_delta=cfg["brightness"])
img = tf.image.random_saturation(
img, lower=1.0 - cfg["saturation"], upper=1.0 + cfg["saturation"])
img = tf.image.random_hue(img, max_delta=cfg["hue"])
img = tf.image.random_contrast(
img, lower=1.0 - cfg["contrast"], upper=1.0 + cfg["contrast"])
    # Copy the example so we don't modify the input.
example = copy.copy(example)
example["image"] = img
return example
return augment
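# Minimal usage sketch (not part of the original module): maps a sampled
# augmentation over a tf.data.Dataset whose elements are dicts with an "image"
# key of static [height, width, channels] shape, which is what `augment` above
# expects.
def _example_augmented_dataset(rng, dataset):
  """Illustrative only: applies a freshly sampled augmentation to `dataset`."""
  cfg = sample_augmentation(rng)
  return dataset.map(get_augmentation(cfg))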
def _make_just_train(dataset, just_train):
"""Converts a datasets object to maybe use just the training dataset."""
if just_train:
return datasets.Datasets(dataset.train, dataset.train, dataset.train,
dataset.train)
else:
return dataset
ImageConfig = Dict[Text, Any]
def sample_mnist_and_fashion_mnist(rng):
bs = int(sample_log_int(rng, 8, 512))
num_train = int(sample_linear_int(rng, 1000, 55000))
return {
"bs": bs,
"num_train": num_train,
"num_classes": 10,
"just_train": sample_bool(rng, 0.1),
}
def sample_cifar_image(rng):
bs = int(sample_log_int(rng, 8, 256))
num_train = int(sample_linear_int(rng, 1000, 50000))
return {
"bs": bs,
"num_train": num_train,
"just_train": sample_bool(rng, 0.2),
}
def sample_default_image(rng):
bs = int(sample_log_int(rng, 8, 256))
return {
"bs": bs,
"just_train": sample_bool(rng, 0.2),
"num_train": None, # use the full dataset.
}
_n_valid_for_smaller_datasets = {
"coil100_32x32": 800,
"deep_weeds_32x32": 2000,
}
def _get_image(
name,
config,
cache=False,
augmentation_fn=None,
):
"""Get an image dataset object from name and config."""
# Some datasets are not big enough for the default number of validation images
if name in _n_valid_for_smaller_datasets:
num_per_valid = _n_valid_for_smaller_datasets[name]
else:
num_per_valid = 5000
return datasets.get_image_datasets(
name,
batch_size=config["bs"],
num_train=config["num_train"],
shuffle_buffer=10000,
cache_dataset=cache,
augmentation_fn=augmentation_fn,
num_per_valid=num_per_valid)
partial = functools.partial # pylint: disable=invalid-name
_name_to_image_dataset_map = {
"mnist": (sample_mnist_and_fashion_mnist,
partial(_get_image, "mnist", cache=True)),
"fashion_mnist": (sample_mnist_and_fashion_mnist,
partial(_get_image, "fashion_mnist", cache=True)),
"cifar10": (sample_cifar_image, partial(_get_image, "cifar10", cache=True)),
"cifar100":
(sample_cifar_image, partial(_get_image, "cifar100", cache=True)),
"food101_32x32": (sample_default_image,
partial(_get_image, "food101_32x32", cache=True)),
"coil100_32x32": (sample_default_image,
partial(_get_image, "coil100_32x32", cache=True)),
"deep_weeds_32x32": (sample_default_image,
partial(_get_image, "deep_weeds_32x32", cache=True)),
"sun397_32x32": (sample_default_image,
partial(_get_image, "sun397_32x32", cache=True)),
"imagenet_resized/32x32":
(sample_default_image,
partial(_get_image, "imagenet_resized/32x32", cache=True)),
"imagenet_resized/16x16":
(sample_default_image,
partial(_get_image, "imagenet_resized/16x16", cache=True)),
}
ImageDatasetConfig = Tuple[Text, ImageConfig, Optional[AugmentationConfig]]
def sample_image_dataset(rng):
name = rng.choice(sorted(_name_to_image_dataset_map.keys()))
if sample_bool(rng, 0.3):
augmentation = sample_augmentation(rng)
else:
augmentation = None
return name, _name_to_image_dataset_map[name][0](rng), augmentation
def get_image_dataset(cfg):
aug_cfg = cfg[2]
augmentation_fn = get_augmentation(aug_cfg) if aug_cfg else None
return _name_to_image_dataset_map[cfg[0]][1](
cfg[1], augmentation_fn=augmentation_fn)
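# Minimal usage sketch (not part of the original module): samples an image
# dataset config and builds the corresponding Datasets object. Note that
# building it loads real data through task_set.datasets.
def _example_image_dataset(seed=0):
  """Illustrative only: cfg looks like ("cifar10", {"bs": 64, ...}, None)."""
  rng = np.random.RandomState(seed)
  cfg = sample_image_dataset(rng)
  return get_image_dataset(cfg)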
TextClassificationConfig = Dict[Text, Any]
def sample_text_classification(
rng):
if sample_bool(rng, 0.2):
num_train = sample_linear_int(rng, 1000, 50000)
else:
num_train = None
return {
"bs": int(sample_log_int(rng, 8, 512)),
"num_train": num_train,
"max_token": 8185,
"just_train": sample_bool(rng, 0.2),
"patch_length": int(sample_log_int(rng, 8, 128)),
}
def get_text_classification(
dataset_name, config):
return datasets.random_slice_text_data(
dataset_name=dataset_name,
batch_size=config["bs"],
num_train=config["num_train"],
patch_length=config["patch_length"],
cache_dataset=True,
num_per_valid=3000)
AmazonBytesConfig = Dict[Text, Any]
_name_to_text_dataset_map = {}
for _dataset_name in [
"imdb_reviews/subwords8k"
"imdb_reviews/bytes",
"tokenized_amazon_reviews/Books_v1_02_bytes",
"tokenized_amazon_reviews/Camera_v1_00_bytes",
"tokenized_amazon_reviews/Home_v1_00_bytes",
"tokenized_amazon_reviews/Video_v1_00_bytes",
"tokenized_amazon_reviews/Books_v1_02_subwords8k",
"tokenized_amazon_reviews/Camera_v1_00_subwords8k",
"tokenized_amazon_reviews/Home_v1_00_subwords8k",
"tokenized_amazon_reviews/Video_v1_00_subwords8k",
]:
  # TODO(lmetz) there are two typos here: the missing comma after the first list
  # entry (which concatenates the first two dataset names into one key) and not
  # passing _dataset_name below. That being said, we have already generated data
  # and figures with this configuration, so it is kept as-is.
_name_to_text_dataset_map[_dataset_name] = (sample_text_classification,
functools.partial(
get_text_classification,
"imdb_reviews/subwords8k"))
TextDatasetConfig = Tuple[Text, Dict[Text, Any]]
def sample_text_dataset(rng):
name = rng.choice(sorted(_name_to_text_dataset_map.keys()))
return name, _name_to_text_dataset_map[name][0](rng)
def get_text_dataset(cfg):
return _name_to_text_dataset_map[cfg[0]][1](cfg[1])
ByteConfig = Dict[Text, Any]
def sample_byte_config(rng):
"""Samples a configuration for bytes datasets."""
if sample_bool(rng, 0.2):
num_train = sample_linear_int(rng, 1000, 50000)
else:
num_train = None
return {
"patch_length": sample_log_int(rng, 10, 160),
"batch_size": sample_log_int(rng, 8, 512),
"just_train": sample_bool(rng, 0.2),
"num_train": num_train,
}
def get_byte_dataset(config, name):
"""Return the Datasets object for the corresponding config."""
return _make_just_train(
datasets.random_slice_text_data(
dataset_name=name,
batch_size=config["batch_size"],
patch_length=config["patch_length"],
num_per_valid=3000,
shuffle_buffer=10000,
cache_dataset=True,
num_train=config["num_train"],
), config["just_train"])
_name_to_char_sequence_dataset_map = {}
for _dataset_name in [
"lm1b/bytes",
"imdb_reviews/bytes",
"tokenized_wikipedia/20190301.zh_bytes",
"tokenized_wikipedia/20190301.ru_bytes",
"tokenized_wikipedia/20190301.ja_bytes",
"tokenized_wikipedia/20190301.hsb_bytes",
"tokenized_wikipedia/20190301.en_bytes",
"tokenized_amazon_reviews/Books_v1_02_bytes",
"tokenized_amazon_reviews/Camera_v1_00_bytes",
"tokenized_amazon_reviews/Home_v1_00_bytes",
"tokenized_amazon_reviews/Video_v1_00_bytes",
]:
_name_to_char_sequence_dataset_map[_dataset_name] = (sample_byte_config,
functools.partial(
get_byte_dataset,
name=_dataset_name))
def sample_char_lm_dataset(rng):
name = rng.choice(sorted(_name_to_char_sequence_dataset_map.keys()))
return name, _name_to_char_sequence_dataset_map[name][0](rng)
def get_char_lm_dataset(cfg):
name, args = cfg
return _name_to_char_sequence_dataset_map[name][1](args)
Config = Dict[Text, Any]
def _make_get_word_dataset(
dataset_name):
"""Makes a function that returns the datasets object with tf.data.Datasets."""
def _make(config):
return _make_just_train(
datasets.random_slice_text_data(
dataset_name=dataset_name,
batch_size=config["batch_size"],
patch_length=config["patch_length"],
num_train=config["num_train"],
cache_dataset=True,
num_per_valid=10000,
shuffle_buffer=10000,
), config["just_train"])
return _make
def sample_word_dataset_config(rng):
if sample_bool(rng, 0.2):
num_train = sample_linear_int(rng, 1000, 50000)
else:
num_train = None
return {
"patch_length": sample_log_int(rng, 10, 256),
"batch_size": sample_log_int(rng, 8, 512),
"just_train": sample_bool(rng, 0.2),
"num_train": num_train,
}
_name_to_word_sequence_dataset_map = {}
for _dataset_name in [
"lm1b/subwords8k",
"imdb_reviews/subwords8k",
"tokenized_wikipedia/20190301.zh_subwords8k",
"tokenized_wikipedia/20190301.ru_subwords8k",
"tokenized_wikipedia/20190301.ja_subwords8k",
"tokenized_wikipedia/20190301.hsb_subwords8k",
"tokenized_wikipedia/20190301.en_subwords8k",
"tokenized_amazon_reviews/Books_v1_02_subwords8k",
"tokenized_amazon_reviews/Camera_v1_00_subwords8k",
"tokenized_amazon_reviews/Home_v1_00_subwords8k",
"tokenized_amazon_reviews/Video_v1_00_subwords8k",
]:
_name_to_word_sequence_dataset_map[_dataset_name] = (
sample_word_dataset_config,
functools.partial(_make_get_word_dataset(_dataset_name)))
def sample_word_lm_dataset(rng):
name = rng.choice(sorted(_name_to_word_sequence_dataset_map.keys()))
return name, _name_to_word_sequence_dataset_map[name][0](rng)
def get_word_lm_dataset(cfg):
name, args = cfg
return _name_to_word_sequence_dataset_map[name][1](args)
def pretty_json_dumps(dd):
"""Pretty print a json serialized dictionary with one key, value per line.
Args:
dd: Dictionary with keys containing strings and values containing json
serializable object.
Returns:
string containing json representation
"""
if not isinstance(dd, dict):
raise ValueError("Only dicts supported at this time.")
content = "{\n"
lines = []
for l, n in sorted(dd.items(), key=lambda x: x[0]):
lines.append("\"%s\":%s" % (l, json.dumps(n)))
content += ",\n".join(lines)
content += "\n}"
return content
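# Worked example for `pretty_json_dumps` (illustrative input): keys are sorted
# and each key/value pair gets its own line, so
#   pretty_json_dumps({"b": 2, "a": 1}) == '{\n"a":1,\n"b":2\n}'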
def accuracy(label, logits):
"""Computes accuracy from given label and logits."""
return tf.reduce_mean(tf.to_float(tf.equal(label, tf.argmax(logits, axis=1))))
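# Worked example for `accuracy` (illustrative values): with label = [1, 0] and
# logits = [[0.1, 2.0], [3.0, -1.0]], tf.argmax(logits, axis=1) = [1, 0], both
# predictions match, and the returned tensor evaluates to 1.0.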
|
|
import time
from typing import Tuple, cast
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from dateutil import parser
MAX_INCIDENTS_TO_FETCH = 250
class Client(BaseClient):
def __init__(self, server_url, verify, proxy, headers, auth):
super().__init__(base_url=server_url, verify=verify, proxy=proxy, headers=headers, auth=auth)
def host_get_hosts_request(self):
headers = self._headers
response = self._http_request('GET', 'api/v1/hosts.json', headers=headers)
return response
def host_get_request(self, id):
headers = self._headers
response = self._http_request('GET', 'api/v1/hosts/' + id + '.json', headers=headers)
return response
def host_get_export_request(self, export_format):
headers = self._headers
response = self._http_request('GET', 'api/v1/hosts/export.' + export_format, headers=headers, resp_type="Raw")
return response
def host_get_query_request(self, request):
data = {"c": request, "l": 50}
headers = self._headers
response = self._http_request('POST', 'api/v1/hosts/query.json', json_data=data, headers=headers)
return response
def host_update_request(self, label, id):
data = {"host": {"label": label}}
headers = self._headers
response = self._http_request('PUT', 'api/v1/hosts/' + id + '.json', json_data=data, headers=headers)
return response
def asset_get_assets_request(self, detail_level):
params = assign_params(detail_level=detail_level)
headers = self._headers
response = self._http_request('GET', 'api/v1/assets.json', params=params, headers=headers)
return response
def asset_get_request(self, id):
headers = self._headers
response = self._http_request('GET', 'api/v1/assets/' + id + '.json', headers=headers)
return response
def asset_get_query_request(self, request):
data = {"c": request, "l": 50}
headers = self._headers
response = self._http_request('POST', 'api/v1/assets/query.json', json_data=data, headers=headers)
return response
def asset_create_request(self, id_, name, priority, type_, authenticatied, tags, location_specifiers):
data = {"asset": {"authenticatied": authenticatied, "id": id_, "location_specifiers": location_specifiers,
"name": name, "priority": priority, "tags": tags, "type": type_}}
headers = self._headers
response = self._http_request('POST', 'api/v1/assets.json', json_data=data, headers=headers)
return response
def asset_update_request(self, id, name, priority, type_, authenticatied, tags, location_specifiers):
data = {"asset": {"authenticatied": authenticatied, "location_specifiers": location_specifiers,
"name": name, "priority": priority, "tags": tags, "type": type_}}
headers = self._headers
response = self._http_request('PUT', 'api/v1/assets/' + id + '.json', json_data=data, headers=headers)
return response
def asset_delete_request(self, id, name, priority, type_, authenticatied, tags, location_specifiers):
data = {"authenticatied": authenticatied, "location_specifiers": location_specifiers,
"name": name, "priority": priority, "tags": tags, "type": type_}
headers = self._headers
response = self._http_request('DELETE', 'api/v1/assets/' + id + '.json', json_data=data, headers=headers)
return response
def user_get_users_request(self):
headers = self._headers
response = self._http_request('GET', 'api/v1/users.json', headers=headers)
return response
def user_get_request(self, id):
headers = self._headers
response = self._http_request('GET', 'api/v1/users/' + id + '.json', headers=headers)
return response
def user_get_query_request(self, request):
data = {"c": request, "l": 50}
headers = self._headers
response = self._http_request('POST', 'api/v1/users/query.json', json_data=data, headers=headers)
return response
def user_create_request(self, username, email, first_name, last_name, phone_number, mfa_enabled, mfa_method, is_super):
data = {"user": {"email": email, "first_name": first_name, "last_name": last_name,
"mfa_enabled": mfa_enabled, "mfa_method": mfa_method,
"phone_number": phone_number, "username": username, "is_super": is_super}}
headers = self._headers
response = self._http_request('POST', 'api/v1/users.json', json_data=data, headers=headers)
return response
def user_delete_request(self, id):
headers = self._headers
response = self._http_request('DELETE', 'api/v1/users/' + id + '.json', headers=headers)
return response
def user_reset_password_request(self, id):
headers = self._headers
response = self._http_request('POST', 'api/v1/users/' + id + '/reset.json', headers=headers)
return response
def user_reset_email_request(self, id):
headers = self._headers
response = self._http_request('POST', 'api/v1/users/' + id + '/reset_email.json', headers=headers)
return response
def user_lock_account_request(self, id):
headers = self._headers
response = self._http_request('POST', 'api/v1/users/' + id + '/lock.json', headers=headers)
return response
def user_unlock_account_request(self, id):
headers = self._headers
response = self._http_request('POST', 'api/v1/users/' + id + '/unlock.json', headers=headers)
return response
def user_get_permissions_request(self, id):
headers = self._headers
response = self._http_request('GET', 'api/v1/users/' + id + '/permissions.json', headers=headers)
return response
def vulnerabilities_get_request(self):
headers = self._headers
response = self._http_request('GET', 'api/v1/vulnerabilities.json', headers=headers)
return response
def vulnerabilities_get_export_request(self, export_format):
headers = self._headers
response = self._http_request('GET', 'api/v1/vulnerabilities/export.' + export_format, headers=headers,
resp_type="Raw")
return response
def vulnerabilities_get_details_request(self, id):
headers = self._headers
response = self._http_request('GET', 'api/v1/vulnerabilities/' + id + '.json', headers=headers)
return response
def vulnerabilities_get_query_request(self, request, limit, o):
data = {"c": request,
"l": limit, "o": o, "s": {"date_opened": "asc"}}
headers = self._headers
response = self._http_request('POST', 'api/v1/vulnerabilities/query.json', json_data=data, headers=headers)
return response
def vulnerabilities_retest_request(self, id):
headers = self._headers
response = self._http_request('POST', 'api/v1/vulnerabilities/' + id + '/retest.json', headers=headers)
return response
def vulnerabilities_risk_accept_request(self, id, value):
data = {"value": value}
headers = self._headers
response = self._http_request('POST', 'api/v1/vulnerabilities/' + id + '/risk_accept.json',
json_data=data, headers=headers)
return response
def vulnerabilities_add_annotation_request(self, id, text):
annotation = {
"text": text
}
data = {
"annotation": annotation
}
headers = self._headers
response = self._http_request('POST', 'api/v1/vulnerabilities/' + id + '/annotations.json',
json_data=data, headers=headers)
return response
def host_get_hosts_command(client, args):
response = client.host_get_hosts_request()['hosts']
readable_output = tableToMarkdown('Hosts', response, ['os_name', 'id', 'location', 'status'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.HostGetHosts',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
first_fetch_time: Optional[int], cvss_score: Optional[float],
risk_more_than: Optional[str], cvss_score_greater_than: Optional[float]
) -> Tuple[Dict[str, int], List[dict]]:
# Get the last fetch time, if exists
last_fetch = last_run.get('last_fetch', None)
# Get the offset
offset = last_run.get('offset', 0)
# Handle first fetch time
if last_fetch is None:
# if missing, use what provided via first_fetch_time
last_fetch = first_fetch_time
else:
# otherwise use the stored last fetch
last_fetch = int(last_fetch)
# for type checking, making sure that latest_created_time is int
latest_created_time = cast(int, last_fetch)
# Initialize an empty list of incidents to return
# Each incident is a dict with a string as a key
incidents: List[Dict[str, Any]] = []
request = {
"risk_more_than": risk_more_than,
"cvss_score_greater_than": cvss_score_greater_than,
"cvss_score": cvss_score,
"date_opened_after": str(datetime.fromtimestamp(last_fetch).isoformat()) + ".000Z" # type: ignore
}
if cvss_score == "" or cvss_score is None:
del request['cvss_score']
if cvss_score_greater_than == "" or cvss_score_greater_than is None:
del request['cvss_score_greater_than']
if risk_more_than == "" or risk_more_than is None:
del request['risk_more_than']
response = client.vulnerabilities_get_query_request(request=request, limit=max_results, o=offset)
offset += max_results
total = response['total']
alerts = response['vulnerabilities']
for alert in alerts:
        # If date_opened is missing, fall back to '0' so parsing still yields a timestamp.
date_opened = alert.get('date_opened', '0')
dt = parser.parse(date_opened)
incident_created_time = int(time.mktime(dt.timetuple()))
# If no name is present it will throw an exception
incident_name = alert['name']
incident = {
'name': incident_name,
'occurred': date_opened,
'rawJSON': json.dumps(alert),
'severity': alert.get('severity', 'Low'),
}
incidents.append(incident)
        # Only advance the stored fetch timestamp once the final page has been consumed
        # and the incident is newer than the last fetch.
if incident_created_time > latest_created_time and offset >= total:
latest_created_time = incident_created_time + 1
if offset >= total:
offset = 0
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {
'last_fetch': latest_created_time,
'offset': offset,
'total': total
}
return next_run, incidents
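# Illustrative walkthrough of the pagination state above (hypothetical numbers):
# with max_results=2 and total=3 matching vulnerabilities, the first cycle
# returns next_run={'last_fetch': <unchanged>, 'offset': 2, 'total': 3}; the
# second cycle fetches the remaining result, and because offset >= total it
# resets offset to 0 and advances last_fetch to the newest date_opened + 1
# second, so already-fetched incidents are not queried again.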
def test_module(client: Client) -> str:
try:
client.host_get_hosts_request()['hosts']
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
def host_get_command(client, args):
id = args.get('id')
response = client.host_get_request(id)['host']
readable_output = tableToMarkdown('Host', response, ['os_name', 'id', 'location', 'status', 'services'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.HostGet',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def host_get_export_command(client, args):
export_format = args.get("format", "json")
response = client.host_get_export_request(export_format=export_format)
filename = response.headers['Content-Disposition'].split("=")[1].replace('"', "")
file = fileResult(filename=filename, data=response.content, file_type=EntryType.ENTRY_INFO_FILE)
return file
def host_get_query_command(client, args):
response = client.host_get_query_request(args)['hosts']
readable_output = tableToMarkdown('Hosts query', response, ['os_name', 'id', 'location', 'status', 'services'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.HostGetQuery',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def host_update_command(client, args):
label = args.get('label')
id = args.get('id')
response = client.host_update_request(label=label, id=id)
command_results = CommandResults(
outputs_prefix='Edgescan.HostUpdate',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def asset_get_assets_command(client, args):
detail_level = args.get('detail_level')
response = client.asset_get_assets_request(detail_level)['assets']
readable_output = tableToMarkdown('Assets', response,
['id', 'name', 'asset_status', 'blocked_status', 'hostname'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.AssetGetAssets',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def asset_get_command(client, args):
id = args.get('id')
response = client.asset_get_request(id=id)['asset']
readable_output = tableToMarkdown('Asset', response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.AssetGet',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def asset_get_query_command(client, args):
response = client.asset_get_query_request(args)['assets']
readable_output = tableToMarkdown('Assets query', response,
['id', 'name', 'asset_status', 'blocked_status', 'hostname'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.AssetGetQuery',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def asset_create_command(client, args):
id_ = args.get('id')
name = args.get('name')
priority = args.get('priority')
type_ = args.get('type')
authenticatied = args.get('authenticatied')
tags = args.get('tags')
location_secifiers = args.get('location_secifiers')
response = client.asset_create_request(id_, name, priority, type_, authenticatied, tags, location_secifiers)['asset']
command_results = CommandResults(
outputs_prefix='Edgescan.AssetCreate',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def asset_update_command(client, args):
id = args.get('id')
name = args.get('name')
priority = args.get('priority')
type_ = args.get('type')
authenticatied = args.get('authenticatied')
tags = args.get('tags')
location_specifiers = args.get('location_specifiers')
response = client.asset_update_request(id, name, priority, type_, authenticatied, tags, location_specifiers)['asset']
command_results = CommandResults(
outputs_prefix='Edgescan.AssetUpdate',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def asset_delete_command(client, args):
name = args.get('name')
id = args.get('id')
priority = args.get('priority')
type_ = args.get('type')
authenticatied = args.get('authenticatied')
tags = args.get('tags')
location_specifiers = args.get('location_specifiers')
response = client.asset_delete_request(id, name, priority, type_, authenticatied, tags, location_specifiers)['asset']
command_results = CommandResults(
outputs_prefix='Edgescan.AssetDelete',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_get_users_command(client, args):
response = client.user_get_users_request()['users']
readable_output = tableToMarkdown('Users', response,
['id', 'username', 'email', 'phone_number', "mfa_enabled"])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserGetUsers',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_get_command(client, args):
id = args.get('id')
response = client.user_get_request(id=id)['user']
readable_output = tableToMarkdown('User', response,
['id', 'username', 'email', 'phone_number', "mfa_enabled"])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserGet',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_get_query_command(client, args):
response = client.user_get_query_request(args)['users']
readable_output = tableToMarkdown('User query', response,
['id', 'username', 'email', 'phone_number', "mfa_enabled"])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserGetQuery',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_create_command(client, args):
username = args.get('username')
email = args.get('email')
first_name = args.get('first_name')
last_name = args.get('last_name')
phone_number = args.get('phone_number')
mfa_enabled = args.get('mfa_enabled')
mfa_method = args.get('mfa_method')
is_super = args.get('is_super')
response = client.user_create_request(username, email, first_name, last_name,
phone_number, mfa_enabled, mfa_method, is_super)['user']
readable_output = tableToMarkdown('User created', response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserCreate',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_delete_command(client, args):
id = args.get('id')
response = client.user_delete_request(id=id)['user']
readable_output = tableToMarkdown('User deleted', response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserDelete',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_reset_password_command(client, args):
id = args.get('id')
response = client.user_reset_password_request(id=id)
command_results = CommandResults(
outputs_prefix='Edgescan.UserResetPassword',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_reset_email_command(client, args):
id = args.get('id')
response = client.user_reset_email_request(id=id)
command_results = CommandResults(
outputs_prefix='Edgescan.UserResetEmail',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_lock_account_command(client, args):
id = args.get('id')
response = client.user_lock_account_request(id=id)['user']
readable_output = tableToMarkdown('User locked', response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserLockAccount',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_unlock_account_command(client, args):
id = args.get('id')
response = client.user_unlock_account_request(id=id)['user']
readable_output = tableToMarkdown('User unlocked', response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserUnlockAccount',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def user_get_permissions_command(client, args):
id = args.get('id')
response = client.user_get_permissions_request(id=id)['permissions']
readable_output = tableToMarkdown('User permissions', response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.UserGetPermissions',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def vulnerabilities_get_command(client, args):
response = client.vulnerabilities_get_request()['vulnerabilities']
readable_output = tableToMarkdown('Vulnerabilities', response, ['id', 'asset_id', 'name', 'severity', 'cvss_score'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.VulnerabilitiesGet',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def vulnerabilities_get_export_command(client, args):
export_format = args.get("format", "json")
response = client.vulnerabilities_get_export_request(export_format=export_format)
filename = response.headers['Content-Disposition'].split("=")[1].replace('"', "")
file = fileResult(filename=filename, data=response.content, file_type=EntryType.ENTRY_INFO_FILE)
return file
def vulnerabilities_get_details_command(client, args):
id = args.get('id')
response = client.vulnerabilities_get_details_request(id)['vulnerability']
readable_output = tableToMarkdown('Vulnerability ID:' + id, response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.VulnerabilitiesGetDetails',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def vulnerabilities_get_query_command(client, args):
response = client.vulnerabilities_get_query_request(args, 50, 0)['vulnerabilities']
readable_output = tableToMarkdown('Vulnerabilities', response,
['id', 'asset_id', 'name', 'severity', 'cvss_score'])
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.VulnerabilitiesGetQuery',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def vulnerabilities_retest_command(client, args):
id = args.get('id')
response = client.vulnerabilities_retest_request(id=id)
readable_output = tableToMarkdown('Vulnerability retested ID:' + id, response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.VulnerabilitiesRetest',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def vulnerabilities_risk_accept_command(client, args):
value = args.get('value')
id = args.get('id')
response = client.vulnerabilities_risk_accept_request(value=value, id=id)
readable_output = tableToMarkdown('Vulnerability Risk-accepted ID:' + id, response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.VulnerabilitiesRiskAccept',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def vulnerabilities_add_annotation_command(client, args):
text = args.get('text')
id = args.get('id')
response = client.vulnerabilities_add_annotation_request(text=text, id=id)['annotation']
readable_output = tableToMarkdown('Annotation added:' + id, response)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='Edgescan.AnnotationAdd',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['X-API-TOKEN'] = params['api_key']
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=demisto.params().get('first_fetch', '3 days'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
    # Using assert as a type guard (first_fetch_time is never None when required=True, so the timestamp is an int)
assert isinstance(first_fetch_timestamp, int)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'edgescan-host-get-hosts': host_get_hosts_command,
'edgescan-host-get': host_get_command,
'edgescan-host-get-export': host_get_export_command,
'edgescan-host-get-query': host_get_query_command,
'edgescan-host-update': host_update_command,
'edgescan-asset-get-assets': asset_get_assets_command,
'edgescan-asset-get': asset_get_command,
'edgescan-asset-get-query': asset_get_query_command,
'edgescan-asset-create': asset_create_command,
'edgescan-asset-update': asset_update_command,
'edgescan-asset-delete': asset_delete_command,
'edgescan-user-get-users': user_get_users_command,
'edgescan-user-get': user_get_command,
'edgescan-user-get-query': user_get_query_command,
'edgescan-user-create': user_create_command,
'edgescan-user-delete': user_delete_command,
'edgescan-user-reset-password': user_reset_password_command,
'edgescan-user-reset-email': user_reset_email_command,
'edgescan-user-lock-account': user_lock_account_command,
'edgescan-user-unlock-account': user_unlock_account_command,
'edgescan-user-get-permissions': user_get_permissions_command,
'edgescan-vulnerabilities-get': vulnerabilities_get_command,
'edgescan-vulnerabilities-get-export': vulnerabilities_get_export_command,
'edgescan-vulnerabilities-get-details': vulnerabilities_get_details_command,
'edgescan-vulnerabilities-get-query': vulnerabilities_get_query_command,
'edgescan-vulnerabilities-retest': vulnerabilities_retest_command,
'edgescan-vulnerabilities-risk-accept': vulnerabilities_risk_accept_command,
'edgescan-vulnerabilities-add-annotation': vulnerabilities_add_annotation_command,
}
if command == 'test-module':
result = test_module(client)
return_results(result)
elif command == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
cvss_score = demisto.params().get('cvss_score', None)
cvss_score_greater_than = demisto.params().get('cvss_score_greater_than', None)
risk_more_than = demisto.params().get('risk_more_than', None)
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
if cvss_score and cvss_score_greater_than:
                raise DemistoException('Both cvss_score and cvss_score_greater_than have been provided. Please provide '
                                       'at most one.')
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
cvss_score=cvss_score,
risk_more_than=risk_more_than,
cvss_score_greater_than=cvss_score_greater_than
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
|
from __future__ import unicode_literals, division, absolute_import
import sys
from distutils.version import LooseVersion
from ssl import SSLError
from urllib2 import URLError
from sqlalchemy import Column, Integer, String
from flexget import db_schema, plugin, options
from flexget.event import event
from flexget.logger import console
from flexget.utils.database import with_session
from flexget.utils.template import RenderError
try:
import telegram
from telegram.error import TelegramError
except ImportError:
telegram = None
_MIN_TELEGRAM_VER = '3.2.0'
_PLUGIN_NAME = 'send_telegram'
_PARSERS = ['markdown', 'html']
_TOKEN_ATTR = 'bot_token'
_TMPL_ATTR = 'template'
_PARSE_ATTR = 'parse_mode'
_RCPTS_ATTR = 'recipients'
_USERNAME_ATTR = 'username'
_FULLNAME_ATTR = 'fullname'
_FIRSTNAME_ATTR = 'first'
_SURNAME_ATTR = 'sur'
_GROUP_ATTR = 'group'
ChatIdsBase = db_schema.versioned_base('telegram_chat_ids', 0)
class ChatIdEntry(ChatIdsBase):
__tablename__ = 'telegram_chat_ids'
id = Column(Integer, primary_key=True)
username = Column(String, index=True, nullable=True)
firstname = Column(String, index=True, nullable=True)
surname = Column(String, index=True, nullable=True)
group = Column(String, index=True, nullable=True)
def __str__(self):
x = ['id={0}'.format(self.id)]
if self.username:
x.append('username={0}'.format(self.username))
if self.firstname:
x.append('firstname={0}'.format(self.firstname))
if self.surname:
x.append('surname={0}'.format(self.surname))
if self.group:
x.append('group={0}'.format(self.group))
return ' '.join(x)
class SendTelegram(object):
"""Send a message to one or more Telegram users or groups upon accepting a download.
Preparations::
* Install 'python-telegram-bot' python pkg (i.e. `pip install python-telegram-bot`)
* Create a bot & obtain a token for it (see https://core.telegram.org/bots#botfather).
* For direct messages (not to a group), start a conversation with the bot and click "START" in the Telegram app.
* For group messages, add the bot to the desired group and send a start message to the bot: "/start" (mind the
leading '/').
Configuration example::
my-task:
send_telegram:
bot_token: token
template: {{title}}
        parse_mode: markdown
recipients:
- username: my-user-name
- group: my-group-name
- fullname:
first: my-first-name
sur: my-sur-name
Bootstrapping and testing the bot::
* Execute: `flexget send_telegram bootstrap`.
Look at the console output and make sure that the operation was successful.
* Execute: `flexget send_telegram test-msg`.
This will send a test message for every recipient you've configured.
Configuration notes::
    You may use any combination of recipient types (`username`, `group` or `fullname`) - 0 or more of each (but you
need at least one total...).
`template`::
Optional. The template from the example is the default.
`parse_mode`::
Optional. Whether the template uses `markdown` or `html` formatting.
    NOTE: The markdown parser will fall back to basic parsing if there is a parsing error. This can be caused by
    unclosed tags (watch out for wandering underscores when using markdown)
`username` vs. `fullname`::
Not all Telegram users have a username. In such cases you would have to use the `fullname` approach. Otherwise, it
is much easier to use the `username` configuration.
"""
log = None # initialized during plugin.register
""":type: flexget.logger.FlexGetLogger"""
_token = None
_tmpl = None
_use_markdown = False
_usernames = None
_fullnames = None
_groups = None
_bot = None
schema = {
'type': 'object',
'properties': {
_TOKEN_ATTR: {'type': 'string'},
_TMPL_ATTR: {'type': 'string', 'default': '{{title}}'},
_PARSE_ATTR: {'type': 'string', 'enum': _PARSERS},
_RCPTS_ATTR: {
'type': 'array',
'minItems': 1,
'items': {
'oneOf': [
{
'type': 'object',
'properties': {
_USERNAME_ATTR: {'type': 'string'},
},
'required': [_USERNAME_ATTR],
'additionalProperties': False,
},
{
'type': 'object',
'properties': {
_FULLNAME_ATTR: {
'type': 'object',
'properties': {
_FIRSTNAME_ATTR: {'type': 'string'},
_SURNAME_ATTR: {'type': 'string'},
},
'required': [_FIRSTNAME_ATTR, _SURNAME_ATTR],
'additionalProperties': False,
},
},
'required': [_FULLNAME_ATTR],
'additionalProperties': False,
},
{
'type': 'object',
'properties': {
_GROUP_ATTR: {'type': 'string'},
},
'required': [_GROUP_ATTR],
'additionalProperties': False,
},
],
},
},
},
'required': [_TOKEN_ATTR, _RCPTS_ATTR],
'additionalProperties': False,
}
def _parse_config(self, config):
"""
:type config: dict
"""
self._token = config[_TOKEN_ATTR]
self._tmpl = config[_TMPL_ATTR]
self._parse_mode = config.get(_PARSE_ATTR)
self._usernames = []
self._fullnames = []
self._groups = []
for i in config[_RCPTS_ATTR]:
if _USERNAME_ATTR in i:
self._usernames.append(i[_USERNAME_ATTR])
elif _FULLNAME_ATTR in i:
fullname = i[_FULLNAME_ATTR]
firstname = fullname[_FIRSTNAME_ATTR]
surname = fullname[_SURNAME_ATTR]
self._fullnames.append((firstname, surname))
elif _GROUP_ATTR in i:
self._groups.append(i[_GROUP_ATTR])
def on_task_output(self, task, config):
"""makes this plugin count as output (stops warnings about missing outputs)"""
pass
def on_task_exit(self, task, config):
"""Send telegram message(s) at exit"""
session = task.session
chat_ids = self._real_init(session, config)
if not chat_ids:
return
self._send_msgs(task, chat_ids)
def _real_init(self, session, config, ):
self._enforce_telegram_plugin_ver()
self._parse_config(config)
self.log.debug('token={0} parse_mode={5}, tmpl={4!r} usernames={1} fullnames={2} groups={3}'.format(
self._token, self._usernames, self._fullnames, self._groups, self._tmpl, self._parse_mode))
self._init_bot()
chat_ids = self._get_chat_ids_n_update_db(session)
return chat_ids
def bootstrap(self, session, config):
"""bootstrap the plugin configuration and update db with cached chat_ids"""
console('{0} - bootstrapping...'.format(_PLUGIN_NAME))
chat_ids = self._real_init(session, config)
found_usernames = [x.username for x in chat_ids if x.username]
found_fullnames = [(x.firstname, x.surname) for x in chat_ids if x.firstname]
found_grps = [x.group for x in chat_ids if x.group]
missing_usernames = [x for x in self._usernames if x not in found_usernames]
missing_fullnames = [x for x in self._fullnames if x not in found_fullnames]
missing_grps = [x for x in self._groups if x not in found_grps]
if missing_usernames or missing_fullnames or missing_grps:
for i in missing_usernames:
console('ERR: could not find chat_id for username: {0}'.format(i))
for i in missing_fullnames:
console('ERR: could not find chat_id for fullname: {0} {1}'.format(*i))
for i in missing_grps:
console('ERR: could not find chat_id for group: {0}'.format(i))
res = False
else:
console('{0} - bootstrap was successful'.format(_PLUGIN_NAME))
res = True
return res
def test_msg(self, session, config):
"""send test message to configured recipients"""
console('{0} loading chat_ids...'.format(_PLUGIN_NAME))
chat_ids = self._real_init(session, config)
console('{0} sending test message(s)...'.format(_PLUGIN_NAME))
for chat_id in (x.id for x in chat_ids):
self._bot.sendMessage(chat_id=chat_id, text='test message from flexget')
return True
def _init_bot(self):
self._bot = telegram.Bot(self._token)
self._check_token()
def _check_token(self):
try:
self._bot.getMe()
except UnicodeDecodeError as e:
self.log.trace('bot.getMe() raised: {!r}'.format(e))
raise plugin.PluginWarning('invalid bot token')
except (URLError, SSLError) as e:
            self.log.error('Could not connect to Telegram servers at this time, please try again later: %s', e.args[0])
except TelegramError as e:
            self.log.error('Could not connect to Telegram servers at this time, please try again later: %s', e.message)
@staticmethod
def _enforce_telegram_plugin_ver():
if telegram is None:
raise plugin.PluginWarning('missing python-telegram-bot pkg')
elif not hasattr(telegram, str('__version__')):
raise plugin.PluginWarning('invalid or old python-telegram-bot pkg')
elif LooseVersion(telegram.__version__) < str(_MIN_TELEGRAM_VER):
raise plugin.PluginWarning('old python-telegram-bot ({0})'.format(telegram.__version__))
def _send_msgs(self, task, chat_ids):
kwargs = dict()
if self._parse_mode == 'markdown':
kwargs['parse_mode'] = telegram.ParseMode.MARKDOWN
elif self._parse_mode == 'html':
kwargs['parse_mode'] = telegram.ParseMode.HTML
for entry in task.accepted:
msg = self._render_msg(entry, self._tmpl)
for chat_id in (x.id for x in chat_ids):
try:
self._bot.sendMessage(chat_id=chat_id, text=msg, **kwargs)
except TelegramError as e:
if kwargs.get('parse_mode') and "can't parse message text" in e.message:
self.log.warning(
'Failed to render message using parse mode %s. Falling back to basic parsing: %s' % (
kwargs['parse_mode'], e.message))
del kwargs['parse_mode']
try:
self._bot.sendMessage(chat_id=chat_id, text=msg, **kwargs)
except TelegramError as e:
                            self.log.error('Cannot send message: %s' % e.message)
continue
else:
                        self.log.error('Cannot send message: %s' % e.message)
continue
def _render_msg(self, entry, tmpl):
"""
:type entry: flexget.entry.Entry
:type tmpl: str
:rtype: str
"""
try:
msg = entry.render(tmpl)
except RenderError as e:
title = entry.get('title')
self.log.error('render error; title={0} err={1}'.format(title, e))
msg = title
return msg
def _get_chat_ids_n_update_db(self, session):
"""
:type session: sqlalchemy.orm.Session
:rtype: list[ChatIdEntry]
"""
usernames = self._usernames[:]
fullnames = self._fullnames[:]
groups = self._groups[:]
chat_ids, has_new_chat_ids = self._get_chat_ids(session, usernames, fullnames, groups)
self.log.debug('chat_ids={0}'.format(chat_ids))
if not chat_ids:
self.log.warning('no chat id found')
else:
if usernames:
self.log.warning('no chat id found for usernames: {0}'.format(usernames))
if fullnames:
self.log.warning('no chat id found for fullnames: {0}'.format(fullnames))
if groups:
self.log.warning('no chat id found for groups: {0}'.format(groups))
if has_new_chat_ids:
self._update_db(session, chat_ids)
return chat_ids
def _get_chat_ids(self, session, usernames, fullnames, groups):
"""get chat ids for `usernames`, `fullnames` & `groups`.
entries with a matching chat ids will be removed from the input lists.
:type session: sqlalchemy.orm.Session
:type usernames: list[str]
:type fullnames: list[tuple[str, str]]
:type groups: list[str]
:returns: chat ids, new chat ids found?
:rtype: list[ChatIdEntry], bool
"""
chat_ids = list()
self.log.debug('loading cached chat ids')
chat_ids = self._get_cached_chat_ids(session, usernames, fullnames, groups)
self.log.debug('found {0} cached chat_ids: {1}'.format(len(chat_ids), ['{0}'.format(x) for x in chat_ids]))
if not (usernames or fullnames or groups):
self.log.debug('all chat ids found in cache')
return chat_ids, False
self.log.debug('loading new chat ids')
new_chat_ids = list(self._get_new_chat_ids(usernames, fullnames, groups))
self.log.debug('found {0} new chat_ids: {1}'.format(len(new_chat_ids), ['{0}'.format(x) for x in new_chat_ids]))
chat_ids.extend(new_chat_ids)
return chat_ids, bool(new_chat_ids)
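    # Illustrative note on the flow above (recipient names are hypothetical):
    # both lookup helpers mutate their input lists in place, popping every
    # recipient they resolve. With recipients username=alice and group=downloads,
    # a cache hit for alice leaves only the group for _get_new_chat_ids, and
    # anything still left in the lists afterwards is reported as missing by
    # _get_chat_ids_n_update_db.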
@staticmethod
def _get_cached_chat_ids(session, usernames, fullnames, groups):
"""get chat ids from the cache (DB). remove found entries from `usernames`, `fullnames` & `groups`
:type session: sqlalchemy.orm.Session
:type usernames: list[str]
:type fullnames: list[tuple[str, str]]
:type groups: list[str]
:rtype: list[ChatIdEntry]
"""
chat_ids = list()
cached_usernames = dict((x.username, x)
for x in session.query(ChatIdEntry).filter(ChatIdEntry.username != None).all())
cached_fullnames = dict(((x.firstname, x.surname), x)
for x in session.query(ChatIdEntry).filter(ChatIdEntry.firstname != None).all())
cached_groups = dict((x.group, x)
for x in session.query(ChatIdEntry).filter(ChatIdEntry.group != None).all())
len_ = len(usernames)
for i, username in enumerate(reversed(usernames)):
item = cached_usernames.get(username)
if item:
chat_ids.append(item)
usernames.pop(len_ - i - 1)
len_ = len(fullnames)
for i, fullname in enumerate(reversed(fullnames)):
item = cached_fullnames.get(fullname)
if item:
chat_ids.append(item)
fullnames.pop(len_ - i - 1)
len_ = len(groups)
for i, grp in enumerate(reversed(groups)):
item = cached_groups.get(grp)
if item:
chat_ids.append(item)
groups.pop(len_ - i - 1)
return chat_ids
def _get_new_chat_ids(self, usernames, fullnames, groups):
"""get chat ids by querying the telegram `bot`
:type usernames: list[str]
:type fullnames: list[tuple[str, str]]
:type groups: list[str]
:rtype: __generator[ChatIdEntry]
"""
upd_usernames, upd_fullnames, upd_groups = self._get_bot_updates()
len_ = len(usernames)
for i, username in enumerate(reversed(usernames)):
chat = upd_usernames.get(username)
if chat is not None:
entry = ChatIdEntry(id=chat.id, username=chat.username, firstname=chat.first_name,
surname=chat.last_name)
yield entry
usernames.pop(len_ - i - 1)
len_ = len(fullnames)
for i, fullname in enumerate(reversed(fullnames)):
chat = upd_fullnames.get(fullname)
if chat is not None:
entry = ChatIdEntry(id=chat.id, username=chat.username, firstname=chat.first_name,
surname=chat.last_name)
yield entry
fullnames.pop(len_ - i - 1)
len_ = len(groups)
for i, grp in enumerate(reversed(groups)):
chat = upd_groups.get(grp)
if chat is not None:
entry = ChatIdEntry(id=chat.id, group=chat.title)
yield entry
groups.pop(len_ - i - 1)
def _get_bot_updates(self):
"""get updated chats info from telegram
:rtype: (dict[str, telegram.User], dict[(str, str), telegram.User], dict[str, telegram.GroupChat])
"""
# highly unlikely, but if there are more than 100 msgs waiting for the bot, we should not miss one
updates = []
last_upd = 0
while 1:
ups = self._bot.getUpdates(last_upd, limit=100)
updates.extend(ups)
if len(ups) < 100:
break
last_upd = ups[-1].update_id
usernames = dict()
fullnames = dict()
groups = dict()
for chat in (x.message.chat for x in updates):
if chat.type == 'private':
usernames[chat.username] = chat
fullnames[(chat.first_name, chat.last_name)] = chat
            elif chat.type in ('group', 'supergroup', 'channel'):
groups[chat.title] = chat
else:
                self.log.warn('unknown chat type: {0}'.format(chat.type))
return usernames, fullnames, groups
def _update_db(self, session, chat_ids):
"""Update the DB with found `chat_ids`
:type session: sqlalchemy.orm.Session
:type chat_ids: list[ChatIdEntry]
"""
self.log.info('saving updated chat_ids to db')
        # avoid duplicate chat_ids (this is possible if configuration specified both username & fullname)
chat_ids_d = dict((x.id, x) for x in chat_ids)
session.add_all(chat_ids_d.itervalues())
session.commit()
def _guess_task_name(manager):
for task in manager.tasks:
if _get_config(manager, task) is not None:
break
else:
task = None
return task
def _get_config(manager, task):
return manager.config['tasks'][task].get(_PLUGIN_NAME)
@with_session()
def do_cli(manager, args, session=None):
"""
:type manager: flexget.Manager
"""
task_name = _guess_task_name(manager)
config = _get_config(manager, task_name)
plugin_info = plugin.get_plugin_by_name(_PLUGIN_NAME)
send_telegram = plugin_info.instance
""":type: SendTelegram"""
if args.action == 'bootstrap':
res = send_telegram.bootstrap(session, config)
elif args.action == 'test-msg':
res = send_telegram.test_msg(session, config)
else:
raise RuntimeError('unknown action')
sys.exit(int(not res))
@event('plugin.register')
def register_plugin():
plugin.register(SendTelegram, _PLUGIN_NAME, api_ver=2)
@event('options.register')
def register_parser_arguments():
parser = options.register_command(_PLUGIN_NAME, do_cli, help='{0} cli'.format(_PLUGIN_NAME))
""":type: options.CoreArgumentParser"""
subp = parser.add_subparsers(dest='action')
bsp = subp.add_parser('bootstrap', help='bootstrap the plugin according to config')
    bsp.add_argument('--tasks')
subp.add_parser('test-msg', help='send test message to all configured recipients')
|
|
"""Recurrent neural network language model with static graph optimizations.
This is a modified version of the standard Chainer Penn Tree Bank (ptb)
example that includes static subgraph optimizations. It is mostly unchanged
from the original model, except that the RNN is unrolled for `bproplen`
slices inside a static chain.
This is required because the `LSTM` link used by the ptb example is not
fully compatible with the static subgraph optimizations feature.
Specifically, it does not support multiple calls in the same iteration
unless it is called from inside a single static chain.
This code is ported from the following implementation written in Torch.
https://github.com/tomsercu/lstm
This code is a custom loop version of train_ptb.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
from __future__ import print_function
import argparse
import copy
import numpy as np
import random
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.functions as F
from chainer.functions.loss import softmax_cross_entropy
import chainer.links as L
from chainer import serializers
from chainer import static_graph
# Definition of a recurrent net for language modeling
class RNNForLMSlice(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RNNForLMSlice, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units)
self.l1 = L.LSTM(n_units, n_units)
self.l2 = L.LSTM(n_units, n_units)
self.l3 = L.Linear(n_units, n_vocab)
for param in self.params():
param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
def __call__(self, x):
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0))
h2 = self.l2(F.dropout(h1))
y = self.l3(F.dropout(h2))
return y
class RNNForLMUnrolled(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RNNForLMUnrolled, self).__init__()
with self.init_scope():
self.rnn = RNNForLMSlice(n_vocab, n_units)
@static_graph(verbosity_level=1)
def __call__(self, words):
"""Perform a forward pass on the supplied list of words.
The RNN is unrolled for a number of time slices equal to the
length of the supplied word sequence.
Args:
            words (list of Variable): The list of input words to the
                unrolled neural network.
        Returns the corresponding list of output variables, of the same
        length as the input sequence.
"""
outputs = []
for ind in range(len(words)):
word = words[ind]
y = self.rnn(word)
outputs.append(y)
return outputs
# Dataset iterator to create a batch of sequences at different positions.
# This iterator returns a pair of current words and the next words. Each
# example is a part of sequences starting from the different offsets
# equally spaced within the whole sequence.
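# Worked example (illustrative): for a dataset of 10 word IDs [w0, ..., w9]
# and batch_size=2, the offsets are [0, 5], so the first call to __next__()
# yields [(w0, w1), (w5, w6)] and the second yields [(w1, w2), (w6, w7)].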
class ParallelSequentialIterator(chainer.dataset.Iterator):
def __init__(self, dataset, batch_size, repeat=True):
self.dataset = dataset
self.batch_size = batch_size # batch size
# Number of completed sweeps over the dataset. In this case, it is
# incremented if every word is visited at least once after the last
# increment.
self.epoch = 0
# True if the epoch is incremented at the last iteration.
self.is_new_epoch = False
self.repeat = repeat
length = len(dataset)
# Offsets maintain the position of each sequence in the mini-batch.
self.offsets = [i * length // batch_size for i in range(batch_size)]
# NOTE: this is not a count of parameter updates. It is just a count of
# calls of ``__next__``.
self.iteration = 0
# use -1 instead of None internally
self._previous_epoch_detail = -1.
def __next__(self):
# This iterator returns a list representing a mini-batch. Each item
# indicates a different position in the original sequence. Each item is
# represented by a pair of two word IDs. The first word is at the
# "current" position, while the second word at the next position.
# At each iteration, the iteration count is incremented, which pushes
# forward the "current" position.
length = len(self.dataset)
if not self.repeat and self.iteration * self.batch_size >= length:
# If not self.repeat, this iterator stops at the end of the first
# epoch (i.e., when all words are visited once).
raise StopIteration
cur_words = self.get_words()
self._previous_epoch_detail = self.epoch_detail
self.iteration += 1
next_words = self.get_words()
epoch = self.iteration * self.batch_size // length
self.is_new_epoch = self.epoch < epoch
if self.is_new_epoch:
self.epoch = epoch
return list(zip(cur_words, next_words))
@property
def epoch_detail(self):
# Floating point version of epoch.
return self.iteration * self.batch_size / len(self.dataset)
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def get_words(self):
# It returns a list of current words.
return [self.dataset[(offset + self.iteration) % len(self.dataset)]
for offset in self.offsets]
def serialize(self, serializer):
# It is important to serialize the state to be recovered on resume.
self.iteration = serializer('iteration', self.iteration)
self.epoch = serializer('epoch', self.epoch)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
            self._previous_epoch_detail = (
                (self.iteration - 1) * self.batch_size / len(self.dataset))
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
def main():
np.random.seed(0)
random.seed(1)
parser = argparse.ArgumentParser()
parser.add_argument('--batchsize', '-b', type=int, default=20,
help='Number of examples in each mini-batch')
parser.add_argument('--bproplen', '-l', type=int, default=25,
help='Number of words in each mini-batch '
'(= length of truncated BPTT)')
parser.add_argument('--epoch', '-e', type=int, default=39,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=0,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--gradclip', '-c', type=float, default=5,
help='Gradient norm threshold to clip')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.set_defaults(test=False)
parser.add_argument('--unit', '-u', type=int, default=650,
help='Number of LSTM units in each layer')
args = parser.parse_args()
def evaluate(model, iter):
# Evaluation routine to be used for validation and test.
evaluator = model.copy() # to use different state
evaluator.rnn.reset_state() # initialize state
sum_perp = 0
data_count = 0
words = []
labels = []
lossfun = softmax_cross_entropy.softmax_cross_entropy
with configuration.using_config('train', False):
for batch in copy.copy(iter):
word, label = convert.concat_examples(batch, args.gpu)
words.append(word)
labels.append(label)
data_count += 1
outputs = evaluator(words)
for ind in range(len(outputs)):
y = outputs[ind]
label = labels[ind]
loss = lossfun(y, label)
sum_perp += loss.data
return np.exp(float(sum_perp) / data_count)
# Load the Penn Tree Bank long word sequence dataset
train, val, test = chainer.datasets.get_ptb_words()
n_vocab = max(train) + 1 # train is just an array of integers
print('#vocab =', n_vocab)
if args.test:
train = train[:100]
val = val[:100]
test = test[:100]
# Create the dataset iterators
train_iter = ParallelSequentialIterator(train, args.batchsize)
val_iter = ParallelSequentialIterator(val, 1, repeat=False)
test_iter = ParallelSequentialIterator(test, 1, repeat=False)
# Prepare an RNNLM model
model = RNNForLMUnrolled(n_vocab, args.unit)
lossfun = softmax_cross_entropy.softmax_cross_entropy
if args.gpu >= 0:
# Make the specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
# Set up an optimizer
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
sum_perp = 0
count = 0
iteration = 0
while train_iter.epoch < args.epoch:
iteration += 1
words = []
labels = []
# Progress the dataset iterator for bprop_len words at each iteration.
for i in range(args.bproplen):
# Get the next batch (a list of tuples of two word IDs)
batch = train_iter.__next__()
# Concatenate the word IDs to matrices and send them to the device
# self.converter does this job
# (it is chainer.dataset.concat_examples by default)
word, label = convert.concat_examples(batch, args.gpu)
words.append(word)
labels.append(label)
count += 1
outputs = model(words)
loss = 0
for ind in range(len(outputs)):
y = outputs[ind]
label = labels[ind]
loss += lossfun(y, label)
sum_perp += loss.data
optimizer.target.cleargrads() # Clear the parameter gradients
loss.backward() # Backprop
loss.unchain_backward() # Truncate the graph
optimizer.update() # Update the parameters
if iteration % 20 == 0:
print('iteration: ', iteration)
print('training perplexity: ', np.exp(float(sum_perp) / count))
sum_perp = 0
count = 0
if train_iter.is_new_epoch:
print('Evaluating model on validation set...')
print('epoch: ', train_iter.epoch)
print('validation perplexity: ', evaluate(model, val_iter))
# Evaluate on test dataset
print('test')
test_perp = evaluate(model, test_iter)
print('test perplexity:', test_perp)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('rnnlm.model', model)
print('save the optimizer')
serializers.save_npz('rnnlm.state', optimizer)
if __name__ == '__main__':
main()
|
|
"""Translation helper functions."""
import locale
import os
import re
import sys
import warnings
import gettext as gettext_module
from cStringIO import StringIO
from threading import local
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = u"\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
        (?:;q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset. Django uses a defined DEFAULT_CHARSET as the output charset on
Python 2.4.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
if settings.SETTINGS_MODULE is not None:
parts = settings.SETTINGS_MODULE.split('.')
project = import_module(parts[0])
projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
else:
projectpath = None
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
localepaths = [os.path.normpath(path) for path in settings.LOCALE_PATHS]
if (projectpath and os.path.isdir(projectpath) and
os.path.normpath(projectpath) not in localepaths):
res = _merge(projectpath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
_active.value = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
    will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
return do_translate(message, 'gettext')
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
result = do_translate(
u"%s%s%s" % (context, CONTEXT_SEPARATOR, message), 'ugettext')
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a UTF-8 bytestring of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
"""
    Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
result = do_ntranslate(u"%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
u"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number, 'ungettext')
if CONTEXT_SEPARATOR in result:
# Translation not found
result = do_ntranslate(singular, plural, number, 'ungettext')
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies or
session and during format localization.
"""
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
def get_language_from_request(request):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
"""
global _accepted
from django.conf import settings
supported = dict(settings.LANGUAGES)
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code not in supported:
lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# We have a very restricted form for our language files (no encoding
# specifier, since they all must be UTF-8 and only one possible
        # language each time). So we avoid the overhead of gettext.find() and
# work out the MO file manually.
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
for lang, dirname in ((accept_lang, normalized),
(accept_lang.split('-')[0], normalized.split('_')[0])):
if lang.lower() not in supported:
continue
for path in all_locale_paths():
if os.path.exists(os.path.join(path, dirname, 'LC_MESSAGES', 'django.mo')):
_accepted[normalized] = lang
return lang
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
out = StringIO()
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"': g = g.strip('"')
elif g[0] == "'": g = g.strip("'")
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
out.write(' # %s' % t.contents)
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
priority = priority and float(priority) or 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
|
|
from unittest import mock, skipUnless
from django.db import connection
from django.db.models import Index
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, skipUnlessDBFeature
from .models import Article, ArticleReporter, City, District, Reporter
class IntrospectionTests(TransactionTestCase):
available_apps = ['introspection']
def test_table_names(self):
tl = connection.introspection.table_names()
self.assertEqual(tl, sorted(tl))
self.assertIn(Reporter._meta.db_table, tl, "'%s' isn't in table_list()." % Reporter._meta.db_table)
self.assertIn(Article._meta.db_table, tl, "'%s' isn't in table_list()." % Article._meta.db_table)
def test_django_table_names(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
tl = connection.introspection.django_table_names()
cursor.execute("DROP TABLE django_ixn_test_table;")
self.assertNotIn('django_ixn_test_table', tl,
"django_table_names() returned a non-Django table")
def test_django_table_names_retval_type(self):
        # django_table_names() should return a list (refs #15216)
tl = connection.introspection.django_table_names(only_existing=True)
self.assertIs(type(tl), list)
tl = connection.introspection.django_table_names(only_existing=False)
self.assertIs(type(tl), list)
def test_table_names_with_views(self):
with connection.cursor() as cursor:
try:
cursor.execute(
'CREATE VIEW introspection_article_view AS SELECT headline '
'from introspection_article;')
except DatabaseError as e:
if 'insufficient privileges' in str(e):
self.fail("The test user has no CREATE VIEW privileges")
else:
raise
try:
self.assertIn('introspection_article_view', connection.introspection.table_names(include_views=True))
self.assertNotIn('introspection_article_view', connection.introspection.table_names())
finally:
with connection.cursor() as cursor:
cursor.execute('DROP VIEW introspection_article_view')
def test_unmanaged_through_model(self):
tables = connection.introspection.django_table_names()
self.assertNotIn(ArticleReporter._meta.db_table, tables)
def test_installed_models(self):
tables = [Article._meta.db_table, Reporter._meta.db_table]
models = connection.introspection.installed_models(tables)
self.assertEqual(models, {Article, Reporter})
def test_sequence_list(self):
sequences = connection.introspection.sequence_list()
reporter_seqs = [seq for seq in sequences if seq['table'] == Reporter._meta.db_table]
self.assertEqual(len(reporter_seqs), 1, 'Reporter sequence not found in sequence_list()')
self.assertEqual(reporter_seqs[0]['column'], 'id')
def test_get_table_description_names(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual([r[0] for r in desc],
[f.column for f in Reporter._meta.fields])
def test_get_table_description_types(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[datatype(r[1], r) for r in desc],
['AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
'CharField', 'CharField', 'CharField',
'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField']
)
def test_get_table_description_col_lengths(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[r[3] for r in desc if datatype(r[1], r) == 'CharField'],
[30, 30, 254]
)
@skipUnlessDBFeature('can_introspect_null')
def test_get_table_description_nullable(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
self.assertEqual(
[r[6] for r in desc],
[False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False]
)
@skipUnlessDBFeature('can_introspect_autofield')
def test_bigautofield(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, City._meta.db_table)
self.assertIn('BigAutoField', [datatype(r[1], r) for r in desc])
# Regression test for #9991 - 'real' types in postgres
@skipUnlessDBFeature('has_real_datatype')
def test_postgresql_real_type(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
cursor.execute('DROP TABLE django_ixn_real_test_table;')
self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_relations(self):
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
# That's {field_name: (field_name_other_table, other_table)}
expected_relations = {
'reporter_id': ('id', Reporter._meta.db_table),
'response_to_id': ('id', Article._meta.db_table),
}
self.assertEqual(relations, expected_relations)
# Removing a field shouldn't disturb get_relations (#17785)
body = Article._meta.get_field('body')
with connection.schema_editor() as editor:
editor.remove_field(Article, body)
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
with connection.schema_editor() as editor:
editor.add_field(Article, body)
self.assertEqual(relations, expected_relations)
@skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
def test_get_relations_alt_format(self):
"""
With SQLite, foreign keys can be added with different syntaxes and
formatting.
"""
create_table_statements = [
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES {}(id));",
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY (art_id) REFERENCES {}(id));"
]
for statement in create_table_statements:
with connection.cursor() as cursor:
cursor.fetchone = mock.Mock(return_value=[statement.format(Article._meta.db_table)])
relations = connection.introspection.get_relations(cursor, 'mocked_table')
self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_key_columns(self):
with connection.cursor() as cursor:
key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
self.assertEqual(
set(key_columns),
{('reporter_id', Reporter._meta.db_table, 'id'),
('response_to_id', Article._meta.db_table, 'id')})
def test_get_primary_key_column(self):
with connection.cursor() as cursor:
primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
pk_fk_column = connection.introspection.get_primary_key_column(cursor, District._meta.db_table)
self.assertEqual(primary_key_column, 'id')
self.assertEqual(pk_fk_column, 'city_id')
def test_get_constraints_index_types(self):
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table)
index = {}
index2 = {}
for key, val in constraints.items():
if val['columns'] == ['headline', 'pub_date']:
index = val
if val['columns'] == ['headline', 'response_to_id', 'pub_date', 'reporter_id']:
index2 = val
self.assertEqual(index['type'], Index.suffix)
self.assertEqual(index2['type'], Index.suffix)
@skipUnlessDBFeature('supports_index_column_ordering')
def test_get_constraints_indexes_orders(self):
"""
Indexes have the 'orders' key with a list of 'ASC'/'DESC' values.
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table)
indexes_verified = 0
expected_columns = [
['reporter_id'],
['headline', 'pub_date'],
['response_to_id'],
['headline', 'response_to_id', 'pub_date', 'reporter_id'],
]
for key, val in constraints.items():
if val['index'] and not (val['primary_key'] or val['unique']):
self.assertIn(val['columns'], expected_columns)
self.assertEqual(val['orders'], ['ASC'] * len(val['columns']))
indexes_verified += 1
self.assertEqual(indexes_verified, 4)
def datatype(dbtype, description):
"""Helper to convert a data type into a string."""
dt = connection.introspection.get_field_type(dbtype, description)
if type(dt) is tuple:
return dt[0]
else:
return dt
|
|
"""
Basic unit tests for the HTTP Router.
Not complete by any means, but having something to start with means we can
add issues as they occur so we have automated regression testing.
"""
import time
from datetime import datetime
from django.test import TestCase, TransactionTestCase
from nose.tools import nottest
from rapidsms_httprouter.models import Message
from rapidsms_httprouter.qos_messages import get_alarms
from rapidsms_httprouter.router import get_router
from rapidsms.models import Backend, Connection
from rapidsms.apps.base import AppBase
from rapidsms.messages.outgoing import OutgoingMessage
from django.conf import settings
from django.core.management import call_command
class MassTextTest(TestCase):
def setUp(self):
(self.backend_1, created) = Backend.objects.get_or_create(name="MTT_test_backend_1")
(self.connection_1, created) = Connection.objects.get_or_create(backend=self.backend_1, identity="20000")
(self.backend_2, created) = Backend.objects.get_or_create(name="MTT_test_backend_2")
(self.connection_2, created) = Connection.objects.get_or_create(backend=self.backend_2, identity="20001")
def clean_data(self):
Connection.objects.get(identity="20000").delete()
Connection.objects.get(identity="20001").delete()
Backend.objects.get(name="MTT_test_backend_1").delete()
Backend.objects.get(name="MTT_test_backend_2").delete()
def tearDown(self):
self.clean_data()
def test_can_send_mass_text_with_no_batch_name(self):
messages_sent = Message.mass_text("MassTestTest-MESSAGE", [self.connection_1, self.connection_2])
self.assertEqual(len(messages_sent), 2, "Should have sent 2 messages")
def test_can_send_mass_text_with_batch_name(self):
messages_sent = Message.mass_text("MassTestTest-MESSAGE", [self.connection_1, self.connection_2],
batch_name="FOO")
message_1 = Message.objects.get(pk=messages_sent[0].pk)
message_2 = Message.objects.get(pk=messages_sent[1].pk)
self.assertEqual(message_1.batch.name, "FOO")
self.assertEqual(message_2.batch.name, "FOO")
@nottest #BROKEN
class BackendTest(TransactionTestCase):
def setUp(self):
(self.backend, created) = Backend.objects.get_or_create(name="test_backend")
(self.connection, created) = Connection.objects.get_or_create(backend=self.backend, identity='2067799294')
(self.backend2, created) = Backend.objects.get_or_create(name="test_backend2")
(self.connection2, created) = Connection.objects.get_or_create(backend=self.backend2, identity='2067799291')
settings.SMS_APPS = []
def tearDown(self):
settings.ROUTER_URL = None
def testNoRouterURL(self):
# send something off
router = get_router()
        # tests that messages are correctly built
msg1 = router.add_outgoing(self.connection, "test")
# sleep a teeny bit to let it send
self.assertEquals('test_backend', msg1.connection.backend.name)
self.assertEquals('2067799294', msg1.connection.identity)
self.assertEquals('test', msg1.text)
self.assertEquals('O', msg1.direction)
self.assertEquals('Q', msg1.status)
def testSimpleRouterURL(self):
# set our router URL
settings.ROUTER_URL = "http://mykannel.com/cgi-bin/sendsms?from=1234&text=%(text)s&to=%(recipient)s&smsc=%(backend)s&id=%(id)s"
# monkey patch the router's fetch_url request
def test_fetch_url(self, url):
test_fetch_url.url = url
return 200
router = get_router()
msg1 = router.add_outgoing(self.connection, "test")
# sleep to let our sending thread take care of things
# TODO: this is pretty fragile but good enough for now
time.sleep(2)
msg1 = Message.objects.get(id=msg1.id)
self.assertEquals('O', msg1.direction)
self.assertEquals('S', msg1.status)
# check whether our url was set right
self.assertEquals(
"http://mykannel.com/cgi-bin/sendsms?from=1234&text=test&to=2067799294&smsc=test_backend&id=%d" % msg1.id,
test_fetch_url.url)
def testRouterDictURL(self):
# set our router URL
settings.ROUTER_URL = {
"default": "http://mykannel.com/cgi-bin/sendsms?from=1234&text=%(text)s&to=%(recipient)s&smsc=%(backend)s&id=%(id)s",
"test_backend2": "http://mykannel2.com/cgi-bin/sendsms?from=1234&text=%(text)s&to=%(recipient)s&smsc=%(backend)s&id=%(id)s"
}
# monkey patch the router's fetch_url request
def test_fetch_url(self, url):
test_fetch_url.url = url
return 200
router = get_router()
msg1 = router.add_outgoing(self.connection, "test")
# sleep to let our sending thread take care of things
# TODO: this is pretty fragile but good enough for now
time.sleep(2)
msg1 = Message.objects.get(id=msg1.id)
self.assertEquals('O', msg1.direction)
self.assertEquals('S', msg1.status)
# check whether our url was set right
self.assertEquals(
"http://mykannel.com/cgi-bin/sendsms?from=1234&text=test&to=2067799294&smsc=test_backend&id=%d" % msg1.id,
test_fetch_url.url)
# now send to our other backend
msg2 = router.add_outgoing(self.connection2, "test2")
# sleep to let our sending thread take care of things
# TODO: this is pretty fragile but good enough for now
time.sleep(2)
msg2 = Message.objects.get(id=msg2.id)
self.assertEquals('O', msg2.direction)
self.assertEquals('S', msg2.status)
# check whether our url was set right again
self.assertEquals(
"http://mykannel2.com/cgi-bin/sendsms?from=1234&text=test2&to=2067799291&smsc=test_backend2&id=%d" % msg2.id,
test_fetch_url.url)
@nottest #BROKEN
class RouterTest(TestCase):
def setUp(self):
(self.backend, created) = Backend.objects.get_or_create(name="test_backend")
(self.connection, created) = Connection.objects.get_or_create(backend=self.backend, identity='2067799294')
# configure with bare minimum to run the http router
settings.SMS_APPS = []
def testAddMessage(self):
router = get_router()
        # tests that messages are correctly built
msg1 = router.add_message('test', '+250788383383', 'test', 'I', 'P')
self.assertEquals('test', msg1.connection.backend.name)
self.assertEquals('250788383383', msg1.connection.identity)
self.assertEquals('test', msg1.text)
self.assertEquals('I', msg1.direction)
self.assertEquals('P', msg1.status)
        # test that connections are reused and that numbers are normalized
msg2 = router.add_message('test', '250788383383', 'test', 'I', 'P')
self.assertEquals(msg2.connection.pk, msg1.connection.pk)
# test that connections are reused and that numbers are normalized
msg3 = router.add_message('test', '250-7883-83383', 'test', 'I', 'P')
self.assertEquals(msg3.connection.pk, msg1.connection.pk)
# allow letters, maybe shortcodes are using mappings to numbers
msg4 = router.add_message('test', 'asdfASDF', 'test', 'I', 'P')
self.assertEquals('asdfasdf', msg4.connection.identity)
def testAddBulk(self):
connection2 = Connection.objects.create(backend=self.backend, identity='8675309')
connection3 = Connection.objects.create(backend=self.backend, identity='8675310')
connection4 = Connection.objects.create(backend=self.backend, identity='8675311')
# test that mass texting works with a single number
msgs = Message.mass_text('Jenny I got your number', [self.connection])
self.assertEquals(msgs.count(), 1)
self.assertEquals(msgs[0].text, 'Jenny I got your number')
# test no connections are re-created
self.assertEquals(msgs[0].connection.pk, self.connection.pk)
msgs = Message.mass_text('Jenny dont change your number',
[self.connection, connection2, connection3, connection4], status='L')
self.assertEquals(str(msgs.values_list('status', flat=True).distinct()[0]), 'L')
self.assertEquals(msgs.count(), 4)
# test duplicate connections don't create duplicate messages
msgs = Message.mass_text('Turbo King is the greatest!', [self.connection, self.connection])
self.assertEquals(msgs.count(), 1)
def testRouter(self):
router = get_router()
msg = OutgoingMessage(self.connection, "test")
db_msg = router.handle_outgoing(msg)
# assert a db message was created
self.assertTrue(db_msg.pk)
self.assertEqual(db_msg.text, "test")
self.assertEqual(db_msg.direction, "O")
self.assertEqual(db_msg.connection, self.connection)
self.assertEqual(db_msg.status, 'Q')
# check our queue
msgs = Message.objects.filter(status='Q')
self.assertEqual(1, len(msgs))
# now mark the message as delivered
router.mark_delivered(db_msg.pk)
# load it back up
db_msg = Message.objects.get(id=db_msg.pk)
# assert it looks ok now
self.assertEqual(db_msg.text, "test")
self.assertEqual(db_msg.direction, 'O')
self.assertEqual(db_msg.connection, self.connection)
self.assertEqual(db_msg.status, 'D')
def testAppCancel(self):
router = get_router()
class CancelApp(AppBase):
            # cancel the outgoing phase by returning False
def outgoing(self, msg):
return False
@property
def name(self):
return "ReplyApp"
try:
router.apps.append(CancelApp(router))
msg = OutgoingMessage(self.connection, "test")
db_msg = router.handle_outgoing(msg)
# assert a db message was created, but also cancelled
self.assertTrue(db_msg.pk)
self.assertEqual(db_msg.text, "test")
self.assertEqual(db_msg.direction, "O")
self.assertEqual(db_msg.connection, self.connection)
self.assertEqual(db_msg.status, 'C')
finally:
router.apps = []
def testAppReply(self):
router = get_router()
class ReplyApp(AppBase):
def handle(self, msg):
# make sure a db message was given to us
if not msg.db_message:
raise Exception("ReplyApp was not handed a db message")
# and trigger a reply
msg.respond("reply")
# return that we handled it
return True
@property
def name(self):
return "ReplyApp"
class ExceptionApp(AppBase):
def handle(self, msg):
raise Exception("handle() process was not shortcut by ReplyApp returning True")
try:
router.apps.append(ReplyApp(router))
router.apps.append(ExceptionApp(router))
db_msg = router.handle_incoming(self.backend.name, self.connection.identity, "test send")
# assert a db message was created and handled
self.assertTrue(db_msg.pk)
self.assertEqual(db_msg.text, "test send")
self.assertEqual(db_msg.direction, "I")
self.assertEqual(db_msg.connection, self.connection)
self.assertEqual(db_msg.status, 'H')
# assert that a response was associated
responses = db_msg.responses.all()
self.assertEqual(1, len(responses))
response = responses[0]
self.assertEqual(response.text, "reply")
self.assertEqual(response.direction, "O")
self.assertEqual(response.connection, self.connection)
self.assertEqual(response.status, "Q")
finally:
router.apps = []
@nottest #BROKEN
class ViewTest(TestCase):
def setUp(self):
(self.backend, created) = Backend.objects.get_or_create(name="test_backend")
(self.connection, created) = Connection.objects.get_or_create(backend=self.backend, identity='2067799294')
# add an echo app
class EchoApp(AppBase):
def handle(self, msg):
msg.respond("echo %s" % msg.text)
return True
router = get_router()
router.apps.append(EchoApp(router))
def tearDown(self):
get_router().apps = []
def testViews(self):
import json
response = self.client.get("/router/outbox")
outbox = json.loads(response.content)
self.assertEquals(0, len(outbox['outbox']))
# send a message
response = self.client.get("/router/receive?backend=test_backend&sender=2067799294&message=test")
message = json.loads(response.content)['message']
# basic validation that the message was handled
self.assertEquals("I", message['direction'])
self.assertEquals("H", message['status'])
self.assertEquals("test_backend", message['backend'])
self.assertEquals("2067799294", message['contact'])
self.assertEquals("test", message['text'])
# make sure we can load it from the db by its id
self.assertTrue(Message.objects.get(pk=message['id']))
# check that the message exists in our outbox
response = self.client.get("/router/outbox")
outbox = json.loads(response.content)
self.assertEquals(1, len(outbox['outbox']))
# do it again, this checks that getting the outbox is not an action which removes messages
# from the outbox
response = self.client.get("/router/outbox")
outbox = json.loads(response.content)
self.assertEquals(1, len(outbox['outbox']))
message = outbox['outbox'][0]
self.assertEquals("O", message['direction'])
self.assertEquals("Q", message['status'])
self.assertEquals("test_backend", message['backend'])
self.assertEquals("2067799294", message['contact'])
self.assertEquals("echo test", message['text'])
# test sending errant delivery report
response = self.client.get("/router/delivered")
self.assertEquals(400, response.status_code)
# mark the message as delivered
response = self.client.get("/router/delivered?message_id=" + str(message['id']))
self.assertEquals(200, response.status_code)
# make sure it has been marked as delivered
db_message = Message.objects.get(pk=message['id'])
self.assertEquals('D', db_message.status)
# and that our outbox is now empty
response = self.client.get("/router/outbox")
outbox = json.loads(response.content)
self.assertEquals(0, len(outbox['outbox']))
def testSecurity(self):
try:
settings.ROUTER_PASSWORD = "foo"
# no dice without password
response = self.client.get("/router/outbox")
self.assertEquals(400, response.status_code)
response = self.client.get("/router/outbox?password=bar")
self.assertEquals(400, response.status_code)
# works with a pword
response = self.client.get("/router/outbox?password=foo")
self.assertEquals(200, response.status_code)
msg_count = Message.objects.all().count()
            # receive doesn't work without the password
response = self.client.get("/router/receive?backend=test_backend&sender=2067799294&message=test")
self.assertEquals(400, response.status_code)
# assert the msg wasn't processed
self.assertEquals(msg_count, Message.objects.all().count())
response = self.client.get(
"/router/receive?backend=test_backend&sender=2067799294&message=test&password=foo")
self.assertEquals(200, response.status_code)
# now we have one new incoming message and one new outgoing message
self.assertEquals(msg_count + 2, Message.objects.all().count())
# grab the last message and let's test the delivery report
message = Message.objects.filter(direction='O').order_by('-id')[0]
# no dice w/o password
response = self.client.get("/router/delivered?message_id=" + str(message.pk))
self.assertEquals(400, response.status_code)
# but works with it
response = self.client.get("/router/delivered?password=foo&message_id=" + str(message.pk))
self.assertEquals(200, response.status_code)
# make sure the message was marked as delivered
message = Message.objects.get(id=message.id)
self.assertEquals('D', message.status)
finally:
settings.ROUTER_PASSWORD = None
@nottest #BROKEN
class QOSTest(TestCase):
def setUp(self):
dct = dict(
getattr(settings, 'MODEM_BACKENDS', {}).items() + getattr(settings, 'SHORTCODE_BACKENDS', {}).items())
# dct = dict(getattr(settings, 'MODEM_BACKENDS', {}).items())
for bkend, identity in dct.items():
Connection.objects.create(identity=identity, backend=Backend.objects.create(name=bkend))
for shortcode_backend, backend_names in settings.ALLOWED_MODEMS.items():
identity = settings.SHORTCODE_BACKENDS[shortcode_backend]
for bkend in backend_names:
Connection.objects.create(identity=identity, backend=Backend.objects.get(name=bkend))
def fake_incoming(self, message, connection=None):
if connection is None:
connection = self.connection
router = get_router()
router.handle_incoming(connection.backend.name, connection.identity, message)
def testMsgsSent(self):
        #Jennifer sends out messages to all short codes (6767, 8500) via the various modems (mtn-modem, utl-modem) etc
call_command('send_qos_messages')
self.assertEquals(Message.objects.filter(direction='O').count(), 13)
for msg in Message.objects.filter(direction='O'):
self.assertEquals(msg.text, datetime.now().strftime('%Y-%m-%d %H'))
self.assertEquals(Message.objects.filter(direction='O', connection__identity='6767').count(), 4)
self.assertEquals(Message.objects.filter(direction='O', connection__identity='8500').count(), 4)
self.assertEquals(Message.objects.filter(direction='O', connection__identity='6200').count(), 3)
self.assertEquals(Message.objects.filter(direction='O', connection__identity='8200').count(), 2)
def testNoAlarms(self):
        #Jennifer sends out messages to all short codes (6767, 8500) via the various modems (mtn-modem, utl-modem) etc
call_command('send_qos_messages')
#Through the various apps, all short codes send back replies to Jennifer
for connection in Connection.objects.filter(backend__name__endswith='modem'):
self.fake_incoming(datetime.now().strftime('%Y-%m-%d %H'), connection)
#Jennifer kicks in with the monitoring service
call_command('monitor_qos_messages')
alarms = get_alarms()
#no alarms expected since all apps replied
self.assertEquals(len(alarms), 0)
def testAlarms(self):
        #Jennifer sends out messages to all short codes (6767, 8500) via the various modems (mtn-modem, utl-modem) etc
call_command('send_qos_messages')
#Only a few modems reply with messages to Jenny
for connection in Connection.objects.filter(backend__name__endswith='modem').exclude(
identity__in=['256777773260', '256752145316', '256711957281', '256701205129'])[:5]:
self.fake_incoming(datetime.now().strftime('%Y-%m-%d %H'), connection)
#Jennifer kicks in with the monitoring service
call_command('monitor_qos_messages')
alarms = get_alarms()
#Jenny complains about her missing replies
self.assertEquals(len(alarms), 8)
#Poor Jenny spams everyone in protest
msgs = []
for msg in Message.objects.filter(direction='O').exclude(
connection__identity__in=Message.objects.filter(direction='I').values_list('connection__identity')):
identity = msg.connection.identity
modem = msg.connection.backend
network = msg.connection.backend.name.split('-')[0]
msgs.append('Jennifer did not get a reply from %s using the %s, %s appears to be down!' % (
identity, modem, network.upper()))
self.assertEquals(msgs, get_alarms())
|
|
'''
Integration tests for backend locks.
'''
import etcd
import pylibmc
import redis
import sherlock
import time
import unittest
class TestRedisLock(unittest.TestCase):
def setUp(self):
try:
self.client = redis.StrictRedis(host='redis')
except Exception as err:
print(str(err))
            raise Exception('You must have a Redis server running on host "redis" '
'to be able to run integration tests.')
self.lock_name = 'test_lock'
def test_acquire(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client)
self.assertTrue(lock._acquire())
self.assertEqual(self.client.get(self.lock_name).decode('UTF-8'),
str(lock._owner))
def test_acquire_with_namespace(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client,
namespace='ns')
self.assertTrue(lock._acquire())
self.assertEqual(
self.client.get('ns_%s' % self.lock_name).decode('UTF-8'),
str(lock._owner))
def test_acquire_once_only(self):
lock1 = sherlock.RedisLock(self.lock_name, client=self.client)
lock2 = sherlock.RedisLock(self.lock_name, client=self.client)
self.assertTrue(lock1._acquire())
self.assertFalse(lock2._acquire())
def test_acquire_check_expiry(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client, expire=1)
lock.acquire()
time.sleep(2)
self.assertFalse(lock.locked())
def test_acquire_check_expire_is_not_set(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client,
expire=None)
lock.acquire()
time.sleep(2)
self.assertTrue(self.client.ttl(self.lock_name) < 0)
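        # Redis reports a negative TTL for keys without an expiry (-1, or -2 if
        # the key is missing), so a negative value confirms that expire=None
        # left the lock key persistent.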
def test_release(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client)
lock._acquire()
lock._release()
self.assertEqual(self.client.get(self.lock_name), None)
def test_release_with_namespace(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client,
namespace='ns')
lock._acquire()
lock._release()
self.assertEqual(self.client.get('ns_%s' % self.lock_name), None)
def test_release_own_only(self):
lock1 = sherlock.RedisLock(self.lock_name, client=self.client)
lock2 = sherlock.RedisLock(self.lock_name, client=self.client)
lock1._acquire()
self.assertRaises(sherlock.LockException, lock2._release)
lock1._release()
def test_locked(self):
lock = sherlock.RedisLock(self.lock_name, client=self.client)
lock._acquire()
self.assertTrue(lock._locked)
lock._release()
self.assertFalse(lock._locked)
def test_deleting_lock_object_releases_the_lock(self):
lock = sherlock.lock.RedisLock(self.lock_name, client=self.client)
lock.acquire()
self.assertEqual(self.client.get(self.lock_name).decode('UTF-8'),
str(lock._owner))
del lock
self.assertEqual(self.client.get(self.lock_name), None)
def tearDown(self):
self.client.delete(self.lock_name)
self.client.delete('ns_%s' % self.lock_name)
class TestEtcdLock(unittest.TestCase):
def setUp(self):
self.client = etcd.Client(host='etcd')
self.lock_name = 'test_lock'
def test_acquire(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client)
self.assertTrue(lock._acquire())
self.assertEqual(self.client.get(self.lock_name).value,
str(lock._owner))
def test_acquire_with_namespace(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client,
namespace='ns')
self.assertTrue(lock._acquire())
self.assertEqual(self.client.get('/ns/%s' % self.lock_name).value,
str(lock._owner))
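        # Unlike the Redis and Memcached backends, which prefix the key as
        # 'ns_<lock name>', the etcd backend stores namespaced locks under a
        # '/ns/<lock name>' key path.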
def test_acquire_once_only(self):
lock1 = sherlock.EtcdLock(self.lock_name, client=self.client)
lock2 = sherlock.EtcdLock(self.lock_name, client=self.client)
self.assertTrue(lock1._acquire())
self.assertFalse(lock2._acquire())
def test_acquire_check_expiry(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client, expire=1)
lock.acquire()
time.sleep(2)
self.assertFalse(lock.locked())
def test_acquire_check_expire_is_not_set(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client,
expire=None)
lock.acquire()
time.sleep(2)
        self.assertEqual(self.client.get(self.lock_name).ttl, None)
def test_release(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client)
lock._acquire()
lock._release()
self.assertRaises(etcd.EtcdKeyNotFound, self.client.get, self.lock_name)
def test_release_with_namespace(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client,
namespace='ns')
lock._acquire()
lock._release()
self.assertRaises(etcd.EtcdKeyNotFound, self.client.get, '/ns/%s' % self.lock_name)
def test_release_own_only(self):
lock1 = sherlock.EtcdLock(self.lock_name, client=self.client)
lock2 = sherlock.EtcdLock(self.lock_name, client=self.client)
lock1._acquire()
self.assertRaises(sherlock.LockException, lock2._release)
lock1._release()
def test_locked(self):
lock = sherlock.EtcdLock(self.lock_name, client=self.client)
lock._acquire()
self.assertTrue(lock._locked)
lock._release()
self.assertFalse(lock._locked)
def test_deleting_lock_object_releases_the_lock(self):
lock = sherlock.lock.EtcdLock(self.lock_name, client=self.client)
lock.acquire()
self.assertEqual(self.client.get(self.lock_name).value, str(lock._owner))
del lock
self.assertRaises(etcd.EtcdKeyNotFound, self.client.get, self.lock_name)
def tearDown(self):
try:
self.client.delete(self.lock_name)
except etcd.EtcdKeyNotFound:
pass
try:
self.client.delete('/ns/%s' % self.lock_name)
except etcd.EtcdKeyNotFound:
pass
class TestMCLock(unittest.TestCase):
def setUp(self):
self.client = pylibmc.Client(['memcached'], binary=True)
self.lock_name = 'test_lock'
def test_acquire(self):
lock = sherlock.MCLock(self.lock_name, client=self.client)
self.assertTrue(lock._acquire())
self.assertEqual(self.client.get(self.lock_name), str(lock._owner))
def test_acquire_with_namespace(self):
lock = sherlock.MCLock(self.lock_name, client=self.client,
namespace='ns')
self.assertTrue(lock._acquire())
self.assertEqual(self.client.get('ns_%s' % self.lock_name),
str(lock._owner))
def test_acquire_once_only(self):
lock1 = sherlock.MCLock(self.lock_name, client=self.client)
lock2 = sherlock.MCLock(self.lock_name, client=self.client)
self.assertTrue(lock1._acquire())
self.assertFalse(lock2._acquire())
def test_acquire_check_expiry(self):
lock = sherlock.MCLock(self.lock_name, client=self.client, expire=1)
lock.acquire()
time.sleep(2)
self.assertFalse(lock.locked())
def test_release(self):
lock = sherlock.MCLock(self.lock_name, client=self.client)
lock._acquire()
lock._release()
self.assertEqual(self.client.get(self.lock_name), None)
def test_release_with_namespace(self):
lock = sherlock.MCLock(self.lock_name, client=self.client,
namespace='ns')
lock._acquire()
lock._release()
self.assertEqual(self.client.get('ns_%s' % self.lock_name), None)
def test_release_own_only(self):
lock1 = sherlock.MCLock(self.lock_name, client=self.client)
lock2 = sherlock.MCLock(self.lock_name, client=self.client)
lock1._acquire()
self.assertRaises(sherlock.LockException, lock2._release)
lock1._release()
def test_locked(self):
lock = sherlock.MCLock(self.lock_name, client=self.client)
lock._acquire()
self.assertTrue(lock._locked)
lock._release()
self.assertFalse(lock._locked)
def test_deleting_lock_object_releases_the_lock(self):
lock = sherlock.lock.MCLock(self.lock_name, client=self.client)
lock.acquire()
self.assertEqual(self.client.get(self.lock_name), str(lock._owner))
del lock
self.assertEqual(self.client.get(self.lock_name), None)
def tearDown(self):
self.client.delete(self.lock_name)
self.client.delete('ns_%s' % self.lock_name)
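
# The classes above exercise each backend-specific lock class directly.
# Applications would more typically configure a global backend once and use
# the generic sherlock.Lock wrapper; the sketch below illustrates that
# pattern. The helper name and the expire/timeout values are illustrative
# assumptions, not taken from these tests.
def _example_global_configuration():
    # Sketch only: not invoked by the test runner.
    sherlock.configure(backend=sherlock.backends.REDIS,
                       client=redis.StrictRedis(host='redis'),
                       expire=60,
                       timeout=10)
    lock = sherlock.Lock('global_example_lock')
    lock.acquire()
    try:
        pass  # critical section guarded by the configured Redis backend
    finally:
        lock.release()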