code | repo_name | path | language | license | size
---|---|---|---|---|---
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
from builtins import range
from past.builtins import basestring
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from apiclient.discovery import HttpError, build
from googleapiclient import errors
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq import read_gbq
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None,
use_legacy_sql=True):
super(BigQueryHook, self).__init__(
gcp_conn_id=bigquery_conn_id, delegate_to=delegate_to)
self.use_legacy_sql = use_legacy_sql
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
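# Illustrative usage sketch (not part of the original module): constructing the
# hook and obtaining either a PEP 249 connection or the raw service object.
# 'bigquery_default' is the hook's default connection id; everything else shown
# here is a hypothetical caller-side value.
#
#     hook = BigQueryHook(bigquery_conn_id='bigquery_default',
#                         use_legacy_sql=False)
#     conn = hook.get_conn()        # BigQueryConnection, a small cursor factory
#     service = hook.get_service()  # googleapiclient service object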
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: string
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL - legacy SQL or standard SQL;
defaults to `self.use_legacy_sql` if not specified
:type dialect: string in {'legacy', 'standard'}
"""
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False)
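# Illustrative sketch only (not part of the original module): pulling query
# results into a Pandas DataFrame. The project id is resolved from the
# connection's 'project' field; the SQL below is a placeholder.
#
#     hook = BigQueryHook(use_legacy_sql=False)
#     df = hook.get_pandas_df(
#         'SELECT name, COUNT(*) AS c FROM `my_dataset.my_table` GROUP BY name',
#         dialect='standard')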
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: string
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: string
:param table_id: The name of the table to check the existence of.
:type table_id: string
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute()
return True
except errors.HttpError as e:
if e.resp['status'] == '404':
return False
raise
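# Illustrative sketch only (not part of the original module): guarding a task on
# table existence. The project, dataset and table names are placeholders.
#
#     hook = BigQueryHook()
#     if not hook.table_exists(project_id='my-project',
#                              dataset_id='my_dataset',
#                              table_id='my_table'):
#         raise AirflowException('Expected table my_dataset.my_table is missing')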
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self,
project_id,
service,
reauth=False,
verbose=False,
dialect='legacy'):
super(BigQueryPandasConnector, self).__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id, use_legacy_sql=True):
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
self.running_job_id = None
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning={},
labels=None
):
"""
Creates a new, empty table in the dataset.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema_fields: list
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
:return:
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if labels:
table_resource['labels'] = labels
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
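# Illustrative sketch only (not part of the original module): creating an empty,
# day-partitioned table through a cursor. All identifiers and the schema are
# placeholders.
#
#     cursor = BigQueryHook().get_conn().cursor()
#     cursor.create_empty_table(
#         project_id='my-project',
#         dataset_id='my_dataset',
#         table_id='employees',
#         schema_fields=[{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
#                        {'name': 'salary', 'type': 'INTEGER', 'mode': 'NULLABLE'}],
#         time_partitioning={'type': 'DAY'})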
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs={},
labels=None
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table name to create external table.
If <project> is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format of the source data.
:type source_format: string
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: string
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: string
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
"""
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'quote' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
if labels:
table_resource['labels'] = labels
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute()
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
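# Illustrative sketch only (not part of the original module): defining an
# external table over CSV files in GCS, given a `cursor` obtained as
# BigQueryHook().get_conn().cursor(). Bucket, dataset and schema are placeholders.
#
#     cursor.create_external_table(
#         external_project_dataset_table='my_dataset.gcs_sales',
#         schema_fields=[{'name': 'sale_id', 'type': 'INTEGER'},
#                        {'name': 'amount', 'type': 'FLOAT'}],
#         source_uris=['gs://my-bucket/sales/*.csv'],
#         source_format='CSV',
#         skip_leading_rows=1)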
def run_query(self,
bql=None,
sql=None,
destination_dataset_table=False,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=False,
udf_config=False,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
labels=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning={}):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: (Deprecated. Use `sql` parameter instead) The BigQuery SQL
to execute.
:type bql: string
:param sql: The BigQuery SQL to execute.
:type sql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:type destination_dataset_table: string
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: string
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: boolean
:type udf_config: list
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: integer
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: string
:param query_params: a dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: dict
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: string
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and
expiration as per API specifications. Note that 'field' is not available in
conjunction with dataset.table$partition.
:type time_partitioning: dict
"""
# TODO remove `bql` in Airflow 2.0 - Jira: [AIRFLOW-2513]
sql = bql if sql is None else sql
if bql:
import warnings
warnings.warn('Deprecated parameter `bql` used in '
'`BigQueryBaseCursor.run_query` '
'Use `sql` parameter instead to pass the sql to be '
'executed. `bql` parameter is deprecated and '
'will be removed in a future version of '
'Airflow.',
category=DeprecationWarning)
if sql is None:
raise TypeError('`BigQueryBaseCursor.run_query` missing 1 required '
'positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
if use_legacy_sql is None:
use_legacy_sql = self.use_legacy_sql
configuration = {
'query': {
'query': sql,
'useLegacySql': use_legacy_sql,
'maximumBillingTier': maximum_billing_tier,
'maximumBytesBilled': maximum_bytes_billed,
'priority': priority
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
if query_params:
if use_legacy_sql:
raise ValueError("Query parameters are not allowed when using "
"legacy SQL")
else:
configuration['query']['queryParameters'] = query_params
if labels:
configuration['labels'] = labels
time_partitioning = _cleanse_time_partitioning(
destination_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['query'].update({
'timePartitioning': time_partitioning
})
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['query'][
'schemaUpdateOptions'] = schema_update_options
return self.run_with_configuration(configuration)
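# Illustrative sketch only (not part of the original module): persisting query
# results into a destination table, given a `cursor` as above. The query and
# table names are placeholders; schema_update_options would additionally require
# WRITE_APPEND or WRITE_TRUNCATE, as enforced above.
#
#     job_id = cursor.run_query(
#         sql='SELECT * FROM `my_dataset.events` WHERE dt = "2018-01-01"',
#         destination_dataset_table='my_dataset.events_20180101',
#         write_disposition='WRITE_TRUNCATE',
#         use_legacy_sql=False)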
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
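# Illustrative sketch only (not part of the original module): exporting a table
# to GCS as gzipped CSV, given a `cursor` as above. Table and bucket names are
# placeholders.
#
#     cursor.run_extract(
#         source_project_dataset_table='my_dataset.events_20180101',
#         destination_cloud_storage_uris=['gs://my-bucket/exports/events-*.csv.gz'],
#         compression='GZIP',
#         export_format='CSV',
#         print_header=True)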
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration)
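# Illustrative sketch only (not part of the original module): copying several
# source tables into one destination table, given a `cursor` as above.
# Identifiers are placeholders.
#
#     cursor.run_copy(
#         source_project_dataset_tables=['my_dataset.shard_1', 'my_dataset.shard_2'],
#         destination_project_dataset_table='my-project:my_dataset.merged',
#         write_disposition='WRITE_APPEND')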
def run_load(self,
destination_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs={},
time_partitioning={}):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table to load data into. If <project> is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format of the data to load.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: string
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and
expiration as per API specifications. Note that 'field' is not available in
conjunction with dataset.table$partition.
:type time_partitioning: dict
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': [],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
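# Illustrative sketch only (not part of the original module): loading
# newline-delimited JSON from GCS, given a `cursor` as above. All identifiers
# are placeholders.
#
#     cursor.run_load(
#         destination_project_dataset_table='my_dataset.raw_events',
#         schema_fields=[{'name': 'event_id', 'type': 'STRING'},
#                        {'name': 'payload', 'type': 'STRING'}],
#         source_uris=['gs://my-bucket/raw/*.json'],
#         source_format='NEWLINE_DELIMITED_JSON',
#         write_disposition='WRITE_APPEND')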
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
self.running_job_id = query_reply['jobReference']['jobId']
# Wait for query to finish.
keep_polling_job = True
while (keep_polling_job):
try:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute()
if (job['status']['state'] == 'DONE'):
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: {}'.format(
err.resp.status))
return self.running_job_id
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
if (job['status']['state'] == 'DONE'):
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error while polling job with id %s',
err.resp.status, job_id)
else:
raise Exception(
'BigQuery job status check failed. Final error was: {}'.format(
err.resp.status))
return False
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute()
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while (polling_attempts < max_polling_attempts and not job_complete):
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if (job_complete):
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif (polling_attempts == max_polling_attempts):
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
def get_schema(self, dataset_id, table_id):
"""
Get the schema for a given dataset and table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute())
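# Illustrative sketch only (not part of the original module): reading table data
# page by page, given a `cursor` as above. Dataset and table ids are placeholders.
#
#     page = cursor.get_tabledata('my_dataset', 'my_table',
#                                 max_results=1000,
#                                 selected_fields='emp_name,salary')
#     rows = page.get('rows', [])
#     next_token = page.get('pageToken')  # pass back as page_token to continue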
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
if the table does not exist, raise an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
Creates a new, empty table in the dataset;
if the table already exists, updates the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. Creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute()
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute()
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
return source_dataset_resource
def delete_dataset(self,
project_id,
dataset_id
):
"""
Delete a BigQuery dataset from your project.
:param project_id: The name of the project containing the dataset.
:type project_id: str
:param dataset_id: The dataset to be deleted.
:type dataset_id: str
:return:
"""
project_id = project_id if project_id is not None else self.project_id
self.log.info('Deleting from project: %s Dataset:%s',
project_id, dataset_id)
try:
self.service.datasets().delete(
projectId=project_id,
datasetId=dataset_id).execute()
self.log.info('Dataset deleted successfully: In project %s Dataset %s',
project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id, use_legacy_sql=True):
super(BigQueryCursor, self).__init__(
service=service,
project_id=project_id,
use_legacy_sql=use_legacy_sql)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token).execute())
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
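# Illustrative sketch only (not part of the original module): the PEP 249
# surface of the classes above. The SQL and the parameter value are placeholders;
# parameters are bound with pyformat-style markers via _bind_parameters below.
#
#     conn = BigQueryHook(use_legacy_sql=False).get_conn()
#     cur = conn.cursor()
#     cur.execute('SELECT name, salary FROM `my_dataset.employees` '
#                 'WHERE salary > %(min_salary)s', {'min_salary': 50000})
#     rows = cur.fetchall()   # list of [name, salary] rows, cast by _bq_cast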
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.items():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
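# Illustrative examples of the casting above, since BigQuery returns every cell
# as a string:
#
#     _bq_cast('42', 'INTEGER')      -> 42
#     _bq_cast('1.5e9', 'TIMESTAMP') -> 1500000000.0
#     _bq_cast('true', 'BOOLEAN')    -> True
#     _bq_cast(None, 'STRING')       -> None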
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
raise Exception(('{var}Use either : or . to specify project '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(('{var}Expect format of (<project>:)<dataset>.<table>, '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, ("{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
('{var}Expect format of (<project>.|<project>:)<dataset>.<table>, '
'got {input}').format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
log = LoggingMixin().log
log.info('Project not included in {var}: {input}; '
'using project "{project}"'.format(
var=var_name,
input=table_input,
project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
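# Illustrative examples of the splitting above ('default-proj' stands in for the
# project configured on the connection):
#
#     _split_tablename('dataset.table', 'default-proj')
#         -> ('default-proj', 'dataset', 'table')
#     _split_tablename('alt-proj:dataset.table', 'default-proj')
#         -> ('alt-proj', 'dataset', 'table')
#     _split_tablename('alt-proj.dataset.table', 'default-proj')
#         -> ('alt-proj', 'dataset', 'table')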
def _cleanse_time_partitioning(destination_dataset_table, time_partitioning_in):
# if it is a partitioned table ($ is in the table name) add partition load option
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
assert not time_partitioning_in.get('field'), (
"Cannot specify field partition and partition name "
"(dataset.table$partition) at the same time"
)
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
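# Illustrative examples of the cleansing above: a '$' in the table name implies
# day partitioning, and explicit options are merged on top.
#
#     _cleanse_time_partitioning('my_dataset.events$20180101', {})
#         -> {'type': 'DAY'}
#     _cleanse_time_partitioning('my_dataset.events', {'type': 'DAY', 'field': 'ds'})
#         -> {'type': 'DAY', 'field': 'ds'}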
| danielvdende/incubator-airflow | airflow/contrib/hooks/bigquery_hook.py | Python | apache-2.0 | 66,015 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2010-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import sys
from edb.common import ast
from edb.common.ast import match as astmatch
from . import ast as pgast
for name, cls in pgast.__dict__.items():
if isinstance(cls, type) and issubclass(cls, ast.AST):
adapter = astmatch.MatchASTMeta(
name, (astmatch.MatchASTNode, ), {'__module__': __name__},
adapts=cls)
setattr(sys.modules[__name__], name, adapter)
| edgedb/edgedb | edb/pgsql/astmatch.py | Python | apache-2.0 | 1,119 |
#!/usr/bin/python
import os
import sys
import errno
import time
from time import sleep
import shlex
import threading
import subprocess
import multiprocessing
import tempfile
import fnmatch
sys.path.append('automation/trex_control_plane/stl')
from trex_stl_lib.api import *
def run_server(command):
return subprocess.Popen(shlex.split(command), stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
def run_command(command, timeout = 15, cwd = None):
# pipes might stuck, even with timeout
with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:
proc = subprocess.Popen(shlex.split(command), stdout = stdout_file, stderr = stderr_file, cwd = cwd, close_fds = True)
if timeout > 0:
poll_rate = 0.1
for i in range(int(timeout/poll_rate)):
sleep(poll_rate)
if proc.poll() is not None: # process stopped
break
if proc.poll() is None:
proc.kill() # timeout
return (errno.ETIME, '', 'Timeout on running: %s' % command)
else:
proc.wait()
stdout_file.seek(0)
stderr_file.seek(0)
return (proc.returncode, stdout_file.read().decode(errors = 'replace'), stderr_file.read().decode(errors = 'replace'))
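# Illustrative usage sketch (not part of the original script; the command shown
# is hypothetical): run_command returns the exit code plus captured
# stdout/stderr, killing the child process on timeout.
#
#     rc, out, err = run_command('uname -a', timeout = 5)
#     if rc:
#         print 'command failed: %s' % err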
def get_trex_cmds():
ret_code, stdout, stderr = run_command('ps -u root --format pid,comm,cmd')
if ret_code:
raise Exception('Failed to determine running processes, stderr: %s' % stderr)
trex_cmds_list = []
for line in stdout.splitlines():
pid, proc_name, full_cmd = line.strip().split(' ', 2)
pid = pid.strip()
full_cmd = full_cmd.strip()
if proc_name.find('t-rex-64') >= 0:
trex_cmds_list.append((pid, full_cmd))
else:
if full_cmd.find('t-rex-64') >= 0:
trex_cmds_list.append((pid, full_cmd))
return trex_cmds_list
def is_any_core ():
ret_code, stdout, stderr = run_command('ls')
assert(ret_code==0);
l= stdout.split()
for file in l:
if fnmatch.fnmatch(file, 'core.*'):
return True
return False
def kill_all_trexes():
trex_cmds_list = get_trex_cmds()
if not trex_cmds_list:
return False
for pid, cmd in trex_cmds_list:
run_command('kill %s' % pid)
ret_code_ps, _, _ = run_command('ps -p %s' % pid)
if not ret_code_ps:
run_command('kill -9 %s' % pid)
ret_code_ps, _, _ = run_command('ps -p %s' % pid)
if not ret_code_ps:
pass;
return True
def term_all_trexes():
trex_cmds_list = get_trex_cmds()
if not trex_cmds_list:
return False
for pid, cmd in trex_cmds_list:
print pid
run_command('kill -INT %s' % pid)
return True
def run_one_iter ():
try:
server = run_server('./t-rex-64-debug-gdb-bt -i -c 4 --iom 0')
print "sleep 1 sec"
time.sleep(1);
crash=True;
if True:
c = STLClient()
print 'Connecting to server'
c.connect()
print 'Connected'
print 'Mapping'
print 'Map: %s' % stl_map_ports(c)
c.disconnect()
crash=False;
except Exception as e:
print(e)
finally :
if crash:
print "Crash seen, wait for the info"
# wait the process to make the core file
loop=0;
while True:
if server.poll() is not None: # server ended
print 'Server stopped.\nReturn code: %s\nStderr: %s\nStdout: %s' % (server.returncode, server.stdout.read().decode(errors = 'replace'), server.stderr.read().decode(errors = 'replace'))
break;
time.sleep(1);
loop=loop+1;
if loop >600:
print "Timeout on crash!!"
break;
return 1
else:
print "kill process ",server.pid
term_all_trexes();
kill_all_trexes();
return 0
def loop_inter ():
kill_all_trexes()
cnt=0;
while True:
print (time.strftime("%H:%M:%S")),
print "Iter",cnt
ret=run_one_iter ()
if ret==1:
break;
cnt=cnt+1;
if is_any_core ():
print "stop due to core file"
break;
loop_inter ()
| dimagol/trex-core | scripts/simple_start_server.py | Python | apache-2.0 | 4,454 |
from django.conf.urls.defaults import patterns, include, url
from mothra.settings import LOGIN_URL
from mothra.settings import DEBUG, STATIC_DOC_ROOT
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'mothra.views.home', name='home'),
# url(r'^mothra/', include('mothra.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^', include('signuplogin.urls')),
url(r'^', include('website.urls')),
url(r'^', include('gdpr.urls')),
#url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^workflows/', include('workflows.urls')),
url(r'^streams/', include('streams.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('workflows.api_urls')),
url('^' + LOGIN_URL[1:] + '$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'^change-password/$', 'django.contrib.auth.views.password_change', name='password change'),
url(r'^password-changed/$', 'django.contrib.auth.views.password_change_done', name='password change done'),
url(r'^password_reset/$', 'django.contrib.auth.views.password_reset'),
url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),
url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', 'django.contrib.auth.views.password_reset_confirm'),
url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete'),
)
## debug stuff to serve static media
if DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': STATIC_DOC_ROOT}),
)
| janezkranjc/clowdflows | mothra/urls.py | Python | gpl-3.0 | 1,968 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.watchlist.watchlistrule import WatchListRule
class WatchListRuleTest(unittest.TestCase):
def test_instruction_list(self):
instructions = ['a', 'b']
rule = WatchListRule('definition1', instructions[:])
self.assertEqual(instructions, rule.instructions())
def test_remove_instruction(self):
instructions = ['a', 'b']
rule = WatchListRule('definition1', instructions[:])
rule.remove_instruction('b')
self.assertEqual(['a'], rule.instructions())
def test_simple_definition(self):
definition_name = 'definition1'
rule = WatchListRule(definition_name, [])
self.assertTrue(rule.match([definition_name]))
self.assertFalse(rule.match([definition_name + '1']))
def test_complex_definition(self):
definition_name1 = 'definition1'
definition_name2 = 'definition2'
definition_name3 = 'definition3'
rule = WatchListRule(definition_name1 + '|' + definition_name2 + '|' + definition_name3, [])
self.assertTrue(rule.match([definition_name1]))
self.assertTrue(rule.match([definition_name2]))
self.assertTrue(rule.match([definition_name3]))
self.assertFalse(rule.match([definition_name1 + '1']))
self.assertFalse(rule.match([definition_name2 + '1']))
self.assertFalse(rule.match([definition_name3 + '1']))
| Debian/openjfx | modules/web/src/main/native/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py | Python | gpl-2.0 | 2,944 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" % (shape,
num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(
vs.get_variable(
name + "_%d" % i, [current_size] + shape[1:], dtype=dtype))
return shards
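# Illustrative note (not part of the original file): for shape=[10, 4] and
# num_shards=3, unit_shard_size is 3 and remaining_rows is 1, so the shards get
# first dimensions 4, 3 and 3, and _get_concat_variable() joins them back into
# a single [10, 4] tensor.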
def _norm(g, b, inp, scope):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(g)
beta_init = init_ops.constant_initializer(b)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init)
vs.get_variable("beta", shape=shape, initializer=beta_init)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
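# Illustrative note (not part of the original file): _norm() pre-creates the
# "gamma" and "beta" variables with the requested initial values and then calls
# layers.layer_norm(..., reuse=True), so the normalization reuses those
# variables instead of creating its own with default initializers.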
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The coupling of input and forget gate is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. "LSTM: A Search Space Odyssey"
The class uses optional peep-hole connections, and an optional projection
layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
"""
def __init__(self,
num_units,
use_peepholes=False,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=1,
num_proj_shards=1,
forget_bias=1.0,
state_is_tuple=True,
activation=math_ops.tanh,
reuse=None,
layer_norm=False,
norm_gain=1.0,
norm_shift=0.0):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
"""
super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
self._reuse = reuse
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 3 * self._num_units], dtype,
self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[3 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([inputs, m_prev], 1)
lstm_matrix = math_ops.matmul(cell_inputs, concat_w)
    # If layer normalization is applied, do not add bias
if not self._layer_norm:
lstm_matrix = nn_ops.bias_add(lstm_matrix, b)
j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
# Apply layer normalization
if self._layer_norm:
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
else:
f_act = sigmoid(f + self._forget_bias)
c = (f_act * c_prev + (1 - f_act) * self._activation(j))
# Apply layer normalization
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable("W_P",
[self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (
rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
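def _coupled_lstm_usage_sketch(inputs):
  """Illustrative usage sketch only; not part of the original module.
  Runs the cell defined above over a `[batch, time, depth]` float32 `inputs`
  Tensor. The unit count and peephole flag are arbitrary example values.
  """
  from tensorflow.python.ops import rnn  # local import, sketch only
  cell = CoupledInputForgetGateLSTMCell(num_units=64, use_peepholes=True)
  return rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)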
class TimeFreqLSTMCell(rnn_cell_impl.RNNCell):
"""Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
This implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
It uses peep-hole connections and optional cell clipping.
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=1,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_unit_shards: int, How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
forget_bias: float, Biases of the forget gate are initialized by default
to 1 in order to reduce the scale of forgetting at the beginning
of the training.
feature_size: int, The size of the input feature the LSTM spans over.
frequency_skip: int, The amount the LSTM filter is shifted by in
frequency.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._state_size = 2 * num_units
self._output_size = num_units
self._reuse = reuse
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
freq_inputs = self._make_tf_features(inputs)
dtype = inputs.dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w = _get_concat_variable(
"W", [actual_input_size + 2 * self._num_units, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]), self._num_units],
dtype)
for fq in range(len(freq_inputs)):
c_prev = array_ops.slice(state, [0, 2 * fq * self._num_units],
[-1, self._num_units])
m_prev = array_ops.slice(state, [0, (2 * fq + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq], 1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * tanh(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * tanh(c)
else:
m = sigmoid(o) * tanh(c)
m_prev_freq = m
if fq == 0:
state_out = array_ops.concat([c, m], 1)
m_out = m
else:
state_out = array_ops.concat([state_out, c, m], 1)
m_out = array_ops.concat([m_out, m], 1)
return m_out, state_out
def _make_tf_features(self, input_feat):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, batch x num_units.
Returns:
A list of frequency features, with each element containing:
- A 2D, batch x output_dim, Tensor representing the time-frequency feature
for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
num_feats = int(
(input_size - self._feature_size) / (self._frequency_skip)) + 1
freq_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(input_feat, [0, f * self._frequency_skip],
[-1, self._feature_size])
freq_inputs.append(cur_input)
return freq_inputs
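# Illustrative note (not part of the original file): with a 40-dimensional
# input, feature_size=8 and frequency_skip=4, _make_tf_features() above yields
# (40 - 8) / 4 + 1 = 9 overlapping frequency slices of 8 features each.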
class GridLSTMCell(rnn_cell_impl.RNNCell):
"""Grid Long short-term memory unit (LSTM) recurrent network cell.
The default is based on:
Nal Kalchbrenner, Ivo Danihelka and Alex Graves
"Grid Long Short-Term Memory," Proc. ICLR 2016.
http://arxiv.org/abs/1507.01526
When peephole connections are used, the implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
The code uses optional peephole connections, shared_weights and cell clipping.
"""
def __init__(self,
num_units,
use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
state_is_tuple=True,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
        matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
ValueError: if the num_frequency_blocks list is not specified
"""
super(GridLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._share_time_frequency_weights = share_time_frequency_weights
self._couple_input_forget_gates = couple_input_forget_gates
self._state_is_tuple = state_is_tuple
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._start_freqindex_list = start_freqindex_list
self._end_freqindex_list = end_freqindex_list
self._num_frequency_blocks = num_frequency_blocks
self._total_blocks = 0
self._reuse = reuse
if self._num_frequency_blocks is None:
raise ValueError("Must specify num_frequency_blocks")
for block_index in range(len(self._num_frequency_blocks)):
self._total_blocks += int(self._num_frequency_blocks[block_index])
if state_is_tuple:
state_names = ""
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple("GridLSTMStateTuple",
state_names.strip(","))
self._state_size = self._state_tuple_type(*(
[num_units, num_units] * self._total_blocks))
else:
self._state_tuple_type = None
self._state_size = num_units * self._total_blocks * 2
self._output_size = num_units * self._total_blocks * 2
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
@property
def state_tuple_type(self):
return self._state_tuple_type
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, feature_size].
state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
flag self._state_is_tuple.
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
freq_inputs = self._make_tf_features(inputs)
m_out_lst = []
state_out_lst = []
for block in range(len(freq_inputs)):
m_out_lst_current, state_out_lst_current = self._compute(
freq_inputs[block],
block,
state,
batch_size,
state_is_tuple=self._state_is_tuple)
m_out_lst.extend(m_out_lst_current)
state_out_lst.extend(state_out_lst_current)
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
state_out = array_ops.concat(state_out_lst, 1)
m_out = array_ops.concat(m_out_lst, 1)
return m_out, state_out
def _compute(self,
freq_inputs,
block,
state,
batch_size,
state_prefix="state",
state_is_tuple=True):
"""Run the actual computation of one step LSTM.
Args:
freq_inputs: list of Tensors, 2D, [batch, feature_size].
block: int, current frequency block index to process.
state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
the flag state_is_tuple.
batch_size: int32, batch size.
state_prefix: (optional) string, name prefix for states, defaults to
"state".
state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
Returns:
A tuple, containing:
- A list of [batch, output_dim] Tensors, representing the output of the
LSTM given the inputs and state.
- A list of [batch, state_size] Tensors, representing the LSTM state
values given the inputs and previous state.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
num_gates = 3 if self._couple_input_forget_gates else 4
dtype = freq_inputs[0].dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w_f = _get_concat_variable(
"W_f_%d" % block,
[actual_input_size + 2 * self._num_units, num_gates * self._num_units],
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable("W_t_%d" % block, [
actual_input_size + 2 * self._num_units, num_gates * self._num_units
], dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if self._use_peepholes:
# Diagonal connections
if not self._couple_input_forget_gates:
w_f_diag_freqf = vs.get_variable(
"W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_freqt = vs.get_variable(
"W_F_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqf = vs.get_variable(
"W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqt = vs.get_variable(
"W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqf = vs.get_variable(
"W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqt = vs.get_variable(
"W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
if not self._share_time_frequency_weights:
if not self._couple_input_forget_gates:
w_f_diag_timef = vs.get_variable(
"W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_timet = vs.get_variable(
"W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timef = vs.get_variable(
"W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timet = vs.get_variable(
"W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timef = vs.get_variable(
"W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timet = vs.get_variable(
"W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
for freq_index in range(len(freq_inputs)):
if state_is_tuple:
name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
c_prev_time = getattr(state, name_prefix + "_c")
m_prev_time = getattr(state, name_prefix + "_m")
else:
c_prev_time = array_ops.slice(
state, [0, 2 * freq_index * self._num_units], [-1, self._num_units])
m_prev_time = array_ops.slice(
state, [0, (2 * freq_index + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
# F-LSTM
lstm_matrix_freq = nn_ops.bias_add(
math_ops.matmul(cell_inputs, concat_w_f), b_f)
if self._couple_input_forget_gates:
i_freq, j_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
f_freq = None
else:
i_freq, j_freq, f_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
# T-LSTM
if self._share_time_frequency_weights:
i_time = i_freq
j_time = j_freq
f_time = f_freq
o_time = o_freq
else:
lstm_matrix_time = nn_ops.bias_add(
math_ops.matmul(cell_inputs, concat_w_t), b_t)
if self._couple_input_forget_gates:
i_time, j_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
f_time = None
else:
i_time, j_time, f_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
# F-LSTM c_freq
# input gate activations
if self._use_peepholes:
i_freq_g = sigmoid(i_freq + w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_freq_g = sigmoid(i_freq)
# forget gate activations
if self._couple_input_forget_gates:
f_freq_g = 1.0 - i_freq_g
else:
if self._use_peepholes:
f_freq_g = sigmoid(f_freq + self._forget_bias + w_f_diag_freqf *
c_prev_freq + w_f_diag_freqt * c_prev_time)
else:
f_freq_g = sigmoid(f_freq + self._forget_bias)
# cell state
c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# T-LSTM c_freq
# input gate activations
if self._use_peepholes:
if self._share_time_frequency_weights:
i_time_g = sigmoid(i_time + w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_time_g = sigmoid(i_time + w_i_diag_timef * c_prev_freq +
w_i_diag_timet * c_prev_time)
else:
i_time_g = sigmoid(i_time)
# forget gate activations
if self._couple_input_forget_gates:
f_time_g = 1.0 - i_time_g
else:
if self._use_peepholes:
if self._share_time_frequency_weights:
f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_freqf *
c_prev_freq + w_f_diag_freqt * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_timef *
c_prev_freq + w_f_diag_timet * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias)
# cell state
c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# F-LSTM m_freq
if self._use_peepholes:
m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_freq)
else:
m_freq = sigmoid(o_freq) * tanh(c_freq)
# T-LSTM m_time
if self._use_peepholes:
if self._share_time_frequency_weights:
m_time = sigmoid(o_time + w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time + w_o_diag_timef * c_freq +
w_o_diag_timet * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time) * tanh(c_time)
m_prev_freq = m_freq
c_prev_freq = c_freq
# Concatenate the outputs for T-LSTM and F-LSTM for each shift
if freq_index == 0:
state_out_lst = [c_time, m_time]
m_out_lst = [m_time, m_freq]
else:
state_out_lst.extend([c_time, m_time])
m_out_lst.extend([m_time, m_freq])
return m_out_lst, state_out_lst
def _make_tf_features(self, input_feat, slice_offset=0):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, [batch, num_units].
      slice_offset: (optional) Python int, default 0, the slicing offset is only
        used for the backward processing in the BidirectionalGridLSTMCell. It
        specifies a different starting point instead of always 0, so that the
        forward and backward processing look at different frequency blocks.
Returns:
A list of frequency features, with each element containing:
- A 2D, [batch, output_dim], Tensor representing the time-frequency
feature for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
if slice_offset > 0:
# Padding to the end
inputs = array_ops.pad(input_feat,
array_ops.constant(
[0, 0, 0, slice_offset],
shape=[2, 2],
dtype=dtypes.int32), "CONSTANT")
elif slice_offset < 0:
# Padding to the front
inputs = array_ops.pad(input_feat,
array_ops.constant(
[0, 0, -slice_offset, 0],
shape=[2, 2],
dtype=dtypes.int32), "CONSTANT")
slice_offset = 0
else:
inputs = input_feat
freq_inputs = []
if not self._start_freqindex_list:
if len(self._num_frequency_blocks) != 1:
        raise ValueError("Length of num_frequency_blocks"
                         " is not 1, but instead is %d" %
                         len(self._num_frequency_blocks))
num_feats = int(
(input_size - self._feature_size) / (self._frequency_skip)) + 1
if num_feats != self._num_frequency_blocks[0]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." %
(self._num_frequency_blocks[0], num_feats))
block_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(
inputs, [0, slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
else:
if len(self._start_freqindex_list) != len(self._end_freqindex_list):
        raise ValueError("Length of start and end freqindex_list"
                         " does not match %d %d" %
                         (len(self._start_freqindex_list),
                          len(self._end_freqindex_list)))
if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
        raise ValueError("Length of num_frequency_blocks"
                         " is not equal to start_freqindex_list %d %d" %
                         (len(self._num_frequency_blocks),
                          len(self._start_freqindex_list)))
for b in range(len(self._start_freqindex_list)):
start_index = self._start_freqindex_list[b]
end_index = self._end_freqindex_list[b]
cur_size = end_index - start_index
block_feats = int(
(cur_size - self._feature_size) / (self._frequency_skip)) + 1
if block_feats != self._num_frequency_blocks[b]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." %
(self._num_frequency_blocks[b], block_feats))
block_inputs = []
for f in range(block_feats):
cur_input = array_ops.slice(
inputs,
[0, start_index + slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
return freq_inputs
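# Illustrative usage sketch, not part of the original file; the sizes are
# arbitrary example values. For 72-dimensional input features with
# feature_size=8 and frequency_skip=8, the single frequency block must cover
# (72 - 8) / 8 + 1 = 9 slices, so:
#
#   cell = GridLSTMCell(num_units=64, feature_size=8, frequency_skip=8,
#                       num_frequency_blocks=[9])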
class BidirectionalGridLSTMCell(GridLSTMCell):
"""Bidirectional GridLstm cell.
  The bidirectional connection is only used in the frequency direction, so it
  does not affect the real-time processing in the time direction that online
  recognition systems require.
The current implementation uses different weights for the two directions.
"""
def __init__(self,
num_units,
use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
backward_slice_offset=0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
        matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
backward_slice_offset: (optional) int32, default 0, the starting offset to
slice the feature for backward processing.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BidirectionalGridLSTMCell, self).__init__(
num_units, use_peepholes, share_time_frequency_weights, cell_clip,
initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,
num_frequency_blocks, start_freqindex_list, end_freqindex_list,
couple_input_forget_gates, True, reuse)
self._backward_slice_offset = int(backward_slice_offset)
state_names = ""
for direction in ["fwd", "bwd"]:
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index,
block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"BidirectionalGridLSTMStateTuple", state_names.strip(","))
self._state_size = self._state_tuple_type(*(
[num_units, num_units] * self._total_blocks * 2))
self._output_size = 2 * num_units * self._total_blocks * 2
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, num_units].
state: tuple of Tensors, 2D, [batch, state_size].
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
fwd_inputs = self._make_tf_features(inputs)
if self._backward_slice_offset:
bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)
else:
bwd_inputs = fwd_inputs
# Forward processing
with vs.variable_scope("fwd"):
fwd_m_out_lst = []
fwd_state_out_lst = []
for block in range(len(fwd_inputs)):
fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(
fwd_inputs[block],
block,
state,
batch_size,
state_prefix="fwd_state",
state_is_tuple=True)
fwd_m_out_lst.extend(fwd_m_out_lst_current)
fwd_state_out_lst.extend(fwd_state_out_lst_current)
# Backward processing
bwd_m_out_lst = []
bwd_state_out_lst = []
with vs.variable_scope("bwd"):
for block in range(len(bwd_inputs)):
# Reverse the blocks
bwd_inputs_reverse = bwd_inputs[block][::-1]
bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(
bwd_inputs_reverse,
block,
state,
batch_size,
state_prefix="bwd_state",
state_is_tuple=True)
bwd_m_out_lst.extend(bwd_m_out_lst_current)
bwd_state_out_lst.extend(bwd_state_out_lst_current)
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
    # Outputs are always concatenated, as they are never used separately.
m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
return m_out, state_out
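# Illustrative note (not part of the original file): each frequency slice
# contributes an (m_time, m_freq) pair per direction, so the concatenated
# output above has size 2 (outputs) * num_units * total_blocks * 2 (directions),
# matching self._output_size set in __init__.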
# pylint: disable=protected-access
_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell_impl.RNNCell):
"""Basic attention cell wrapper.
Implementation based on https://arxiv.org/abs/1409.0473.
"""
def __init__(self,
cell,
attn_length,
attn_size=None,
attn_vec_size=None,
input_size=None,
state_is_tuple=True,
reuse=None):
"""Create a cell with attention.
Args:
cell: an RNNCell, an attention is added to it.
attn_length: integer, the size of an attention window.
attn_size: integer, the size of an attention vector. Equal to
cell.output_size by default.
attn_vec_size: integer, the number of convolutional features calculated
on attention state and a size of the hidden layer built from
        base cell state. Equal to attn_size by default.
input_size: integer, the size of a hidden linear layer,
built from inputs and attention. Derived from the input tensor
by default.
      state_is_tuple: If True, accepted and returned states are 3-tuples of the
        wrapped cell state, the attention, and the attention state window. By
        default (False), the states are all concatenated along the column axis.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if cell returns a state tuple but the flag
`state_is_tuple` is `False` or if attn_length is zero or less.
"""
super(AttentionCellWrapper, self).__init__(_reuse=reuse)
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError("The parameter cell is not RNNCell.")
if nest.is_sequence(cell.state_size) and not state_is_tuple:
raise ValueError(
"Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: %s" % str(cell.state_size))
if attn_length <= 0:
raise ValueError(
"attn_length should be greater than zero, got %s" % str(attn_length))
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if attn_size is None:
attn_size = cell.output_size
if attn_vec_size is None:
attn_vec_size = attn_size
self._state_is_tuple = state_is_tuple
self._cell = cell
self._attn_vec_size = attn_vec_size
self._input_size = input_size
self._attn_size = attn_size
self._attn_length = attn_length
self._reuse = reuse
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
size = (self._cell.state_size, self._attn_size,
self._attn_size * self._attn_length)
if self._state_is_tuple:
return size
else:
return sum(list(size))
@property
def output_size(self):
return self._attn_size
def call(self, inputs, state):
"""Long short-term memory cell with attention (LSTMA)."""
if self._state_is_tuple:
state, attns, attn_states = state
else:
states = state
state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
attns = array_ops.slice(states, [0, self._cell.state_size],
[-1, self._attn_size])
attn_states = array_ops.slice(
states, [0, self._cell.state_size + self._attn_size],
[-1, self._attn_size * self._attn_length])
attn_states = array_ops.reshape(attn_states,
[-1, self._attn_length, self._attn_size])
input_size = self._input_size
if input_size is None:
input_size = inputs.get_shape().as_list()[1]
if self._linear1 is None:
self._linear1 = _Linear([inputs, attns], input_size, True)
inputs = self._linear1([inputs, attns])
cell_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("attn_output_projection"):
if self._linear2 is None:
self._linear2 = _Linear([cell_output, new_attns], self._attn_size, True)
output = self._linear2([cell_output, new_attns])
new_attn_states = array_ops.concat(
[new_attn_states, array_ops.expand_dims(output, 1)], 1)
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
new_state = array_ops.concat(list(new_state), 1)
return output, new_state
def _attention(self, query, attn_states):
conv2d = nn_ops.conv2d
reduce_sum = math_ops.reduce_sum
softmax = nn_ops.softmax
tanh = math_ops.tanh
with vs.variable_scope("attention"):
k = vs.get_variable("attn_w",
[1, 1, self._attn_size, self._attn_vec_size])
v = vs.get_variable("attn_v", [self._attn_vec_size])
hidden = array_ops.reshape(attn_states,
[-1, self._attn_length, 1, self._attn_size])
hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
if self._linear3 is None:
self._linear3 = _Linear(query, self._attn_vec_size, True)
y = self._linear3(query)
y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
a = softmax(s)
d = reduce_sum(
array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
new_attns = array_ops.reshape(d, [-1, self._attn_size])
new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
return new_attns, new_attn_states
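def _attention_wrapper_usage_sketch(inputs):
  """Illustrative usage sketch only; not part of the original module.
  Wraps a basic LSTM cell with a 10-step attention window and runs it over a
  `[batch, time, depth]` float32 `inputs` Tensor; sizes are example values.
  """
  from tensorflow.python.ops import rnn  # local import, sketch only
  cell = AttentionCellWrapper(rnn_cell_impl.BasicLSTMCell(64), attn_length=10)
  return rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)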
class HighwayWrapper(rnn_cell_impl.RNNCell):
"""RNNCell wrapper that adds highway connection on cell input and output.
Based on:
R. K. Srivastava, K. Greff, and J. Schmidhuber, "Highway networks",
arXiv preprint arXiv:1505.00387, 2015.
https://arxiv.org/abs/1505.00387
"""
def __init__(self,
cell,
couple_carry_transform_gates=True,
carry_bias_init=1.0):
"""Constructs a `HighwayWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
couple_carry_transform_gates: boolean, should the Carry and Transform gate
be coupled.
carry_bias_init: float, carry gates bias initialization.
"""
self._cell = cell
self._couple_carry_transform_gates = couple_carry_transform_gates
self._carry_bias_init = carry_bias_init
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _highway(self, inp, out):
input_size = inp.get_shape().with_rank(2)[1].value
carry_weight = vs.get_variable("carry_w", [input_size, input_size])
carry_bias = vs.get_variable(
"carry_b", [input_size],
initializer=init_ops.constant_initializer(self._carry_bias_init))
carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
if self._couple_carry_transform_gates:
transform = 1 - carry
else:
transform_weight = vs.get_variable("transform_w",
[input_size, input_size])
transform_bias = vs.get_variable(
"transform_b", [input_size],
initializer=init_ops.constant_initializer(-self._carry_bias_init))
transform = math_ops.sigmoid(
nn_ops.xw_plus_b(inp, transform_weight, transform_bias))
return inp * carry + out * transform
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(self._highway, inputs, outputs)
return (res_outputs, new_state)
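# Illustrative note (not part of the original file): because the highway
# connection mixes cell inputs and outputs elementwise, the wrapped cell's
# output size must match its input size, e.g. (sizes are example values):
#
#   cell = HighwayWrapper(rnn_cell_impl.GRUCell(64))  # expects 64-d inputs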
class LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell):
"""LSTM unit with layer normalization and recurrent dropout.
This class adds layer normalization and recurrent dropout to a
basic LSTM unit. Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
  Recurrent dropout is based on:
https://arxiv.org/abs/1603.05118
"Recurrent Dropout without Memory Loss"
Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
"""
def __init__(self,
num_units,
forget_bias=1.0,
input_size=None,
activation=math_ops.tanh,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0,
dropout_keep_prob=1.0,
dropout_prob_seed=None,
reuse=None):
"""Initializes the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
activation: Activation function of the inner states.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
recurrent dropout probability value. If float and 1.0, no dropout will
be applied.
dropout_prob_seed: (optional) integer, the randomness seed.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
self._forget_bias = forget_bias
self._keep_prob = dropout_keep_prob
self._seed = dropout_prob_seed
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
self._reuse = reuse
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _norm(self, inp, scope, dtype=dtypes.float32):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(self._norm_gain)
beta_init = init_ops.constant_initializer(self._norm_shift)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype)
vs.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
def _linear(self, args):
out_size = 4 * self._num_units
proj_size = args.get_shape()[-1]
dtype = args.dtype
weights = vs.get_variable("kernel", [proj_size, out_size], dtype=dtype)
out = math_ops.matmul(args, weights)
if not self._layer_norm:
bias = vs.get_variable("bias", [out_size], dtype=dtype)
out = nn_ops.bias_add(out, bias)
return out
def call(self, inputs, state):
"""LSTM cell with layer normalization and recurrent dropout."""
c, h = state
args = array_ops.concat([inputs, h], 1)
concat = self._linear(args)
dtype = args.dtype
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = self._norm(i, "input", dtype=dtype)
j = self._norm(j, "transform", dtype=dtype)
f = self._norm(f, "forget", dtype=dtype)
o = self._norm(o, "output", dtype=dtype)
g = self._activation(j)
if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
new_c = (
c * math_ops.sigmoid(f + self._forget_bias) + math_ops.sigmoid(i) * g)
if self._layer_norm:
new_c = self._norm(new_c, "state", dtype=dtype)
new_h = self._activation(new_c) * math_ops.sigmoid(o)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
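# Illustrative usage sketch, not part of the original file; the unit count and
# dropout keep probability are arbitrary example values, `inputs` is assumed to
# be a [batch, time, depth] float32 Tensor and `rnn` tensorflow.python.ops.rnn:
#
#   cell = LayerNormBasicLSTMCell(128, layer_norm=True, dropout_keep_prob=0.9)
#   outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)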
class NASCell(rnn_cell_impl.RNNCell):
"""Neural Architecture Search (NAS) recurrent network cell.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.01578
Barret Zoph and Quoc V. Le.
"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
The class uses an optional projection layer.
"""
def __init__(self, num_units, num_proj=None, use_biases=False, reuse=None):
"""Initialize the parameters for a NAS cell.
Args:
num_units: int, The number of units in the NAS cell
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
use_biases: (optional) bool, If True then use biases within the cell. This
is False by default.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(NASCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._num_proj = num_proj
self._use_biases = use_biases
self._reuse = reuse
if num_proj is not None:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of NAS Cell.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: This must be a tuple of state Tensors, both `2-D`, with column
sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
NAS Cell after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of NAS Cell after reading `inputs`
when the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
relu = nn_ops.relu
num_proj = self._num_units if self._num_proj is None else self._num_proj
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    # Variables for the NAS cell. W_m is all matrices multiplying the
    # hidden state and W_inputs is all matrices multiplying the inputs.
concat_w_m = vs.get_variable("recurrent_kernel",
[num_proj, 8 * self._num_units], dtype)
concat_w_inputs = vs.get_variable(
"kernel", [input_size.value, 8 * self._num_units], dtype)
m_matrix = math_ops.matmul(m_prev, concat_w_m)
inputs_matrix = math_ops.matmul(inputs, concat_w_inputs)
if self._use_biases:
b = vs.get_variable(
"bias",
shape=[8 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
m_matrix = nn_ops.bias_add(m_matrix, b)
    # The NAS cell branches into 8 different splits for both the hidden state
    # and the input
m_matrix_splits = array_ops.split(
axis=1, num_or_size_splits=8, value=m_matrix)
inputs_matrix_splits = array_ops.split(
axis=1, num_or_size_splits=8, value=inputs_matrix)
# First layer
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
# Second layer
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
# Inject the cell
l2_0 = tanh(l2_0 + c_prev)
# Third layer
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre # create new cell
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
# Final layer
new_m = tanh(l3_0 * l3_1)
# Projection layer if specified
if self._num_proj is not None:
concat_w_proj = vs.get_variable("projection_weights",
[self._num_units, self._num_proj], dtype)
new_m = math_ops.matmul(new_m, concat_w_proj)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m)
return new_m, new_state
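# Illustrative usage sketch, not part of the original file; 128 units projected
# down to 64 outputs are arbitrary example values, `inputs` is assumed to be a
# [batch, time, depth] float32 Tensor and `rnn` tensorflow.python.ops.rnn:
#
#   cell = NASCell(num_units=128, num_proj=64, use_biases=True)
#   outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)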
class UGRNNCell(rnn_cell_impl.RNNCell):
"""Update Gate Recurrent Neural Network (UGRNN) cell.
  Compromise between an LSTM/GRU and a vanilla RNN. There is only one
  gate, and that is to determine whether the unit should be
  integrating or computing instantaneously. This is the recurrent
  analogue of the feedforward Highway Network.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
"""
def __init__(self,
num_units,
initializer=None,
forget_bias=1.0,
activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters for an UGRNN cell.
Args:
num_units: int, The number of units in the UGRNN cell
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gate, used to reduce the scale of forgetting at the beginning
of the training.
activation: (optional) Activation function of the inner states.
Default is `tf.tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(UGRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._activation = activation
self._reuse = reuse
self._linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of UGRNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_output: batch x num units, Tensor representing the output of the UGRNN
after reading `inputs` when previous state was `state`. Identical to
`new_state`.
new_state: batch x num units, Tensor representing the state of the UGRNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(
vs.get_variable_scope(), initializer=self._initializer):
cell_inputs = array_ops.concat([inputs, state], 1)
if self._linear is None:
self._linear = _Linear(cell_inputs, 2 * self._num_units, True)
rnn_matrix = self._linear(cell_inputs)
[g_act, c_act] = array_ops.split(
axis=1, num_or_size_splits=2, value=rnn_matrix)
c = self._activation(c_act)
g = sigmoid(g_act + self._forget_bias)
new_state = g * state + (1.0 - g) * c
new_output = new_state
return new_output, new_state
class IntersectionRNNCell(rnn_cell_impl.RNNCell):
"""Intersection Recurrent Neural Network (+RNN) cell.
Architecture with coupled recurrent gate as well as coupled depth
gate, designed to improve information flow through stacked RNNs. As the
architecture uses depth gating, the dimensionality of the depth
output (y) also should not change through depth (input size == output size).
To achieve this, the first layer of a stacked Intersection RNN projects
the inputs to N (num units) dimensions. Therefore when initializing an
IntersectionRNNCell, one should set `num_in_proj = N` for the first layer
and use default settings for subsequent layers.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
The Intersection RNN is built for use in deeply stacked
RNNs so it may not achieve best performance with depth 1.
"""
def __init__(self,
num_units,
num_in_proj=None,
initializer=None,
forget_bias=1.0,
y_activation=nn_ops.relu,
reuse=None):
"""Initialize the parameters for an +RNN cell.
Args:
num_units: int, The number of units in the +RNN cell
num_in_proj: (optional) int, The input dimensionality for the RNN.
If creating the first layer of an +RNN, this should be set to
`num_units`. Otherwise, this should be set to `None` (default).
If `None`, dimensionality of `inputs` should be equal to `num_units`,
otherwise ValueError is thrown.
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
y_activation: (optional) Activation function of the states passed
        through depth. Default is `tf.nn.relu`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(IntersectionRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._num_input_proj = num_in_proj
self._y_activation = y_activation
self._reuse = reuse
self._linear1 = None
self._linear2 = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of the Intersection RNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_y: batch x num units, Tensor representing the output of the +RNN
after reading `inputs` when previous state was `state`.
new_state: batch x num units, Tensor representing the state of the +RNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from `inputs` via
static shape inference.
ValueError: If input size != output size (these must be equal when
using the Intersection RNN).
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(
vs.get_variable_scope(), initializer=self._initializer):
# read-in projections (should be used for first layer in deep +RNN
# to transform size of inputs from I --> N)
if input_size.value != self._num_units:
if self._num_input_proj:
with vs.variable_scope("in_projection"):
if self._linear1 is None:
self._linear1 = _Linear(inputs, self._num_units, True)
inputs = self._linear1(inputs)
else:
raise ValueError("Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init.")
n_dim = i_dim = self._num_units
cell_inputs = array_ops.concat([inputs, state], 1)
if self._linear2 is None:
self._linear2 = _Linear(cell_inputs, 2 * n_dim + 2 * i_dim, True)
rnn_matrix = self._linear2(cell_inputs)
gh_act = rnn_matrix[:, :n_dim] # b x n
h_act = rnn_matrix[:, n_dim:2 * n_dim] # b x n
gy_act = rnn_matrix[:, 2 * n_dim:2 * n_dim + i_dim] # b x i
y_act = rnn_matrix[:, 2 * n_dim + i_dim:2 * n_dim + 2 * i_dim] # b x i
h = tanh(h_act)
y = self._y_activation(y_act)
gh = sigmoid(gh_act + self._forget_bias)
gy = sigmoid(gy_act + self._forget_bias)
new_state = gh * state + (1.0 - gh) * h # passed thru time
new_y = gy * inputs + (1.0 - gy) * y # passed thru depth
return new_y, new_state
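# Example usage (sketch): a 3-layer stacked +RNN where only the first layer
# projects the inputs to `num_units`, as recommended in the docstring above.
# Assumes TF 1.x graph mode and the `tf.contrib.rnn` / `tf.nn.rnn_cell` export
# names; shapes are illustrative only.
#
#   import tensorflow as tf
#   first = tf.contrib.rnn.IntersectionRNNCell(num_units=128, num_in_proj=128)
#   rest = [tf.contrib.rnn.IntersectionRNNCell(num_units=128) for _ in range(2)]
#   stack = tf.nn.rnn_cell.MultiRNNCell([first] + rest)
#   inputs = tf.placeholder(tf.float32, [32, 20, 64])  # batch x time x features
#   outputs, state = tf.nn.dynamic_rnn(stack, inputs, dtype=tf.float32)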
_REGISTERED_OPS = None
class CompiledWrapper(rnn_cell_impl.RNNCell):
"""Wraps step execution in an XLA JIT scope."""
def __init__(self, cell, compile_stateful=False):
"""Create CompiledWrapper cell.
Args:
cell: Instance of `RNNCell`.
compile_stateful: Whether to compile stateful ops like initializers
and random number generators (default: False).
"""
self._cell = cell
self._compile_stateful = compile_stateful
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
if self._compile_stateful:
compile_ops = True
else:
def compile_ops(node_def):
global _REGISTERED_OPS
if _REGISTERED_OPS is None:
_REGISTERED_OPS = op_def_registry.get_registered_ops()
return not _REGISTERED_OPS[node_def.op].is_stateful
with jit.experimental_jit_scope(compile_ops=compile_ops):
return self._cell(inputs, state, scope=scope)
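# Example usage (sketch): compiling the per-step computation of an ordinary
# LSTM cell with XLA. Assumes TF 1.x graph mode, an XLA-enabled build, and the
# `tf.contrib.rnn.CompiledWrapper` / `tf.nn.rnn_cell.LSTMCell` export names;
# shapes are illustrative only.
#
#   import tensorflow as tf
#   base_cell = tf.nn.rnn_cell.LSTMCell(256)
#   cell = tf.contrib.rnn.CompiledWrapper(base_cell)  # stateful ops stay uncompiled
#   inputs = tf.placeholder(tf.float32, [32, 20, 64])
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)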
def _random_exp_initializer(minval, maxval, seed=None, dtype=dtypes.float32):
"""Returns an exponential distribution initializer.
Args:
minval: float or a scalar float Tensor. With value > 0. Lower bound of the
range of random values to generate.
maxval: float or a scalar float Tensor. With value > minval. Upper bound of
the range of random values to generate.
seed: An integer. Used to create random seeds.
dtype: The data type.
Returns:
An initializer that generates tensors with an exponential distribution.
"""
def _initializer(shape, dtype=dtype, partition_info=None):
del partition_info # Unused.
return math_ops.exp(
random_ops.random_uniform(
shape, math_ops.log(minval), math_ops.log(maxval), dtype,
seed=seed))
return _initializer
class PhasedLSTMCell(rnn_cell_impl.RNNCell):
"""Phased LSTM recurrent network cell.
https://arxiv.org/pdf/1610.09513v1.pdf
"""
def __init__(self,
num_units,
use_peepholes=False,
leak=0.001,
ratio_on=0.1,
trainable_ratio_on=True,
period_init_min=1.0,
period_init_max=1000.0,
reuse=None):
"""Initialize the Phased LSTM cell.
Args:
num_units: int, The number of units in the Phased LSTM cell.
use_peepholes: bool, set True to enable peephole connections.
leak: float or scalar float Tensor with value in [0, 1]. Leak applied
during training.
ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of the
period during which the gates are open.
      trainable_ratio_on: bool, whether ratio_on is trainable.
period_init_min: float or scalar float Tensor. With value > 0.
Minimum value of the initialized period.
The period values are initialized by drawing from the distribution:
e^U(log(period_init_min), log(period_init_max))
Where U(.,.) is the uniform distribution.
period_init_max: float or scalar float Tensor.
With value > period_init_min. Maximum value of the initialized period.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(PhasedLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._leak = leak
self._ratio_on = ratio_on
self._trainable_ratio_on = trainable_ratio_on
self._period_init_min = period_init_min
self._period_init_max = period_init_max
self._reuse = reuse
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _mod(self, x, y):
"""Modulo function that propagates x gradients."""
return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x
def _get_cycle_ratio(self, time, phase, period):
"""Compute the cycle ratio in the dtype of the time."""
phase_casted = math_ops.cast(phase, dtype=time.dtype)
period_casted = math_ops.cast(period, dtype=time.dtype)
shifted_time = time - phase_casted
cycle_ratio = self._mod(shifted_time, period_casted) / period_casted
return math_ops.cast(cycle_ratio, dtype=dtypes.float32)
def call(self, inputs, state):
"""Phased LSTM Cell.
Args:
      inputs: A tuple of 2 Tensors.
The first Tensor has shape [batch, 1], and type float32 or float64.
It stores the time.
The second Tensor has shape [batch, features_size], and type float32.
It stores the features.
state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.
Returns:
A tuple containing:
- A Tensor of float32, and shape [batch_size, num_units], representing the
output of the cell.
- A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape
[batch_size, num_units], representing the new state and the output.
"""
(c_prev, h_prev) = state
(time, x) = inputs
in_mask_gates = [x, h_prev]
if self._use_peepholes:
in_mask_gates.append(c_prev)
with vs.variable_scope("mask_gates"):
if self._linear1 is None:
self._linear1 = _Linear(in_mask_gates, 2 * self._num_units, True)
mask_gates = math_ops.sigmoid(self._linear1(in_mask_gates))
[input_gate, forget_gate] = array_ops.split(
axis=1, num_or_size_splits=2, value=mask_gates)
with vs.variable_scope("new_input"):
if self._linear2 is None:
self._linear2 = _Linear([x, h_prev], self._num_units, True)
new_input = math_ops.tanh(self._linear2([x, h_prev]))
new_c = (c_prev * forget_gate + input_gate * new_input)
in_out_gate = [x, h_prev]
if self._use_peepholes:
in_out_gate.append(new_c)
with vs.variable_scope("output_gate"):
if self._linear3 is None:
self._linear3 = _Linear(in_out_gate, self._num_units, True)
output_gate = math_ops.sigmoid(self._linear3(in_out_gate))
new_h = math_ops.tanh(new_c) * output_gate
period = vs.get_variable(
"period", [self._num_units],
initializer=_random_exp_initializer(self._period_init_min,
self._period_init_max))
phase = vs.get_variable(
"phase", [self._num_units],
initializer=init_ops.random_uniform_initializer(0.,
period.initial_value))
ratio_on = vs.get_variable(
"ratio_on", [self._num_units],
initializer=init_ops.constant_initializer(self._ratio_on),
trainable=self._trainable_ratio_on)
cycle_ratio = self._get_cycle_ratio(time, phase, period)
k_up = 2 * cycle_ratio / ratio_on
k_down = 2 - k_up
k_closed = self._leak * cycle_ratio
k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed)
k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k)
new_c = k * new_c + (1 - k) * c_prev
new_h = k * new_h + (1 - k) * h_prev
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
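# Example usage (sketch): a PhasedLSTMCell consumes a (time, features) tuple at
# every step, so the inputs passed to the RNN are a 2-tuple of Tensors. Assumes
# TF 1.x graph mode and the `tf.contrib.rnn.PhasedLSTMCell` export name; shapes
# are illustrative only.
#
#   import tensorflow as tf
#   times = tf.placeholder(tf.float32, [32, 20, 1])      # batch x time x 1
#   features = tf.placeholder(tf.float32, [32, 20, 64])  # batch x time x features
#   cell = tf.contrib.rnn.PhasedLSTMCell(num_units=128)
#   outputs, state = tf.nn.dynamic_rnn(cell, (times, features), dtype=tf.float32)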
class ConvLSTMCell(rnn_cell_impl.RNNCell):
"""Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
name="conv_lstm_cell"):
"""Construct ConvLSTMCell.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as int tuple, excluding the batch size.
output_channels: int, number of output channels of the conv LSTM.
kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).
use_bias: Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the
output of the conv LSTM. Default: `False`.
      forget_bias: Forget bias.
      initializers: Unused.
      name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvLSTMCell, self).__init__(name=name)
if conv_ndims != len(input_shape) - 1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._total_output_channels = output_channels
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
state_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._output_channels])
self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
self._output_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._total_output_channels])
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state, scope=None):
cell, hidden = state
new_hidden = _conv([inputs, hidden], self._kernel_shape,
4 * self._output_channels, self._use_bias)
gates = array_ops.split(
value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1)
input_gate, new_input, forget_gate, output_gate = gates
new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)
if self._skip_connection:
output = array_ops.concat([output, inputs], axis=-1)
new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
return output, new_state
class Conv1DLSTMCell(ConvLSTMCell):
"""1D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_1d_lstm_cell", **kwargs):
"""Construct Conv1DLSTM. See `ConvLSTMCell` for more details."""
super(Conv1DLSTMCell, self).__init__(conv_ndims=1, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
"""2D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_2d_lstm_cell", **kwargs):
"""Construct Conv2DLSTM. See `ConvLSTMCell` for more details."""
super(Conv2DLSTMCell, self).__init__(conv_ndims=2, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
"""3D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_3d_lstm_cell", **kwargs):
"""Construct Conv3DLSTM. See `ConvLSTMCell` for more details."""
super(Conv3DLSTMCell, self).__init__(conv_ndims=3, **kwargs)
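# Example usage (sketch): a 2-D convolutional LSTM over 16x16 feature maps with
# 3 input channels. Assumes TF 1.x graph mode and the
# `tf.contrib.rnn.Conv2DLSTMCell` export name; shapes are illustrative only.
#
#   import tensorflow as tf
#   cell = tf.contrib.rnn.Conv2DLSTMCell(input_shape=[16, 16, 3],
#                                        output_channels=8,
#                                        kernel_shape=[3, 3])
#   inputs = tf.placeholder(tf.float32, [4, 20, 16, 16, 3])  # batch x time x H x W x C
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)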
def _conv(args, filter_size, num_features, bias, bias_start=0.0):
"""convolution:
Args:
args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,
batch x n, Tensors.
filter_size: int tuple of filter height and width.
num_features: int, number of features.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3, 4, 5]:
raise ValueError("Conv Linear expects 3D, 4D "
"or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args "
"to be of same Dimension: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = nn_ops.conv1d
strides = 1
elif shape_length == 4:
conv_op = nn_ops.conv2d
strides = shape_length * [1]
elif shape_length == 5:
conv_op = nn_ops.conv3d
strides = shape_length * [1]
# Now the computation.
kernel = vs.get_variable(
"kernel", filter_size + [total_arg_size_depth, num_features], dtype=dtype)
if len(args) == 1:
res = conv_op(args[0], kernel, strides, padding="SAME")
else:
res = conv_op(
array_ops.concat(axis=shape_length - 1, values=args),
kernel,
strides,
padding="SAME")
if not bias:
return res
bias_term = vs.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
return res + bias_term
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
def __init__(self,
num_units,
initializer=None,
num_proj=None,
number_of_groups=1,
forget_bias=1.0,
activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
num_units: int, The number of units in the G-LSTM cell
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
number_of_groups: (optional) int, number of groups to use.
If `number_of_groups` is 1, then it should be equivalent to LSTM cell
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
Raises:
ValueError: If `num_units` or `num_proj` is not divisible by
`number_of_groups`.
"""
super(GLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._num_proj = num_proj
self._forget_bias = forget_bias
self._activation = activation
self._number_of_groups = number_of_groups
if self._num_units % self._number_of_groups != 0:
raise ValueError("num_units must be divisible by number_of_groups")
if self._num_proj:
if self._num_proj % self._number_of_groups != 0:
raise ValueError("num_proj must be divisible by number_of_groups")
self._group_shape = [
int(self._num_proj / self._number_of_groups),
int(self._num_units / self._number_of_groups)
]
else:
self._group_shape = [
int(self._num_units / self._number_of_groups),
int(self._num_units / self._number_of_groups)
]
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._linear1 = None
self._linear2 = None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups
Args:
      inputs: cell input or its previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(
input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
def call(self, inputs, state):
"""Run one step of G-LSTM.
Args:
inputs: input Tensor, 2D, [batch x num_units].
state: this must be a tuple of state Tensors, both `2-D`,
with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
G-LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- LSTMStateTuple representing the new state of G-LSTM cell
after reading `inputs` when the previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
(c_prev, m_prev) = state
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
dtype = inputs.dtype
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
i_parts = []
j_parts = []
f_parts = []
o_parts = []
for group_id in range(self._number_of_groups):
with vs.variable_scope("group%d" % group_id):
x_g_id = array_ops.concat(
[
self._get_input_for_group(inputs, group_id,
self._group_shape[0]),
self._get_input_for_group(m_prev, group_id,
self._group_shape[0])
],
axis=1)
if self._linear1 is None:
self._linear1 = _Linear(x_g_id, 4 * self._group_shape[1], False)
R_k = self._linear1(x_g_id) # pylint: disable=invalid-name
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
i_parts.append(i_k)
j_parts.append(j_k)
f_parts.append(f_k)
o_parts.append(o_k)
bi = vs.get_variable(
name="bias_i",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
bj = vs.get_variable(
name="bias_j",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
bf = vs.get_variable(
name="bias_f",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
bo = vs.get_variable(
name="bias_o",
shape=[self._num_units],
dtype=dtype,
initializer=init_ops.constant_initializer(0.0, dtype=dtype))
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
c = (
math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
if self._linear2 is None:
self._linear2 = _Linear(m, self._num_proj, False)
m = self._linear2(m)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state
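# Example usage (sketch): a G-LSTM whose gates are computed in 4 independent
# groups. Note that `call` slices both the inputs and the previous output into
# `number_of_groups` chunks of size `group_shape[0]`, so the input feature size
# here is chosen to match `num_units`. Assumes TF 1.x graph mode and the
# `tf.contrib.rnn.GLSTMCell` export name; shapes are illustrative only.
#
#   import tensorflow as tf
#   cell = tf.contrib.rnn.GLSTMCell(num_units=512, number_of_groups=4)
#   inputs = tf.placeholder(tf.float32, [8, 20, 512])  # batch x time x features
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)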
class LayerNormLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
forget_bias=1.0,
activation=None,
layer_norm=False,
norm_gain=1.0,
norm_shift=0.0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
activation: Activation function of the inner states. Default: `tanh`.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(LayerNormLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._forget_bias = forget_bias
self._activation = activation or math_ops.tanh
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj))
self._output_size = num_proj
else:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units))
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _linear(self,
args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None,
layer_norm=False):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a Variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
layer_norm: boolean, whether to apply layer normalization.
Returns:
A 2D Tensor with shape [batch x output_size] taking value
sum_i(args[i] * W[i]), where each W[i] is a newly created Variable.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
"kernel", [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
"bias", [output_size], dtype=dtype, initializer=bias_initializer)
if not layer_norm:
res = nn_ops.bias_add(res, biases)
return res
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: this must be a tuple of state Tensors,
both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = self._linear(
[inputs, m_prev],
4 * self._num_units,
bias=True,
bias_initializer=None,
layer_norm=self._layer_norm)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = _norm(self._norm_gain, self._norm_shift, i, "input")
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope):
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (
sigmoid(f + self._forget_bias) * c_prev +
sigmoid(i) * self._activation(j))
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
m = self._linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (rnn_cell_impl.LSTMStateTuple(c, m))
return m, new_state
class SRUCell(rnn_cell_impl._LayerRNNCell):
"""SRU, Simple Recurrent Unit
Implementation based on
Training RNNs as Fast as CNNs (cf. https://arxiv.org/abs/1709.02755).
  This variation of RNN cell is characterized by the simplified data
  dependence between hidden states of two consecutive time steps.
  Traditionally, the hidden state from a cell at time step t-1 needs to be
  multiplied with a matrix W_hh before being fed into the ensuing cell at
  time step t. This flavor of RNN replaces that matrix multiplication between
  h_{t-1} and W_hh with a pointwise multiplication, resulting in a
  performance gain.
Args:
num_units: int, The number of units in the SRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: (optional) String, the name of the layer. Layers with the same name
will share weights, but to avoid mistakes we require reuse=True in such
cases.
"""
def __init__(self, num_units, activation=None, reuse=None, name=None):
super(SRUCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._activation = activation or math_ops.tanh
# Restrict inputs to be 2-dimensional matrices
self.input_spec = base_layer.InputSpec(ndim=2)
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if inputs_shape[1].value is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = inputs_shape[1].value
self._kernel = self.add_variable(
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
shape=[input_depth, 4 * self._num_units])
self._bias = self.add_variable(
rnn_cell_impl._BIAS_VARIABLE_NAME,
shape=[2 * self._num_units],
initializer=init_ops.constant_initializer(0.0, dtype=self.dtype))
self._built = True
def call(self, inputs, state):
"""Simple recurrent unit (SRU) with num_units cells."""
U = math_ops.matmul(inputs, self._kernel)
x_bar, f_intermediate, r_intermediate, x_tx = array_ops.split(
value=U, num_or_size_splits=4, axis=1)
f_r = math_ops.sigmoid(
nn_ops.bias_add(
array_ops.concat([f_intermediate, r_intermediate], 1), self._bias))
f, r = array_ops.split(value=f_r, num_or_size_splits=2, axis=1)
c = f * state + (1.0 - f) * x_bar
h = r * self._activation(c) + (1.0 - r) * x_tx
return h, c
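# A one-step NumPy sketch of the recurrence in `SRUCell.call` above, to make
# the docstring's point concrete: the previous state `c_prev` is only combined
# pointwise, never multiplied by a recurrent weight matrix. `W` plays the role
# of the cell's `kernel` ([input_depth, 4*num_units]) and `b` of its `bias`
# ([2*num_units]); all values are hypothetical.
#
#   import numpy as np
#   def sru_step(x, c_prev, W, b, activation=np.tanh):
#     x_bar, f_in, r_in, x_tx = np.split(x @ W, 4, axis=1)
#     n = c_prev.shape[1]
#     f = 1.0 / (1.0 + np.exp(-(f_in + b[:n])))
#     r = 1.0 / (1.0 + np.exp(-(r_in + b[n:])))
#     c = f * c_prev + (1.0 - f) * x_bar        # no matmul with the previous state
#     h = r * activation(c) + (1.0 - r) * x_tx  # highway-style output
#     return h, c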
class WeightNormLSTMCell(rnn_cell_impl.RNNCell):
"""Weight normalized LSTM Cell. Adapted from `rnn_cell_impl.LSTMCell`.
The weight-norm implementation is based on:
https://arxiv.org/abs/1602.07868
Tim Salimans, Diederik P. Kingma.
Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks
  The default LSTM implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The class uses optional peephole connections, optional cell clipping
and an optional projection layer.
The optional peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
"""
def __init__(self,
num_units,
norm=True,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
forget_bias=1,
activation=None,
reuse=None):
"""Initialize the parameters of a weight-normalized LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
norm: If `True`, apply normalization to the weight matrices. If False,
the result is identical to that obtained from `rnn_cell_impl.LSTMCell`
use_peepholes: bool, set `True` to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(WeightNormLSTMCell, self).__init__(_reuse=reuse)
self._scope = "wn_lstm_cell"
self._num_units = num_units
self._norm = norm
self._initializer = initializer
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._num_proj = num_proj
self._proj_clip = proj_clip
self._activation = activation or math_ops.tanh
self._forget_bias = forget_bias
self._weights_variable_name = "kernel"
self._bias_variable_name = "bias"
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _normalize(self, weight, name):
"""Apply weight normalization.
Args:
weight: a 2D tensor with known number of columns.
name: string, variable name for the normalizer.
Returns:
A tensor with the same shape as `weight`.
"""
output_size = weight.get_shape().as_list()[1]
g = vs.get_variable(name, [output_size], dtype=weight.dtype)
return nn_impl.l2_normalize(weight, dim=0) * g
def _linear(self,
args,
output_size,
norm,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
self._weights_variable_name, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if norm:
wn = []
st = 0
with ops.control_dependencies(None):
for i in range(len(args)):
en = st + shapes[i][1].value
wn.append(
self._normalize(weights[st:en, :], name="norm_{}".format(i)))
st = en
weights = array_ops.concat(wn, axis=0)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
self._bias_variable_name, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: A tuple of state Tensors, both `2-D`, with column sizes
`c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
dtype = inputs.dtype
num_units = self._num_units
sigmoid = math_ops.sigmoid
c, h = state
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(self._scope, initializer=self._initializer):
concat = self._linear(
[inputs, h], 4 * num_units, norm=self._norm, bias=True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
if self._use_peepholes:
w_f_diag = vs.get_variable("w_f_diag", shape=[num_units], dtype=dtype)
w_i_diag = vs.get_variable("w_i_diag", shape=[num_units], dtype=dtype)
w_o_diag = vs.get_variable("w_o_diag", shape=[num_units], dtype=dtype)
new_c = (
c * sigmoid(f + self._forget_bias + w_f_diag * c) +
sigmoid(i + w_i_diag * c) * self._activation(j))
else:
new_c = (
c * sigmoid(f + self._forget_bias) +
sigmoid(i) * self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
new_c = clip_ops.clip_by_value(new_c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
new_h = sigmoid(o + w_o_diag * new_c) * self._activation(new_c)
else:
new_h = sigmoid(o) * self._activation(new_c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
new_h = self._linear(
new_h, self._num_proj, norm=self._norm, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
new_h = clip_ops.clip_by_value(new_h, -self._proj_clip,
self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
| rabipanda/tensorflow | tensorflow/contrib/rnn/python/ops/rnn_cell.py | Python | apache-2.0 | 115,152 |
# This file has to be run in pure Python mode!
# Imports from the CO๐CEPT code
from commons import *
plt = get_matplotlib().pyplot
# Absolute path and name of this test
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(os.path.dirname(this_dir))
# Begin analysis
masterprint(f'Analysing {this_dir} data ...')
# Read in the three 3D render images
render3D_path = f'{this_dir}/output/render3D_snapshot.png'
render3D_0_path = f'{this_dir}/output/subdir/snapshot_0.png'
render3D_1_path = f'{this_dir}/output/subdir/snapshot_1.png'
render3D = plt.imread(render3D_path)
render3D_0 = plt.imread(render3D_0_path)
render3D_1 = plt.imread(render3D_1_path)
# The two identical 3D renders should be exactly equal
if not np.all(render3D_0 == render3D_1):
abort('The 3D renders "{}" and "{}" are not identical!'.format(render3D_0, render3D_1))
# The dimensions of the images should be as stated in
# render3D.param_0 and render3D.param_1.
for r, p, param_i in zip(
(render3D, render3D_0),
(render3D_path, render3D_0_path),
(f'{this_dir}/render3D.param_0', f'{this_dir}/render3D.param_1'),
):
module_dict = load_source('param', param_i).__dict__
shape = r.shape[:2]
if shape[0] != shape[1] or shape[0] != module_dict['render3D_resolution']:
masterprint('done')
abort('The 3D render "{}" is not of size {}x{}!'
.format(p, module_dict['render3D_resolution'], module_dict['render3D_resolution']))
# There should be some completely white pixels in the first 3D render
# and some completely black pixels in the second (and third) 3D render
# due to the text.
if not np.any(render3D[:, :, :3] > [0.99]*3):
    abort('The scale factor text does not seem to '
          'be white on 3D render "{}".'.format(render3D_path))
if not np.any(render3D_0[:, :, :3] < [0.01]*3):
    abort('The scale factor text does not seem to '
          'be black on 3D render "{}".'.format(render3D_0_path))
# Done analysing
masterprint('done')
| jmd-dk/concept | test/render/analyze.py | Python | gpl-3.0 | 2,006 |
# u(k) = (900-3k) r^(k-1)
# u'(k) = -3 r^(k-1) + (900-3k) r^(k-1) ln r
# u(n,r) = 900(1-r**n)/(1-r)-((1-r**n)/(1-r)-n*r**n)*3/(1-r)
def u(n,r):
return 900*(1-r**n)/(1-r)-((1-r**n)/(1-r)-n*r**n)*3/(1-r)
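# Sketch of where the closed form above comes from (sum of the
# arithmetico-geometric series, valid for r != 1):
#   u(n,r) = sum_{k=1..n} (900-3k) r^(k-1)
#          = 900 * sum_{k=1..n} r^(k-1) - 3 * sum_{k=1..n} k r^(k-1)
#   with sum_{k=1..n} r^(k-1)   = (1-r^n)/(1-r)
#   and  sum_{k=1..n} k r^(k-1) = (1-r^n)/(1-r)^2 - n*r^n/(1-r),
# which rearranges to exactly the expression returned by u(n, r).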
# integer division result too large for a float
# 0.9933333333333333 < 1: the largest such value?
def main():
l = 1+1/10000
r = 1+1/100
while l < r:
m = (l+r)/2
if m == l or m == r:
            break
        res = u(5000, m)
        N = -600_000_000_000
        print(m, res, res < N)
if res < N:
r = m
else:
l = m
print(round(l,12))
main()
| CroMarmot/MyOICode | ProjectEuler/p235.py | Python | gpl-3.0 | 671 |
# Bzrflag
# Copyright 2008-2011 Brigham Young University
#
# This file is part of Bzrflag.
#
# Bzrflag is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Bzrflag is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Bzrflag. If not, see <http://www.gnu.org/licenses/>.
#
# Inquiries regarding any further use of Bzrflag, please contact the Copyright
# Licensing Office, Brigham Young University, 3760 HBLL, Provo, UT 84602,
# (801) 422-9339 or 422-3821, e-mail copyright@byu.edu.
"""Configuration class for the Bzrflag game."""
__author__ = "BYU AML Lab <kseppi@byu.edu>"
__copyright__ = "Copyright 2008-2011 Brigham Young University"
__license__ = "GNU GPL"
import sys
import os
import optparse
import ConfigParser
import logging
import world
class ParseError(Exception): pass
class ArgumentError(Exception): pass
logger = logging.getLogger("config.py")
class Config:
"""Config class:
Parses command line options and the --config file if given.
"""
def __init__(self, args=None):
self.options = self.parse_cli_args(args)
self.setup_world()
def get(self, key, default):
"""Return value of given key, or defalt if option[key] = None."""
if self.options[key] is None:
return default
return self.options[key]
def __getitem__(self, key):
return self.options[key]
def setup_world(self):
"""Parse the world file"""
if not self.options['world']:
raise ArgumentError('no world defined')
if not os.path.isfile(self.options['world']):
raise ArgumentError('world file not found: %s'\
%self.options['world'])
text = open(self.options['world']).read()
size = int(self.options['world_size'])
results = world.World.parser(size, size).parseString(text)
if not results:
            raise ParseError('invalid world file: %s' % self.options['world'])
self.world = results[0]
def parse_cli_args(self, args):
"""Parse command line arguments."""
p = optparse.OptionParser()
p.add_option('-d','--debug',
action='store_true',
dest='debug',
help='turn on verbose debugging')
p.add_option('--test',
action='store_true',
dest='test',
help='run in test mode (no GUI)')
p.add_option('--debug-out',
dest='debug_out',
help='output filename for debug messages')
p.add_option('--window-size',
dest='window_size',
default='800x800',
help='size of the window to use, ex. 800x800')
## game behavior
p.add_option('--world',
dest='world',
default='maps/four_ls.bzw',
help='specify a world.bzw map to use')
p.add_option('--world-size',
dest='world_size',
default='800',
help='specify the world size (it is always square)')
p.add_option('--config',
dest='config',
help='set the config file')
p.add_option('--python-console',
dest='python_console', default=False,
action='store_true',
help='use interactive python shell')
p.add_option('--telnet-console',
dest='telnet_console', default=False,
action='store_true',
help='use interactive telnet shell (and log server response)')
p.add_option('--no-report-obstacles',
action='store_true', default=False,
dest='no_report_obstacles',
help='report obstacles? (turn off to force use\
of the occupancy grid)')
p.add_option('--occgrid-width', type='int',
default=50, help='width of reported occupancy grid')
## tank behavior
p.add_option('--max-shots',
type='int',
dest='max_shots',default=20,
help='set the max shots')
p.add_option('--inertia-linear',
dest='inertia_linear',
type='int',default=1,
help='set the linear inertia')
p.add_option('--inertia-angular',
dest='inertia_angular',
type='int',default=1,
help='set the angular inertia')
p.add_option('--seed',
type='int',default=-1,
dest='random_seed',
help='set the random seed for world initialization')
p.add_option('--angular-velocity',
type='float',
dest='angular_velocity',
help='set the angular velocity for tanks (float)')
p.add_option('--rejoin-time',
type='int',
dest='rejoin_time',
help='set the rejoin delay')
p.add_option('--explode-time',
type='int',
dest='explode_time',
help='[insert help] what does this do?')
p.add_option('--grab-own-flag',
action='store_false',
dest='grab_own_flag',
help='enables grabbing your own flag')
p.add_option('--friendly-fire',
action='store_false',
default='true',
dest='friendly_fire',
help="don't die on friendly fire")
p.add_option('--respawn-time',
type='int',default=10,
dest='respawn_time',
help='set the respawn time')
p.add_option('--time-limit',
type='int',default=300000,
dest='time_limit',
help='set the time limit')
g = optparse.OptionGroup(p, 'Team Defaults')
p.add_option_group(g)
g.add_option('--default-tanks',
dest='default_tanks',type='int',default=10,
help='specify the default number of tanks')
## random sensor noise
g.add_option('--default-posnoise',
dest='default_posnoise',type='float',default=0,
help='specify the default positional noise')
g.add_option('--default-velnoise',
dest='default_velnoise',type='float',default=0,
help='specify the default velocity noise')
g.add_option('--default-angnoise',
dest='default_angnoise',type='float',default=0,
help='specify the default angular noise')
## For the occupancy grid, the probabitities of sensor accuracy
# p(1|1) // true positive
# p(1|0) // false positive, easily obtainable from true positive;
# false positive = 1 - true positive
# p(0|0) // true negative
# p(0|1) // false negative = 1 - true negative
g.add_option('--default-true-positive',
dest='default_true_positive',type='float',default=1,
help='the true positive probability: p(1|1)')
g.add_option('--default-true-negative',
dest='default_true_negative',type='float',default=1,
help='the true negative probability: p(0|0)')
for color in ['red','green','blue','purple']:
title = '%s Team Options' % color.capitalize()
g = optparse.OptionGroup(p, title)
p.add_option_group(g)
g.add_option('--%s-port'%color,
dest='%s_port'%color,type='int',default=0,
help='specify the port for the %s team'%color)
g.add_option('--%s-tanks'%color,
dest='%s_tanks'%color,type='int',
help='specify the number of tanks for the %s team'%color)
g.add_option('--%s-posnoise'%color,
dest='%s_posnoise'%color,type='float',
help='specify the posnoise for the %s team'%color)
g.add_option('--%s-velnoise'%color,
dest='%s_velnoise'%color,type='float',
help='specify the velnoise for the %s team'%color)
g.add_option('--%s-angnoise'%color,
dest='%s_angnoise'%color,type='float',
help='specify the angnoise for the %s team'%color)
g.add_option('--%s-true-positive' % color,
dest='%s_true_positive' % color, type='float',
help='the true positive probability for %s' % color)
g.add_option('--%s-true-negative' % color,
dest='%s_true_negative' % color, type='float',
help='the true negative probability for %s' % color)
opts, args = p.parse_args(args)
if opts.config:
configfile = ConfigParser.ConfigParser()
if not len(configfile.read(opts.config)):
raise Exception('config file not found')
if not 'global' in configfile.sections():
raise Exception('Invalid config file. Make sure "[global]"\
is at the top')
config = dict(configfile.items('global'))
for key in config:
if not hasattr(opts,key):
raise Exception('invalid configuration option: %s'%key)
if getattr(opts,key) == None:
type = p.get_option('--'+key.replace('_','-')).type
value = config[key]
if type == 'int':
value = int(value)
setattr(opts,key,value)
#if args:
#p.parse_error('No positional arguments are allowed.')
return vars(opts)
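# Example (sketch): a minimal file for the --config flag handled above. Keys
# must match option names from parse_cli_args, and a value is only applied
# when the corresponding option was not already set on the command line
# (i.e. its parsed value is still None). File name and values are hypothetical.
#
#   # game.cfg
#   [global]
#   rejoin_time = 5
#   explode_time = 3
#   red_tanks = 4
#   blue_tanks = 4
#
# and a matching invocation:
#
#   config = Config(['--config', 'game.cfg', '--world', 'maps/four_ls.bzw'])
#   print(config['rejoin_time'])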
| sm-github/bzrflag | bzrflag/config.py | Python | gpl-3.0 | 9,861 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os
import sys
class CreateSymlinkResolver(object):
def __init__(self,resolver,dependency,symlinks_path,ctx):
self.resolver=resolver
self.dependency=dependency
self.symlinks_path=symlinks_path
self.ctx=ctx
assert os.path.isabs(self.symlinks_path)
def resolve(self):
path=self.resolver.resolve()
if not path:
return path
link_path=os.path.join(self.symlinks_path,self.dependency.name)
if os.path.exists(link_path):
if os.path.realpath(link_path)==os.path.realpath(path):
self.dependency.is_symlink=True
self.dependency.real_path=os.path.realpath(path)
return link_path
os_symlink=getattr(os,"symlink",None)
if not callable(os_symlink)and sys.platform=='win32':
def symlink_windows(target,link_path):
cmd='mklink /J "{}" "{}"'.format(link_path.replace('/','\\'),target.replace('/','\\'))
self.ctx.cmd_and_log(cmd)
os_symlink=symlink_windows
try:
self.ctx.to_log('wurf: CreateSymlinkResolver {} -> {}'.format(link_path,path))
if os.path.lexists(link_path):
if sys.platform=='win32':
os.rmdir(link_path)
else:
os.unlink(link_path)
os_symlink(path,link_path)
except Exception as e:
self.ctx.logger.debug("Symlink creation failed for: {}".format(self.dependency.name),exc_info=True)
return path
self.dependency.is_symlink=True
self.dependency.real_path=path
return link_path
def __repr__(self):
return"%s(%r)"%(self.__class__.__name__,self.__dict__)
| looopTools/sw9-source | .waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/extras/wurf/create_symlink_resolver.py | Python | mit | 1,573 |
import requests
from bs4 import BeautifulSoup
#
#
# get seed article
#
#
dave_2003_doi = "10.1.1.13.2424"
dave_2003 = "http://citeseerx.ist.psu.edu/viewdoc/summary?doi=" + dave_2003_doi
print(dave_2003)
r = requests.get(dave_2003)
print(r)
data = r.text
soup = BeautifulSoup(data, "html.parser")
print(soup)
#
#
# get seed article citations by page
#
#
dave_2003_citing = "http://citeseerx.ist.psu.edu/showciting?doi=" + dave_2003_doi
dave_2003_r_citing = requests.get(dave_2003_citing)
print(dave_2003_r_citing)
data = dave_2003_r_citing.text
soup = BeautifulSoup(data, "html.parser")
#
#
# get seed article citation results
#
#
page2 = "http://citeseerx.ist.psu.edu/showciting?doi=10.1.1.13.2424&sort=cite&start=10"
print(page2)
print(soup)
results = soup.find_all('div', class_='result')
result = results[0]
| chendaniely/attitude-citation-network | analysis/chend/code/PY/01-get_csci_pages.py | Python | mit | 794 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from datetime import timedelta, datetime
from starthinker.util.google_api import API_DCM
from starthinker.util.bigquery import table_exists, rows_to_table, query_to_rows
from starthinker.util.cm import get_profile_for_api
from starthinker.util.data import get_rows
CHANGELOGS_TABLE = 'CM_Change_Logs'
CHANGELOGS_SCHEMA = [
{
'name': 'userProfileId',
'type': 'INTEGER'
},
{
'name': 'accountId',
'type': 'INTEGER'
},
{
'name': 'subaccountId',
'type': 'INTEGER'
},
{
'name': 'id',
'type': 'INTEGER'
},
{
'name': 'transactionId',
'type': 'INTEGER'
},
{
'name': 'objectType',
'type': 'STRING'
},
{
'name': 'objectId',
'type': 'INTEGER'
},
{
'name': 'action',
'type': 'STRING'
},
{
'name': 'fieldName',
'type': 'STRING'
},
{
'name': 'changeTime',
'type': 'TIMESTAMP'
},
{
'name': 'oldValue',
'type': 'STRING'
},
{
'name': 'newValue',
'type': 'STRING'
},
]
def get_changelogs(config, task, accounts, start):
if config.verbose:
print('CM CHANGE LOGS', accounts)
for account_id in accounts:
is_superuser, profile_id = get_profile_for_api(config, task['auth'],
account_id)
kwargs = {'profileId': profile_id, 'minChangeTime': start}
if is_superuser:
kwargs['accountId'] = account_id
for changelog in API_DCM(
config, 'user', iterate=True,
internal=is_superuser).changeLogs().list(**kwargs).execute():
yield [
changelog.get('userProfileId'),
changelog['accountId'],
changelog.get('subaccountId'),
changelog['id'],
changelog['transactionId'],
changelog['objectType'],
changelog['objectId'],
changelog['action'],
changelog.get('fieldName'),
changelog['changeTime'],
changelog.get('oldValue'),
changelog.get('newValue'),
]
def dcm_log(config, task):
if config.verbose:
print('DCM LOG')
accounts = list(get_rows(config, 'user', task['accounts']))
# determine start log date
if table_exists(config, task['out']['auth'], config.project,
task['out']['dataset'], CHANGELOGS_TABLE):
start = next(
query_to_rows(
config, task['out']['auth'], config.project,
task['out']['dataset'],
'SELECT FORMAT_TIMESTAMP("%%Y-%%m-%%dT%%H:%%M:%%S-00:00", MAX(changeTime), "UTC") FROM `%s`'
% CHANGELOGS_TABLE, 1, False))[0]
disposition = 'WRITE_APPEND'
else:
start = (datetime.utcnow() - timedelta(days=int(task['days']))
).strftime('%Y-%m-%dT%H:%M:%S-00:00')
disposition = 'WRITE_TRUNCATE'
# load new logs
rows = get_changelogs(config, task, accounts, start)
if rows:
rows_to_table(config, task['out']['auth'], config.project,
task['out']['dataset'], CHANGELOGS_TABLE, rows,
CHANGELOGS_SCHEMA, 0, disposition)
| google/starthinker | starthinker/task/dcm_log/run.py | Python | apache-2.0 | 3,909 |
# -*- coding: utf8 -*-
"""
Demonstration of USM QE Tests Run Logging
=========================================
This is just an example of usmqe-flavored pytest test cases, so that one can
try out how the reporting and logging work without setting up the whole test
environment required for actual tests.
"""
import pytest
@pytest.fixture
def fixture_error():
raise Exception
@pytest.fixture(params=[1, 2])
def parametrized_fixture(request):
return [request.param]
def test_pass_one():
pytest.check(True, 'good')
def test_pass_many():
pytest.check(True, 'one')
pytest.check(True, 'two')
pytest.check(True, 'three')
@pytest.mark.parametrize("x", [1, 2, 3])
@pytest.mark.parametrize("y", ["a", "b"])
def test_pass_parametrized(x, y):
pytest.check(len(x*y) == x)
pytest.check(y in x*y)
def test_pass_parametrized_fixture(parametrized_fixture):
pytest.check(len(parametrized_fixture) > 0)
def test_fail_one_check():
pytest.check(False)
def test_fail_many_check():
pytest.check(True, 'good')
pytest.check(False, 'ops')
pytest.check(False, 'doh')
pytest.check(False, 'doh')
def test_fail_one_exception():
# mrglog doesn't handle this
raise Exception
def test_error_in_fixture(fixture_error):
pytest.check(True)
def test_xfail_one():
pytest.check(False, issue='BZ 439858')
def test_xfail_many():
pytest.check(True, 'good')
pytest.check(False, issue='BZ 439858') # this failure is waived known issue
def test_fail_anyway():
pytest.check(True, 'good')
pytest.check(False, issue='BZ 439858') # this failure is waived known issue
pytest.check(False, 'this sucks')
| fbalak/usmqe-tests | usmqe_tests/demo/test_logging.py | Python | gpl-3.0 | 1,671 |
from django.conf.urls.defaults import *
from .views import handle_token
urlpatterns = patterns('',
url(r'^token/(.*)/$', handle_token, name='token_handle'),
)
| redvasily/django-tokens | tokens/urls.py | Python | bsd-3-clause | 165 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('finaid', '0012_updates_to_finaidreviewdata'),
]
operations = [
migrations.AddField(
model_name='financialaidapplication',
name='application_type',
field=models.CharField(default=b'general', help_text='Application Classification', max_length=64, verbose_name='Application Type', choices=[(b'general', 'General Applicant'), (b'staff', 'PyCon Staff/Volunteer'), (b'speaker', 'Speaker'), (b'core_dev', 'Python Core Developer'), (b'psf_board', 'PSF Board Member'), (b'outstanding_community_member', 'Outstanding Community Member')]),
),
]
| PyCon/pycon | pycon/finaid/migrations/0013_financialaidapplication_application_type.py | Python | bsd-3-clause | 778 |
import smtplib
from osmhm import config
def basic_send_mail(to, subject, msg):
import os
program = '/usr/sbin/sendmail'
email = os.popen("%s -t" % program, "w")
email.write("From: %s\n" % config.email_user)
email.write("Reply-to: %s\n" % config.email_user)
email.write("To: %s\n" % to)
email.write("Subject: %s\n" % subject)
email.write("\n")
email.write("%s\n" % msg)
status = email.close()
def send_notification(notify_list, notification_type, notifier=basic_send_mail):
SUBJECT = 'OSM Hall Monitor Notification |'
messages = {}
subjects = {}
tos = {}
for entry in notify_list:
        if entry['address'] != '':
if notification_type == 'user':
MSG = """
Dear %s,
OSM Hall Monitor has detected an event for your consideration.
**User alert**
Time of event: %s
Username: https://www.openstreetmap.org/user/%s
Changeset: https://www.openstreetmap.org/changeset/%s
Additions: %s
Modifications: %s
Deletions: %s
Reason user is watched: %s
Problem? Feedback? Reply to this message.
Best,
OSM Hall Monitor
""" % (entry['author'], entry['timestamp'], entry['username'], entry['changesetid'], entry['create'], entry['modify'], entry['delete'], entry['reason'])
TO = entry['address']
NEWSUBJECT = '%s User %s ' % (SUBJECT, entry['username'])
elif notification_type == 'object':
if 'n' == entry['element'][0]:
pre = 'node'
elif 'w' == entry['element'][0]:
pre = 'way'
elif 'r' == entry['element'][0]:
pre = 'relation'
if entry['action'] == 1:
act = 'create'
elif entry['action'] == 2:
act = 'modify'
elif entry['action'] == 4:
act = 'delete'
MSG = """
Dear %s,
OSM Hall Monitor has detected an event for your consideration.
**Object alert**
Time of event: %s
Object: https://www.openstreetmap.org/%s/%s
Changeset: https://www.openstreetmap.org/changeset/%s
Action: %s
User performing: https://www.openstreetmap.org/user/%s
Reason object is watched: %s
Problem? Feedback? Reply to this message.
Best,
OSM Hall Monitor
""" % (entry['author'], entry['timestamp'], pre, entry['element'][1:], entry['changesetid'], act, entry['username'], entry['reason'])
TO = entry['address']
NEWSUBJECT = '%s Object %s ' % (SUBJECT, entry['element'])
else:
print('Notification type unknown')
continue
else:
continue
addr = TO
if addr not in tos:
tos[addr] = TO
subjects[addr] = NEWSUBJECT
messages[addr] = MSG
else:
subjects[addr] = '%s: multiple events' % (SUBJECT)
messages[addr] += '\r\n---NEXT ALERT---\r\n'+MSG
for email in messages:
try:
notifier(tos[email], subjects[email], messages[email])
except Exception as e:
print('Issue sending notification: ', str(e))
pass
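# Illustrative sketch (not part of the original module): a minimal notify
# list with made-up values, routed through a stand-in notifier so nothing
# is actually mailed.
if __name__ == '__main__':
    def _print_notifier(to, subject, msg):
        print('%s | %s | %s' % (to, subject, msg))
    _demo_list = [{
        'address': 'watcher@example.org',
        'author': 'Watcher',
        'timestamp': '2020-01-01 00:00:00',
        'username': 'some_mapper',
        'changesetid': 1,
        'create': 0,
        'modify': 2,
        'delete': 0,
        'reason': 'demo entry',
    }]
    send_notification(_demo_list, 'user', notifier=_print_notifier)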
| ethan-nelson/osm_hall_monitor | osmhm/send_notification.py | Python | mit | 3,262 |
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
# verify leaf nodes without min_impurity_split less than
# impurity 1e-7
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_less_equal(est.min_impurity_split, 1e-7,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# n_samples set n_feature to ease construction of a simultaneous
# construction of a csr and csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
# Assert that leaves index are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
# Ensure only one leave node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
               [0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
| sonnyhu/scikit-learn | sklearn/tree/tests/test_tree.py | Python | bsd-3-clause | 55,494 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
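# A minimal usage sketch (not part of the original module): it combines the
# helpers above to inspect a checkpoint directory. `ckpt_dir` is a
# hypothetical path supplied by the caller.
def _example_inspect_checkpoint(ckpt_dir):
  names_and_shapes = list_variables(ckpt_dir)
  for var_name, shape in names_and_shapes:
    logging.info("%s has shape %s", var_name, shape)
  # Read the first listed variable's value as a numpy ndarray, if any exist.
  if names_and_shapes:
    return load_variable(ckpt_dir, names_and_shapes[0][0])
  return None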
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
    A new checkpoint path, or `None` if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
  The iterator only checks for new checkpoints when control flow has been
  returned to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
  This behavior gives callers control over what to do if checkpoints do not
  arrive fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
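# Rough sketch of the intended usage (the eval_fn and training_done helpers
# and the directory path are assumptions for illustration, not part of this
# API):
#
#   for ckpt_path in checkpoints_iterator('/tmp/train_dir',
#                                         min_interval_secs=60,
#                                         timeout=600,
#                                         timeout_fn=training_done):
#       eval_fn(ckpt_path)
#
# Iteration ends either when training_done() returns True after a timeout or,
# with no timeout_fn, when the 600-second wait expires.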
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  The assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
      # If a 1-to-1 mapping was provided, find the variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join(v.name for v in var)
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer"):
"""Overrides given variable's initialization op.
  Sets the variable's initializer to an assign op that loads the tensor's
  value from the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
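# Worked example with made-up names: if the variable store holds 'w/part_0'
# and 'w/part_1', then _collect_partitioned_variable('w', store_vars) returns
# [store_vars['w/part_0'], store_vars['w/part_1']]; a name without a '/part_0'
# entry yields None. This mirrors the '<variable>/part_<part #>' naming scheme
# described in the init_from_checkpoint docstring above.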
| xzturn/tensorflow | tensorflow/python/training/checkpoint_utils.py | Python | apache-2.0 | 19,433 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re, csv
def copy_to_imagedrop_upload(src_filepath, destdir=None):
import pycurl, os, shutil, re
regex_colorstyle = re.compile(r'^.*?/[0-9]{9}[_altm0-6]{,6}?\.[jpngJPNG]{3}$')
if not regex_colorstyle.findall(src_filepath):
print src_filepath.split('/')[-1], ' Is Not a valid Bluefly Colorstyle File or Alt Out of Range'
return
else:
if not destdir:
destdir = '/mnt/Post_Complete/ImageDrop'
imagedrop = os.path.abspath(destdir)
localFileName = src_filepath.split('/')[-1]
imagedropFilePath = os.path.join(imagedrop, localFileName.lower())
try:
if os.path.isfile(imagedropFilePath):
try:
os.remove(imagedropFilePath)
#os.rename(src_filepath, imagedropFilePath)
shutil.copyfile(src_filepath, imagedropFilePath)
return True
except:
print 'Error ', imagedropFilePath
return False
#shutil.copyfile(src_filepath, imagedropFilePath
else:
##os.rename(src_filepath, imagedropFilePath)
shutil.copyfile(src_filepath, imagedropFilePath)
return True
except:
return False
def rename_retouched_file(img):
import os,re
regex_coded = re.compile(r'.+?/[1-9][0-9]{8}_[1-6]\.[jJpPnNgG]{3}')
imgfilepath = img
if re.findall(regex_coded,imgfilepath):
filedir = imgfilepath.split('/')[:-1]
filedir = '/'.join(filedir)
print filedir
filename = imgfilepath.split('/')[-1]
colorstyle = str(filename[:9])
testimg = filename.split('_')[-1]
alttest = testimg.split('.')[0]
ext = filename.split('.')[-1]
ext = ".{}".format(ext.lower())
        if alttest.isdigit() and len(alttest) == 1:
if alttest == '1':
src_img_primary = img.replace('_1.','.')
os.rename(img, src_img_primary)
return src_img_primary
else:
alttest = int(alttest)
print alttest
alttest = alttest - 1
alt = '_alt0{}'.format(str(alttest))
print alt
if alt:
filename = "{}{}{}".format(colorstyle,alt,ext)
renamed = os.path.join(filedir, filename)
print renamed
if renamed:
os.rename(img, renamed)
if os.path.isfile(renamed):
return renamed
else:
return img
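# Sketch of the renaming convention implemented above (paths are made up):
#
#   rename_retouched_file('/mnt/in/123456789_1.jpg')  # -> '/mnt/in/123456789.jpg'
#   rename_retouched_file('/mnt/in/123456789_3.jpg')  # -> '/mnt/in/123456789_alt02.jpg'
#
# i.e. '_1' marks the primary shot and '_N' (N > 1) becomes '_alt0(N-1)';
# files that do not match the nine-digit style pattern are returned unchanged.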
def get_aspect_ratio(img):
from PIL import Image
try:
im = Image.open(img)
w,h = im.size
aspect_ratio = str(round(float(int(h))/float(int(w)),2))
return aspect_ratio
except IOError:
pass
def get_dimensions(img):
from PIL import Image
try:
im = Image.open(img)
w,h = im.size
dimensions = "{0}x{1}".format(int(w),int(h))
return dimensions
except IOError:
pass
def get_exif_metadata_value(img, exiftag=None):
import pyexiv2
image_metadata = pyexiv2.ImageMetadata(img)
metadata = image_metadata.read()
if exiftag:
exifvalue = metadata[exiftag]
return (exiftag, exifvalue)
else:
metadict = {}
for mtag, mvalue in metadata.iteritems():
metadict[mtag] = mvalue
return metadict
def get_image_color_minmax(img):
import subprocess, os, sys, re
try:
ret = subprocess.check_output(['convert', img, '-median', '3', '+dither', '-colors', '2', '-trim', '+repage', '-gravity', 'center', '-crop', "50%", '-depth', '8', '-format', '%c',"histogram:info:-"])
except:
return ''
colorlow = str(ret).split('\n')[0].strip(' ')
colorlow = re.sub(re.compile(r',\W'),',',colorlow).replace(':','',1).replace('(','').replace(')','').replace(' ',' ').split(' ')
colorhigh = str(ret).split('\n')[1].strip(' ')
colorhigh = re.sub(re.compile(r',\W'),',',colorhigh).replace(':','',1).replace('(','').replace(')','').replace(' ',' ').split(' ')
fields_top = ['low_rgb_avg', 'high_rgb_avg']
fields_level2 = ['total_pixels', 'rgb_vals', 'webcolor_id', 'color_profile_vals']
colorlow = zip(fields_level2,colorlow)
colorhigh = zip(fields_level2,colorhigh)
if len(colorhigh) == len(colorlow):
coloravgs = dict(colorlow),dict(colorhigh)
colordata = zip(fields_top, coloravgs)
colordata = dict(colordata)
colordata['comp_level'] = 'InRange'
return colordata
elif len(colorhigh) < len(colorlow):
coloravgs = dict(colorlow)
colordata = {}
colordata[fields_top[0]] = coloravgs
colordata[fields_top[1]] = {'total_pixels': 0}
colordata['comp_level'] = 'Bright'
return colordata
elif len(colorhigh) > len(colorlow):
coloravgs = dict(colorhigh)
colordata = {}
colordata[fields_top[1]] = coloravgs
        colordata[fields_top[0]] = {'total_pixels': 0}
colordata['comp_level'] = 'Dark'
return colordata
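# Shape of the value this returns, for illustration (numbers are invented):
#
#   {'low_rgb_avg':  {'total_pixels': '1234', 'rgb_vals': '34,36,40', ...},
#    'high_rgb_avg': {'total_pixels': '5678', 'rgb_vals': '240,241,239', ...},
#    'comp_level': 'InRange'}
#
# When the two-color quantization collapses to a single bin, the missing side
# is stubbed with {'total_pixels': 0} and comp_level becomes 'Bright' or
# 'Dark'; a failed convert call returns '' instead.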
def evaluate_color_values(colordata):
high_range_pixels = ''
low_range_pixels = ''
high_range_pixels = float((colordata['high_rgb_avg']['total_pixels']))
low_range_pixels = float((colordata['low_rgb_avg']['total_pixels']))
try:
if low_range_pixels >= high_range_pixels and high_range_pixels != 0:
r,g,b = colordata['high_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
high_avg = float(round((r+b+g)/3,2))
r,g,b = colordata['low_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
low_avg = float(round((r+b+g)/3,2))
ratio = round(float(float(low_range_pixels)/float(high_range_pixels)),2)
print high_avg/(low_avg*ratio)
return high_avg,low_avg,ratio, 'LOW'
elif low_range_pixels < high_range_pixels and low_range_pixels != 0:
r,g,b = colordata['high_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
high_avg = float(round((r+b+g)/3,2))
r,g,b = colordata['low_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
low_avg = float(round((r+b+g)/3,2))
ratio = round(float(float(low_range_pixels)/float(high_range_pixels)),2)
print low_avg/(high_avg*ratio)
return high_avg,low_avg,ratio, 'HIGH'
except TypeError:
print "Type Error"
pass
except ValueError:
print "Value Error", colordata
pass
def sort_files_by_values(directory):
import os,glob
filevalue_dict = {}
fileslist = directory
count = len(fileslist)
for f in fileslist:
values = {}
colordata = get_image_color_minmax(f)
try:
high,low,ratio, ratio_range = evaluate_color_values(colordata)
values['ratio'] = ratio
values['ratio_range'] = ratio_range
if ratio_range == 'LOW':
values['low'] = low ##
values['high'] = high
if ratio_range == 'HIGH':
values['high'] = high ##
values['low'] = low
filevalue_dict[f] = values
count -= 1
print "{0} Files Remaining".format(count)
except TypeError:
filevalue_dict[f] = {'ratio_range': 'OutOfRange'}
count -= 1
print "{0} Files Remaining-TypeError".format(count)
pass
except ZeroDivisionError:
filevalue_dict[f] = {'ratio_range': 'OutOfRange'}
count -= 1
print "{0} Files Remaining-ZeroDivision".format(count)
pass
return filevalue_dict
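# Example of the mapping this builds (file names and numbers are made up):
#
#   {'/mnt/in/123456789.jpg': {'ratio': 0.42, 'ratio_range': 'LOW',
#                              'low': 38.67, 'high': 224.33},
#    '/mnt/in/123456788.jpg': {'ratio_range': 'OutOfRange'}}
#
# Entries flagged 'OutOfRange' are kept so subproc_magick_png can fall back to
# its neutral '100,100' modulate for them.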
def subproc_magick_large_jpg(img, destdir=None):
import subprocess,os,re
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
os.chdir(os.path.dirname(img))
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
if not regex_alt.findall(img):
outfile = os.path.join(destdir, img.split('/')[-1][:9] + '_l.jpg')
dimensions = ''
aspect_ratio = get_aspect_ratio(img)
dimensions = get_dimensions(img)
width = dimensions.split('x')[0]
height = dimensions.split('x')[1]
if aspect_ratio == '1.2':
vert_horiz = '400x480'
elif float(aspect_ratio) > float(1.2):
vert_horiz = 'x480'
elif float(aspect_ratio) < float(1.2):
vert_horiz = '400x'
dimensions = "400x480"
print dimensions,vert_horiz
if regex_valid_style.findall(img):
subprocess.call([
'convert',
'-colorspace',
'sRGB',
img,
'-background',
'white',
"-filter",
"Spline",
"-filter",
"Cosine",
"-define",
"filter:blur=0.9891028367558475",
"-distort",
"Resize",
vert_horiz,
'-extent',
dimensions,
"-colorspace",
"sRGB",
"-format",
"jpeg",
'-unsharp',
'2x1.24+0.5+0',
'-quality',
'95',
outfile
])
return outfile
else:
return img
else:
pass
def subproc_magick_medium_jpg(img, destdir=None):
import subprocess,os,re
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
os.chdir(os.path.dirname(img))
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
if regex_alt.findall(img):
outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.jpg')
else:
outfile = os.path.join(destdir, img.split('/')[-1][:9] + '_m.jpg')
dimensions = ''
aspect_ratio = get_aspect_ratio(img)
dimensions = get_dimensions(img)
width = dimensions.split('x')[0]
height = dimensions.split('x')[1]
if aspect_ratio == '1.2':
vert_horiz = '200x240'
elif float(aspect_ratio) > float(1.2):
vert_horiz = 'x240'
elif float(aspect_ratio) < float(1.2):
vert_horiz = '200x'
dimensions = '200x240'
print dimensions,vert_horiz
if regex_valid_style.findall(img):
subprocess.call([
'convert',
'-colorspace',
'sRGB',
img,
'-background',
'white',
"-filter",
"Spline",
"-filter",
"Cosine",
"-define",
"fliter:blur=0.9891028367558475",
"-distort",
"Resize",
vert_horiz,
'-extent',
dimensions,
"-colorspace",
"sRGB",
"-format",
"jpeg",
'-unsharp',
'2x1.1+0.5+0',
'-quality',
'95',
outfile
])
return outfile
else:
return img
def subproc_magick_png(img, rgbmean=None, destdir=None):
import subprocess,re,os
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
modulator = ''
modulate = ''
if not destdir:
destdir = '.'
#imgdestpng_out = os.path.join(tmp_processing, os.path.basename(imgsrc_jpg))
os.chdir(os.path.dirname(img))
if not rgbmean:
ratio_range = 'OutOfRange'
else:
try:
ratio_range = rgbmean['ratio_range']
except:
ratio_range = 'OutOfRange'
pass
if ratio_range != 'OutOfRange':
high = rgbmean['high']
low = rgbmean['low']
ratio = rgbmean['ratio']
#rgbmean = float(128)
#rgbmean = get_image_color_minmax(img)
if ratio_range == 'LOW':
if float(round(high,2)) > float(240):
modulator = '-modulate'
modulate = '104,100'
elif float(round(high,2)) > float(200):
modulator = '-modulate'
modulate = '107,110'
elif float(round(high,2)) > float(150):
modulator = '-modulate'
modulate = '110,110'
else:
modulator = '-modulate'
modulate = '112,110'
elif ratio_range == 'HIGH':
if float(round(high,2)) > float(230):
modulator = '-modulate'
modulate = '100,100'
elif float(round(high,2)) > float(200):
modulator = '-modulate'
modulate = '103,100'
elif float(round(high,2)) > float(150):
modulator = '-modulate'
modulate = '105,105'
else:
modulator = '-modulate'
modulate = '108,107'
elif ratio_range == 'OutOfRange':
modulator = '-modulate'
modulate = '100,100'
format = img.split('.')[-1]
os.chdir(os.path.dirname(img))
## Destination name
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
dimensions = ''
## Get variable values for processing
aspect_ratio = get_aspect_ratio(img)
dimensions = get_dimensions(img)
width = dimensions.split('x')[0]
height = dimensions.split('x')[1]
if aspect_ratio == '1.2':
vert_horiz = '{0}x{1}'.format(width,height)
dimensions = '{0}x{1}'.format(int(width),int(height))
    elif float(aspect_ratio) > float(1.2):
vert_horiz = 'x{0}'.format(height)
w = float(0.8) * float(height)
#w = float(round(w,2)*float(aspect_ratio))
dimensions = '{0}x{1}'.format(int(w),int(height))
print "W",w, aspect_ratio
elif float(aspect_ratio) < float(1.2):
vert_horiz = '{0}x'.format(width)
h = float(1.2) * float(width)
#h = float(round(h,2)*float(aspect_ratio))
dimensions = '{0}x{1}'.format(int(width),int(h))
print "H",h, aspect_ratio
if not dimensions:
dimensions = '100%'
vert_horiz = '100%'
subprocess.call([
'convert',
'-format',
format,
img,
'-define',
'png:preserve-colormap',
'-define',
'png:format\=png24',
'-define',
'png:compression-level\=N',
'-define',
'png:compression-strategy\=N',
'-define',
'png:compression-filter\=N',
'-format',
'png',
'-modulate',
modulate,
"-define",
"filter:blur=0.625",
#"filter:blur=0.88549061701764",
"-distort",
"Resize",
vert_horiz,
'-background',
'white',
'-gravity',
'center',
'-extent',
dimensions,
"-colorspace",
"sRGB",
'-unsharp',
'2x2.7+0.5+0',
'-quality',
'95',
os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
])
print 'Done {}'.format(img)
return os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
def upload_imagedrop(root_dir):
import os, sys, re, csv, shutil, glob
archive_uploaded = os.path.join(root_dir, 'uploaded')
tmp_failed = os.path.join(root_dir, 'failed_upload')
try:
os.makedirs(archive_uploaded, 16877)
except OSError:
try:
shutil.rmtree(archive_uploaded, ignore_errors = True)
os.makedirs(archive_uploaded, 16877)
except:
pass
try:
os.makedirs(tmp_failed, 16877)
except:
pass
import time
upload_tmp_loading = glob.glob(os.path.join(root_dir, '*.*g'))
for upload_file in upload_tmp_loading:
try:
code = copy_to_imagedrop_upload(upload_file)
if code == True or code == '200':
try:
shutil.move(upload_file, archive_uploaded)
time.sleep(float(.1))
print "1stTryOK", upload_file
except:
dst_file = upload_file.replace(root_dir, archive_uploaded)
try:
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(upload_file, archive_uploaded)
except:
pass
else:
print "Uploaded {}".format(upload_file)
time.sleep(float(.1))
try:
shutil.move(upload_file, archive_uploaded)
except shutil.Error:
pass
except OSError:
print "Error moving Finals to Arch {}".format(file)
shutil.move(upload_file, tmp_failed)
pass
try:
if os.path.isdir(sys.argv[2]):
finaldir = os.path.abspath(sys.argv[2])
for f in glob.glob(os.path.join(archive_uploaded, '*.*g')):
shutil.move(f, finaldir)
except:
print 'Failed to Archive {}'.format(upload_tmp_loading)
pass
def main(root_img_dir=None, destdir=None):
import sys,glob,shutil,os,re
from image_processing.magick_tweaks import convert_img_srgb
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.[JjPpNnGg]{3}$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
if not root_img_dir:
try:
root_img_dir = sys.argv[1]
except IndexError:
root_img_dir = os.path.abspath('/mnt/Post_Complete/Complete_Archive/MARKETPLACE')
pass
else:
pass
if not destdir:
try:
destdir = os.path.abspath(sys.argv[2])
if destdir.isdigit():
destdir = os.path.join(root_img_dir, 'output')
except IndexError:
destdir = os.path.join(root_img_dir, 'output')
try:
os.makedirs(destdir, 16877)
except OSError:
pass
if not os.path.isdir(destdir):
os.makedirs(destdir, 16877)
if os.path.isdir(root_img_dir):
imgs_renamed = [rename_retouched_file(f) for f in (glob.glob(os.path.join(root_img_dir,'*.??[gG]')))]
img_dict = sort_files_by_values(glob.glob(os.path.join(root_img_dir,'*.??[gG]')))
for k,v in img_dict.items():
try:
img = k
## Convert profile of source img if CMYK ignores if RGB
convert_img_srgb.main(image_file=img)
## Get color pixel values from source img
rgbmean = v.items()
## Generate png from source then jpgs from png
pngout = subproc_magick_png(img, rgbmean=dict(rgbmean), destdir=destdir)
subproc_magick_large_jpg(pngout, destdir=destdir)
subproc_magick_medium_jpg(pngout, destdir=destdir)
except AttributeError:
print 'SOMETHING IS WRONG WITH THE IMAGE Error {}'.format(img)
pass
else:
img = root_img_dir
if regex_coded.findall(img):
img = rename_retouched_file(img)
pngout = subproc_magick_png(img, destdir=destdir)
subproc_magick_large_jpg(pngout, destdir=destdir)
subproc_magick_medium_jpg(pngout, destdir=destdir)
upload_imagedrop(destdir)
failed_dir = os.path.join(destdir,'failed_upload','*.??[gG]')
# while True:
# if glob.glob(failed_dir):
# destdir = failed_dir
# failed_dir = os.path.join(destdir,'failed_upload','*.??[gG]')
# upload_imagedrop(destdir)
#print 'NOT UPLOADING YET'
if __name__ == '__main__':
main()
| relic7/prodimages | python/jbmodules/image_processing/magick_processes/magicColorspaceModAspctLoad_ArgSafe.py | Python | mit | 20,749 |
from typing import Iterable, List, Optional
from gi.repository import Gtk
from gaphor.core.eventmanager import EventManager
from gaphor.transaction import Transaction
class TxData:
def __init__(self, event_manager):
self.event_manager = event_manager
self.txs: List[Transaction] = []
def begin(self):
self.txs.append(Transaction(self.event_manager))
def commit(self):
assert self.txs
tx = self.txs.pop()
tx.commit()
def transactional_tool(
    *tools: Gtk.Gesture, event_manager: Optional[EventManager] = None
) -> Iterable[Gtk.Gesture]:
tx_data = TxData(event_manager)
for tool in tools:
tool.connect("begin", on_begin, tx_data)
tool.connect_after("end", on_end, tx_data)
return tools
def on_begin(gesture, _sequence, tx_data):
tx_data.begin()
def on_end(gesture, _sequence, tx_data):
tx_data.commit()
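# Rough usage sketch (not part of this module): wrap one or more gestures so
# every begin/end pair runs inside a Transaction. The Gtk.GestureDrag and the
# view.add_controller call below are assumptions about the surrounding GTK
# code, not guarantees of this API.
#
#   drag = Gtk.GestureDrag.new()
#   for gesture in transactional_tool(drag, event_manager=event_manager):
#       view.add_controller(gesture)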
| amolenaar/gaphor | gaphor/diagram/diagramtools/txtool.py | Python | lgpl-2.1 | 891 |
"""Shared OS X support functions."""
import os
import re
import sys
__all__ = [
'compiler_fixup',
'customize_config_vars',
'customize_compiler',
'get_platform_osx',
]
# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
# prefix added to original configuration variable names
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
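# For example (illustrative; the result depends on the machine):
#
#   _read_output("/usr/bin/xcrun -find clang")
#
# returns whatever path xcrun prints (stripped), or None when the command
# exits with a non-zero status.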
def _find_build_tool(toolname):
"""Find a build tool on current path or using xcrun"""
return (_find_executable(toolname)
or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
or ''
)
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except OSError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
"""Remove original unmodified values for testing"""
# This is needed for higher-level cross-platform tests of get_platform.
for k in list(_config_vars):
if k.startswith(_INITPRE):
del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
"""Save modified and original unmodified value of configuration var"""
oldvalue = _config_vars.get(cv, '')
if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
_config_vars[_INITPRE + cv] = oldvalue
_config_vars[cv] = newvalue
def _supports_universal_builds():
"""Returns True if universal builds are supported on this system"""
# As an approximation, we assume that if we are running on 10.4 or above,
# then we are running with an Xcode environment that supports universal
# builds, in particular -isysroot and -arch arguments to the compiler. This
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
osx_version = _get_system_version()
if osx_version:
try:
osx_version = tuple(int(i) for i in osx_version.split('.'))
except ValueError:
osx_version = ''
return bool(osx_version >= (10, 4)) if osx_version else False
def _find_appropriate_compiler(_config_vars):
"""Find appropriate C compiler for extension module builds"""
# Issue #13590:
# The OSX location for the compiler varies between OSX
# (or rather Xcode) releases. With older releases (up-to 10.5)
# the compiler is in /usr/bin, with newer releases the compiler
# can only be found inside Xcode.app if the "Command Line Tools"
# are not installed.
#
# Furthermore, the compiler that can be used varies between
# Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
# as the compiler, after that 'clang' should be used because
# gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
# miscompiles Python.
# skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
# The CC config var might contain additional arguments.
# Ignore them while searching.
cc = oldcc = _config_vars['CC'].split()[0]
if not _find_executable(cc):
# Compiler is not found on the shell search PATH.
# Now search for clang, first on PATH (if the Command LIne
# Tools have been installed in / or if the user has provided
# another location via CC). If not found, try using xcrun
# to find an uninstalled clang (within a selected Xcode).
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself (and os.popen is
# implemented on top of subprocess and is therefore not
# usable as well)
cc = _find_build_tool('clang')
elif os.path.basename(cc).startswith('gcc'):
# Compiler is GCC, check if it is LLVM-GCC
data = _read_output("'%s' --version"
% (cc.replace("'", "'\"'\"'"),))
if data and 'llvm-gcc' in data:
# Found LLVM-GCC, fall back to clang
cc = _find_build_tool('clang')
if not cc:
raise SystemError(
"Cannot locate working compiler")
if cc != oldcc:
# Found a replacement compiler.
# Modify config vars using new compiler, if not already explicitly
# overridden by an env variable, preserving additional arguments.
for cv in _COMPILER_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
cv_split = _config_vars[cv].split()
cv_split[0] = cc if cv != 'CXX' else cc + '++'
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
return _config_vars
def _remove_universal_flags(_config_vars):
"""Remove all universal build arguments from config vars"""
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
flags = re.sub(r'-isysroot\s*\S+', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _remove_unsupported_archs(_config_vars):
"""Remove any unsupported archs from config vars"""
# Different Xcode releases support different sets for '-arch'
# flags. In particular, Xcode 4.x no longer supports the
# PPC architectures.
#
# This code automatically removes '-arch ppc' and '-arch ppc64'
# when these are not supported. That makes it possible to
# build extensions on OSX 10.7 and later with the prebuilt
# 32-bit installer on the python.org website.
# skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself
status = os.system(
"""echo 'int main{};' | """
"""'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
%(_config_vars['CC'].replace("'", "'\"'\"'"),))
if status:
# The compile failed for some reason. Because of differences
# across Xcode and compiler versions, there is no reliable way
# to be sure why it failed. Assume here it was due to lack of
# PPC support and remove the related '-arch' flags from each
# config variables not explicitly overridden by an environment
# variable. If the error was for some other reason, we hope the
# failure will show up again when trying to compile an extension
# module.
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _override_all_archs(_config_vars):
"""Allow override of all archs with ARCHFLAGS env var"""
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and '-arch' in _config_vars[cv]:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _check_for_unavailable_sdk(_config_vars):
"""Remove references to any SDKs not available"""
# If we're on OSX 10.5 or later and the user tries to
# compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail. This is particularly important with
# the standalone Command Line Tools alternative to a
# full-blown Xcode install since the CLT packages do not
# provide SDKs. If the SDK is not present, it is assumed
# that the header files and dev libs have been installed
# to /usr and /System/Library by either a standalone CLT
# package or the CLT component within Xcode.
cflags = _config_vars.get('CFLAGS', '')
m = re.search(r'-isysroot\s*(\S+)', cflags)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-isysroot\s*\S+(?:\s|$)', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def compiler_fixup(compiler_so, cc_args):
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one of them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
"""
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
while True:
indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
if not indices:
break
index = indices[0]
if compiler_so[index] == '-isysroot':
# Strip this argument and the next one:
del compiler_so[index:index+2]
else:
# It's '-isysroot/some/path' in one arg
del compiler_so[index:index+1]
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
argvar = cc_args
indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]
if not indices:
argvar = compiler_so
indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
for idx in indices:
if argvar[idx] == '-isysroot':
sysroot = argvar[idx+1]
break
else:
sysroot = argvar[idx][len('-isysroot'):]
break
if sysroot and not os.path.isdir(sysroot):
from distutils import log
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
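# Illustrative call with made-up flags: an '-arch' and '-isysroot' already
# present in cc_args cause the stale copies in compiler_so to be dropped.
#
#   compiler_fixup(['clang', '-arch', 'x86_64', '-isysroot', '/old.sdk', '-g'],
#                  ['-arch', 'arm64', '-isysroot', '/new.sdk'])
#   # -> ['clang', '-g'], with a warning logged if '/new.sdk' does not exist.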
def customize_config_vars(_config_vars):
"""Customize Python build configuration variables.
Called internally from sysconfig with a mutable mapping
containing name/value pairs parsed from the configured
makefile used to build this interpreter. Returns
the mapping updated as needed to reflect the environment
in which the interpreter is running; in the case of
a Python from a binary installer, the installed
environment may be very different from the build
environment, i.e. different OS levels, different
built tools, different available CPU architectures.
This customization is performed whenever
distutils.sysconfig.get_config_vars() is first
called. It may be used in environments where no
compilers are present, i.e. when installing pure
Python dists. Customization of compiler paths
and detection of unavailable archs is deferred
until the first extension module build is
requested (in distutils.sysconfig.customize_compiler).
Currently called from distutils.sysconfig
"""
if not _supports_universal_builds():
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
_remove_universal_flags(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
# Remove references to sdks that are not found
_check_for_unavailable_sdk(_config_vars)
return _config_vars
def customize_compiler(_config_vars):
"""Customize compiler path and configuration variables.
This customization is performed when the first
extension module build is requested
in distutils.sysconfig.customize_compiler).
"""
# Find a compiler to use for extension module builds
_find_appropriate_compiler(_config_vars)
# Remove ppc arch flags if not supported here
_remove_unsupported_archs(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
"""Filter values for get_platform()"""
# called from get_platform() in sysconfig and distutils.util
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
macrelease = _get_system_version() or macver
macver = macver or macrelease
if macver:
release = macver
osname = "macosx"
# Use the original CFLAGS value, if available, so that we
# return the same machine type for the platform string.
# Otherwise, distutils may consider this a cross-compiling
# case and disallow installs.
cflags = _config_vars.get(_INITPRE+'CFLAGS',
_config_vars.get('CFLAGS', ''))
if macrelease:
try:
macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
except ValueError:
macrelease = (10, 0)
else:
# assume no universal support
macrelease = (10, 0)
if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
machine = 'fat'
archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return (osname, release, machine)
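# Example outcome with invented config values: if MACOSX_DEPLOYMENT_TARGET is
# '10.9' and CFLAGS contains '-arch x86_64', then
#
#   get_platform_osx(_config_vars, 'darwin', '19.6.0', 'i386')
#
# returns ('macosx', '10.9', 'x86_64'), which distutils renders as the
# platform tag 'macosx-10.9-x86_64'.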
| prefetchnta/questlab | bin/x64bin/python/37/Lib/_osx_support.py | Python | lgpl-2.1 | 20,113 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from antlr3 import *
from antlr3.compat import set, frozenset
HIDDEN = BaseRecognizer.HIDDEN
THIRD=13
SEPTEMBER=36
WEDNESDAY=22
JULY=34
APRIL=31
DIGITS=8
OCTOBER=37
MAY=32
DAY=19
MARCH=30
EOF=-1
MONTH=27
FRIDAY=24
UNKNOWN_TOKEN=44
TIME=5
SYNCHRONIZED=9
QUARTER=40
COMMA=10
DIGIT=7
FOURTH=14
SECOND=12
NOVEMBER=38
SATURDAY=25
TO=42
EVERY=6
FEBRUARY=29
MONDAY=20
SUNDAY=26
JUNE=33
OF=4
JANUARY=28
MINUTES=18
FIFTH=15
WS=43
THURSDAY=23
DECEMBER=39
AUGUST=35
FROM=41
TUESDAY=21
HOURS=17
FIRST=11
FOURTH_OR_FIFTH=16
class GrocLexer(Lexer):
grammarFileName = "Groc.g"
antlr_version = version_str_to_tuple("3.1.1")
antlr_version_str = "3.1.1"
def __init__(self, input=None, state=None):
if state is None:
state = RecognizerSharedState()
Lexer.__init__(self, input, state)
self.dfa26 = self.DFA26(
self, 26,
eot = self.DFA26_eot,
eof = self.DFA26_eof,
min = self.DFA26_min,
max = self.DFA26_max,
accept = self.DFA26_accept,
special = self.DFA26_special,
transition = self.DFA26_transition
)
def mTIME(self, ):
try:
_type = TIME
_channel = DEFAULT_CHANNEL
pass
alt1 = 4
LA1 = self.input.LA(1)
if LA1 == 48:
LA1_1 = self.input.LA(2)
if (LA1_1 == 58) :
alt1 = 1
elif ((48 <= LA1_1 <= 57)) :
alt1 = 2
else:
nvae = NoViableAltException("", 1, 1, self.input)
raise nvae
elif LA1 == 49:
LA1_2 = self.input.LA(2)
if (LA1_2 == 58) :
alt1 = 1
elif ((48 <= LA1_2 <= 57)) :
alt1 = 3
else:
nvae = NoViableAltException("", 1, 2, self.input)
raise nvae
elif LA1 == 50:
LA1_3 = self.input.LA(2)
if ((48 <= LA1_3 <= 51)) :
alt1 = 4
elif (LA1_3 == 58) :
alt1 = 1
else:
nvae = NoViableAltException("", 1, 3, self.input)
raise nvae
elif LA1 == 51 or LA1 == 52 or LA1 == 53 or LA1 == 54 or LA1 == 55 or LA1 == 56 or LA1 == 57:
alt1 = 1
else:
nvae = NoViableAltException("", 1, 0, self.input)
raise nvae
if alt1 == 1:
pass
self.mDIGIT()
elif alt1 == 2:
pass
pass
self.match(48)
self.mDIGIT()
elif alt1 == 3:
pass
pass
self.match(49)
self.mDIGIT()
elif alt1 == 4:
pass
pass
self.match(50)
self.matchRange(48, 51)
self.match(58)
pass
self.matchRange(48, 53)
self.mDIGIT()
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mSYNCHRONIZED(self, ):
try:
_type = SYNCHRONIZED
_channel = DEFAULT_CHANNEL
pass
self.match("synchronized")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFIRST(self, ):
try:
_type = FIRST
_channel = DEFAULT_CHANNEL
pass
alt2 = 2
LA2_0 = self.input.LA(1)
if (LA2_0 == 49) :
alt2 = 1
elif (LA2_0 == 102) :
alt2 = 2
else:
nvae = NoViableAltException("", 2, 0, self.input)
raise nvae
if alt2 == 1:
pass
self.match("1st")
elif alt2 == 2:
pass
self.match("first")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mSECOND(self, ):
try:
_type = SECOND
_channel = DEFAULT_CHANNEL
pass
alt3 = 2
LA3_0 = self.input.LA(1)
if (LA3_0 == 50) :
alt3 = 1
elif (LA3_0 == 115) :
alt3 = 2
else:
nvae = NoViableAltException("", 3, 0, self.input)
raise nvae
if alt3 == 1:
pass
self.match("2nd")
elif alt3 == 2:
pass
self.match("second")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mTHIRD(self, ):
try:
_type = THIRD
_channel = DEFAULT_CHANNEL
pass
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == 51) :
alt4 = 1
elif (LA4_0 == 116) :
alt4 = 2
else:
nvae = NoViableAltException("", 4, 0, self.input)
raise nvae
if alt4 == 1:
pass
self.match("3rd")
elif alt4 == 2:
pass
self.match("third")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFOURTH(self, ):
try:
_type = FOURTH
_channel = DEFAULT_CHANNEL
pass
pass
self.match("4th")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFIFTH(self, ):
try:
_type = FIFTH
_channel = DEFAULT_CHANNEL
pass
pass
self.match("5th")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFOURTH_OR_FIFTH(self, ):
try:
_type = FOURTH_OR_FIFTH
_channel = DEFAULT_CHANNEL
pass
alt5 = 2
LA5_0 = self.input.LA(1)
if (LA5_0 == 102) :
LA5_1 = self.input.LA(2)
if (LA5_1 == 111) :
alt5 = 1
elif (LA5_1 == 105) :
alt5 = 2
else:
nvae = NoViableAltException("", 5, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 5, 0, self.input)
raise nvae
if alt5 == 1:
pass
pass
self.match("fourth")
_type = FOURTH;
elif alt5 == 2:
pass
pass
self.match("fifth")
_type = FIFTH;
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mDAY(self, ):
try:
_type = DAY
_channel = DEFAULT_CHANNEL
pass
self.match("day")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mMONDAY(self, ):
try:
_type = MONDAY
_channel = DEFAULT_CHANNEL
pass
self.match("mon")
alt6 = 2
LA6_0 = self.input.LA(1)
if (LA6_0 == 100) :
alt6 = 1
if alt6 == 1:
pass
self.match("day")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mTUESDAY(self, ):
try:
_type = TUESDAY
_channel = DEFAULT_CHANNEL
pass
self.match("tue")
alt7 = 2
LA7_0 = self.input.LA(1)
if (LA7_0 == 115) :
alt7 = 1
if alt7 == 1:
pass
self.match("sday")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mWEDNESDAY(self, ):
try:
_type = WEDNESDAY
_channel = DEFAULT_CHANNEL
pass
self.match("wed")
alt8 = 2
LA8_0 = self.input.LA(1)
if (LA8_0 == 110) :
alt8 = 1
if alt8 == 1:
pass
self.match("nesday")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mTHURSDAY(self, ):
try:
_type = THURSDAY
_channel = DEFAULT_CHANNEL
pass
self.match("thu")
alt9 = 2
LA9_0 = self.input.LA(1)
if (LA9_0 == 114) :
alt9 = 1
if alt9 == 1:
pass
self.match("rsday")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFRIDAY(self, ):
try:
_type = FRIDAY
_channel = DEFAULT_CHANNEL
pass
self.match("fri")
alt10 = 2
LA10_0 = self.input.LA(1)
if (LA10_0 == 100) :
alt10 = 1
if alt10 == 1:
pass
self.match("day")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mSATURDAY(self, ):
try:
_type = SATURDAY
_channel = DEFAULT_CHANNEL
pass
self.match("sat")
alt11 = 2
LA11_0 = self.input.LA(1)
if (LA11_0 == 117) :
alt11 = 1
if alt11 == 1:
pass
self.match("urday")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mSUNDAY(self, ):
try:
_type = SUNDAY
_channel = DEFAULT_CHANNEL
pass
self.match("sun")
alt12 = 2
LA12_0 = self.input.LA(1)
if (LA12_0 == 100) :
alt12 = 1
if alt12 == 1:
pass
self.match("day")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mJANUARY(self, ):
try:
_type = JANUARY
_channel = DEFAULT_CHANNEL
pass
self.match("jan")
alt13 = 2
LA13_0 = self.input.LA(1)
if (LA13_0 == 117) :
alt13 = 1
if alt13 == 1:
pass
self.match("uary")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFEBRUARY(self, ):
try:
_type = FEBRUARY
_channel = DEFAULT_CHANNEL
pass
self.match("feb")
alt14 = 2
LA14_0 = self.input.LA(1)
if (LA14_0 == 114) :
alt14 = 1
if alt14 == 1:
pass
self.match("ruary")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mMARCH(self, ):
try:
_type = MARCH
_channel = DEFAULT_CHANNEL
pass
self.match("mar")
alt15 = 2
LA15_0 = self.input.LA(1)
if (LA15_0 == 99) :
alt15 = 1
if alt15 == 1:
pass
self.match("ch")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mAPRIL(self, ):
try:
_type = APRIL
_channel = DEFAULT_CHANNEL
pass
self.match("apr")
alt16 = 2
LA16_0 = self.input.LA(1)
if (LA16_0 == 105) :
alt16 = 1
if alt16 == 1:
pass
self.match("il")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mMAY(self, ):
try:
_type = MAY
_channel = DEFAULT_CHANNEL
pass
self.match("may")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mJUNE(self, ):
try:
_type = JUNE
_channel = DEFAULT_CHANNEL
pass
self.match("jun")
alt17 = 2
LA17_0 = self.input.LA(1)
if (LA17_0 == 101) :
alt17 = 1
if alt17 == 1:
pass
self.match(101)
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mJULY(self, ):
try:
_type = JULY
_channel = DEFAULT_CHANNEL
pass
self.match("jul")
alt18 = 2
LA18_0 = self.input.LA(1)
if (LA18_0 == 121) :
alt18 = 1
if alt18 == 1:
pass
self.match(121)
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mAUGUST(self, ):
try:
_type = AUGUST
_channel = DEFAULT_CHANNEL
pass
self.match("aug")
alt19 = 2
LA19_0 = self.input.LA(1)
if (LA19_0 == 117) :
alt19 = 1
if alt19 == 1:
pass
self.match("ust")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mSEPTEMBER(self, ):
try:
_type = SEPTEMBER
_channel = DEFAULT_CHANNEL
pass
self.match("sep")
alt20 = 2
LA20_0 = self.input.LA(1)
if (LA20_0 == 116) :
alt20 = 1
if alt20 == 1:
pass
self.match("tember")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mOCTOBER(self, ):
try:
_type = OCTOBER
_channel = DEFAULT_CHANNEL
pass
self.match("oct")
alt21 = 2
LA21_0 = self.input.LA(1)
if (LA21_0 == 111) :
alt21 = 1
if alt21 == 1:
pass
self.match("ober")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mNOVEMBER(self, ):
try:
_type = NOVEMBER
_channel = DEFAULT_CHANNEL
pass
self.match("nov")
alt22 = 2
LA22_0 = self.input.LA(1)
if (LA22_0 == 101) :
alt22 = 1
if alt22 == 1:
pass
self.match("ember")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mDECEMBER(self, ):
try:
_type = DECEMBER
_channel = DEFAULT_CHANNEL
pass
self.match("dec")
alt23 = 2
LA23_0 = self.input.LA(1)
if (LA23_0 == 101) :
alt23 = 1
if alt23 == 1:
pass
self.match("ember")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mMONTH(self, ):
try:
_type = MONTH
_channel = DEFAULT_CHANNEL
pass
pass
self.match("month")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mQUARTER(self, ):
try:
_type = QUARTER
_channel = DEFAULT_CHANNEL
pass
pass
self.match("quarter")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mEVERY(self, ):
try:
_type = EVERY
_channel = DEFAULT_CHANNEL
pass
pass
self.match("every")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mHOURS(self, ):
try:
_type = HOURS
_channel = DEFAULT_CHANNEL
pass
pass
self.match("hours")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mMINUTES(self, ):
try:
_type = MINUTES
_channel = DEFAULT_CHANNEL
pass
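            # The nested lookahead below distinguishes "mins" from "minutes":
            # after the shared prefix 'm','i','n', the fourth character picks
            # the alternative -- 115 ('s') selects "mins", 117 ('u') selects
            # "minutes"; anything else raises a NoViableAltException.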
alt24 = 2
LA24_0 = self.input.LA(1)
if (LA24_0 == 109) :
LA24_1 = self.input.LA(2)
if (LA24_1 == 105) :
LA24_2 = self.input.LA(3)
if (LA24_2 == 110) :
LA24_3 = self.input.LA(4)
if (LA24_3 == 115) :
alt24 = 1
elif (LA24_3 == 117) :
alt24 = 2
else:
nvae = NoViableAltException("", 24, 3, self.input)
raise nvae
else:
nvae = NoViableAltException("", 24, 2, self.input)
raise nvae
else:
nvae = NoViableAltException("", 24, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 24, 0, self.input)
raise nvae
if alt24 == 1:
pass
self.match("mins")
elif alt24 == 2:
pass
self.match("minutes")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mCOMMA(self, ):
try:
_type = COMMA
_channel = DEFAULT_CHANNEL
pass
pass
self.match(44)
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mOF(self, ):
try:
_type = OF
_channel = DEFAULT_CHANNEL
pass
pass
self.match("of")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mFROM(self, ):
try:
_type = FROM
_channel = DEFAULT_CHANNEL
pass
pass
self.match("from")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mTO(self, ):
try:
_type = TO
_channel = DEFAULT_CHANNEL
pass
pass
self.match("to")
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mWS(self, ):
try:
_type = WS
_channel = DEFAULT_CHANNEL
pass
if (9 <= self.input.LA(1) <= 10) or self.input.LA(1) == 13 or self.input.LA(1) == 32:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
_channel=HIDDEN;
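            # Whitespace tokens are routed to the HIDDEN channel, so the lexer
            # still produces them but downstream parsing ignores them.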
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mDIGIT(self, ):
try:
_type = DIGIT
_channel = DEFAULT_CHANNEL
pass
pass
self.matchRange(48, 57)
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mDIGITS(self, ):
try:
_type = DIGITS
_channel = DEFAULT_CHANNEL
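            # DIGITS is either a two-digit (alt 1) or a three-digit (alt 2) run;
            # the lookahead below peeks up to three characters ahead to decide.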
alt25 = 2
LA25_0 = self.input.LA(1)
if ((48 <= LA25_0 <= 57)) :
LA25_1 = self.input.LA(2)
if ((48 <= LA25_1 <= 57)) :
LA25_2 = self.input.LA(3)
if ((48 <= LA25_2 <= 57)) :
alt25 = 2
else:
alt25 = 1
else:
nvae = NoViableAltException("", 25, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 25, 0, self.input)
raise nvae
if alt25 == 1:
pass
pass
self.mDIGIT()
self.mDIGIT()
elif alt25 == 2:
pass
pass
self.mDIGIT()
self.mDIGIT()
self.mDIGIT()
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mUNKNOWN_TOKEN(self, ):
try:
_type = UNKNOWN_TOKEN
_channel = DEFAULT_CHANNEL
pass
pass
self.matchAny()
self._state.type = _type
self._state.channel = _channel
finally:
pass
def mTokens(self):
alt26 = 41
alt26 = self.dfa26.predict(self.input)
if alt26 == 1:
pass
self.mTIME()
elif alt26 == 2:
pass
self.mSYNCHRONIZED()
elif alt26 == 3:
pass
self.mFIRST()
elif alt26 == 4:
pass
self.mSECOND()
elif alt26 == 5:
pass
self.mTHIRD()
elif alt26 == 6:
pass
self.mFOURTH()
elif alt26 == 7:
pass
self.mFIFTH()
elif alt26 == 8:
pass
self.mFOURTH_OR_FIFTH()
elif alt26 == 9:
pass
self.mDAY()
elif alt26 == 10:
pass
self.mMONDAY()
elif alt26 == 11:
pass
self.mTUESDAY()
elif alt26 == 12:
pass
self.mWEDNESDAY()
elif alt26 == 13:
pass
self.mTHURSDAY()
elif alt26 == 14:
pass
self.mFRIDAY()
elif alt26 == 15:
pass
self.mSATURDAY()
elif alt26 == 16:
pass
self.mSUNDAY()
elif alt26 == 17:
pass
self.mJANUARY()
elif alt26 == 18:
pass
self.mFEBRUARY()
elif alt26 == 19:
pass
self.mMARCH()
elif alt26 == 20:
pass
self.mAPRIL()
elif alt26 == 21:
pass
self.mMAY()
elif alt26 == 22:
pass
self.mJUNE()
elif alt26 == 23:
pass
self.mJULY()
elif alt26 == 24:
pass
self.mAUGUST()
elif alt26 == 25:
pass
self.mSEPTEMBER()
elif alt26 == 26:
pass
self.mOCTOBER()
elif alt26 == 27:
pass
self.mNOVEMBER()
elif alt26 == 28:
pass
self.mDECEMBER()
elif alt26 == 29:
pass
self.mMONTH()
elif alt26 == 30:
pass
self.mQUARTER()
elif alt26 == 31:
pass
self.mEVERY()
elif alt26 == 32:
pass
self.mHOURS()
elif alt26 == 33:
pass
self.mMINUTES()
elif alt26 == 34:
pass
self.mCOMMA()
elif alt26 == 35:
pass
self.mOF()
elif alt26 == 36:
pass
self.mFROM()
elif alt26 == 37:
pass
self.mTO()
elif alt26 == 38:
pass
self.mWS()
elif alt26 == 39:
pass
self.mDIGIT()
elif alt26 == 40:
pass
self.mDIGITS()
elif alt26 == 41:
pass
self.mUNKNOWN_TOKEN()
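    # The packed strings below are the ANTLR-generated DFA tables that
    # self.dfa26.predict() consults in mTokens() above to choose which token
    # rule to invoke; they are not meant to be edited by hand.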
DFA26_eot = DFA.unpack(
u"\1\uffff\4\30\2\27\1\30\1\27\2\30\12\27\5\uffff\1\37\1\uffff\2"
u"\37\46\uffff\1\112\6\uffff"
)
DFA26_eof = DFA.unpack(
u"\113\uffff"
)
DFA26_min = DFA.unpack(
u"\1\0\4\60\1\141\1\145\1\60\1\150\2\60\2\141\1\145\1\141\1\160\1"
u"\143\1\157\1\165\1\166\1\157\5\uffff\1\72\1\uffff\2\72\4\uffff"
u"\1\143\2\uffff\1\146\1\uffff\1\151\2\uffff\1\151\5\uffff\1\156"
u"\1\162\3\uffff\1\154\16\uffff\1\164\6\uffff"
)
DFA26_max = DFA.unpack(
u"\1\uffff\1\72\1\163\1\156\1\162\1\171\1\162\1\164\1\165\1\164\1"
u"\72\1\145\1\157\1\145\2\165\1\146\1\157\1\165\1\166\1\157\5\uffff"
u"\1\72\1\uffff\2\72\4\uffff\1\160\2\uffff\1\162\1\uffff\1\157\2"
u"\uffff\1\165\5\uffff\1\156\1\171\3\uffff\1\156\16\uffff\1\164\6"
u"\uffff"
)
DFA26_accept = DFA.unpack(
u"\25\uffff\1\42\1\46\1\51\1\47\1\1\1\uffff\1\3\2\uffff\1\4\1\50"
u"\1\5\1\2\1\uffff\1\17\1\20\1\uffff\1\10\1\uffff\1\22\1\6\1\uffff"
u"\1\13\1\45\1\7\1\11\1\34\2\uffff\1\41\1\14\1\21\1\uffff\1\24\1"
u"\30\1\32\1\43\1\33\1\36\1\37\1\40\1\42\1\46\1\31\1\16\1\44\1\15"
u"\1\uffff\1\23\1\25\1\26\1\27\1\35\1\12"
)
DFA26_special = DFA.unpack(
u"\1\0\112\uffff"
)
DFA26_transition = [
DFA.unpack(u"\11\27\2\26\2\27\1\26\22\27\1\26\13\27\1\25\3\27\1\1"
u"\1\2\1\3\1\4\1\7\1\11\4\12\47\27\1\17\2\27\1\13\1\23\1\6\1\27\1"
u"\24\1\27\1\16\2\27\1\14\1\21\1\20\1\27\1\22\1\27\1\5\1\10\2\27"
u"\1\15\uff88\27"),
DFA.unpack(u"\12\32\1\31"),
DFA.unpack(u"\12\34\1\31\70\uffff\1\33"),
DFA.unpack(u"\4\35\6\37\1\31\63\uffff\1\36"),
DFA.unpack(u"\12\37\1\31\67\uffff\1\40"),
DFA.unpack(u"\1\43\3\uffff\1\42\17\uffff\1\44\3\uffff\1\41"),
DFA.unpack(u"\1\50\3\uffff\1\45\5\uffff\1\46\2\uffff\1\47"),
DFA.unpack(u"\12\37\1\31\71\uffff\1\51"),
DFA.unpack(u"\1\52\6\uffff\1\54\5\uffff\1\53"),
DFA.unpack(u"\12\37\1\31\71\uffff\1\55"),
DFA.unpack(u"\12\37\1\31"),
DFA.unpack(u"\1\56\3\uffff\1\57"),
DFA.unpack(u"\1\61\7\uffff\1\62\5\uffff\1\60"),
DFA.unpack(u"\1\63"),
DFA.unpack(u"\1\64\23\uffff\1\65"),
DFA.unpack(u"\1\66\4\uffff\1\67"),
DFA.unpack(u"\1\70\2\uffff\1\71"),
DFA.unpack(u"\1\72"),
DFA.unpack(u"\1\73"),
DFA.unpack(u"\1\74"),
DFA.unpack(u"\1\75"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\31"),
DFA.unpack(u""),
DFA.unpack(u"\1\31"),
DFA.unpack(u"\1\31"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\36\14\uffff\1\100"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\46\13\uffff\1\33"),
DFA.unpack(u""),
DFA.unpack(u"\1\101\5\uffff\1\102"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\40\13\uffff\1\103"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\104"),
DFA.unpack(u"\1\105\6\uffff\1\106"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\110\1\uffff\1\107"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\111"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"")
]
class DFA26(DFA):
def specialStateTransition(self_, s, input):
self = self_.recognizer
_s = s
if s == 0:
LA26_0 = input.LA(1)
s = -1
if (LA26_0 == 48):
s = 1
elif (LA26_0 == 49):
s = 2
elif (LA26_0 == 50):
s = 3
elif (LA26_0 == 51):
s = 4
elif (LA26_0 == 115):
s = 5
elif (LA26_0 == 102):
s = 6
elif (LA26_0 == 52):
s = 7
elif (LA26_0 == 116):
s = 8
elif (LA26_0 == 53):
s = 9
elif ((54 <= LA26_0 <= 57)):
s = 10
elif (LA26_0 == 100):
s = 11
elif (LA26_0 == 109):
s = 12
elif (LA26_0 == 119):
s = 13
elif (LA26_0 == 106):
s = 14
elif (LA26_0 == 97):
s = 15
elif (LA26_0 == 111):
s = 16
elif (LA26_0 == 110):
s = 17
elif (LA26_0 == 113):
s = 18
elif (LA26_0 == 101):
s = 19
elif (LA26_0 == 104):
s = 20
elif (LA26_0 == 44):
s = 21
elif ((9 <= LA26_0 <= 10) or LA26_0 == 13 or LA26_0 == 32):
s = 22
elif ((0 <= LA26_0 <= 8) or (11 <= LA26_0 <= 12) or (14 <= LA26_0 <= 31) or (33 <= LA26_0 <= 43) or (45 <= LA26_0 <= 47) or (58 <= LA26_0 <= 96) or (98 <= LA26_0 <= 99) or LA26_0 == 103 or LA26_0 == 105 or (107 <= LA26_0 <= 108) or LA26_0 == 112 or LA26_0 == 114 or (117 <= LA26_0 <= 118) or (120 <= LA26_0 <= 65535)):
s = 23
if s >= 0:
return s
nvae = NoViableAltException(self_.getDescription(), 26, _s, input)
self_.error(nvae)
raise nvae
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
from antlr3.main import LexerMain
main = LexerMain(GrocLexer)
main.stdin = stdin
main.stdout = stdout
main.stderr = stderr
main.execute(argv)
if __name__ == '__main__':
main(sys.argv)
| adviti/melange | thirdparty/google_appengine/google/appengine/cron/GrocLexer.py | Python | apache-2.0 | 33,181 |
#!/usr/bin/env python
from __future__ import print_function
import trollius as asyncio
from trollius import From
import trollius_redis
import logging
import six
if __name__ == '__main__':
loop = asyncio.get_event_loop()
# Enable logging
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(logging.INFO)
def run():
# Create a new redis connection (this will also auto reconnect)
connection = yield From(
trollius_redis.Connection.create(u'localhost', 6379))
try:
while True:
# Get input (always use executor for blocking calls)
text = yield From(
loop.run_in_executor(
None, six.moves.input, u'Enter message: '))
# Publish value
try:
yield From(connection.publish(u'our-channel', text))
print(u'Published.')
except trollius_redis.Error as e:
                    print(u'Publish failed', repr(e))
finally:
connection.close()
loop.run_until_complete(run())
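    # Usage sketch (assumes a Redis server on localhost:6379): start a
    # subscriber on the 'our-channel' channel, run this script, and type
    # messages at the prompt; each line is published to that channel.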
| benjolitz/trollius-redis | examples/pubsub/sender.py | Python | bsd-2-clause | 1,152 |
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
ebook-meta
'''
import sys, os
from calibre.utils.config import StringConfig
from calibre.customize.ui import metadata_readers, metadata_writers, force_identifiers
from calibre.ebooks.metadata.meta import get_metadata, set_metadata
from calibre.ebooks.metadata import string_to_authors, authors_to_sort_string, \
title_sort, MetaInformation
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre import prints
from calibre.utils.date import parse_date
USAGE='%%prog ebook_file [' + _('options') + ']\n' + \
_('''
Read/Write metadata from/to ebook files.
Supported formats for reading metadata: %(read)s
Supported formats for writing metadata: %(write)s
Different file types support different kinds of metadata. If you try to set
some metadata on a file type that does not support it, the metadata will be
silently ignored.
''')
def config():
c = StringConfig('')
c.add_opt('title', ['-t', '--title'],
help=_('Set the title.'))
c.add_opt('authors', ['-a', '--authors'],
help=_('Set the authors. Multiple authors should be separated '
'by the & character. Author names should be in the order '
'Firstname Lastname.'))
c.add_opt('title_sort', ['--title-sort'],
help=_('The version of the title to be used for sorting. '
'If unspecified, and the title is specified, it will '
'be auto-generated from the title.'))
c.add_opt('author_sort', ['--author-sort'],
help=_('String to be used when sorting by author. '
'If unspecified, and the author(s) are specified, it will '
'be auto-generated from the author(s).'))
c.add_opt('cover', ['--cover'],
help=_('Set the cover to the specified file.'))
c.add_opt('comments', ['-c', '--comments'],
help=_('Set the ebook description.'))
c.add_opt('publisher', ['-p', '--publisher'],
help=_('Set the ebook publisher.'))
c.add_opt('category', ['--category'],
help=_('Set the book category.'))
c.add_opt('series', ['-s', '--series'],
help=_('Set the series this ebook belongs to.'))
c.add_opt('series_index', ['-i', '--index'],
help=_('Set the index of the book in this series.'))
c.add_opt('rating', ['-r', '--rating'],
help=_('Set the rating. Should be a number between 1 and 5.'))
c.add_opt('isbn', ['--isbn'],
help=_('Set the ISBN of the book.'))
c.add_opt('identifiers', ['--identifier'], action='append',
help=_('Set the identifiers for the book, can be specified multiple times.'
' For example: --identifier uri:http://acme.com --identifier isbn:12345'
' To remove an identifier, specify no value, --identifier isbn:'
' Note that for EPUB files, an identifier marked as the package identifier cannot be removed.'))
c.add_opt('tags', ['--tags'],
help=_('Set the tags for the book. Should be a comma separated list.'))
c.add_opt('book_producer', ['-k', '--book-producer'],
help=_('Set the book producer.'))
c.add_opt('language', ['-l', '--language'],
help=_('Set the language.'))
c.add_opt('pubdate', ['-d', '--date'],
help=_('Set the published date.'))
c.add_opt('get_cover', ['--get-cover'],
              help=_('Get the cover from the ebook and save it as the '
'specified file.'))
c.add_opt('to_opf', ['--to-opf'],
help=_('Specify the name of an OPF file. The metadata will '
'be written to the OPF file.'))
c.add_opt('from_opf', ['--from-opf'],
help=_('Read metadata from the specified OPF file and use it to '
'set metadata in the ebook. Metadata specified on the '
'command line will override metadata read from the OPF file'))
c.add_opt('lrf_bookid', ['--lrf-bookid'],
help=_('Set the BookID in LRF files'))
return c
def filetypes():
readers = set([])
for r in metadata_readers():
readers = readers.union(set(r.file_types))
return readers
def option_parser():
writers = set([])
for w in metadata_writers():
writers = writers.union(set(w.file_types))
ft, w = ', '.join(sorted(filetypes())), ', '.join(sorted(writers))
return config().option_parser(USAGE%dict(read=ft, write=w))
def do_set_metadata(opts, mi, stream, stream_type):
mi = MetaInformation(mi)
for x in ('guide', 'toc', 'manifest', 'spine'):
setattr(mi, x, None)
from_opf = getattr(opts, 'from_opf', None)
if from_opf is not None:
from calibre.ebooks.metadata.opf2 import OPF
opf_mi = OPF(open(from_opf, 'rb')).to_book_metadata()
mi.smart_update(opf_mi)
for pref in config().option_set.preferences:
if pref.name in ('to_opf', 'from_opf', 'authors', 'title_sort',
'author_sort', 'get_cover', 'cover', 'tags',
'lrf_bookid', 'identifiers'):
continue
val = getattr(opts, pref.name, None)
if val is not None:
setattr(mi, pref.name, val)
if getattr(opts, 'authors', None) is not None:
mi.authors = string_to_authors(opts.authors)
mi.author_sort = authors_to_sort_string(mi.authors)
if getattr(opts, 'author_sort', None) is not None:
mi.author_sort = opts.author_sort
if getattr(opts, 'title_sort', None) is not None:
mi.title_sort = opts.title_sort
elif getattr(opts, 'title', None) is not None:
mi.title_sort = title_sort(opts.title)
if getattr(opts, 'tags', None) is not None:
mi.tags = [t.strip() for t in opts.tags.split(',')]
if getattr(opts, 'series', None) is not None:
mi.series = opts.series.strip()
if getattr(opts, 'series_index', None) is not None:
mi.series_index = float(opts.series_index.strip())
if getattr(opts, 'pubdate', None) is not None:
mi.pubdate = parse_date(opts.pubdate, assume_utc=False, as_utc=False)
if getattr(opts, 'identifiers', None):
val = {k.strip():v.strip() for k, v in (x.partition(':')[0::2] for x in opts.identifiers)}
if val:
orig = mi.get_identifiers()
orig.update(val)
val = {k:v for k, v in orig.iteritems() if k and v}
mi.set_identifiers(val)
if getattr(opts, 'cover', None) is not None:
ext = os.path.splitext(opts.cover)[1].replace('.', '').upper()
mi.cover_data = (ext, open(opts.cover, 'rb').read())
with force_identifiers:
set_metadata(stream, mi, stream_type)
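# Illustrative example (values are hypothetical): passing
#   --identifier isbn:12345 --identifier uri:http://acme.com
# produces {'isbn': '12345', 'uri': 'http://acme.com'}, which is merged into
# the book's existing identifiers above; an empty value such as
#   --identifier isbn:
# removes that identifier, because blank entries are filtered out of the
# merged dict before it is applied.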
def main(args=sys.argv):
parser = option_parser()
opts, args = parser.parse_args(args)
if len(args) < 2:
parser.print_help()
prints(_('No file specified'), file=sys.stderr)
return 1
path = args[1]
stream_type = os.path.splitext(path)[1].replace('.', '').lower()
trying_to_set = False
for pref in config().option_set.preferences:
if pref.name in ('to_opf', 'get_cover'):
continue
if getattr(opts, pref.name) is not None:
trying_to_set = True
break
with open(path, 'rb') as stream:
mi = get_metadata(stream, stream_type, force_read_metadata=True)
if trying_to_set:
prints(_('Original metadata')+'::')
metadata = unicode(mi)
if trying_to_set:
metadata = '\t'+'\n\t'.join(metadata.split('\n'))
prints(metadata, safe_encode=True)
if trying_to_set:
with open(path, 'r+b') as stream:
do_set_metadata(opts, mi, stream, stream_type)
stream.seek(0)
stream.flush()
lrf = None
if stream_type == 'lrf':
if opts.lrf_bookid is not None:
lrf = LRFMetaFile(stream)
lrf.book_id = opts.lrf_bookid
mi = get_metadata(stream, stream_type, force_read_metadata=True)
prints('\n' + _('Changed metadata') + '::')
metadata = unicode(mi)
metadata = '\t'+'\n\t'.join(metadata.split('\n'))
prints(metadata, safe_encode=True)
if lrf is not None:
prints('\tBookID:', lrf.book_id)
if opts.to_opf is not None:
from calibre.ebooks.metadata.opf2 import OPFCreator
opf = OPFCreator(os.getcwdu(), mi)
with open(opts.to_opf, 'wb') as f:
opf.render(f)
prints(_('OPF created in'), opts.to_opf)
if opts.get_cover is not None:
if mi.cover_data and mi.cover_data[1]:
with open(opts.get_cover, 'wb') as f:
f.write(mi.cover_data[1])
prints(_('Cover saved to'), f.name)
else:
prints(_('No cover found'), file=sys.stderr)
return 0
if __name__ == '__main__':
sys.exit(main())
| hazrpg/calibre | src/calibre/ebooks/metadata/cli.py | Python | gpl-3.0 | 9,166 |
import pygame, sys, os, random, math, time, copy, json
from pygame import Rect, draw, QUIT, MOUSEMOTION, MOUSEBUTTONDOWN
from chordConversions import *
from display import resize_img
class Hero:
def __init__(self, screen, maxAmo, foodAddTo, powersToGet, speedFactor,
moneyFactor, image, aimImage):
self.sizeScaleFactor = screen.get_width()
self.screenSize = (screen.get_width(), screen.get_height())
rectSize = self.sizeScaleFactor * .1
self.boundRect = Rect(0,0, rectSize, rectSize)
self.wantedPos = [screen.get_width()/2, screen.get_height()/2]
self.boundRect.center = self.wantedPos
#the money to buy food
self.incomePerTurn = moneyFactor * speedFactor * 1
self.powersToGet = powersToGet
self.powerUp = PowerType(PowerType.noneType, None, 0, None, "")
self.foodAddTo = foodAddTo
self.speedFactor = speedFactor * self.sizeScaleFactor / 300
self.shootSpeedFactor = speedFactor
self.screen = screen
self.color = (0, 100, 100)
self.deadZone = .5
self.moveSpeed = 350
self.fDisplay = pygame.font.SysFont('Courier New', int(self.screenSize[0]/40))
self.fontColor = (255,0,0)
self.textStartPos = (0, self.screenSize[1] - self.fDisplay.get_height())
self.textTop = self.textStartPos[1]
self.tPowerX = self.screenSize[0] / 5 + self.textStartPos[0]
self.refreshText()
#self.image = pygame.transform.scale(image, (int(rectSize), int(rectSize)))
self.image = resize_img(image, rectSize, True)
self.aimImage = pygame.transform.scale(aimImage, (int(rectSize/2), int(rectSize/2)))
self.aimRect = self.aimImage.get_rect()
#moves the amount specified
def move(self, moveChords):
if self.powerUp.timeLeft > 0:
self.powerUp.timeLeft -= self.speedFactor *2
else:
self.powerUp.type = PowerType.noneType
self.direction = moveChords[1]
moveChords[0] -= self.deadZone
if moveChords[0] < 0:
moveChords[0] = 0
moveChords[0] *= self.speedFactor * self.moveSpeed
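        # moveChords is a polar (magnitude, angle) pair, e.g. from a joystick;
        # magnitudes inside the dead zone are clamped to zero above so small
        # input noise does not move the hero.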
moveCartChords = list(polar_to_cart(moveChords))
## Don't go past edges
for dimNum in range(2):
if ((self.wantedPos[dimNum] < 0 and moveCartChords[dimNum] < 0)
or (self.wantedPos[dimNum] > self.screenSize[dimNum] and moveCartChords[dimNum] > 0)):
moveCartChords[dimNum] = 0
self.wantedPos[0] += moveCartChords[0]
self.wantedPos[1] += moveCartChords[1]
self.money += self.incomePerTurn;
self.refreshText()
self.boundRect.center = self.wantedPos
def get_powers(self):
for powerUp in self.powersToGet.allPowerUps:
if powerUp.boundRect.colliderect(self.boundRect):
if powerUp.powerType.type == PowerType.money:
self.money += powerUp.powerType.value
else:
self.powerUp = copy.copy(powerUp.powerType)
self.refreshText()
self.powersToGet.remove(powerUp)
    #shoots food in the direction it's facing
def shoot(self):
foodCost = self.foodAddTo.get_food_cost()
shootFood = False
if self.powerUp.type == PowerType.freeFood:
shootFood = True #Has power up
elif self.money >= foodCost:
self.money -= foodCost #no power up, so charge
shootFood = True
if shootFood:
#actually shoot the food
self.foodAddTo.add_food(self.boundRect.center, (self.shootSpeedFactor, self.direction))
self.refreshText()
    #re-displays the money text
def refreshText(self):
try:
self.tDisplay = self.fDisplay.render('Money: ' + str(round(self.money, 2)), 1, self.fontColor)
except:
pass #the money is not entered by the game manager yet
self.tPower = self.fDisplay.render('Power: ', 1, self.fontColor)
tPowerEnd = self.tPowerX + self.tPower.get_rect().width
if self.powerUp.type != PowerType.noneType:
self.tPowerType = self.fDisplay.render(self.powerUp.name, 1, self.fontColor)
self.tPowerImageX = tPowerEnd
self.tPowerTypeX = self.tPowerImageX + self.powerUp.image.get_rect().width
else:
self.tPowerType = self.fDisplay.render("None", 1, self.fontColor)
self.tPowerTypeX = tPowerEnd
#draws the hero on the screen
def draw(self):
self.screen.blit(self.tDisplay, self.textStartPos)
self.screen.blit(self.tPower, (self.tPowerX, self.textTop))
self.screen.blit(self.tPowerType, (self.tPowerTypeX, self.textTop))
if self.powerUp.type != PowerType.noneType:
self.screen.blit(self.powerUp.image, (self.tPowerImageX, self.textTop))
self.screen.blit(self.image, self.boundRect)
aimLoc = list(polar_to_cart((self.boundRect.width * 2, self.direction)))
self.aimRect.centerx = self.boundRect.centerx + aimLoc[0]
self.aimRect.centery = self.boundRect.centery + aimLoc[1]
self.screen.blit(self.aimImage, self.aimRect)
class PeopleImages:
def __init__(self, peopleImages, images=None, dead=None, eating=None, normal=None):
if peopleImages:
self.images = []
for image in peopleImages.images:
self.images.append(image)
self.dead = peopleImages.dead
self.eating = peopleImages.eating
self.normal = peopleImages.normal
else:
self.images = images
self.dead = dead
self.eating = eating
self.normal = normal
# For storing the sounds that the person can make
class PersonSounds:
pass
class Person:
SIZE_FACTOR = .07
def __init__(self, screen, images, sounds, otherPeopleGroup, widthToHeight, moveSpeedFactor):
self.sizeScaleFactor = screen.get_width()
self.screenSize = (screen.get_width(), screen.get_height())
width = self.sizeScaleFactor * Person.SIZE_FACTOR
self.boundRect = Rect(0,0, width, width * widthToHeight)
self.boundRect.center = self.wantedPos = [random.randint(0, self.screenSize[0]),
random.randint(0, self.screenSize[1])]
self.growFactor = 1.1
self.allImages = PeopleImages(images)
self.set_size([self.boundRect.width, self.boundRect.height])
#The probability that the person will change direction or stop when they move
self.changeDirectionProb = .03
self.stopProb = .5
self.screen = screen
self.sounds = sounds
self.speedFactor = moveSpeedFactor
self.moveSpeed = self.speedFactor * self.sizeScaleFactor * 1.5
#health
self.fullHealth = 1.0
self.health = self.fullHealth
self.healthDecreaceAmount = self.speedFactor * 1.5
self.isAlive = True
self.timeToDigest = 0.0
self.set_rand_dir()
self.moveAmount = list(polar_to_cart((self.moveDistance, self.moveAngle)))
self.isHungry = True
self.fullfillment = 0
self.otherPeople = otherPeopleGroup
self.donationWait = 1.0
self.lastDonationTime = self.donationWait
def move(self):
if(random.random() < self.changeDirectionProb):
#change direction
if(random.random() < self.stopProb):
#stop
self.moveDistance = 0.0
else:
#move somewhere
self.set_rand_dir()
self.moveAmount = list(polar_to_cart((self.moveDistance, self.moveAngle)))
#Turn back at the edges of the screen
for dimNum in range(2):
if ((self.wantedPos[dimNum] < 0 and self.moveAmount[dimNum] < 0) or
(self.wantedPos[dimNum] > self.screenSize[dimNum] and self.moveAmount[dimNum] > 0)):
self.moveAmount[dimNum] *= -1.0
if self.timeToDigest > 0:
self.timeToDigest -= self.speedFactor
self.wantedPos[0] += self.moveAmount[0]
self.wantedPos[1] += self.moveAmount[1]
self.boundRect.center = self.wantedPos
#returns the location of the donation if it's made
def donate(self, numPowerUps):
if not self.isHungry:
if self.lastDonationTime >= self.donationWait:
self.lastDonationTime = 0.0
donationType = random.randint(0, numPowerUps - 1)
return self.boundRect.center, donationType
else:
self.lastDonationTime += self.speedFactor
return None, None
#returns true if dead
def decrease_health(self):
if self.isHungry:
self.health -= self.healthDecreaceAmount
if self.health > 0:
return False
else:
self.isAlive = False
return True
else:
return False
def set_size(self, size):
for index in range(2):
size[index] = int(size[index]+.5)
self.boundRect.width = size[0]
self.boundRect.height = size[1]
#self.allImages.dead = pygame.transform.scale(self.allImages.dead, size)
self.allImages.eating = pygame.transform.scale(self.allImages.eating, size)
self.allImages.normal = pygame.transform.scale(self.allImages.normal, size)
for imageNum in range(len(self.allImages.images)):
self.allImages.images[imageNum] = pygame.transform.scale(self.allImages.images[imageNum], size)
#eat any food that is touching, return true if the food was eaten
#also sets the person to normal if they ate enough
def try_eat_food(self, food):
if self.isHungry and self.timeToDigest <= 0:
if self.boundRect.colliderect(food.boundRect):
self.health += food.healthGain
self.fullfillment += food.fullfill
self.timeToDigest = food.timeToDigest
self.sounds.eatingSound.play()
self.set_size([self.boundRect.width * self.growFactor, self.boundRect.height * self.growFactor])
if self.fullfillment >= 1:
#person is normal now
self.isHungry = False
#check to make sure the person didn't die
if self in self.otherPeople.allPeople:
self.otherPeople.set_normal(self)
return True
return False
def set_rand_dir(self):
self.moveAngle = random.random() * 2* math.pi
self.moveDistance = self.moveSpeed
def draw(self):
if self.isHungry:
if self.timeToDigest <= 0:
                #Gets the appropriate image based on the person's health.
if self.health >= 1:
imageNum = len(self.allImages.images) - 1
else:
imageNum = int(math.floor(self.health * len(self.allImages.images)))
image = self.allImages.images[imageNum]
else:
image = self.allImages.eating
else:
image = self.allImages.normal
self.screen.blit(image, self.boundRect)
class PeopleGroup:
# sounds is a structure of sounds
def __init__(self, screen, sounds, peopleImages, speedFactor):
self.allPeople = []
self.hungryPeople = []
self.normalPeople = []
        # calculate based on the width to height ratio of the first image
oneImageRect = peopleImages.images[0].get_rect()
self.widthToHeight = float(oneImageRect.height) / float(oneImageRect.width)
self.screen = screen
self.speedFactor = speedFactor
self.sizeScaleFactor = screen.get_width()
self.personMoveSpeed = self.speedFactor * self.sizeScaleFactor
self.peopleImages = peopleImages
self.sounds = sounds
## self.donateFood = None
self.stuffToDonate = None
def reset_people(self, numPeople):
self.allPeople = []
self.normalPeople = []
for personNum in range(numPeople):
self.add_person()
def set_normal(self, person):
if not person.isHungry:
#they are fully fed
self.hungryPeople.remove(person)
self.normalPeople.append(person)
def move_all(self):
deadCount = 0
for person in self.allPeople:
person.move()
for hungryPerson in self.hungryPeople:
if hungryPerson.decrease_health():
#they died
self.hungryPeople.remove(hungryPerson)
self.allPeople.remove(hungryPerson)
deadCount += 1
#donate
if self.stuffToDonate:
for normalPerson in self.normalPeople:
donateLocation, donateType = normalPerson.donate(len(self.stuffToDonate.powerUpTypes))
if not donateLocation == None:
self.stuffToDonate.add(donateLocation, donateType)
return deadCount
def add_person(self):
personToAdd = Person(self.screen, self.peopleImages, self.sounds, self, self.widthToHeight, self.speedFactor)
self.allPeople.append(personToAdd)
self.hungryPeople.append(personToAdd)
def draw_all(self):
for person in self.allPeople:
person.draw()
class FoodType:
def __init__(self, name, image, healthGain, fullfillment, cost, timeToDigest):
self.healthGain = healthGain
self.fulfill = fullfillment
self.cost = cost
self.name = name
self.image = image
self.timeToDigest = timeToDigest
class Food:
#initialises the food to move in a particular direction
def __init__(self, screen, foodType, startPos, movePolarChords, allPeopleGroup):
self.screen = screen
self.sizeScaleFactor = screen.get_width()
self.screenSize = screen.get_width(), screen.get_height()
self.moveMagnatude = movePolarChords[0] * self.sizeScaleFactor
rectSize = self.sizeScaleFactor * .03
self.boundRect = Rect(0,0, rectSize, rectSize)
self.boundRect.center = self.wantedPos = list(startPos)
self.healthGain = foodType.healthGain
self.fullfill = foodType.fulfill
self.image = pygame.transform.scale(foodType.image, (int(rectSize), int(rectSize)))
self.timeToDigest = foodType.timeToDigest
self.moveAmount = polar_to_cart((self.moveMagnatude , movePolarChords[1]))
self.allPeopleGroup = allPeopleGroup
self.calc_people_can_eat()
    #TODO: deal with vertical slope, person in opposite direction
def calc_people_can_eat(self):
if self.moveMagnatude < self.allPeopleGroup.personMoveSpeed:
#the people are faster than the food, so anyone could get to the food
self.peopleCanEat = copy.copy(self.allPeopleGroup.hungryPeople)
else:
self.peopleCanEat = []
            #tells whether the line is horizontal or vertical because the calculation is easier that way
isHorVer = [self.moveAmount[1] == 0, self.moveAmount[0] == 0]
if (not isHorVer[0]) and (not isHorVer[1]):
#the slope of the food trajectory line
slope = float(self.moveAmount[1])/float(self.moveAmount[0])
invSlope = -1.0/slope
for person in self.allPeopleGroup.hungryPeople:
personPos = []
                #normalise the person coordinates
for dimNum in range(2):
personPos.append(person.boundRect.center[dimNum] - self.boundRect.center[dimNum])
closestPoint = [0, 0]
#calculate closest Point depending on the condition
if isHorVer[0]:
                    #it's horizontal
if (self.moveAmount[0] > 0) == (personPos[0] > 0):
                        #shot toward the person
closestPoint = [personPos[0], 0]
#else the closest point is (0,0)
elif isHorVer[1]:
#it's vertical
if (self.moveAmount[1] > 0) == (personPos[1] > 0):
                        #shot toward the person
closestPoint = [0, personPos[1]]
#else the closest point is (0,0)
else:
                    #not horizontal or vertical
if (self.moveAmount[1] > 0) == (personPos[1] > invSlope* personPos[0]):
                        #the food is being shot toward the person
closestPoint[0] = (personPos[0]+ slope* personPos[1])/(1 + slope ** 2)
closestPoint[1] = closestPoint[0] * slope
#else:
#it's shot away, but it could still feed the person if they're close enough
#closest point is 0,0
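                    # The closestPoint assignment above is the orthogonal
                    # projection of the (translated) person position (px, py)
                    # onto the trajectory line y = slope*x:
                    #   x = (px + slope*py) / (1 + slope**2),  y = slope*x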
if closestPoint == [0,0]:
timeFromFood = self.boundRect.width / self.moveMagnatude #save the calculation
else:
#calculate distance from food to closest point
timeFromFood = ((self.distance((0,0), closestPoint) + self.boundRect.width) /
self.moveMagnatude)
#calculate distance from person to closest point and get the time
minTimeFromPerson = ((self.distance(personPos, closestPoint) - person.boundRect.width) /
person.moveSpeed - person.moveSpeed)
if minTimeFromPerson <= timeFromFood:
self.peopleCanEat.append(person)
def distance(self, point1, point2):
return math.sqrt(float((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2))
#has all the people try to eat the food, returns True if it was eaten
def try_feed(self):
for person in self.peopleCanEat:
if person.isHungry and person.isAlive:
if(person.try_eat_food(self)):
return True
else:
self.peopleCanEat.remove(person)
return False
def move(self):
self.wantedPos[0] += self.moveAmount[0]
self.wantedPos[1] += self.moveAmount[1]
self.boundRect.center = self.wantedPos
def is_in_screen(self):
return (self.boundRect.right > 0 and self.boundRect.left < self.screenSize[0]
and self.boundRect.bottom > 0 and self.boundRect.top < self.screenSize[1])
def draw(self):
#draw.rect(self.screen, self.color, self.boundRect)
self.screen.blit(self.image, self.boundRect)
class FoodGroup:
def __init__(self, screen, peopleGroup, foodTypes):
self.screen = screen
self.allFood = []
self.people = peopleGroup
self.fDisplay = pygame.font.SysFont('Courier New', 20)
self.allFoodTypes = foodTypes
self.set_food_type(0)
def next_food_type(self):
if self.foodTypeNum <= len(self.allFoodTypes) - 2:
self.set_food_type(self.foodTypeNum + 1)
else:
self.set_food_type(0)
def set_food_type(self, foodTypeNum):
self.foodTypeNum = foodTypeNum
self.currentFoodType = self.allFoodTypes[foodTypeNum]
self.refresh_display()
def get_food_cost(self):
return self.currentFoodType.cost
def add_food(self, startPos, movePolarChords):
self.allFood.append(Food(self.screen, self.currentFoodType, startPos, movePolarChords, self.people))
def move_all(self):
for food in self.allFood:
if food.try_feed():
#the food was eaten
self.allFood.remove(food)
return
food.move()
#remove the food if it's out of range
if not food.is_in_screen():
self.allFood.remove(food)
#shows a screen displaying the food types and properties
def display_food_info(self):
displaySize = int(self.screen.get_height()/len(self.allFoodTypes))
fDisplay = pygame.font.SysFont('Courier New', displaySize)
for foodNum in range(len(self.allFoodTypes)):
food = self.allFoodTypes[foodNum]
displayLoc = foodNum*displaySize
image = pygame.transform.scale(food.image, (int(displaySize), int(displaySize)))
tDisplay = self.fDisplay.render("Cost: {}, Health gain: {}%".format(food.cost, food.healthGain *100),
1, (0,255,0))
displayRect = tDisplay.get_rect()
displayRect.topleft = (displaySize, displayLoc)
self.screen.blit(image, (0, displayLoc))
self.screen.blit(tDisplay, displayRect)
def refresh_display(self):
self.currentFoodImage = pygame.transform.scale(self.currentFoodType.image,
(self.screen.get_height()/20, self.screen.get_height()/20))
self.displayRect = self.currentFoodImage.get_rect()
self.displayRect.bottomright = self.screen.get_width(), self.screen.get_height()
def draw_all(self):
for food in self.allFood:
food.draw()
self.screen.blit(self.currentFoodImage, self.displayRect)
class PowerType:
noneType = -1
money = 0
freeFood = 1
superFood = 2
def __init__(self, pType, value, duration, image, name):
self.type = pType
self.value = value
self.image = image
self.timeLeft = duration
self.name = name
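# Note: PowerUpGroup.__init__ below overwrites each PowerType's .type with its
# index in the powerUpTypes list (and rescales its image), so the list order is
# expected to line up with the money/freeFood/superFood constants above.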
class PowerUp:
def __init__(self, location, duration, powerType):
self.powerType = powerType
self.boundRect = powerType.image.get_rect()
self.boundRect.center = location
self.timeLeft = duration
class PowerUpGroup:
def __init__(self, screen, sizeFactor, speedFactor, powerUpTypes):
self.sizeScaleFactor = screen.get_width()
self.size = int(self.sizeScaleFactor * sizeFactor)
self.screen = screen
#Set the images to the right size and the power ups to the right types
for powerUpNum in range(len(powerUpTypes)):
powerUpTypes[powerUpNum].type = powerUpNum
powerUpTypes[powerUpNum].image = pygame.transform.scale(powerUpTypes[powerUpNum].image,
(self.size, self.size))
self.powerUpTypes = powerUpTypes
self.duration = 1
self.speedFactor = speedFactor
self.allPowerUps = []
def add(self, location, powerNum):
self.allPowerUps.append(PowerUp(location, self.duration, self.powerUpTypes[powerNum]))
def remove(self, powerToRemove):
self.allPowerUps.remove(powerToRemove)
def turn_display(self):
for powerUp in self.allPowerUps:
self.screen.blit(powerUp.powerType.image, powerUp.boundRect)
            #decrease the time the power-up has left and remove it if it expired
powerUp.timeLeft -= self.speedFactor
if powerUp.timeLeft < 0:
self.remove(powerUp)
#Makes funny images go across the screen
class Distraction:
def __init__(self, screen, speedFactor, displayProb, distractionImages, speechImages):
self.screen = screen
self.screenSize = (self.screen.get_width(), self.screen.get_height())
screenAve = (self.screenSize[0] + self.screenSize[1])/2
self.distractionImages = self.scale_images(distractionImages, screenAve)
self.speechImages = self.scale_images(speechImages, screenAve)
self.speed = speedFactor * self.screenSize[0]
self.speechChangeProb = speedFactor * 10
self.speechNum = 0
self.displayProb = displayProb
self.displayPos = [self.screenSize[0], 0]
self.set_speech()
#returns true if the image is visible on the screen
def is_displayed(self):
return self.displayPos[0] < self.screenSize[0]
def scale_images(self, originalImages, screenAve):
newImages = []
for image in originalImages:
rect = image.get_rect()
newImages.append(pygame.transform.scale(image, (
int(rect.width * screenAve/600), int(rect.height * screenAve/600))))
return newImages
def turn_draw(self):
if not self.is_displayed():
if random.random() < self.displayProb:
#reset the displayPos to display again
self.displayPos = [0, random.random() * self.screenSize[1]]
self.imageNum = random.randint(0, len(self.distractionImages)-1)
self.set_speech()
else:
if random.random() < self.speechChangeProb:
self.set_speech()
self.displayPos[0] += self.speed
self.speechInfo[1].left = self.displayPos[0]
self.screen.blit(self.distractionImages[self.imageNum], self.displayPos)
self.screen.blit(self.speechInfo[0], self.speechInfo[1])
def set_speech(self):
speechImage = self.speechImages[random.randint(0, len(self.speechImages)-1)]
speechRect = speechImage.get_rect()
speechRect.bottom = self.displayPos[1]
self.speechInfo = [speechImage, speechRect]
# Gives the first item in the a list to meet a requirement
# If there is no such item, gives None
# If this was called before, it gives the same value, unless the item no longer exists
# Constructor: list and the function that returns true if it meets the requirement
class FirstGetter:
def __init__(self, lst, is_met):
self.lst = lst
self.is_met = is_met
self.preFirst = None
def get_first(self):
if self.preFirst == None or self.preFirst not in self.lst:
#select another element
for el in self.lst:
if self.is_met(el):
self.preFirst = el
return el
self.preFirst = None
return self.preFirst
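# Illustrative (hypothetical) usage: FirstGetter(peopleGroup.hungryPeople,
# lambda p: p.health < 0.5).get_first() returns the first low-health person
# and keeps returning that same person for as long as they stay in the list.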
# displays a tip for the user over a game object when a certain condition is met
# displays when is_tip(obj) is true
# getRect
class TipShower:
def __init__(self, objects, is_tip, getRect, tipImg, screen):
self.firstGetter = FirstGetter(objects, is_tip)
self.tipImg = tipImg
self.screen = screen
self.getRect = getRect
self.loopStart = time.clock()
# displays the tip next to the specified object
def show_tip(self, obj):
objRect = self.getRect(obj)
imgRect = self.tipImg.get_rect()
imgRect.bottom = objRect.top
imgRect.left = objRect.left
self.screen.blit(self.tipImg, imgRect)
# returns true if it displayed the tip
def show_tip_first(self):
first = self.firstGetter.get_first()
if not first == None:
self.show_tip(first)
return not first == None
# Shows the tips when the conditions are met for the specified amount of time
class AllTipShower:
def __init__(self, showTime, screen):
self.screen = screen
self.tips = {}
self.canShow = True
self.showTime = showTime
# getRect(object) gives the bounding rectangle of the object
# the name should be unique
def add_tip(self, name, objects, is_tip, getRect, tipImg):
newTip = TipShower(objects, is_tip, getRect, tipImg, self.screen)
newTip.time = 0
newTip.preShow = False
self.tips[name] = newTip
def modify_tip(self, name, objects):
self.tips[name].firstGetter.lst = objects
#shows all the tips that should be shown
def show_tips(self):
if self.canShow:
self.canShow = False
for tipShower in self.tips.values():
if tipShower.time < self.showTime:
self.canShow = True
showed = tipShower.show_tip_first()
if showed:
# set the clock
tipShower.time += time.clock() - tipShower.loopStart
tipShower.loopStart = time.clock()
else:
tipShower.loopStart = time.clock()
| zenev/charityGame | feedingGame/gameObjects.py | Python | gpl-2.0 | 29,777 |
# coding: utf-8
from setuptools import setup
setup(
name='pushover',
packages=['pushover'],
description='',
long_description='',
entry_points={
'console_scripts': ['pushover = pushover.cli:main'],
},
install_requires=['pyyaml', 'requests'],
)
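# Installing this package (for example with pip) exposes a "pushover" console
# command that dispatches to pushover.cli:main, per the entry_points above.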
| bope/pushover | setup.py | Python | bsd-2-clause | 281 |
# Pretty-printer commands.
# Copyright (C) 2010, 2011 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with pretty-printers."""
import copy
import gdb
import re
def parse_printer_regexps(arg):
"""Internal utility to parse a pretty-printer command argv.
Arguments:
arg: The arguments to the command. The format is:
[object-regexp [name-regexp]].
Individual printers in a collection are named as
printer-name;subprinter-name.
Returns:
The result is a 3-tuple of compiled regular expressions, except that
the resulting compiled subprinter regexp is None if not provided.
Raises:
SyntaxError: an error processing ARG
"""
    argv = gdb.string_to_argv(arg)
argc = len(argv)
object_regexp = "" # match everything
name_regexp = "" # match everything
subname_regexp = None
if argc > 3:
raise SyntaxError("too many arguments")
if argc >= 1:
object_regexp = argv[0]
if argc >= 2:
name_subname = argv[1].split(";", 1)
name_regexp = name_subname[0]
if len(name_subname) == 2:
subname_regexp = name_subname[1]
# That re.compile raises SyntaxError was determined empirically.
# We catch it and reraise it to provide a slightly more useful
# error message for the user.
try:
object_re = re.compile(object_regexp)
except SyntaxError:
raise SyntaxError("invalid object regexp: %s" % object_regexp)
try:
name_re = re.compile (name_regexp)
except SyntaxError:
raise SyntaxError("invalid name regexp: %s" % name_regexp)
if subname_regexp is not None:
try:
subname_re = re.compile(subname_regexp)
except SyntaxError:
raise SyntaxError("invalid subname regexp: %s" % subname_regexp)
else:
subname_re = None
return(object_re, name_re, subname_re)
def printer_enabled_p(printer):
"""Internal utility to see if printer (or subprinter) is enabled."""
if hasattr(printer, "enabled"):
return printer.enabled
else:
return True
class InfoPrettyPrinter(gdb.Command):
"""GDB command to list all registered pretty-printers.
Usage: info pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to list.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__ (self):
super(InfoPrettyPrinter, self).__init__("info pretty-printer",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(printer):
"""Return "" if PRINTER is enabled, otherwise " [disabled]"."""
if printer_enabled_p(printer):
return ""
else:
return " [disabled]"
@staticmethod
def printer_name(printer):
"""Return the printer's name."""
if hasattr(printer, "name"):
return printer.name
if hasattr(printer, "__name__"):
return printer.__name__
# This "shouldn't happen", but the public API allows for
# direct additions to the pretty-printer list, and we shouldn't
# crash because someone added a bogus printer.
# Plus we want to give the user a way to list unknown printers.
return "unknown"
def list_pretty_printers(self, pretty_printers, name_re, subname_re):
"""Print a list of pretty-printers."""
# A potential enhancement is to provide an option to list printers in
# "lookup order" (i.e. unsorted).
sorted_pretty_printers = copy.copy(pretty_printers)
sorted_pretty_printers.sort(lambda x, y:
cmp(self.printer_name(x),
self.printer_name(y)))
for printer in sorted_pretty_printers:
name = self.printer_name(printer)
enabled = self.enabled_string(printer)
if name_re.match(name):
print " %s%s" % (name, enabled)
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
sorted_subprinters = copy.copy(printer.subprinters)
sorted_subprinters.sort(lambda x, y:
cmp(self.printer_name(x),
self.printer_name(y)))
for subprinter in sorted_subprinters:
if (not subname_re or
subname_re.match(subprinter.name)):
print (" %s%s" %
(subprinter.name,
self.enabled_string(subprinter)))
def invoke1(self, title, printer_list,
obj_name_to_match, object_re, name_re, subname_re):
""""Subroutine of invoke to simplify it."""
if printer_list and object_re.match(obj_name_to_match):
print title
self.list_pretty_printers(printer_list, name_re, subname_re)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
self.invoke1("global pretty-printers:", gdb.pretty_printers,
"global", object_re, name_re, subname_re)
cp = gdb.current_progspace()
self.invoke1("progspace %s pretty-printers:" % cp.filename,
cp.pretty_printers, "progspace",
object_re, name_re, subname_re)
for objfile in gdb.objfiles():
self.invoke1(" objfile %s pretty-printers:" % objfile.filename,
objfile.pretty_printers, objfile.filename,
object_re, name_re, subname_re)
def count_enabled_printers(pretty_printers):
"""Return a 2-tuple of number of enabled and total printers."""
enabled = 0
total = 0
for printer in pretty_printers:
if (hasattr(printer, "subprinters")
and printer.subprinters is not None):
if printer_enabled_p(printer):
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
enabled += 1
total += len(printer.subprinters)
else:
if printer_enabled_p(printer):
enabled += 1
total += 1
return (enabled, total)
def count_all_enabled_printers():
"""Return a 2-tuble of the enabled state and total number of all printers.
This includes subprinters.
"""
enabled_count = 0
total_count = 0
(t_enabled, t_total) = count_enabled_printers(gdb.pretty_printers)
enabled_count += t_enabled
total_count += t_total
(t_enabled, t_total) = count_enabled_printers(gdb.current_progspace().pretty_printers)
enabled_count += t_enabled
total_count += t_total
for objfile in gdb.objfiles():
(t_enabled, t_total) = count_enabled_printers(objfile.pretty_printers)
enabled_count += t_enabled
total_count += t_total
return (enabled_count, total_count)
def pluralize(text, n, suffix="s"):
"""Return TEXT pluralized if N != 1."""
if n != 1:
return "%s%s" % (text, suffix)
else:
return text
def show_pretty_printer_enabled_summary():
"""Print the number of printers enabled/disabled.
We count subprinters individually.
"""
(enabled_count, total_count) = count_all_enabled_printers()
print "%d of %d printers enabled" % (enabled_count, total_count)
def do_enable_pretty_printer_1 (pretty_printers, name_re, subname_re, flag):
"""Worker for enabling/disabling pretty-printers.
Arguments:
pretty_printers: list of pretty-printers
name_re: regular-expression object to select printers
subname_re: regular expression object to select subprinters or None
if all are affected
flag: True for Enable, False for Disable
Returns:
The number of printers affected.
This is just for informational purposes for the user.
"""
total = 0
for printer in pretty_printers:
if (hasattr(printer, "name") and name_re.match(printer.name) or
hasattr(printer, "__name__") and name_re.match(printer.__name__)):
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
total += 1
# NOTE: We preserve individual subprinter settings.
printer.enabled = flag
else:
# NOTE: Whether this actually disables the subprinter
# depends on whether the printer's lookup function supports
# the "enable" API. We can only assume it does.
for subprinter in printer.subprinters:
if subname_re.match(subprinter.name):
# Only record printers that change state.
if (printer_enabled_p(printer) and
printer_enabled_p(subprinter) != flag):
total += 1
subprinter.enabled = flag
else:
# This printer has no subprinters.
# If the user does "disable pretty-printer .* .* foo"
# should we disable printers that don't have subprinters?
# How do we apply "foo" in this context? Since there is no
# "foo" subprinter it feels like we should skip this printer.
# There's still the issue of how to handle
# "disable pretty-printer .* .* .*", and every other variation
# that can match everything. For now punt and only support
# "disable pretty-printer .* .*" (i.e. subname is elided)
# to disable everything.
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
total += 1
printer.enabled = flag
return total
def do_enable_pretty_printer (arg, flag):
"""Internal worker for enabling/disabling pretty-printers."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
total = 0
if object_re.match("global"):
total += do_enable_pretty_printer_1(gdb.pretty_printers,
name_re, subname_re, flag)
cp = gdb.current_progspace()
if object_re.match("progspace"):
total += do_enable_pretty_printer_1(cp.pretty_printers,
name_re, subname_re, flag)
for objfile in gdb.objfiles():
if object_re.match(objfile.filename):
total += do_enable_pretty_printer_1(objfile.pretty_printers,
name_re, subname_re, flag)
if flag:
state = "enabled"
else:
state = "disabled"
print "%d %s %s" % (total, pluralize("printer", total), state)
# Print the total list of printers currently enabled/disabled.
# This is to further assist the user in determining whether the result
# is expected. Since we use regexps to select it's useful.
show_pretty_printer_enabled_summary()
# Enable/Disable one or more pretty-printers.
#
# This is intended for use when a broken pretty-printer is shipped/installed
# and the user wants to disable that printer without disabling all the other
# printers.
#
# A useful addition would be -v (verbose) to show each printer affected.
class EnablePrettyPrinter (gdb.Command):
"""GDB command to enable the specified pretty-printer.
Usage: enable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(EnablePrettyPrinter, self).__init__("enable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, True)
class DisablePrettyPrinter (gdb.Command):
"""GDB command to disable the specified pretty-printer.
Usage: disable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(DisablePrettyPrinter, self).__init__("disable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, False)
def register_pretty_printer_commands():
"""Call from a top level script to install the pretty-printer commands."""
InfoPrettyPrinter()
EnablePrettyPrinter()
DisablePrettyPrinter()
register_pretty_printer_commands()
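# Example GDB session (illustrative):
#   (gdb) info pretty-printer
#   (gdb) disable pretty-printer global
#   (gdb) enable pretty-printer global
# The optional object/name regexps described in each command's docstring narrow
# which printers are affected, e.g. "disable pretty-printer .* .*" disables
# everything.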
| ptriller/dcpu-binutils | gdb/python/lib/gdb/command/pretty_printers.py | Python | gpl-2.0 | 14,698 |
from pyramid.response import Response
from pyramid.view import (
view_config,
view_defaults,
)
import colander
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm import exc
from pyramid.httpexceptions import (HTTPFound, HTTPNotFound,)
import deform
from deform import (widget, ValidationFailure)
from pkg_resources import resource_filename
from deform import ZPTRendererFactory
from .models import (
DBSession,
gsClassStudent,
gsUser,
gsStudent,
gsClass,
gsClassNote
)
from .listView import ListView
class Student(colander.MappingSchema):
studentid = colander.SchemaNode(colander.Integer())
FirstName = colander.SchemaNode(colander.String())
LastName = colander.SchemaNode(colander.String())
class Students(colander.SequenceSchema):
Students = Student()
class gsClassSchema(colander.MappingSchema):
#validator to check if class already exists
def check_class_exists(node, value):
try:
gsclass = DBSession.query(gsClass).filter_by(classCode=value['classCode']).one()
except exc.NoResultFound:
pass
except exc.MultipleResultsFound:
raise colander.Invalid(node, 'This class already exists')
#class details
classCode = colander.SchemaNode(colander.String(), title="Class Code")
cohort = colander.SchemaNode(colander.Integer(), title="Cohort")
students = Students()
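# A validated appstruct from this schema has the shape (values hypothetical):
#   {'classCode': '10MA1', 'cohort': 2014,
#    'students': [{'studentid': 1, 'FirstName': 'Jane', 'LastName': 'Doe'}]}
# which mirrors the appstruct built for rendering in modifyformView below.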
class classView():
def __init__(self, request):
self.request = request
@view_config(route_name='addclass', renderer='templates/formView.pt')
def addformView(self):
#addclass does not allow submission of already existing class
schema = gsClassSchema(validator=gsClassSchema.check_class_exists)
classform = deform.Form(schema, buttons=('submit',))
if 'submit' in self.request.POST:
#Handle submitted form
controls = self.request.POST.items()
try:
#validate form
appstruct = classform.validate(controls)
except ValidationFailure as e:
return {'form':e.render()}
#populate gsclass with data from validated form, ready to submit to database
gsclass = gsClass(classCode=appstruct['classCode'], cohort=appstruct['cohort'])
DBSession.add(gsclass)
#After successful form submission, return to list of classes
return HTTPFound(self.request.route_url("listclasses"))
#render blank form for creation of new class
form = classform.render()
return dict(form=form)
@view_config(route_name='modifyclass', renderer='templates/formView.pt')
def modifyformView(self):
rclasscode = self.request.matchdict['classcode']
#check that class exists before continuing
try:
gsclass = DBSession.query(gsClass).filter_by(classCode=rclasscode).one()
except exc.NoResultFound:
detail = "Class " + rclasscode + " does not exist."
return HTTPNotFound(comment='Class does not exist', detail=detail)
#modifyclass allows database submission of existing class
schema = gsClassSchema()
if 'Delete' in self.request.POST:
confirm_delete = deform.Button(name='confirm_delete', css_class='delete button', title="Yes, really delete " + self.request.params['classCode'])
classform = deform.Form(schema, buttons=(confirm_delete,))
elif 'confirm_delete' in self.request.POST:
DBSession.delete(gsclass)
return HTTPFound(self.request.route_url("listclasses"))
else:
classform = deform.Form(schema, buttons=('Delete', 'Submit'))
if 'submit' in self.request.POST:
#Handle submitted form
controls = self.request.POST.items()
try:
#validate form
appstruct = classform.validate(controls)
except ValidationFailure as e:
return {'form':e.render()}
#populate gsclass with data from validated form, ready to submit to database
gsclass.classCode=appstruct['classCode']
gsclass.cohort=appstruct['cohort']
DBSession.add(gsclass)
#After successful form submission, return to list of classes
return HTTPFound(self.request.route_url("listclasses"))
#modifyclass requires form to be prefilled with data from the database
allstudents = DBSession.query(gsStudent).filter_by(cohort=gsclass.cohort).order_by(gsStudent.LastName).all()
studentsstruct = []
for student in allstudents:
studentsstruct.append({'studentid': student.id, 'FirstName': student.FirstName, 'LastName': student.LastName})
appstruct = {'classCode': gsclass.classCode, 'cohort': gsclass.cohort, 'students': studentsstruct}
deform_templates = resource_filename('deform', 'templates')
search_path = ('/Users/jaduff/Documents/projects/programming/python/goodstanding/goodstanding/templates/forms')
renderer = ZPTRendererFactory(search_path)
form = classform.render(appstruct, renderer=renderer)
return dict(form=form)
@view_config(route_name='listclasses', renderer='templates/classlistView.pt')
def listclassesView(self):
gsclasses = DBSession.query(gsClass).all()
bottomlinks = [{'name': 'Add Class', 'url': self.request.route_url("addclass")}]
return dict(gsclasses=gsclasses, title="My Classes", bottomlinks=bottomlinks, req=self.request)
@view_config(route_name='viewclass', renderer='templates/classstudentsView.pt')
def classstudentsView(self):
rclasscode = self.request.matchdict['classcode']
try:
gsclass = DBSession.query(gsClass).filter_by(classCode=rclasscode).one()
except exc.NoResultFound:
detail = "Class " + rclasscode + " does not exist."
return HTTPNotFound(comment='Class does not exist', detail=detail)
bottomlinks = [{'name': 'Edit Class', 'url': self.request.route_url("modifyclass", classcode=gsclass.classCode)}]
return dict(gsclass=gsclass, bottomlinks=bottomlinks, req=self.request)
| jaduff/goodstanding | goodstanding/classviews.py | Python | bsd-3-clause | 6,248 |
import datetime
import itertools
import logging
import re
import sys
import time
import threading
from sgevents.loop import LoopController
from .logs import log_globals
from .utils import parse_interval, get_shotgun
log = logging.getLogger(__name__)
class Scanner(object):
def __init__(self, schema, last_time=None, types=None, projects=None, config=None):
self.schema = schema
self.last_time = parse_interval(last_time) if isinstance(last_time, basestring) else last_time
self.types = types
self.projects = projects
self._log_counter = itertools.count(1)
self.shotgun = get_shotgun('sgapi', config=config)
self.loop_controller = LoopController()
def scan(self, interval=None):
interval = parse_interval(interval) if interval else None
sleep_target = time.time()
while True:
log_globals.meta = {'scan': next(self._log_counter)}
scan_start = datetime.datetime.utcnow() # would be great if this matched the sleep target
self._scan()
self.last_time = scan_start - datetime.timedelta(seconds=1) # need some leeway
if not interval:
break
# Figure out the next time
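            # Advance the target in whole intervals so a scan that overruns
            # skips missed slots instead of drifting the schedule.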
while sleep_target < time.time():
sleep_target += interval
delay = sleep_target - time.time()
log.info('sleeping %ds until next scan' % delay)
self.loop_controller.sleep(delay)
def _scan(self):
base_filters = []
if self.last_time:
if isinstance(self.last_time, (int, float)):
self.last_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=self.last_time)
base_filters.append(('updated_at', 'greater_than', self.last_time))
log.info('scan starting')
counts = {}
for entity_type in sorted(self.schema._entity_types.itervalues(), key=lambda e: e.type_name):
if self.types and entity_type.type_name not in self.types:
continue
#log.info('scanning %ss' % entity_type.type_name)
filters = base_filters[:]
if self.projects and entity_type.type_name not in ('ApiUser', 'HumanUser'):
# Need to make sure the project field actually exists.
project_field = entity_type.fields['project']
if project_field and project_field.is_cached():
filters.append(('project', 'in', [{'type': 'Project', 'id': pid} for pid in self.projects]))
return_fields = sorted(name for name, field in entity_type.fields.iteritems() if field.is_cached())
for entity in self._find_active_and_retired(entity_type.type_name, filters, return_fields, threads=1, per_page=100):
for key in return_fields:
value = entity.get(key)
if isinstance(value, datetime.datetime):
entity[key] = value.isoformat() + 'Z'
log.info('updating %s %s %d%s' % (
'active' if entity['_active'] else 'retired',
entity['type'],
entity['id'],
' "%s"' % entity['name'][:40] if entity.get('name') else ''
))
self.schema.create_or_update(entity.pop('type'), entity, create_with_id=True)
counts[entity_type.type_name] = counts.get(entity_type.type_name, 0) + 1
summary = ', '.join('%d %ss' % (count, name) for name, count in sorted(counts.iteritems()) if count)
log.info('scan completed; updated %s' % (summary or 'nothing'))
def _find_active_and_retired(self, *args, **kwargs):
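        # Runs the same find() twice, first for active and then for retired
        # entities, tagging each result with an '_active' flag.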
for active in True, False:
kwargs['retired_only'] = not active
for e in self.shotgun.find(*args, **kwargs):
e['_active'] = active
yield e
| westernx/sgcache | sgcache/scanner.py | Python | bsd-3-clause | 3,935 |
class Node:
all = []
def __init__(self, name, stepSize):
Node.all.append(self)
self.name = name
self.stepSize = stepSize
self.connections = []
class Connection:
all = []
def __init__(self, pre, post, latency, maxBuffer = 1000):
Connection.all.append(self)
pre.connections.append(self)
self.name = '%s->%s' % (pre.name, post.name)
self.pre = pre
self.post = post
self.latency = latency
self.allowedBuffer = maxBuffer
# Build a test graph
a = Node('a', 13)
b = Node('b', 3)
c = Node('c', 5)
d = Node('d', 7)
e = Node('e', 11)
Connection(a, b, 0)
Connection(b, c, 0)
Connection(c, d, 8)
Connection(d, b, 7)
Connection(c, e, 50)
Connection(e, b, 50)
Connection(d, e, 40)
Connection(e, d, 60)
# Search for loops
def depthFirst(x, path):
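    # Depth-first traversal of the connection graph; `path` holds the
    # connections taken so far. Reaching a node already on the current path
    # means a loop was closed, so we can size its buffering headroom.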
if x.inPath:
# Search path to detect beginning of loop
for cIndex, c in enumerate(path):
if c.pre is x:
loop = path[cIndex:]
print('Found loop', [c.name for c in loop])
# Compute how much headroom we have for buffering
totalDelay = 0
for c in loop:
totalDelay += c.latency - c.pre.stepSize
# If negative we will not be able to make it in time
# around the loop even without any extra buffering
if totalDelay < 0:
print('Too short latency (%g) around loop: ' % -totalDelay)
print([c.name for c in loop])
return False
# Distribute totalDelay as allowed buffering uniformly over loop
                # (we could do better by considering constraints from other loops)
bufDelay = totalDelay / len(loop)
for c in loop:
c.allowedBuffer = min(c.allowedBuffer,
bufDelay // c.pre.stepSize)
# Calculate and print out debug info
print('Distributing delays',
[(c.name,
bufDelay // c.pre.stepSize,
c.allowedBuffer)
for c in loop])
totalBufferedDelay = 0
for c in loop:
totalBufferedDelay += c.latency - c.pre.stepSize*c.allowedBuffer
print('Total buffered delay', totalBufferedDelay)
return True
# Mark as processed (remove from main loop forest)
x.visited = True
x.inPath = True
# Recurse in depth-first order
for c in x.connections:
depthFirst(c.post, path+[c])
x.inPath = False
def findLoops(nodeList):
for x in nodeList:
x.visited = False
x.inPath = False
# Do depth-first traversal of forest
for x in nodeList:
if not x.visited:
depthFirst(x, [])
findLoops(Node.all)
for c in Connection.all:
print('%s %d'%(c.name, c.allowedBuffer))
def simulate(c):
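    # Step the send (s) and receive (r) clocks for one connection to check
    # that data buffered according to allowedBuffer still arrives in time.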
s = 0
r = 0
while s < 100:
sStart = s
# Advance receive time as much as possible
# still ensuring that oldest data arrives in time
while r + c.post.stepSize <= s + c.latency:
r += c.post.stepSize
# Advance send time to match receive time
bCount = 0
while r + c.post.stepSize >= s + c.latency:
s += c.pre.stepSize
bCount += 1
# Advance send time according to precalculated buffer
if bCount < c.allowedBuffer:
s += c.pre.stepSize * (c.allowedBuffer - bCount)
print('%s data from %d-%d, sent at %d, received at %d, latency %d (%d)'
% (c.name, sStart, s, s, r, r-sStart, c.latency))
def simulate_sender(c):
s = 0
r = 0
bCount = 0
while s < 100:
sStart = s
# Advance receive time as much as possible
# still ensuring that oldest data arrives in time
while r + c.post.stepSize <= s + c.latency:
r += c.post.stepSize
# Advance send time to match receive time
if r + c.post.stepSize < s + c.latency and bCount >= c.allowedBuffer:
print('%s data from %d-%d, sent at %d, received at %d, latency %d (%d)'
% (c.name, sStart, s, s, r, r-sStart, c.latency))
bCount = 0
s += c.pre.stepSize
bCount += 1
for c in Connection.all:
simulate(c)
| favreau/music | doc/scheduling-algorithm.py | Python | gpl-3.0 | 4,266 |
# coding: utf-8
from .event import Event
__all__ = ['Event']
| infotim/python-libsse | src/libsse/__init__.py | Python | mit | 63 |
import sys
from datetime import datetime
from threading import local
from django.conf import settings
from django.template import Node
from django.utils import simplejson
from django.utils.encoding import force_unicode, smart_str
from django.utils.hashcompat import sha_constructor
from debug_toolbar.utils import ms_from_timedelta, tidy_stacktrace, get_template_info, \
get_stack
from debug_toolbar.utils.compat.db import connections
# TODO: This should be set in the toolbar loader as a default and panels should
# get a copy of the toolbar object with access to its config dictionary
SQL_WARNING_THRESHOLD = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}) \
.get('SQL_WARNING_THRESHOLD', 500)
class SQLQueryTriggered(Exception):
"""Thrown when template panel triggers a query"""
pass
class ThreadLocalState(local):
def __init__(self):
self.enabled = True
@property
def Wrapper(self):
if self.enabled:
return NormalCursorWrapper
return ExceptionCursorWrapper
def recording(self, v):
self.enabled = v
state = ThreadLocalState()
recording = state.recording # export function
def CursorWrapper(*args, **kwds): # behave like a class
return state.Wrapper(*args, **kwds)
class ExceptionCursorWrapper(object):
"""
Wraps a cursor and raises an exception on any operation.
Used in Templates panel.
"""
def __init__(self, cursor, db, logger):
pass
def __getattr__(self, attr):
raise SQLQueryTriggered()
class NormalCursorWrapper(object):
"""
Wraps a cursor and logs queries.
"""
def __init__(self, cursor, db, logger):
self.cursor = cursor
# Instance of a BaseDatabaseWrapper subclass
self.db = db
# logger must implement a ``record`` method
self.logger = logger
def execute(self, sql, params=()):
__traceback_hide__ = True
start = datetime.now()
try:
return self.cursor.execute(sql, params)
finally:
stop = datetime.now()
duration = ms_from_timedelta(stop - start)
enable_stacktraces = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}) \
.get('ENABLE_STACKTRACES', True)
if enable_stacktraces:
stacktrace = tidy_stacktrace(reversed(get_stack()))
else:
stacktrace = []
_params = ''
try:
_params = simplejson.dumps([force_unicode(x, strings_only=True) for x in params])
except TypeError:
pass # object not JSON serializable
template_info = None
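            # Walk up the stack to find the template Node whose render()
            # triggered this query, so the panel can report the template.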
cur_frame = sys._getframe().f_back
try:
while cur_frame is not None:
if cur_frame.f_code.co_name == 'render':
node = cur_frame.f_locals['self']
if isinstance(node, Node):
template_info = get_template_info(node.source)
break
cur_frame = cur_frame.f_back
except:
pass
del cur_frame
alias = getattr(self.db, 'alias', 'default')
conn = connections[alias].connection
# HACK: avoid imports
if conn:
engine = conn.__class__.__module__.split('.', 1)[0]
else:
engine = 'unknown'
params = {
'engine': engine,
'alias': alias,
'sql': self.db.ops.last_executed_query(self.cursor, sql, params),
'duration': duration,
'raw_sql': sql,
'params': _params,
'hash': sha_constructor(settings.SECRET_KEY + smart_str(sql) + _params).hexdigest(),
'stacktrace': stacktrace,
'start_time': start,
'stop_time': stop,
'is_slow': (duration > SQL_WARNING_THRESHOLD),
'is_select': sql.lower().strip().startswith('select'),
'template_info': template_info,
}
if engine == 'psycopg2':
params.update({
'trans_id': self.logger.get_transaction_id(alias),
'trans_status': conn.get_transaction_status(),
'iso_level': conn.isolation_level,
'encoding': conn.encoding,
})
# We keep `sql` to maintain backwards compatibility
self.logger.record(**params)
def executemany(self, sql, param_list):
return self.cursor.executemany(sql, param_list)
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
| viswimmer1/PythonGenerator | data/python_files/30552425/db.py | Python | gpl-2.0 | 4,966 |
import os
from subprocess import call
import tempfile
import click
from kazoo.client import KazooClient
import six
zk = None
def init(hosts):
global zk
zk = KazooClient(hosts=hosts)
zk.start(timeout=5)
def main():
global zk
cli(auto_envvar_prefix='ZEEK')
if zk is not None and zk.connected:
zk.close()
@click.group()
@click.option('--hosts',
'-H',
default='localhost:2181',
help="ZooKeeper connection string",
show_default=True)
def cli(hosts):
"""View your ZooKeeper data from the command line"""
init(hosts)
@cli.command()
@click.argument('path')
def ls(path):
""" List the contents of a specified path.
Arguments:
PATH the path to list the contents of."""
echo(path)
for p in children(path):
echo(p)
@cli.command()
@click.argument('path')
def find(path):
""" Find all children of a specified path.
Arguments:
PATH the path to search for children."""
echo(path)
for p in walk(path):
echo(p)
@cli.command()
@click.argument('name')
def locate(name):
""" Find all nodes matching name.
Arguments:
NAME the name to match."""
for p in walk('/'):
if name == p.split('/')[-1]:
echo(p)
@cli.command()
@click.argument('path')
@click.option('--recursive',
'-r',
is_flag=True,
help="create parent nodes if they don't exist")
def touch(path, recursive):
""" Create the specified node.
Arguments:
PATH the node to edit."""
create_node(path, recursive)
@cli.command()
@click.argument('path')
@click.argument('value')
@click.option('--create',
'-c',
is_flag=True,
help="create parent nodes if they don't exist")
def set(path, value, create):
""" Set a specified node
Arguments:
PATH the node to edit.
VALUE the value of the node"""
create_node(path, create)
zk.set(path, six.b(str(value)))
@cli.command()
@click.argument('path')
def vi(path):
""" Edit a specified node
Arguments:
PATH the node to edit."""
editor = os.environ.get('EDITOR', 'vim')
create_node(path)
with tempfile.NamedTemporaryFile(suffix=".tmp") as tmp:
if zk.exists(path):
node = zk.get(path)
tmp.write(node[0])
tmp.flush()
call([editor, tmp.name])
zk.set(path, six.b(open(tmp.name).read().strip()))
@cli.command()
@click.argument('path')
def rm(path):
""" Remove a specified node
Arguments:
PATH the node to edit."""
if zk.exists(path):
zk.delete(path)
else:
click.echo('%s does not exist' % path)
def children(path):
"""Generator that yields the children of the specified path"""
global zk
for c in zk.get_children(path):
if path == '/':
yield '/%s' % c
else:
yield '%s/%s' % (path, c)
def walk(path):
"""Generator that yields the children of the given path recursively"""
for c in children(path):
yield c
for x in walk(c):
yield x
def parents(path, ascending=False):
"""Generator that yields the full path of all parents"""
if path == '/':
yield path
return
parts = path.split('/')
    indexes = list(range(len(parts) - 1))
if not ascending:
indexes.reverse()
for i in indexes:
yield '/' + '/'.join(parts[1:i+1])
def create_node(path, recursive=False):
if recursive:
for parent in parents(path, ascending=True):
if not zk.exists(parent):
zk.create(parent)
if zk.exists(path):
click.echo('%s already exists' % path)
else:
zk.create(path)
def echo(path):
"""Echos a ZooKeeper node path and value"""
click.echo('{0} - {1}'.format(path, zk.get(path)[0].decode('utf-8')))
| krockode/zeek | zeek.py | Python | apache-2.0 | 3,951 |
import ddt
from django.core.management import call_command
from django.core.management.base import CommandError
from mock import patch
from oscar.core.loading import get_model
from ecommerce.extensions.test.factories import create_order
from ecommerce.tests.factories import PartnerFactory
from ecommerce.tests.testcases import TestCase
LOGGER_NAME = 'ecommerce.extensions.order.management.commands.update_order_lines_partner'
OrderLine = get_model('order', 'Line')
@ddt.ddt
class UpdateOrderLinePartnerTests(TestCase):
"""Tests for update_order_lines_partner management command."""
PARTNER_CODE = 'testX'
YES_NO_PATCH_LOCATION = 'ecommerce.extensions.order.management.commands.update_order_lines_partner.query_yes_no'
def assert_error_log(self, error_msg, *args):
"""Helper to call command and assert error log."""
with self.assertRaisesRegex(CommandError, error_msg):
call_command('update_order_lines_partner', *args)
def test_partner_required(self):
"""Test that command raises partner required error."""
err_msg = 'Error: the following arguments are required: --partner'
self.assert_error_log(
err_msg,
'sku12345'
)
def test_partner_does_not_exist(self):
"""Test that command raises partner does not exist error."""
self.assert_error_log(
'No Partner exists for code {}.'.format(self.PARTNER_CODE),
'sku12345',
'--partner={}'.format(self.PARTNER_CODE)
)
def test_one_or_more_sku_required(self):
"""Test that command raises one or more SKUs required error."""
self.assert_error_log(
'update_order_lines_partner requires one or more <SKU>s.',
'--partner={}'.format(self.PARTNER_CODE)
)
@ddt.data(True, False)
def test_update_order_lines_partner(self, yes_no_value):
"""Test that command works as expected."""
new_partner = PartnerFactory(short_code=self.PARTNER_CODE)
order = create_order()
order_line = order.lines.first()
self.assertNotEqual(order_line.partner, new_partner)
with patch(self.YES_NO_PATCH_LOCATION) as mocked_yes_no:
mocked_yes_no.return_value = yes_no_value
call_command('update_order_lines_partner', order_line.partner_sku, '--partner={}'.format(self.PARTNER_CODE))
order_line = OrderLine.objects.get(partner_sku=order_line.partner_sku)
if yes_no_value:
# Verify that partner is updated
self.assertEqual(order_line.partner, new_partner)
self.assertEqual(order_line.partner_name, new_partner.name)
else:
# Verify that partner is not updated
self.assertNotEqual(order_line.partner, new_partner)
self.assertNotEqual(order_line.partner_name, new_partner.name)
| edx/ecommerce | ecommerce/extensions/order/management/commands/tests/test_update_order_lines_partner.py | Python | agpl-3.0 | 2,921 |
import datetime
print(datetime.datetime.today().ctime())
| codermoji-contrib/python | start/First steps in the library/datetime/showdate3.py | Python | mit | 57 |
class CreationDestruction (object) :
def __init__ (self) :
print("constructeur")
def __new__ (self) :
print("__new__")
return object.__new__ (self)
def __del__ (self) :
print("__del__")
print("a")
m = CreationDestruction ()
print("b")
m2 = m
print("c")
del m
print("d")
del m2
print("e")
| sdpython/teachpyx | _todo/programme/new_delete.py | Python | mit | 356 |
'''
Update the CO21 and HI intensities in the dlfit.fits file, which contains fits
to the Draine models for M33. The old version used an older and incomplete IRAM
CO(2-1) map, and the HI is from the 14B-088 integrated-intensity map.
'''
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.utils.console import ProgressBar
from spectral_cube import Projection
from radio_beam import Beam
from os.path import join as osjoin
from paths import iram_co21_data_path, data_path, fourteenB_wGBT_HI_file_dict
from constants import hi_freq, beam_eff_30m_druard
tab = Table.read(osjoin(data_path, "dlfit.fits"))
co21_mom0 = Projection.from_hdu(fits.open(iram_co21_data_path("m33.co21_iram.mom0.fits"))[0])
co21_rms = Projection.from_hdu(fits.open(iram_co21_data_path("m33.rms.masked.fits"))[0])
hi_mom0 = Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict['Moment0'])[0])
# Convolve to the lowest resolution used for the fits: 500 um Herschel
# Actually @low-sky used a 60'' beam for the Draine model fitting
beam = Beam(60. * u.arcsec)
smooth_co21 = co21_mom0.convolve_to(beam)
# Remove regions outside of the original map extent
smooth_co21[np.isnan(co21_rms)] = np.NaN
# And the HI
smooth_hi = hi_mom0.convolve_to(beam)
# Now convert HI from Jy m / s to K km / s
smooth_hi = (smooth_hi.value * beam.jtok(hi_freq) / 1000.) * u.km / u.s
# Convert to K km/s
smooth_co21 = smooth_co21.to(u.K * u.km / u.s)
# Now match the points in the table to the map
dec_map, ra_map = smooth_co21.spatial_coordinate_map
spatial_coords = SkyCoord(ra=ra_map,
dec=dec_map,
frame='icrs')
dec_map, ra_map = hi_mom0.spatial_coordinate_map
spatial_coords_hi = SkyCoord(ra=ra_map,
dec=dec_map,
frame='icrs')
ra_map[np.isnan(co21_rms)] = np.NaN
dec_map[np.isnan(co21_rms)] = np.NaN
ra_limits = [np.nanmin(ra_map).value, np.nanmax(ra_map).value]
dec_limits = [np.nanmin(dec_map).value, np.nanmax(dec_map).value]
new_co21 = np.zeros((len(tab)))
new_hi = np.zeros((len(tab)))
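# For each position in the fit table, average the smoothed CO(2-1) and HI
# maps within a 60 arcsec aperture to match the 60'' beam used above.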
for i, (ra, dec) in enumerate(ProgressBar(zip(tab['RA'], tab['DEC']))):
posn = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='icrs')
if ra < ra_limits[0] or ra > ra_limits[1] or dec < dec_limits[0] or dec > dec_limits[1]:
new_co21[i] = np.NaN
else:
# min_posn = spatial_coords.separation(posn).argmin()
mask = spatial_coords.separation(posn) < 60. * u.arcsec
# twod_posn = np.unravel_index(min_posn, co21_mom0.shape)
# new_co21[i] = smooth_co21[twod_posn].value
new_co21[i] = np.nanmean(smooth_co21[mask].value)
mask_hi = spatial_coords_hi.separation(posn) < 60. * u.arcsec
new_hi[i] = np.nanmean(smooth_hi[mask_hi].value)
# Correct CO for beam efficiency
beam_eff = beam_eff_30m_druard
tab['CO21'] = Column(new_co21 / beam_eff)
tab['HI'] = Column(new_hi)
tab.write(osjoin(data_path, "updated_dlfit.fits"), format='fits',
overwrite=True)
| e-koch/VLA_Lband | ancillary_data/IRAM30m_CO21/update_dlfit.py | Python | mit | 3,114 |
#!/usr/bin/env python3
import argparse
import sys
from ros_buildfarm.argument import add_argument_package_name
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.argument import add_argument_skip_download_sourcedeb
from ros_buildfarm.argument import add_argument_sourcedeb_dir
from ros_buildfarm.binarydeb_job import get_sourcedeb
from ros_buildfarm.common import Scope
def main(argv=sys.argv[1:]):
with Scope('SUBSECTION', 'get sourcedeb'):
parser = argparse.ArgumentParser(
description='Get released package sourcedeb')
add_argument_rosdistro_name(parser)
add_argument_package_name(parser)
add_argument_sourcedeb_dir(parser)
add_argument_skip_download_sourcedeb(parser)
args = parser.parse_args(argv)
return get_sourcedeb(
args.rosdistro_name, args.package_name, args.sourcedeb_dir,
args.skip_download_sourcedeb)
if __name__ == '__main__':
sys.exit(main())
| mani-monaj/ros_buildfarm | scripts/release/get_sourcedeb.py | Python | apache-2.0 | 997 |
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty, BooleanProperty
class NamedCheckBox(BoxLayout):
name = StringProperty("")
is_checked = BooleanProperty(False)
font_group_id = StringProperty("default_checkbox")
def set_checked(self, is_checked):
self.ids.checkbox.active = is_checked | Kovak/KivyNBT | uix/namedcheckbox.py | Python | mit | 344 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'open_newick.ui'
#
# Created: Tue Jan 10 15:56:56 2012
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_OpenNewick(object):
def setupUi(self, OpenNewick):
OpenNewick.setObjectName("OpenNewick")
OpenNewick.resize(569, 353)
self.comboBox = QtGui.QComboBox(OpenNewick)
self.comboBox.setGeometry(QtCore.QRect(460, 300, 81, 23))
self.comboBox.setObjectName("comboBox")
self.widget = QtGui.QWidget(OpenNewick)
self.widget.setGeometry(QtCore.QRect(30, 10, 371, 321))
self.widget.setObjectName("widget")
self.retranslateUi(OpenNewick)
QtCore.QMetaObject.connectSlotsByName(OpenNewick)
def retranslateUi(self, OpenNewick):
OpenNewick.setWindowTitle(QtGui.QApplication.translate("OpenNewick", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
| khughitt/ete | ete_dev/treeview/_open_newick.py | Python | gpl-3.0 | 1,001 |
"""
* The MIT License (MIT)
*
* Copyright (c) 2017 Chris Smolen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
"""
import os.path
import json
CONFIG_DEFAULT = {
"wifi_ssid": "YOURSSID",
"wifi_pass": "YOUR_PASSWORD",
"topic1": "YOUR_MQTTT_TOPIC",
"topic2": "YOUR_MQTT_TOPIC",
"mqtt_id": "NAME_FOR_DEVICE",
"mqtt_server": "MQTT_SERVER_IP_OR_HOST",
"mqtt_user": "MQTT_USERNAME",
"mqtt_pass": "MQTT_PASSWORD"
}
DASH_DEFAULT = {
"DASH_DEVICE_1": "XX:XX:XX:XX:XX:XX",
"DASH_DEVICE_2": "XX:XX:XX:XX:XX:XX",
"DASH_DEVICE_3": "XX:XX:XX:XX:XX:XX",
"DASH_DEVICE_4": "XX:XX:XX:XX:XX:XX",
"DASH_DEVICE_5": "XX:XX:XX:XX:XX:XX",
"DASH_DEVICE_6": "XX:XX:XX:XX:XX:XX"
}
def config():
if not os.path.isfile('config.json'): # Used to check if the file exists or not
with open('config.json', 'w') as createfile:
createfile.write(json.dumps(CONFIG_DEFAULT))
with open('config.json') as config:
config = json.load(config)
return config
else:
with open('config.json') as config:
config = json.load(config)
return config
def dash():
if not os.path.isfile('dash.json'):
with open('dash.json', 'w') as createfile:
createfile.write(json.dumps(DASH_DEFAULT))
with open('dash.json') as dash:
dash = json.load(dash)
return dash
else:
        with open('dash.json') as dash:
dash = json.load(dash)
return dash
| smolz/Misc-Python-Projects | dash-mqtt/jsonconfig.py | Python | mit | 2,576 |
# -*- Mode: Python; test-case-name: flumotion.test.test_component -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""
worker-side objects for components
"""
import os
import time
import socket
from twisted.internet import reactor, error, defer
from twisted.spread import pb
from twisted.python import reflect
from zope.interface import implements
from flumotion.common import interfaces, errors, log, planet, medium
from flumotion.common import componentui, common, messages
from flumotion.common import interfaces, reflectcall, debug
from flumotion.common.i18n import N_, gettexter
from flumotion.common.planet import moods
from flumotion.common.poller import Poller
from flumotion.twisted import pb as fpb
from flumotion.twisted.flavors import IStateCacheableListener
__version__ = "$Rev$"
T_ = gettexter()
class ComponentClientFactory(fpb.ReconnectingFPBClientFactory):
"""
I am a client factory for a component logging in to the manager.
"""
logCategory = 'component'
perspectiveInterface = interfaces.IComponentMedium
def __init__(self, component):
"""
@param component: L{flumotion.component.component.BaseComponent}
"""
# doing this as a class method triggers a doc error
fpb.ReconnectingFPBClientFactory.__init__(self)
self.component = component
# make a medium to interface with the manager
self.medium = component.componentMediumClass(component)
component.setMedium(self.medium)
self.maxDelay = 10
# get the interfaces implemented by the component medium class
#FIXME: interface
#self.interfaces = self.medium.__class__.__implements__
self.logName = component.name
def clientConnectionMade(self, broker):
self.medium.broker = broker
fpb.ReconnectingFPBClientFactory.clientConnectionMade(self, broker)
# vmethod implementation
def gotDeferredLogin(self, d):
def remoteDisconnected(remoteReference):
if reactor.killed:
self.log('Connection to manager lost due to shutdown')
else:
self.warning('Lost connection to manager, '
'will attempt to reconnect')
def loginCallback(reference):
self.info("Logged in to manager")
self.debug("remote reference %r" % reference)
self.medium.setRemoteReference(reference)
reference.notifyOnDisconnect(remoteDisconnected)
def loginFailedDisconnect(failure):
# We _always_ disconnect. Then, we may log a more specific failure
# message at a higher warning level.
self.debug('Login failed, reason: %s, disconnecting', failure)
self.disconnect()
return failure
def accessDeniedErrback(failure):
failure.trap(errors.NotAuthenticatedError)
self.warning('Access denied.')
def connectionRefusedErrback(failure):
failure.trap(error.ConnectionRefusedError)
self.warning('Connection to manager refused.')
def alreadyLoggedInErrback(failure):
failure.trap(errors.AlreadyConnectedError)
self.warning('Component with id %s is already logged in.',
self.medium.authenticator.avatarId)
def loginFailedErrback(failure):
self.warning('Login failed, reason: %s' % failure)
d.addCallback(loginCallback)
d.addErrback(loginFailedDisconnect)
d.addErrback(accessDeniedErrback)
d.addErrback(connectionRefusedErrback)
d.addErrback(alreadyLoggedInErrback)
d.addErrback(loginFailedErrback)
# we want to save the authenticator
def startLogin(self, authenticator):
self.medium.setAuthenticator(authenticator)
return fpb.ReconnectingFPBClientFactory.startLogin(self, authenticator)
def _maybeDeferredChain(procs, *args, **kwargs):
"""
Creates a deferred chain created by chaining calls to the given
procedures, each of them made with the given args and kwargs.
Only the result of the last procedure is returned; results for the
other procedures are discarded.
    Failures triggered during any of the procedures short-circuit execution
of the other procedures and should be handled by the errbacks attached
to the deferred returned here.
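
    For example, _maybeDeferredChain([do_check, do_setup], self) calls
    do_check(self) and then do_setup(self); the returned deferred fires
    with do_setup's result.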
@rtype: L{twisted.internet.defer.Deferred}
"""
def call_proc(_, p):
log.debug('', 'calling %r', p)
return p(*args, **kwargs)
if not procs:
return defer.succeed(None)
p, procs = procs[0], procs[1:]
d = defer.maybeDeferred(call_proc, None, p)
for p in procs:
d.addCallback(call_proc, p)
return d
# needs to be before BaseComponent because BaseComponent references it
class BaseComponentMedium(medium.PingingMedium):
"""
I am a medium interfacing with a manager-side avatar.
I implement a Referenceable for the manager's avatar to call on me.
I have a remote reference to the manager's avatar to call upon.
I am created by the L{ComponentClientFactory}.
@cvar authenticator: the authenticator used to log in to manager
@type authenticator: L{flumotion.twisted.pb.Authenticator}
"""
implements(interfaces.IComponentMedium)
logCategory = 'basecompmed'
def __init__(self, component):
"""
@param component: L{flumotion.component.component.BaseComponent}
"""
self.comp = component
self.authenticator = None
self.broker = None
def setRemoteReference(self, reference):
self.broker = None # We no longer need that reference
medium.PingingMedium.setRemoteReference(self, reference)
### our methods
def setup(self, config):
pass
def getManagerIP(self):
"""
Return the manager IP as seen by us.
"""
assert self.remote or self.broker
broker = self.broker or self.remote.broker
peer = broker.transport.getPeer()
try:
host = peer.host
except AttributeError:
host = peer[1]
res = socket.gethostbyname(host)
self.debug("getManagerIP(): we think the manager's IP is %r" % res)
return res
def getIP(self):
"""
Return the IP of this component based on connection to the manager.
Note: this is insufficient in general, and should be replaced by
network mapping stuff later.
"""
assert self.remote, "%r does not have a remote connection" % self
host = self.remote.broker.transport.getHost()
self.debug("getIP(): using %r as our IP", host.host)
return host.host
def setAuthenticator(self, authenticator):
"""
Set the authenticator the client factory has used to log in to the
manager. Can be reused by the component's medium to make
feed connections which also get authenticated by the manager's
bouncer.
@type authenticator: L{flumotion.twisted.pb.Authenticator}
"""
self.authenticator = authenticator
### pb.Referenceable remote methods
### called from manager by our avatar
def remote_getState(self):
"""
Return the state of the component, which will be serialized to a
L{flumotion.common.planet.ManagerJobState} object.
@rtype: L{flumotion.common.planet.WorkerJobState}
@returns: state of component
"""
# we can only get the IP after we have a remote reference, so add it
# here
self.comp.state.set('manager-ip', self.getManagerIP())
return self.comp.state
def remote_getConfig(self):
"""
Return the configuration of the component.
@rtype: dict
@returns: component's current configuration
"""
return self.comp.config
def remote_stop(self):
self.info('Stopping component')
return self.comp.stop()
def remote_reloadComponent(self):
"""Reload modules in the component."""
from flumotion.common.reload import reloadFlumotion
reloadFlumotion()
def remote_getUIState(self):
"""Get a WorkerComponentUIState containing details needed to
present an admin-side UI state
"""
return self.comp.uiState
def remote_getMood(self):
"""Get mood of the component
"""
return self.comp.getMood()
def remote_getMasterClockInfo(self):
"""
Base implementation of getMasterClockInfo, can be overridden by
subclasses. By default, just returns None.
"""
return None
def remote_getVersions(self):
return debug.getVersions()
def remote_setFluDebug(self, debug):
"""
Sets the Flumotion debugging levels based on the passed debug string.
@since: 0.6.0
"""
self.debug('Setting Flumotion debug level to %s' % debug)
self.comp.uiState.set('flu-debug', debug)
log.setDebug(debug)
def remote_modifyProperty(self, property, value):
"""
Modifies a component property on the fly
@since: 0.9.0
"""
self.info('Modifying property %s to %s', property, value)
return self.comp.modifyProperty(property, value)
class BaseComponent(common.InitMixin, log.Loggable):
"""
I am the base class for all Flumotion components.
@ivar name: the name of the component
@type name: string
@ivar medium: the component's medium
@type medium: L{BaseComponentMedium}
@ivar uiState: state of the component to be shown in a UI.
Contains at least the following keys.
- cpu-percent: percentage of CPU use in last interval
- start-time: time when component was started, in epoch
seconds
- current-time: current time in epoch seconds, as seen on
component's machine, which might be out of
sync
- virtual-size: virtual memory size in bytes
Subclasses can add additional keys for their respective UI.
@type uiState: L{componentui.WorkerComponentUIState}
@cvar componentMediumClass: the medium class to use for this component
@type componentMediumClass: child class of L{BaseComponentMedium}
"""
logCategory = 'basecomp'
componentMediumClass = BaseComponentMedium
implements(IStateCacheableListener)
def __init__(self, config, haveError=None):
"""
Subclasses should not override __init__ at all.
Instead, they should implement init(), which will be called
by this implementation automatically.
L{flumotion.common.common.InitMixin} for more details.
"""
self.debug("initializing %r with config %r", type(self), config)
self.config = config
self._haveError = haveError
# this will call self.init() for all implementors of init()
common.InitMixin.__init__(self)
self.setup()
# BaseComponent interface for subclasses related to component protocol
def init(self):
"""
A subclass should do as little as possible in its init method.
In particular, it should not try to access resources.
Failures during init are marshalled back to the manager through
the worker's remote_create method, since there is no component state
proxied to the manager yet at the time of init.
"""
self.state = planet.WorkerJobState()
self.name = self.config['name']
self.state.set('pid', os.getpid())
self.setMood(moods.waking)
self.medium = None # the medium connecting us to the manager's avatar
self.uiState = componentui.WorkerComponentUIState()
self.uiState.addKey('cpu-percent')
self.uiState.addKey('start-time')
self.uiState.addKey('current-time')
self.uiState.addKey('virtual-size')
self.uiState.addKey('total-memory')
self.uiState.addKey('num-cpus')
self.uiState.addKey('flu-debug')
self.uiState.addKey('properties')
self.uiState.addHook(self)
self.plugs = {}
self._happyWaits = []
# Start the cpu-usage updating.
self._lastTime = time.time()
self._lastClock = time.clock()
self._cpuPoller = Poller(self._pollCPU, 5, start=False)
self._memoryPoller = Poller(self._pollMemory, 60, start=False)
self._cpuPollerDC = None
self._memoryPollerDC = None
self._shutdownHook = None
### IStateCacheable Interface
def observerAppend(self, observer, num):
"""
Triggered when a uiState observer was added.
Default implementation is to start the memory and cpu pollers.
Note:
Subclasses can override me but should chain me up to start these
pollers
"""
self.debug("observer has started watching us, starting pollers")
if not self._cpuPoller.running and not self._cpuPollerDC:
self._cpuPollerDC = reactor.callLater(0,
self._cpuPoller.start,
immediately=True)
if not self._memoryPoller.running and not self._memoryPollerDC:
self._memoryPollerDC = reactor.callLater(0,
self._memoryPoller.start,
immediately=True)
def observerRemove(self, observer, num):
"""
Triggered when a uiState observer has left.
Default implementation is to stop the memory and cpu pollers
when the total number of observers denoted by the 'num'
argument becomes zero.
Note:
Subclasses can override me but should chain me up to stop these
pollers
"""
if num == 0:
self.debug("no more observers left, shutting down pollers")
# Cancel any pending callLaters
if self._cpuPollerDC:
self._cpuPollerDC.cancel()
self._cpuPollerDC = None
if self._memoryPollerDC:
self._memoryPollerDC.cancel()
self._memoryPollerDC = None
if self._cpuPoller:
self._cpuPoller.stop()
if self._memoryPoller:
self._memoryPoller.stop()
def do_check(self):
"""
Subclasses can implement me to run any checks before the component
performs setup.
Messages can be added to the component state's 'messages' list key.
Any error messages added will trigger the component going to sad,
with L{flumotion.common.errors.ComponentSetupError} being raised
before getting to setup stage; do_setup() will not be called.
In the event of a fatal problem that can't be expressed through an
error message, this method should raise an exception or return a
failure.
It is not necessary to chain up in this function. The return
value may be a deferred.
"""
return defer.maybeDeferred(self.check_properties,
self.config['properties'],
self.addMessage)
def check_properties(self, properties, addMessage):
"""
BaseComponent convenience vmethod for running checks.
A component implementation can override this method to run any
checks that it needs to. Typically, a check_properties
implementation will call the provided addMessage() callback to
note warnings or errors. For errors, addMessage() will set
component's mood to sad, which will abort the init process
before getting to do_setup().
@param properties: The component's properties
@type properties: dict of string => object
@param addMessage: Thunk to add a message to the component
state. Will raise an exception if the
message is of level ERROR.
@type addMessage: L{flumotion.common.messages.Message} -> None
"""
pass
def do_setup(self):
"""
Subclasses can implement me to set up the component before it is
started. It should set up the component, possibly opening files
and resources.
Non-programming errors should not be raised, but returned as a
failing deferred.
The return value may be a deferred.
"""
plug_starts = []
for socket, plugs in self.config['plugs'].items():
self.plugs[socket] = []
for plug in plugs:
entry = plug['entries']['default']
instance = reflectcall.reflectCall(entry['module-name'],
entry['function-name'],
plug)
self.plugs[socket].append(instance)
self.debug('Starting plug %r on socket %s',
instance, socket)
plug_starts.append(instance.start)
# Call check methods, starting from the base class and working down to
# subclasses.
checks = common.get_all_methods(self, 'do_check', False)
def checkErrorCallback(result):
# if the mood is now sad, it means an error was encountered
# during check, and we should return a failure here.
# since the checks are responsible for adding a message,
# this is a handled error.
current = self.state.get('mood')
if current == moods.sad.value:
self.warning('Running checks made the component sad.')
raise errors.ComponentSetupHandledError()
checks.append(checkErrorCallback)
return _maybeDeferredChain(plug_starts + checks, self)
def do_stop(self):
"""
BaseComponent vmethod for stopping.
The component should do any cleanup it needs, but must not set the
component's mood to sleeping.
@Returns: L{twisted.internet.defer.Deferred}
"""
plug_stops = []
for socket, plugs in self.plugs.items():
for plug in plugs:
self.debug('Stopping plug %r on socket %s', plug, socket)
plug_stops.append(plug.stop)
for message in self.state.get('messages'):
# FIXME: not necessary
self.state.remove('messages', message)
# Cancel any pending callLaters
if self._cpuPollerDC:
self._cpuPollerDC.cancel()
self._cpuPollerDC = None
if self._memoryPollerDC:
self._memoryPollerDC.cancel()
self._memoryPollerDC = None
if self._cpuPoller:
self._cpuPoller.stop()
self._cpuPoller = None
if self._memoryPoller:
self._memoryPoller.stop()
self._memoryPoller = None
if self._shutdownHook:
self.debug('_stoppedCallback: firing shutdown hook')
self._shutdownHook()
return _maybeDeferredChain(plug_stops, self)
### BaseComponent implementation related to compoment protocol
def setup(self):
"""
Sets up the component. Called during __init__, so be sure not
to raise exceptions, instead adding messages to the component
state.
"""
def run_setups():
setups = common.get_all_methods(self, 'do_setup', False)
return _maybeDeferredChain(setups, self)
def setup_complete(_):
self.debug('setup completed')
self.setup_completed()
def got_error(failure):
txt = log.getFailureMessage(failure)
self.debug('got_error: %s', txt)
if not failure.check(errors.ComponentSetupHandledError):
self.warning('Setup failed: %s', txt)
m = messages.Error(T_(N_("Could not setup component.")),
debug=txt,
mid="component-setup-%s" % self.name)
# will call setMood(moods.sad)
self.addMessage(m)
# swallow
return None
self.setMood(moods.waking)
self.uiState.set('start-time', time.time())
self.uiState.set('total-memory', self._getTotalMemory())
self.uiState.set('num-cpus', self._getNumberOfCPUs())
self.uiState.set('flu-debug', log.getDebug())
d = run_setups()
d.addCallbacks(setup_complete, got_error)
# all status info via messages and the mood
def setup_completed(self):
self.debug('turning happy')
self.setMood(moods.happy)
def setShutdownHook(self, shutdownHook):
"""
Set the shutdown hook for this component (replacing any previous hook).
        When the component is stopped, this hook will be fired.
"""
self._shutdownHook = shutdownHook
def stop(self):
"""
Tell the component to stop.
The connection to the manager will be closed.
The job process will also finish.
"""
self.debug('BaseComponent.stop')
# Set ourselves to waking while we're shutting down.
self.setMood(moods.waking)
# Run stop methods, starting from the subclass, up to this base class.
stops = common.get_all_methods(self, 'do_stop', True)
return _maybeDeferredChain(stops, self)
### BaseComponent public methods
def getName(self):
return self.name
def setWorkerName(self, workerName):
self.state.set('workerName', workerName)
def getWorkerName(self):
return self.state.get('workerName')
def setMedium(self, medium):
assert isinstance(medium, BaseComponentMedium)
self.medium = medium
self.medium.logName = self.getName()
for plugs in self.plugs.values():
for plug in plugs:
self._export_plug_interface(plug, medium)
def setMood(self, mood):
"""
Set the given mood on the component if it's different from the current
one.
"""
current = self.state.get('mood')
if current == mood.value:
self.log('already in mood %r' % mood)
return
elif current == moods.sad.value:
self.info('tried to set mood to %r, but already sad :-(' % mood)
return
self.doLog(log.DEBUG, -2, 'MOOD changed to %r by caller', mood)
self.state.set('mood', mood.value)
if mood == moods.happy:
while self._happyWaits:
self._happyWaits.pop(0).callback(None)
elif mood == moods.sad:
while self._happyWaits:
self._happyWaits.pop(0).errback(errors.ComponentStartError())
def getMood(self):
"""
Gets the mood on the component.
@rtype: int
"""
return self.state.get('mood')
def waitForHappy(self):
mood = self.getMood()
if mood == moods.happy.value:
return defer.succeed(None)
elif mood == moods.sad.value:
return defer.fail(errors.ComponentStartError())
else:
d = defer.Deferred()
self._happyWaits.append(d)
return d
def addMessage(self, message):
"""
Add a message to the component.
If any of the messages is an error, the component will turn sad.
@type message: L{flumotion.common.messages.Message}
"""
self.state.append('messages', message)
if message.level == messages.ERROR:
self.debug('error message, turning sad')
self.setMood(moods.sad)
if self._haveError:
self._haveError(message)
def warnDeprecatedProperties(self, list):
"""
Add a warning messages for deprecated properties.
@param list: list of property names.
@type list: list of str
"""
msg = ("Your configuration uses deprecated properties. "
"Please update your configuration and correct them.\n")
m = messages.Warning(T_(N_(msg)), mid="deprecated")
for prop in list:
m.add(T_(N_(
"Please remove '%s' property.\n"), prop))
self.addMessage(m)
self.warning(msg)
def fixRenamedProperties(self, properties, list):
"""
Fix properties that have been renamed from a previous version,
and add a warning for them.
@param properties: properties; will be modified as a result.
@type properties: dict
@param list: list of (old, new) tuples of property names.
@type list: list of tuple of (str, str)
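
        For example, fixRenamedProperties(props, [('old-name', 'new-name')])
        moves props['old-name'] to props['new-name'] and adds a warning
        message to the component.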
"""
found = []
for old, new in list:
if old in properties:
found.append((old, new))
if found:
m = messages.Warning(T_(N_(
"Your configuration uses deprecated properties. "
"Please update your configuration and correct them.\n")),
mid="deprecated")
for old, new in found:
m.add(T_(N_(
"Please rename '%s' to '%s'.\n"),
old, new))
self.debug("Setting new property '%s' to %r", new,
properties[old])
properties[new] = properties[old]
del properties[old]
self.addMessage(m)
def adminCallRemote(self, methodName, *args, **kwargs):
"""
Call a remote method on all admin client views on this component.
This gets serialized through the manager and multiplexed to all
admin clients, and from there on to all views connected to each
admin client model.
Because there can be any number of admin clients that this call
        will go out to, it does not make sense to have one return value.
        This function will always return None.
"""
if self.medium:
self.medium.callRemote("adminCallRemote", methodName,
*args, **kwargs)
else:
self.debug('asked to adminCallRemote(%s, *%r, **%r), but '
'no manager.'
% (methodName, args, kwargs))
def modifyProperty(self, property_name, value):
"""
        Modifies a property of the component.
Components with modifiable properties (properties that can be changed
on the fly) should implement modify_property_(propertyName) to receive
the call
@param property_name: Name of the property to change
@type property_name: str
@param value: Value to set
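
        For example, modifyProperty('max-reconnections-delay', 5) dispatches
        to self.modify_property_MaxReconnectionsDelay(5) and, if that
        succeeds, stores the new value in the configuration and uiState.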
"""
# Transform property name to camel case:
# max-reconnections-delay -> MaxReconnectionsDelay
p = ''.join([t.title() for t in property_name.split('-')])
method_name = "modify_property_%s" % p
if not hasattr(self, method_name):
raise errors.PropertyNotModifiableError("%s" % (property_name))
method = getattr(self, method_name)
if not method(value):
return False
self.config['properties'][property_name] = value
self.uiState.set('properties', self.config['properties'])
return True
def checkPropertyType(self, property_name, value, allowed_type):
"""
Check that the value to be set in a property is of the correct type
@returns: True if the value is of the correct type
"""
if type(value) != allowed_type:
self.warning("Could not set the property %s in %s. "
"'value' must be of %s", property_name, self,
allowed_type)
return False
return True
def _export_plug_interface(self, plug, medium):
try:
namespace = plug.get_namespace()
except AttributeError:
self.debug("Plug %r does not provide namespace, "
"its interface will not be exposed", plug)
return
self.debug("Exposing plug's %r interface in namespace %r",
plug, namespace)
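        # e.g. a plug method remote_getStats in namespace 'stats' is exposed
        # on the medium as remote_stats_getStats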
for method in filter(callable,
[getattr(plug, m) for m in dir(plug)
if m.startswith('remote_')]):
if namespace:
name = "".join(("remote_", namespace, "_",
method.__name__[len("remote_"):]))
else:
name = method.__name__
self.debug("Exposing method %r as %r in %r", method, name, medium)
setattr(medium, name, method)
def _pollCPU(self):
self._cpuPollerDC = None
# update CPU time stats
nowTime = time.time()
nowClock = time.clock()
deltaTime = nowTime - self._lastTime
deltaClock = nowClock - self._lastClock
self._lastTime = nowTime
self._lastClock = nowClock
# deltaClock can be < 0 if time.clock() wrapped around
if deltaClock >= 0:
CPU = deltaClock/deltaTime
self.log('latest CPU use: %r', CPU)
self.uiState.set('cpu-percent', CPU)
self.uiState.set('current-time', nowTime)
def _pollMemory(self):
self._memoryPollerDC = None
# Figure out our virtual memory size and report that.
# I don't know a nicer way to find vsize than groping /proc/
handle = open('/proc/%d/stat' % os.getpid())
line = handle.read()
handle.close()
fields = line.split()
# field 1 (comm) could potentially contain spaces and thus split over
# multiple list items, but our processes do not contain spaces
vsize = int(fields[22])
self.log('vsize is %d', vsize)
self.uiState.set('virtual-size', vsize)
def _getTotalMemory(self):
f = open("/proc/meminfo")
memtotal = f.readline()
f.close()
return int(memtotal[memtotal.index(":") + 1: -3]) * 1024
def _getNumberOfCPUs(self):
try:
return open('/proc/cpuinfo').read().count('processor\t:')
except IOError:
self.debug('Can not determine number of CPUs on this system')
return 1
| offlinehacker/flumotion | flumotion/component/component.py | Python | gpl-2.0 | 31,473 |
import unittest
from test import test_support
from contextlib import closing
import gc
import pickle
import select
import signal
import subprocess
import traceback
import sys, os, time, errno
if sys.platform in ('os2', 'riscos'):
raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
class HandlerBCalled(Exception):
pass
def exit_subprocess():
"""Use os._exit(0) to exit the current subprocess.
Otherwise, the test catches the SystemExit and continues executing
in parallel with the original test, so you wind up with an
exponential number of tests running concurrently.
"""
os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
try:
return __func(*args, **kwargs)
except EnvironmentError as e:
if e.errno != errno.EINTR:
raise
return None
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
MAX_DURATION = 20 # Entire test should last at most 20 sec.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def format_frame(self, frame, limit=None):
return ''.join(traceback.format_stack(frame, limit=limit))
def handlerA(self, signum, frame):
self.a_called = True
if test_support.verbose:
print "handlerA invoked from signal %s at:\n%s" % (
signum, self.format_frame(frame, limit=1))
def handlerB(self, signum, frame):
self.b_called = True
if test_support.verbose:
print "handlerB invoked from signal %s at:\n%s" % (
signum, self.format_frame(frame, limit=1))
raise HandlerBCalled(signum, self.format_frame(frame))
def wait(self, child):
"""Wait for child to finish, ignoring EINTR."""
while True:
try:
child.wait()
return
except OSError as e:
if e.errno != errno.EINTR:
raise
def run_test(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
self.a_called = False
self.b_called = False
# Let the sub-processes know who to send signals to.
pid = os.getpid()
if test_support.verbose:
print "test runner's pid is", pid
child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
if child:
self.wait(child)
if not self.a_called:
time.sleep(1) # Give the signal time to be delivered.
self.assertTrue(self.a_called)
self.assertFalse(self.b_called)
self.a_called = False
# Make sure the signal isn't delivered while the previous
# Popen object is being destroyed, because __del__ swallows
# exceptions.
del child
try:
child = subprocess.Popen(['kill', '-USR1', str(pid)])
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
self.fail('HandlerBCalled exception not raised')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
if test_support.verbose:
print "HandlerBCalled exception caught"
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
# The race condition in pause doesn't matter in this case,
            # since alarm is going to raise a KeyboardInterrupt, which
# will skip the call.
signal.pause()
# But if another signal arrives before the alarm, pause
# may return early.
time.sleep(1)
except KeyboardInterrupt:
if test_support.verbose:
print "KeyboardInterrupt (the alarm() went off)"
except:
self.fail("Some other exception woke us from pause: %s" %
traceback.format_exc())
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform=='freebsd6',
'inter process signals not reliable (do not mix well with threading) '
'on freebsd6')
def test_main(self):
# This function spawns a child process to insulate the main
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
# raises. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r)) as done_r, \
closing(os.fdopen(os_done_w, 'w')) as done_w:
child = os.fork()
if child == 0:
# In the child process; run the test and report results
# through the pipe.
try:
done_r.close()
# Have to close done_w again here because
# exit_subprocess() will skip the enclosing with block.
with closing(done_w):
try:
self.run_test()
except:
pickle.dump(traceback.format_exc(), done_w)
else:
pickle.dump(None, done_w)
except:
print 'Uh oh, raised from pickle.'
traceback.print_exc()
finally:
exit_subprocess()
done_w.close()
# Block for up to MAX_DURATION seconds for the test to finish.
r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
if done_r in r:
tb = pickle.load(done_r)
if tb:
self.fail(tb)
else:
os.kill(child, signal.SIGKILL)
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class BasicSignalTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows
signal.signal(sig, signal.signal(sig, handler))
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
class WakeupFDTests(unittest.TestCase):
def test_invalid_fd(self):
fd = test_support.make_bad_fd()
self.assertRaises(ValueError, signal.set_wakeup_fd, fd)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
def test_wakeup_fd_early(self):
import select
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(self.TIMEOUT_FULL)
mid_time = time.time()
self.assertTrue(mid_time - before_time < self.TIMEOUT_HALF)
select.select([self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
self.assertTrue(after_time - mid_time < self.TIMEOUT_HALF)
def test_wakeup_fd_during(self):
import select
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
self.assertRaises(select.error, select.select,
[self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
self.assertTrue(after_time - before_time < self.TIMEOUT_HALF)
def setUp(self):
import fcntl
self.alrm = signal.signal(signal.SIGALRM, lambda x,y:None)
self.read, self.write = os.pipe()
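        # Make the pipe's write end non-blocking before installing it as the
        # signal wakeup fd, so writes from the signal machinery cannot block.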
flags = fcntl.fcntl(self.write, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(self.write, fcntl.F_SETFL, flags)
self.old_wakeup = signal.set_wakeup_fd(self.write)
def tearDown(self):
signal.set_wakeup_fd(self.old_wakeup)
os.close(self.read)
os.close(self.write)
signal.signal(signal.SIGALRM, self.alrm)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def setUp(self):
"""Install a no-op signal handler that can be set to allow
interrupts or not, and arrange for the original signal handler to be
re-installed when the test is finished.
"""
self.signum = signal.SIGUSR1
oldhandler = signal.signal(self.signum, lambda x,y: None)
self.addCleanup(signal.signal, self.signum, oldhandler)
def readpipe_interrupted(self):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# Create a pipe that can be used for the read. Also clean it up
# when the test is over, since nothing else will (but see below for
# the write end).
r, w = os.pipe()
self.addCleanup(os.close, r)
# Create another process which can send a signal to this one to try
# to interrupt the read.
ppid = os.getpid()
pid = os.fork()
if pid == 0:
# Child code: sleep to give the parent enough time to enter the
# read() call (there's a race here, but it's really tricky to
# eliminate it); then signal the parent process. Also, sleep
# again to make it likely that the signal is delivered to the
# parent process before the child exits. If the child exits
# first, the write end of the pipe will be closed and the test
# is invalid.
try:
time.sleep(0.2)
os.kill(ppid, self.signum)
time.sleep(0.2)
finally:
# No matter what, just exit as fast as possible now.
exit_subprocess()
else:
# Parent code.
# Make sure the child is eventually reaped, else it'll be a
# zombie for the rest of the test suite run.
self.addCleanup(os.waitpid, pid, 0)
# Close the write end of the pipe. The child has a copy, so
# it's not really closed until the child exits. We need it to
# close when the child exits so that in the non-interrupt case
# the read eventually completes, otherwise we could just close
# it *after* the test.
os.close(w)
# Try the read and report whether it is interrupted or not to
# the caller.
try:
d = os.read(r, 1)
return False
except OSError, err:
if err.errno != errno.EINTR:
raise
return True
def test_without_siginterrupt(self):
"""If a signal handler is installed and siginterrupt is not called
at all, when that signal arrives, it interrupts a syscall that's in
progress.
"""
i = self.readpipe_interrupted()
self.assertTrue(i)
# Arrival of the signal shouldn't have changed anything.
i = self.readpipe_interrupted()
self.assertTrue(i)
def test_siginterrupt_on(self):
"""If a signal handler is installed and siginterrupt is called with
a true value for the second argument, when that signal arrives, it
interrupts a syscall that's in progress.
"""
signal.siginterrupt(self.signum, 1)
i = self.readpipe_interrupted()
self.assertTrue(i)
# Arrival of the signal shouldn't have changed anything.
i = self.readpipe_interrupted()
self.assertTrue(i)
def test_siginterrupt_off(self):
"""If a signal handler is installed and siginterrupt is called with
a false value for the second argument, when that signal arrives, it
does not interrupt a syscall that's in progress.
"""
signal.siginterrupt(self.signum, 0)
i = self.readpipe_interrupted()
self.assertFalse(i)
# Arrival of the signal shouldn't have changed anything.
i = self.readpipe_interrupted()
self.assertFalse(i)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
if test_support.verbose:
print("SIGALRM handler invoked", args)
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
if test_support.verbose:
print("last SIGVTALRM handler call")
self.hndl_count += 1
if test_support.verbose:
print("SIGVTALRM handler invoked", args)
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
if test_support.verbose:
print("SIGPROF handler invoked", args)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
if test_support.verbose:
print("\ncall pause()...")
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform=='freebsd6',
'itimer not reliable (does not mix well with threading) on freebsd6')
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_main():
test_support.run_unittest(BasicSignalTests, InterProcessSignalTests,
WakeupFDTests, WakeupSignalTests,
SiginterruptTest, ItimerTest,
WindowsSignalTests)
if __name__ == "__main__":
test_main()
| j5shi/Thruster | pylibs/test/test_signal.py | Python | gpl-2.0 | 19,406 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'batch_iv_analysis/batch-iv-analysis.ui'
#
# Created by: PyQt5 UI code generator 5.8
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_batch_iv_analysis(object):
def setupUi(self, batch_iv_analysis):
batch_iv_analysis.setObjectName("batch_iv_analysis")
batch_iv_analysis.resize(847, 809)
self.centralwidget = QtWidgets.QWidget(batch_iv_analysis)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.tehTabs = QtWidgets.QTabWidget(self.centralwidget)
self.tehTabs.setObjectName("tehTabs")
self.resultsTabs = QtWidgets.QWidget()
self.resultsTabs.setObjectName("resultsTabs")
self.gridLayout_3 = QtWidgets.QGridLayout(self.resultsTabs)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.tableWidget = QtWidgets.QTableWidget(self.resultsTabs)
self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget.setAlternatingRowColors(True)
self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectItems)
self.tableWidget.setColumnCount(0)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setRowCount(0)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setDefaultSectionSize(92)
self.tableWidget.horizontalHeader().setSortIndicatorShown(True)
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.verticalHeader().setDefaultSectionSize(30)
self.tableWidget.verticalHeader().setSortIndicatorShown(True)
self.gridLayout_3.addWidget(self.tableWidget, 0, 0, 1, 1)
self.tehTabs.addTab(self.resultsTabs, "")
self.plotTab = QtWidgets.QWidget()
self.plotTab.setObjectName("plotTab")
self.tehTabs.addTab(self.plotTab, "")
self.settingsTab = QtWidgets.QWidget()
self.settingsTab.setObjectName("settingsTab")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.settingsTab)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName("formLayout_2")
self.attemptCharEqnFitLabel = QtWidgets.QLabel(self.settingsTab)
self.attemptCharEqnFitLabel.setObjectName("attemptCharEqnFitLabel")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.attemptCharEqnFitLabel)
self.attemptCharEqnFitCheckBox = QtWidgets.QCheckBox(self.settingsTab)
self.attemptCharEqnFitCheckBox.setChecked(True)
self.attemptCharEqnFitCheckBox.setObjectName("attemptCharEqnFitCheckBox")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.attemptCharEqnFitCheckBox)
self.doFastAndSloppyMathLabel = QtWidgets.QLabel(self.settingsTab)
self.doFastAndSloppyMathLabel.setObjectName("doFastAndSloppyMathLabel")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.doFastAndSloppyMathLabel)
self.doFastAndSloppyMathCheckBox = QtWidgets.QCheckBox(self.settingsTab)
self.doFastAndSloppyMathCheckBox.setChecked(True)
self.doFastAndSloppyMathCheckBox.setObjectName("doFastAndSloppyMathCheckBox")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doFastAndSloppyMathCheckBox)
self.lowerVoltageCutoffLabel = QtWidgets.QLabel(self.settingsTab)
self.lowerVoltageCutoffLabel.setObjectName("lowerVoltageCutoffLabel")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.lowerVoltageCutoffLabel)
self.lowerVoltageCutoffLineEdit = QtWidgets.QLineEdit(self.settingsTab)
self.lowerVoltageCutoffLineEdit.setObjectName("lowerVoltageCutoffLineEdit")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lowerVoltageCutoffLineEdit)
self.upperVoltageCutoffLabel = QtWidgets.QLabel(self.settingsTab)
self.upperVoltageCutoffLabel.setObjectName("upperVoltageCutoffLabel")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.upperVoltageCutoffLabel)
self.upperVoltageCutoffLineEdit = QtWidgets.QLineEdit(self.settingsTab)
self.upperVoltageCutoffLineEdit.setObjectName("upperVoltageCutoffLineEdit")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.upperVoltageCutoffLineEdit)
self.fitMethodLabel = QtWidgets.QLabel(self.settingsTab)
self.fitMethodLabel.setObjectName("fitMethodLabel")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.fitMethodLabel)
self.fitMethodComboBox = QtWidgets.QComboBox(self.settingsTab)
self.fitMethodComboBox.setObjectName("fitMethodComboBox")
self.fitMethodComboBox.addItem("")
self.fitMethodComboBox.addItem("")
self.fitMethodComboBox.addItem("")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.fitMethodComboBox)
self.verbosityLabel = QtWidgets.QLabel(self.settingsTab)
self.verbosityLabel.setObjectName("verbosityLabel")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.verbosityLabel)
self.verbositySpinBox = QtWidgets.QSpinBox(self.settingsTab)
self.verbositySpinBox.setMaximum(2)
self.verbositySpinBox.setObjectName("verbositySpinBox")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.verbositySpinBox)
self.analysisThreadsLabel = QtWidgets.QLabel(self.settingsTab)
self.analysisThreadsLabel.setObjectName("analysisThreadsLabel")
self.formLayout_2.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.analysisThreadsLabel)
self.analysisThreadsSpinBox = QtWidgets.QSpinBox(self.settingsTab)
self.analysisThreadsSpinBox.setMinimum(1)
self.analysisThreadsSpinBox.setProperty("value", 8)
self.analysisThreadsSpinBox.setObjectName("analysisThreadsSpinBox")
self.formLayout_2.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.analysisThreadsSpinBox)
self.useMultithreadingModeLabel = QtWidgets.QLabel(self.settingsTab)
self.useMultithreadingModeLabel.setObjectName("useMultithreadingModeLabel")
self.formLayout_2.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.useMultithreadingModeLabel)
self.useMultithreadingModeCheckBox = QtWidgets.QCheckBox(self.settingsTab)
self.useMultithreadingModeCheckBox.setChecked(True)
self.useMultithreadingModeCheckBox.setObjectName("useMultithreadingModeCheckBox")
self.formLayout_2.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.useMultithreadingModeCheckBox)
self.verticalLayout_4.addLayout(self.formLayout_2)
self.resetSettingsButton = QtWidgets.QPushButton(self.settingsTab)
self.resetSettingsButton.setObjectName("resetSettingsButton")
self.verticalLayout_4.addWidget(self.resetSettingsButton, 0, QtCore.Qt.AlignRight)
self.horizontalLayout.addLayout(self.verticalLayout_4)
self.tehTabs.addTab(self.settingsTab, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.Rsh_ub = QtWidgets.QLineEdit(self.tab)
self.Rsh_ub.setGeometry(QtCore.QRect(250, 190, 113, 32))
self.Rsh_ub.setObjectName("Rsh_ub")
self.Rsh_lb = QtWidgets.QLineEdit(self.tab)
self.Rsh_lb.setGeometry(QtCore.QRect(120, 190, 113, 32))
self.Rsh_lb.setObjectName("Rsh_lb")
self.label_3 = QtWidgets.QLabel(self.tab)
self.label_3.setGeometry(QtCore.QRect(20, 50, 61, 20))
self.label_3.setObjectName("label_3")
self.I0_lb = QtWidgets.QLineEdit(self.tab)
self.I0_lb.setGeometry(QtCore.QRect(120, 40, 113, 32))
self.I0_lb.setObjectName("I0_lb")
self.n_ub = QtWidgets.QLineEdit(self.tab)
self.n_ub.setGeometry(QtCore.QRect(250, 240, 113, 32))
self.n_ub.setObjectName("n_ub")
self.label_6 = QtWidgets.QLabel(self.tab)
self.label_6.setGeometry(QtCore.QRect(20, 200, 81, 20))
self.label_6.setObjectName("label_6")
self.label = QtWidgets.QLabel(self.tab)
self.label.setGeometry(QtCore.QRect(290, 10, 41, 20))
self.label.setObjectName("label")
self.line_6 = QtWidgets.QFrame(self.tab)
self.line_6.setGeometry(QtCore.QRect(20, 220, 351, 20))
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.label_2 = QtWidgets.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(160, 10, 41, 20))
self.label_2.setObjectName("label_2")
self.n_lb = QtWidgets.QLineEdit(self.tab)
self.n_lb.setGeometry(QtCore.QRect(120, 240, 113, 32))
self.n_lb.setObjectName("n_lb")
self.label_7 = QtWidgets.QLabel(self.tab)
self.label_7.setGeometry(QtCore.QRect(20, 250, 81, 20))
self.label_7.setObjectName("label_7")
self.I0_ub = QtWidgets.QLineEdit(self.tab)
self.I0_ub.setGeometry(QtCore.QRect(250, 40, 113, 32))
self.I0_ub.setObjectName("I0_ub")
self.Rs_lb = QtWidgets.QLineEdit(self.tab)
self.Rs_lb.setGeometry(QtCore.QRect(120, 140, 113, 32))
self.Rs_lb.setObjectName("Rs_lb")
self.line_4 = QtWidgets.QFrame(self.tab)
self.line_4.setGeometry(QtCore.QRect(20, 120, 351, 20))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.line_3 = QtWidgets.QFrame(self.tab)
self.line_3.setGeometry(QtCore.QRect(20, 70, 351, 20))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.Rs_ub = QtWidgets.QLineEdit(self.tab)
self.Rs_ub.setGeometry(QtCore.QRect(250, 140, 113, 32))
self.Rs_ub.setObjectName("Rs_ub")
self.line_2 = QtWidgets.QFrame(self.tab)
self.line_2.setGeometry(QtCore.QRect(120, 20, 251, 20))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.line = QtWidgets.QFrame(self.tab)
self.line.setGeometry(QtCore.QRect(233, 20, 20, 251))
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label_4 = QtWidgets.QLabel(self.tab)
self.label_4.setGeometry(QtCore.QRect(20, 100, 61, 20))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.tab)
self.label_5.setGeometry(QtCore.QRect(20, 150, 71, 20))
self.label_5.setObjectName("label_5")
self.line_5 = QtWidgets.QFrame(self.tab)
self.line_5.setGeometry(QtCore.QRect(20, 170, 351, 20))
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.Iph_ub = QtWidgets.QLineEdit(self.tab)
self.Iph_ub.setGeometry(QtCore.QRect(250, 90, 113, 32))
self.Iph_ub.setObjectName("Iph_ub")
self.Iph_lb = QtWidgets.QLineEdit(self.tab)
self.Iph_lb.setGeometry(QtCore.QRect(120, 90, 113, 32))
self.Iph_lb.setObjectName("Iph_lb")
self.tehTabs.addTab(self.tab, "")
self.gridLayout.addWidget(self.tehTabs, 1, 0, 1, 1)
batch_iv_analysis.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(batch_iv_analysis)
self.menubar.setGeometry(QtCore.QRect(0, 0, 847, 25))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuTools = QtWidgets.QMenu(self.menubar)
self.menuTools.setObjectName("menuTools")
batch_iv_analysis.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(batch_iv_analysis)
self.statusbar.setEnabled(True)
self.statusbar.setObjectName("statusbar")
batch_iv_analysis.setStatusBar(self.statusbar)
self.tehDock = QtWidgets.QDockWidget(batch_iv_analysis)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tehDock.sizePolicy().hasHeightForWidth())
self.tehDock.setSizePolicy(sizePolicy)
self.tehDock.setMinimumSize(QtCore.QSize(93, 118))
self.tehDock.setFeatures(QtWidgets.QDockWidget.DockWidgetFloatable|QtWidgets.QDockWidget.DockWidgetMovable)
self.tehDock.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea|QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
self.tehDock.setObjectName("tehDock")
self.dockWidgetContents = QtWidgets.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout_2 = QtWidgets.QGridLayout(self.dockWidgetContents)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.eventLog = QtWidgets.QTextBrowser(self.dockWidgetContents)
self.eventLog.setObjectName("eventLog")
self.gridLayout_2.addWidget(self.eventLog, 0, 0, 1, 1)
self.tehDock.setWidget(self.dockWidgetContents)
batch_iv_analysis.addDockWidget(QtCore.Qt.DockWidgetArea(8), self.tehDock)
self.actionQuit = QtWidgets.QAction(batch_iv_analysis)
self.actionQuit.setObjectName("actionQuit")
self.actionOpen = QtWidgets.QAction(batch_iv_analysis)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtWidgets.QAction(batch_iv_analysis)
self.actionSave.setObjectName("actionSave")
self.actionClear_Table = QtWidgets.QAction(batch_iv_analysis)
self.actionClear_Table.setEnabled(True)
self.actionClear_Table.setObjectName("actionClear_Table")
self.actionFsadf = QtWidgets.QAction(batch_iv_analysis)
self.actionFsadf.setObjectName("actionFsadf")
self.actionSet_Bounds = QtWidgets.QAction(batch_iv_analysis)
self.actionSet_Bounds.setObjectName("actionSet_Bounds")
self.actionWatch = QtWidgets.QAction(batch_iv_analysis)
self.actionWatch.setObjectName("actionWatch")
self.actionEnable_Watching = QtWidgets.QAction(batch_iv_analysis)
self.actionEnable_Watching.setCheckable(True)
self.actionEnable_Watching.setChecked(False)
self.actionEnable_Watching.setObjectName("actionEnable_Watching")
self.actionWatch_2 = QtWidgets.QAction(batch_iv_analysis)
self.actionWatch_2.setObjectName("actionWatch_2")
self.actionFit_Constraints = QtWidgets.QAction(batch_iv_analysis)
self.actionFit_Constraints.setObjectName("actionFit_Constraints")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionWatch_2)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuTools.addAction(self.actionClear_Table)
self.menuTools.addAction(self.actionEnable_Watching)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuTools.menuAction())
self.retranslateUi(batch_iv_analysis)
self.tehTabs.setCurrentIndex(0)
self.fitMethodComboBox.setCurrentIndex(2)
self.actionQuit.triggered.connect(batch_iv_analysis.close)
QtCore.QMetaObject.connectSlotsByName(batch_iv_analysis)
def retranslateUi(self, batch_iv_analysis):
_translate = QtCore.QCoreApplication.translate
batch_iv_analysis.setWindowTitle(_translate("batch_iv_analysis", "batch-iv-analysis"))
self.tableWidget.setSortingEnabled(True)
self.tehTabs.setTabText(self.tehTabs.indexOf(self.resultsTabs), _translate("batch_iv_analysis", "Results"))
self.tehTabs.setTabText(self.tehTabs.indexOf(self.plotTab), _translate("batch_iv_analysis", "Plots"))
self.attemptCharEqnFitLabel.setText(_translate("batch_iv_analysis", "Attempt Char. Eqn. Fit"))
self.doFastAndSloppyMathLabel.setText(_translate("batch_iv_analysis", "Do Fast and Sloppy Math"))
self.lowerVoltageCutoffLabel.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Any data points below this voltage will be ignored</p></body></html>"))
self.lowerVoltageCutoffLabel.setText(_translate("batch_iv_analysis", "Lower Voltage Cutoff"))
self.lowerVoltageCutoffLineEdit.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Any data points below this voltage will be ignored</p></body></html>"))
self.lowerVoltageCutoffLineEdit.setText(_translate("batch_iv_analysis", "-inf"))
self.upperVoltageCutoffLabel.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Any data points above this voltage will be ignored</p></body></html>"))
self.upperVoltageCutoffLabel.setText(_translate("batch_iv_analysis", "Upper Voltage Cutoff"))
self.upperVoltageCutoffLineEdit.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Any data points above this voltage will be ignored</p></body></html>"))
self.upperVoltageCutoffLineEdit.setText(_translate("batch_iv_analysis", "inf"))
self.fitMethodLabel.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Fit method to use in scipy.optimize.least_squares routine</p></body></html>"))
self.fitMethodLabel.setText(_translate("batch_iv_analysis", "Fit Method"))
self.fitMethodComboBox.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Fit method to use in scipy.optimize.least_squares routine</p></body></html>"))
self.fitMethodComboBox.setItemText(0, _translate("batch_iv_analysis", "Trust Region Reflective"))
self.fitMethodComboBox.setItemText(1, _translate("batch_iv_analysis", "dogleg"))
self.fitMethodComboBox.setItemText(2, _translate("batch_iv_analysis", "Levenberg-Marquardt"))
self.verbosityLabel.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Higher verbosity will generate more output status messages which can be helpful for debugging</p></body></html>"))
self.verbosityLabel.setText(_translate("batch_iv_analysis", "Verbosity"))
self.analysisThreadsLabel.setText(_translate("batch_iv_analysis", "Analysis Threads"))
self.useMultithreadingModeLabel.setToolTip(_translate("batch_iv_analysis", "<html><head/><body><p>Speeds up analysis by analyzing multiple files in parallel</p></body></html>"))
self.useMultithreadingModeLabel.setText(_translate("batch_iv_analysis", "Use Multiprocessing Mode"))
self.resetSettingsButton.setText(_translate("batch_iv_analysis", "Reset Defaults"))
self.tehTabs.setTabText(self.tehTabs.indexOf(self.settingsTab), _translate("batch_iv_analysis", "Settings"))
self.Rsh_ub.setText(_translate("batch_iv_analysis", "inf"))
self.Rsh_lb.setText(_translate("batch_iv_analysis", "0"))
self.label_3.setText(_translate("batch_iv_analysis", "I_0 [A]"))
self.I0_lb.setText(_translate("batch_iv_analysis", "0"))
self.n_ub.setText(_translate("batch_iv_analysis", "inf"))
self.label_6.setText(_translate("batch_iv_analysis", "R_sh [ohm]"))
self.label.setText(_translate("batch_iv_analysis", "Upper"))
self.label_2.setText(_translate("batch_iv_analysis", "Lower"))
self.n_lb.setText(_translate("batch_iv_analysis", "0"))
self.label_7.setText(_translate("batch_iv_analysis", "n"))
self.I0_ub.setText(_translate("batch_iv_analysis", "inf"))
self.Rs_lb.setText(_translate("batch_iv_analysis", "0"))
self.Rs_ub.setText(_translate("batch_iv_analysis", "inf"))
self.label_4.setText(_translate("batch_iv_analysis", "I_Ph [A]"))
self.label_5.setText(_translate("batch_iv_analysis", "R_s [ohm]"))
self.Iph_ub.setText(_translate("batch_iv_analysis", "inf"))
self.Iph_lb.setText(_translate("batch_iv_analysis", "0"))
self.tehTabs.setTabText(self.tehTabs.indexOf(self.tab), _translate("batch_iv_analysis", "Constraints"))
self.menuFile.setTitle(_translate("batch_iv_analysis", "File"))
self.menuTools.setTitle(_translate("batch_iv_analysis", "Tools"))
self.tehDock.setWindowTitle(_translate("batch_iv_analysis", "Event Log"))
self.actionQuit.setText(_translate("batch_iv_analysis", "Quit"))
self.actionQuit.setShortcut(_translate("batch_iv_analysis", "Ctrl+Q"))
self.actionOpen.setText(_translate("batch_iv_analysis", "Open"))
self.actionOpen.setShortcut(_translate("batch_iv_analysis", "Ctrl+O"))
self.actionSave.setText(_translate("batch_iv_analysis", "Export"))
self.actionSave.setShortcut(_translate("batch_iv_analysis", "Ctrl+S"))
self.actionClear_Table.setText(_translate("batch_iv_analysis", "Clear Table"))
self.actionClear_Table.setShortcut(_translate("batch_iv_analysis", "Ctrl+Backspace"))
self.actionFsadf.setText(_translate("batch_iv_analysis", "fsadf"))
self.actionSet_Bounds.setText(_translate("batch_iv_analysis", "Set Bounds"))
self.actionWatch.setText(_translate("batch_iv_analysis", "Watch"))
self.actionWatch.setShortcut(_translate("batch_iv_analysis", "Ctrl+W"))
self.actionEnable_Watching.setText(_translate("batch_iv_analysis", "Enable Watching"))
self.actionEnable_Watching.setShortcut(_translate("batch_iv_analysis", "Ctrl+E"))
self.actionWatch_2.setText(_translate("batch_iv_analysis", "Watch"))
self.actionWatch_2.setShortcut(_translate("batch_iv_analysis", "Ctrl+W"))
self.actionFit_Constraints.setText(_translate("batch_iv_analysis", "Fit Constraints"))
| greysAcademicCode/batch-iv-analysis | batch_iv_analysis/batch_iv_analysis_UI.py | Python | mit | 22,939 |
from vrtManager import util
from vrtManager.IPy import IP
from vrtManager.connection import wvmConnect
def network_size(net, dhcp=None):
"""
Func return gateway, mask and dhcp pool.
"""
mask = IP(net).strNetmask()
addr = IP(net)
gateway = addr[1].strNormal()
dhcp_pool = [addr[2].strNormal(), addr[addr.len() - 2].strNormal()]
if dhcp:
return gateway, mask, dhcp_pool
else:
return gateway, mask, None
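# Illustrative example (not part of the original module), assuming IPy semantics:
#   network_size('192.168.122.0/24', dhcp=True)
#   returns ('192.168.122.1', '255.255.255.0', ['192.168.122.2', '192.168.122.254'])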
class wvmNetworks(wvmConnect):
def get_networks_info(self):
get_networks = self.get_networks()
networks = []
for network in get_networks:
net = self.get_network(network)
net_status = net.isActive()
net_bridge = net.bridgeName()
net_forwd = util.get_xml_path(net.XMLDesc(0), "/network/forward/@mode")
networks.append({'name': network, 'status': net_status,
'device': net_bridge, 'forward': net_forwd})
return networks
def define_network(self, xml):
self.wvm.networkDefineXML(xml)
def create_network(self, name, forward, gateway, mask, dhcp, bridge, openvswitch, fixed=False):
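        # Build the libvirt <network> XML piece by piece: an optional <forward>
        # mode, a <bridge> element (STP for nat/route/isolated networks, an
        # explicit bridge name for bridged mode), an optional Open vSwitch
        # virtualport, and, for non-bridged networks, an <ip> block with an
        # optional DHCP range and fixed host entries.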
xml = """
<network>
<name>%s</name>""" % name
if forward in ['nat', 'route', 'bridge']:
xml += """<forward mode='%s'/>""" % forward
xml += """<bridge """
if forward in ['nat', 'route', 'none']:
xml += """stp='on' delay='0'"""
if forward == 'bridge':
xml += """name='%s'""" % bridge
xml += """/>"""
if openvswitch is True:
xml += """<virtualport type='openvswitch'/>"""
if forward != 'bridge':
xml += """
<ip address='%s' netmask='%s'>""" % (gateway, mask)
if dhcp:
xml += """<dhcp>
<range start='%s' end='%s' />""" % (dhcp[0], dhcp[1])
if fixed:
                    first_oct = int(dhcp[0].strip().split('.')[3])
                    last_oct = int(dhcp[1].strip().split('.')[3])
                    for ip in range(first_oct, last_oct + 1):
xml += """<host mac='%s' ip='%s.%s' />""" % (util.randomMAC(), gateway[:-2], ip)
xml += """</dhcp>"""
xml += """</ip>"""
xml += """</network>"""
self.define_network(xml)
net = self.get_network(name)
net.create()
net.setAutostart(1)
class wvmNetwork(wvmConnect):
def __init__(self, host, login, passwd, conn, net):
wvmConnect.__init__(self, host, login, passwd, conn)
self.net = self.get_network(net)
def get_name(self):
return self.net.name()
def _XMLDesc(self, flags):
return self.net.XMLDesc(flags)
def get_autostart(self):
return self.net.autostart()
def set_autostart(self, value):
self.net.setAutostart(value)
def is_active(self):
return self.net.isActive()
def get_uuid(self):
return self.net.UUIDString()
def get_bridge_device(self):
try:
return self.net.bridgeName()
except:
return None
def start(self):
self.net.create()
def stop(self):
self.net.destroy()
def delete(self):
self.net.undefine()
def get_ipv4_network(self):
xml = self._XMLDesc(0)
if util.get_xml_path(xml, "/network/ip") is None:
return None
addrStr = util.get_xml_path(xml, "/network/ip/@address")
netmaskStr = util.get_xml_path(xml, "/network/ip/@netmask")
prefix = util.get_xml_path(xml, "/network/ip/@prefix")
if prefix:
prefix = int(prefix)
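            # Convert a CIDR prefix (e.g. 24) into a dotted netmask by building
            # the 32-bit mask as a binary string and letting IPy render it.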
binstr = ((prefix * "1") + ((32 - prefix) * "0"))
netmaskStr = str(IP(int(binstr, base=2)))
if netmaskStr:
netmask = IP(netmaskStr)
gateway = IP(addrStr)
network = IP(gateway.int() & netmask.int())
ret = IP(str(network) + "/" + netmaskStr)
else:
ret = IP(str(addrStr))
return ret
def get_ipv4_forward(self):
xml = self._XMLDesc(0)
fw = util.get_xml_path(xml, "/network/forward/@mode")
forwardDev = util.get_xml_path(xml, "/network/forward/@dev")
return [fw, forwardDev]
def get_ipv4_dhcp_range(self):
xml = self._XMLDesc(0)
dhcpstart = util.get_xml_path(xml, "/network/ip/dhcp/range[1]/@start")
dhcpend = util.get_xml_path(xml, "/network/ip/dhcp/range[1]/@end")
if not dhcpstart or not dhcpend:
return None
return [IP(dhcpstart), IP(dhcpend)]
def get_ipv4_dhcp_range_start(self):
dhcp = self.get_ipv4_dhcp_range()
if not dhcp:
return None
return dhcp[0]
def get_ipv4_dhcp_range_end(self):
dhcp = self.get_ipv4_dhcp_range()
if not dhcp:
return None
return dhcp[1]
def can_pxe(self):
xml = self.get_xml()
forward = self.get_ipv4_forward()[0]
if forward and forward != "nat":
return True
return bool(util.get_xml_path(xml, "/network/ip/dhcp/bootp/@file"))
def get_mac_ipaddr(self):
def network(ctx):
result = []
for net in ctx.xpathEval('/network/ip/dhcp/host'):
host = net.xpathEval('@ip')[0].content
mac = net.xpathEval('@mac')[0].content
result.append({'host': host, 'mac': mac})
return result
return util.get_xml_path(self._XMLDesc(0), func=network)
| harry-ops/opencloud | webvirtcloud/vrtManager/network.py | Python | gpl-2.0 | 5,632 |
# -*- coding: utf-8 -*-
"""
Point.py - Extension of QPointF which adds a few missing methods.
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from .Qt import QtCore
import numpy as np
def clip(x, mn, mx):
if x > mx:
return mx
if x < mn:
return mn
return x
class Point(QtCore.QPointF):
"""Extension of QPointF which adds a few missing methods."""
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], QtCore.QSizeF):
QtCore.QPointF.__init__(self, float(args[0].width()), float(args[0].height()))
return
elif isinstance(args[0], float) or isinstance(args[0], int):
QtCore.QPointF.__init__(self, float(args[0]), float(args[0]))
return
elif hasattr(args[0], '__getitem__'):
QtCore.QPointF.__init__(self, float(args[0][0]), float(args[0][1]))
return
elif len(args) == 2:
QtCore.QPointF.__init__(self, args[0], args[1])
return
QtCore.QPointF.__init__(self, *args)
def __len__(self):
return 2
def __reduce__(self):
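        # Pickle/copy support: rebuild the Point from its (x, y) pair.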
return (Point, (self.x(), self.y()))
def __getitem__(self, i):
if i == 0:
return self.x()
elif i == 1:
return self.y()
else:
raise IndexError("Point has no index %s" % str(i))
def __setitem__(self, i, x):
if i == 0:
return self.setX(x)
elif i == 1:
return self.setY(x)
else:
raise IndexError("Point has no index %s" % str(i))
def __radd__(self, a):
return self._math_('__radd__', a)
def __add__(self, a):
return self._math_('__add__', a)
def __rsub__(self, a):
return self._math_('__rsub__', a)
def __sub__(self, a):
return self._math_('__sub__', a)
def __rmul__(self, a):
return self._math_('__rmul__', a)
def __mul__(self, a):
return self._math_('__mul__', a)
def __rdiv__(self, a):
return self._math_('__rdiv__', a)
def __div__(self, a):
return self._math_('__div__', a)
def __rpow__(self, a):
return self._math_('__rpow__', a)
def __pow__(self, a):
return self._math_('__pow__', a)
def _math_(self, op, x):
#print "point math:", op
#try:
#fn = getattr(QtCore.QPointF, op)
#pt = fn(self, x)
#print fn, pt, self, x
#return Point(pt)
#except AttributeError:
x = Point(x)
return Point(getattr(self[0], op)(x[0]), getattr(self[1], op)(x[1]))
def length(self):
"""Returns the vector length of this Point."""
return (self[0]**2 + self[1]**2) ** 0.5
def norm(self):
"""Returns a vector in the same direction with unit length."""
return self / self.length()
def angle(self, a):
"""Returns the angle in degrees between this vector and the vector a."""
n1 = self.length()
n2 = a.length()
if n1 == 0. or n2 == 0.:
return None
## Probably this should be done with arctan2 instead..
ang = np.arccos(clip(self.dot(a) / (n1 * n2), -1.0, 1.0)) ### in radians
c = self.cross(a)
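        # The sign of the 2D cross product gives the returned angle its direction.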
if c > 0:
ang *= -1.
return ang * 180. / np.pi
def dot(self, a):
"""Returns the dot product of a and this Point."""
a = Point(a)
return self[0]*a[0] + self[1]*a[1]
def cross(self, a):
a = Point(a)
return self[0]*a[1] - self[1]*a[0]
def proj(self, b):
"""Return the projection of this vector onto the vector b"""
b1 = b / b.length()
return self.dot(b1) * b1
def __repr__(self):
return "Point(%f, %f)" % (self[0], self[1])
def min(self):
return min(self[0], self[1])
def max(self):
return max(self[0], self[1])
def copy(self):
return Point(self)
def toQPoint(self):
return QtCore.QPoint(*self) | ibressler/pyqtgraph | pyqtgraph/Point.py | Python | mit | 4,271 |
import base64
import collections
import json
from io import BytesIO
from unittest import mock
import jira
from django.conf import settings
from django.core import mail
from django.test import override_settings
from django.urls import reverse
from django.utils import timezone
from rest_framework.test import APITransactionTestCase
from waldur_jira.backend import AttachmentSynchronizer, CommentSynchronizer
from waldur_mastermind.support.backend.atlassian import ServiceDeskBackend
from waldur_mastermind.support.tests import factories
from waldur_mastermind.support.tests.base import load_resource
from waldur_mastermind.support.tests.utils import override_plugin_settings
@mock.patch('waldur_mastermind.support.serializers.ServiceDeskBackend')
@override_plugin_settings(
ENABLED=True,
ACTIVE_BACKEND='waldur_mastermind.support.backend.atlassian:ServiceDeskBackend',
)
@override_settings(task_always_eager=True)
class TestJiraWebHooks(APITransactionTestCase):
def setUp(self):
self.url = reverse('web-hook-receiver')
backend_id = 'SNT-101'
self.issue = factories.IssueFactory(backend_id=backend_id)
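        # Load each webhook fixture, point it at this issue's backend id, and
        # expose it on the test case as self.request_data_<name>.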
def create_request(test, name, path):
jira_request = json.loads(load_resource(path))
jira_request['issue']['key'] = backend_id
setattr(test, 'request_data_' + name, jira_request)
jira_requests = (
('issue_updated', 'jira_issue_updated_query.json'),
('comment_create', 'jira_comment_create_query.json'),
('comment_update', 'jira_comment_update_query.json'),
('comment_delete', 'jira_comment_delete_query.json'),
)
[create_request(self, *r) for r in jira_requests]
def test_issue_update(self, mock_jira):
self.request_data_issue_updated['issue_event_type_name'] = 'issue_updated'
self.client.post(self.url, self.request_data_issue_updated)
self.assertTrue(self._call_update_issue(mock_jira))
def test_generic_update(self, mock_jira):
self.request_data_issue_updated['issue_event_type_name'] = 'issue_generic'
self.client.post(self.url, self.request_data_issue_updated)
self.assertTrue(self._call_update_issue(mock_jira))
def test_comment_create(self, mock_jira):
self.client.post(self.url, self.request_data_comment_create)
self.assertTrue(self._call_create_comment(mock_jira))
def test_comment_update(self, mock_jira):
comment = factories.CommentFactory(issue=self.issue)
self.request_data_comment_update['comment']['id'] = comment.backend_id
self.client.post(self.url, self.request_data_comment_update)
self.assertTrue(self._call_update_comment(mock_jira))
def test_comment_delete(self, mock_jira):
comment = factories.CommentFactory(issue=self.issue)
self.request_data_comment_delete['comment']['id'] = comment.backend_id
self.client.post(self.url, self.request_data_comment_delete)
self.assertTrue(self._call_delete_comment(mock_jira))
def test_add_attachment(self, mock_jira):
self.request_data_issue_updated['issue_event_type_name'] = 'issue_updated'
self.client.post(self.url, self.request_data_issue_updated)
self.assertTrue(self._call_update_attachment(mock_jira))
def test_delete_attachment(self, mock_jira):
self.request_data_issue_updated['issue_event_type_name'] = 'issue_updated'
self.client.post(self.url, self.request_data_issue_updated)
self.assertTrue(self._call_update_attachment(mock_jira))
def _call_update_attachment(self, mock_jira):
return filter(
lambda x: x[0] == '().update_attachment_from_jira', mock_jira.mock_calls
)
def _call_create_comment(self, mock_jira):
return filter(
lambda x: x[0] == '().create_comment_from_jira', mock_jira.mock_calls
)
def _call_update_comment(self, mock_jira):
return filter(
lambda x: x[0] == '().update_comment_from_jira', mock_jira.mock_calls
)
def _call_delete_comment(self, mock_jira):
return filter(
lambda x: x[0] == '().delete_comment_from_jira', mock_jira.mock_calls
)
def _call_update_issue(self, mock_jira):
return filter(
lambda x: x[0] == '().update_issue_from_jira', mock_jira.mock_calls
)
MockSupportUser = collections.namedtuple('MockSupportUser', ['key'])
MockResolution = collections.namedtuple('MockResolution', ['name'])
@override_settings(task_always_eager=True)
@override_plugin_settings(ENABLED=True)
class TestUpdateIssueFromJira(APITransactionTestCase):
def setUp(self):
self.issue = factories.IssueFactory()
backend_issue_raw = json.loads(load_resource('jira_issue_raw.json'))
self.backend_issue = jira.resources.Issue(
{'server': 'example.com'}, None, backend_issue_raw
)
self.impact_field_id = 'customfield_10116'
self.request_feedback = 'customfield_10216'
self.first_response_sla = timezone.now()
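        # get_field_id_by_name is mocked below to map these JIRA field names to
        # the fake custom-field ids defined above.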
def side_effect(arg):
if arg == 'Impact':
return self.impact_field_id
elif arg == 'Request feedback':
return self.request_feedback
self.backend = ServiceDeskBackend()
self.backend.get_backend_issue = mock.Mock(return_value=self.backend_issue)
self.backend._get_first_sla_field = mock.Mock(
return_value=self.first_response_sla
)
self.backend.get_field_id_by_name = mock.Mock(side_effect=side_effect)
def update_issue_from_jira(self):
self.backend.update_issue_from_jira(self.issue)
self.issue.refresh_from_db()
def test_update_issue_impact_field(self):
impact_field_value = 'Custom Value'
setattr(self.backend_issue.fields, self.impact_field_id, impact_field_value)
self.update_issue_from_jira()
self.assertEqual(self.issue.impact, impact_field_value)
def test_update_issue_assignee(self):
assignee = factories.SupportUserFactory(backend_id='support_user_backend_id')
backend_assignee_user = MockSupportUser(key=assignee.backend_id)
self.backend_issue.fields.assignee = backend_assignee_user
self.update_issue_from_jira()
self.assertEqual(self.issue.assignee.id, assignee.id)
def test_update_issue_reporter(self):
reporter = factories.SupportUserFactory(backend_id='support_user_backend_id')
backend_reporter_user = MockSupportUser(key=reporter.backend_id)
self.backend_issue.fields.reporter = backend_reporter_user
self.update_issue_from_jira()
self.assertEqual(self.issue.reporter.id, reporter.id)
def test_update_issue_summary(self):
expected_summary = 'Happy New Year'
self.backend_issue.fields.summary = expected_summary
self.update_issue_from_jira()
self.assertEqual(self.issue.summary, expected_summary)
def test_update_issue_link(self):
permalink = self.backend_issue.permalink()
self.update_issue_from_jira()
self.assertEqual(self.issue.link, permalink)
def test_update_first_response_sla(self):
self.update_issue_from_jira()
self.assertEqual(self.issue.first_response_sla, self.first_response_sla)
def test_update_issue_resolution(self):
expected_resolution = MockResolution(name='Done')
self.backend_issue.fields.resolution = expected_resolution
self.update_issue_from_jira()
self.assertEqual(self.issue.resolution, expected_resolution.name)
def test_resolution_is_empty_if_it_is_none(self):
expected_resolution = None
self.backend_issue.fields.resolution = expected_resolution
self.update_issue_from_jira()
self.assertEqual(self.issue.resolution, '')
def test_update_issue_status(self):
self.update_issue_from_jira()
self.assertEqual(self.issue.status, self.backend_issue.fields.status.name)
def test_web_hook_does_not_trigger_issue_update_email_if_the_issue_was_not_updated(
self,
):
self.update_issue_from_jira()
self.update_issue_from_jira()
self.assertEqual(len(mail.outbox), 0)
def test_web_hook_does_trigger_issue_update_email_if_the_issue_was_updated(self):
self.update_issue_from_jira()
self.backend_issue.fields.summary = 'New summary'
self.update_issue_from_jira()
self.assertEqual(len(mail.outbox), 1)
def test_issue_update_callback_creates_deletes_two_comments(self):
factories.CommentFactory(issue=self.issue)
factories.CommentFactory(issue=self.issue)
synchronizer = CommentSynchronizer(self.backend, self.issue, self.backend_issue)
synchronizer.perform_update()
self.assertEqual(self.issue.comments.count(), 0)
def test_update_issue_feedback_request_field(self):
self.update_issue_from_jira()
self.assertEqual(self.issue.feedback_request, True)
setattr(self.backend_issue.fields, self.request_feedback, None)
self.update_issue_from_jira()
self.issue.refresh_from_db()
self.assertEqual(self.issue.feedback_request, False)
class TestUpdateCommentFromJira(APITransactionTestCase):
def setUp(self):
jira_backend = 'waldur_mastermind.support.backend.atlassian:ServiceDeskBackend'
settings.WALDUR_SUPPORT['ENABLED'] = True
settings.WALDUR_SUPPORT['ACTIVE_BACKEND'] = jira_backend
self.comment = factories.CommentFactory()
backend_comment_raw = json.loads(load_resource('jira_comment_raw.json'))
self.backend_comment = jira.resources.Comment(
{'server': 'example.com'}, None, backend_comment_raw
)
self.backend = ServiceDeskBackend()
self.internal = {'value': {'internal': False}}
path = mock.patch.object(
ServiceDeskBackend,
'_get_property',
new=mock.Mock(return_value=self.internal),
)
path.start()
path = mock.patch.object(
ServiceDeskBackend,
'get_backend_comment',
new=mock.Mock(return_value=self.backend_comment),
)
path.start()
def tearDown(self):
mock.patch.stopall()
def test_update_comment_description(self):
self.backend.update_comment_from_jira(self.comment)
self.comment.refresh_from_db()
self.assertEqual(
self.comment.description,
self.comment.clean_message(self.backend_comment.body),
)
def test_update_comment_is_public(self):
self.internal['value']['internal'] = True
self.backend.update_comment_from_jira(self.comment)
self.internal['value']['internal'] = False
self.comment.refresh_from_db()
self.assertEqual(self.comment.is_public, False)
def test_webhook_cleans_up_user_info_and_does_not_update_comment_if_it_is_not_changed(
self,
):
expected_comment_body = self.comment.description
jira_comment_body = '[Luke Skywalker 19BBY-TA-T16]: %s' % expected_comment_body
self.backend_comment.body = jira_comment_body
self.backend.update_comment_from_jira(self.comment)
self.comment.refresh_from_db()
self.assertEqual(self.comment.description, expected_comment_body)
class TestUpdateAttachmentFromJira(APITransactionTestCase):
def setUp(self):
jira_backend = 'waldur_mastermind.support.backend.atlassian:ServiceDeskBackend'
settings.WALDUR_SUPPORT['ENABLED'] = True
settings.WALDUR_SUPPORT['ACTIVE_BACKEND'] = jira_backend
self.issue = factories.IssueFactory()
backend_issue_raw = json.loads(load_resource('jira_issue_raw.json'))
self.backend_issue = jira.resources.Issue(
{'server': 'example.com'}, None, backend_issue_raw
)
backend_attachment_raw = json.loads(load_resource('jira_attachment_raw.json'))
self.backend_attachment = jira.resources.Attachment(
{'server': 'example.com'}, None, backend_attachment_raw
)
self.backend_issue.fields.attachment.append(self.backend_attachment)
self.backend = ServiceDeskBackend()
path = mock.patch.object(
ServiceDeskBackend,
'get_backend_issue',
new=mock.Mock(return_value=self.backend_issue),
)
path.start()
path = mock.patch.object(
ServiceDeskBackend,
'get_backend_attachment',
new=mock.Mock(return_value=self.backend_attachment),
)
path.start()
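        # The payload below decodes to a minimal 1x1 GIF, standing in for the
        # downloaded attachment file.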
file_content = BytesIO(
base64.b64decode('R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
)
path = mock.patch.object(
AttachmentSynchronizer,
'_download_file',
new=mock.Mock(return_value=file_content),
)
path.start()
def tearDown(self):
mock.patch.stopall()
def test_add_attachment(self):
self.backend.update_attachment_from_jira(self.issue)
self.assertEqual(self.issue.attachments.count(), 1)
def test_delete_attachment(self):
self.backend.update_attachment_from_jira(self.issue)
self.assertEqual(self.issue.attachments.count(), 1)
self.backend_issue.fields.attachment = []
self.backend.update_attachment_from_jira(self.issue)
self.assertEqual(self.issue.attachments.count(), 0)
| opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/support/tests/test_jira_web_hooks.py | Python | mit | 13,553 |
#!/usr/bin/env python
import server
import record_data
from optparse import OptionParser
if __name__ == "__main__":
    usage = "Option details are documented in server.py and record_data.py, or run this script with -h for help."
parser = OptionParser(usage=usage)
parser = server.add_parser_options(parser)
parser = record_data.add_parser_options(parser)
opts, args = parser.parse_args()
sensor = record_data.sensor_args[opts.s]
sd = record_data.SensorDatabase(opts.pin, sensor, opts.t, opts.p)
sd.daemon = True
sd.start()
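    # The sensor logger runs as a daemon thread so it exits together with the
    # Flask server started below.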
print("server running")
server.app.run(host=opts.host, port=opts.port)
| griffincalme/rpi_climate_server | run_app.py | Python | gpl-2.0 | 636 |
import os
# Django settings for docusite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
STATICFILES_DIRS = (
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'docusite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'docusite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.abspath(__file__+"/../../"), 'Templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'documanager',
'south',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#HEROKU SETTINGS
import socket
if socket.gethostname() != 'sebastian-vm':
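    # Any host other than the local dev VM is treated as Heroku: take the
    # database configuration from the DATABASE_URL environment variable and
    # serve collected static files from the app itself.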
import dj_database_url
dbconfig = dj_database_url.config()
if dbconfig:
DATABASES['default'] = dbconfig
#Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__+'/../'))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'Static'),
)
SECRET_KEY = os.environ['SECRET_KEY']
else:
# Local Settings
try:
from dev_settings import *
except ImportError:
pass
| elbasti/prettymarkdown | docusite/settings.py | Python | mit | 5,960 |
#!/usr/bin/env python
# coding: utf-8
# # rede_gephi_com_ipca_csv
# In[6]:
ano_eleicao = '2014'
rede =f'rede{ano_eleicao}'
csv_dir = f'/home/neilor/{rede}'
# In[7]:
dbschema = f'rede{ano_eleicao}'
table_edges = f"{dbschema}.gephi_edges_com_ipca_2018"
table_nodes = f"{dbschema}.gephi_nodes_com_ipca_2018"
table_receitas = f"{dbschema}.receitas_com_ipca_2018"
table_candidaturas = f"{dbschema}.candidaturas_com_ipca_2018"
table_municipios = f"{dbschema}.municipios_{ano_eleicao}"
# In[8]:
import sys
sys.path.append('../')
import mod_tse as mtse
# In[9]:
import os
home = os.environ["HOME"]
local_dir = f'{home}/temp'
# In[10]:
mtse.execute_query(f"update {table_municipios} set rede= 'N';")
# ## BRAZIL-WIDE NETWORK
# In[11]:
def salva_rede_brasil(csv_dir,rede):
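    # Export the Brazil-wide edges, nodes, candidacies and revenue tables to
    # CSV files under {csv_dir}/{rede}_Brasil using Postgres COPY.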
rede_dir_BR = f'{csv_dir}/{rede}_Brasil'
os.makedirs(rede_dir_BR)
edges_csv_query=f"""copy
(
select * from {table_edges}
)
TO '{rede_dir_BR}/{rede}_Brasil_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_csv_query)
nodes_csv_query=f"""copy
(
select * from {table_nodes}
)
TO '{rede_dir_BR}/{rede}_Brasil_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_csv_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas}
)
TO '{rede_dir_BR}/{rede}_Brasil_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas}
)
TO '{rede_dir_BR}/{rede}_Brasil_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# ## NETWORKS BY STATE
# In[12]:
def salva_rede_csv_uf(csv_dir,rede,sg_uf):
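    # Export the same four tables, filtered to a single state (sg_uf), to
    # CSV files under {csv_dir}/{rede}_{sg_uf}.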
rede_dir_uf = f'{csv_dir}/{rede}_{sg_uf}'
os.makedirs(rede_dir_uf)
edges_query=f"""copy
(
select * from {table_edges} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_edges.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(edges_query)
nodes_query=f"""copy
(
select * from {table_nodes} where ue ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_nodes.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(nodes_query)
candidaturas_csv_query=f"""copy
(
select * from {table_candidaturas} where sg_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_candidaturas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(candidaturas_csv_query)
receitas_csv_query=f"""copy
(
select * from {table_receitas} where receptor_uf ='{sg_uf}'
)
TO '{rede_dir_uf}/{rede}_{sg_uf}_receitas.csv' DELIMITER ';' CSV HEADER;
"""
mtse.execute_query(receitas_csv_query)
# In[13]:
import pandas as pd
import shutil
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.makedirs(csv_dir)
salva_rede_brasil(csv_dir,rede)
df_uf = mtse.pandas_query(f'select sg_uf from {table_candidaturas} group by sg_uf order by sg_uf')
for index, row in df_uf.iterrows():
sg_uf = row['sg_uf']
salva_rede_csv_uf(csv_dir,rede,sg_uf)
# In[14]:
import datetime
print(datetime.datetime.now())
# In[ ]:
| elivre/arfe | e2014/SCRIPTS/055-rede2014_rede_gephi_com_ipca_csv.py | Python | mit | 3,896 |
# -*- coding: utf-8 -*-
#
# Tunir documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 15 22:45:09 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tunir'
copyright = u'2015, Kushal Das'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tunirdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Tunir.tex', u'Tunir Documentation',
u'Kushal Das', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tunir', u'Tunir, the simple CI with a big heart.',
[u'Kushal Das'], 8)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tunir', u'Tunir Documentation',
u'Kushal Das', 'Tunir', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| dustymabe/tunir | docs/conf.py | Python | gpl-2.0 | 8,154 |
import numpy as np
def detrend_normalize_mat(voxels):
    # Scale each voxel time course (one row of `voxels`) by its standard
    # deviation; the result is transposed to time x voxels.
    tcn = np.zeros((voxels.shape[1], voxels.shape[0]))
    for i in range(voxels.shape[0]):
        tcn[:, i] = voxels[i] / np.std(voxels[i])
    return tcn
def detrend_normalize_nii(voxels):
    # Z-score each voxel time course (subtract its mean, divide by its
    # standard deviation); the result is transposed to time x voxels.
    tcn = np.zeros((voxels.shape[1], voxels.shape[0]))
    for i in range(voxels.shape[0]):
        tcn[:, i] = (voxels[i] - np.mean(voxels[i])) / np.std(voxels[i])
return tcn | mfalkiewicz/pyTotalActivation | TotalActivation/preprocess/detrend.py | Python | mit | 429 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-30 15:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exams', '0083_add_default_theme'),
]
operations = [
migrations.AddField(
model_name='session',
name='parent_session',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='exams.Session'),
),
migrations.AddField(
model_name='session',
name='secret_key',
field=models.CharField(default='', max_length=10),
preserve_default=False,
),
]
| Zahajamaan/Fudulbank | exams/migrations/0084_shared_sessions.py | Python | agpl-3.0 | 792 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import relay
import Cdf
import myplot
def main():
results = relay.ReadResults()
speeds = relay.GetSpeeds(results)
# plot the distribution of actual speeds
cdf = Cdf.MakeCdfFromList(speeds, 'speeds')
myplot.Cdf(cdf)
myplot.Save(root='relay_cdf',
title='CDF of running speed',
xlabel='speed (mph)',
ylabel='probability')
if __name__ == '__main__':
main()
| qrsforever/workspace | python/learn/thinkstats/relay_cdf.py | Python | mit | 675 |
from Stack import Stack
def infixToPostfix(infixexpr):
prec = {}
prec["^"] = 4
prec["*"] = 3
prec["/"] = 3
prec["+"] = 2
prec["-"] = 2
prec["("] = 1
opStack = Stack()
postfixList = []
tokenList = infixexpr.split()
for token in tokenList:
if token in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" or token in "0123456789":
postfixList.append(token)
elif token == '(':
opStack.push(token)
elif token == ')':
topToken = opStack.pop()
while topToken != '(':
postfixList.append(topToken)
topToken = opStack.pop()
else:
while (not opStack.isEmpty()) and \
(prec[opStack.peek()] >= prec[token]):
postfixList.append(opStack.pop())
opStack.push(token)
while not opStack.isEmpty():
postfixList.append(opStack.pop())
return " ".join(postfixList)
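# Illustrative trace of the shunting-yard logic above: for "A * B + C * D",
# operands go straight to the output; an incoming operator first pops any
# stacked operators of greater or equal precedence, then is pushed itself,
# so the result is "A B * C D * +".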
print(infixToPostfix("A * B + C * D"))
print(infixToPostfix("( A + B ) * C - ( D - E ) * ( F + G )"))
print(infixToPostfix("( A + B ) * ( C + D )"))
print(infixToPostfix("( A + B ) * C"))
print(infixToPostfix("A + B * C"))
print(infixToPostfix("5 * 3 ^ ( 4 - 2 )"))
| sookoor/PythonInterviewPrep | infixToPostfix.py | Python | mit | 1,217 |
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of openbandparams.
#
# openbandparams is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# openbandparams is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with openbandparams. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local openbandparams version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from openbandparams import *
import matplotlib.pyplot as plt
import numpy
T = 300
T_lattice = 300
# initialize the plot
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Lattice Parameter at %g K ($\AA$)' % T_lattice)
plt.ylabel('Bandgap at %g K (eV)' % T)
# Define colors
red = '#FE0303'
green = '#04A004'
blue = '#0404FF'
red_green = '#8D8D04'
red_blue = '#8D048D'
green_blue = '#04AEAE'
# list the binaries
phosphide_binaries = [AlP, GaP, InP] # red
arsenide_binaries = [AlAs, GaAs, InAs] # green
antimonide_binaries = [AlSb, GaSb, InSb] # blue
# list the ternaries
phosphide_ternaries = [AlGaP, AlInP, GaInP] # red
arsenide_ternaries = [AlGaAs, AlInAs, GaInAs] # green
antimonide_ternaries = [AlGaSb, AlInSb, GaInSb] # blue
phosphide_arsenide_ternaries = [AlPAs, GaPAs, InPAs] # red + green
phosphide_antimonide_ternaries = [AlPSb, GaPSb, InPSb] # red + blue
arsenide_antimonide_ternaries = [AlAsSb, GaAsSb, InAsSb] # green + blue
# plot the ternaries
fractions = numpy.linspace(0, 1, 1000)
for ternaries, color in [(phosphide_ternaries, red),
(arsenide_ternaries, green),
(antimonide_ternaries, blue),
(phosphide_arsenide_ternaries, red_green),
(phosphide_antimonide_ternaries, red_blue),
(arsenide_antimonide_ternaries, green_blue)]:
for ternary in ternaries:
ax.plot([ternary(x=f).a(T=T_lattice) for f in fractions],
[ternary(x=f).Eg(T=T) for f in fractions],
color=color,
linewidth=1.2)
# plot and label the binaries
x = []
y = []
label = []
for binaries, color in [(phosphide_binaries, red),
(arsenide_binaries, green),
(antimonide_binaries, blue)]:
ax.plot([b.a(T=T_lattice) for b in binaries],
[b.Eg(T=T)for b in binaries],
color=color,
linestyle=' ',
marker='o',
markersize=4,
markeredgecolor=color)
x.extend([b.a(T=T_lattice) for b in binaries])
y.extend([b.Eg(T=T) for b in binaries])
label.extend([b.name for b in binaries])
for x, y, label in zip(x, y, label):
ax.annotate(label, xy=(x, y), xytext=(-5, 5), ha='right', va='bottom',
bbox=dict(linewidth=0, fc='white', alpha=0.9),
textcoords='offset points')
plt.xlim(5.35, 6.5)
plt.ylim(0, 2.7)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
output_filename = sys.argv[1]
plt.savefig(output_filename)
else:
plt.show() | scott-maddox/openbandparams | src/openbandparams/examples/Plot_Bandgap_vs_Lattice_Constant.py | Python | agpl-3.0 | 3,636 |
__author__ = 'Kamal.S'
import datetime
from haystack import indexes
from .models import Sample
class SampleIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.EdgeNgramField(document=True, use_template=True)
company = indexes.EdgeNgramField(model_attr='company')
name=indexes.EdgeNgramField(model_attr='name')
email=indexes.EdgeNgramField(model_attr='email')
def get_model(self):
return Sample
def index_queryset(self, using=None):
return self.get_model().objects.all()
| srkama/haysolr | dataview/testapi/search_indexes.py | Python | apache-2.0 | 527 |
## Copyright 2002-2003 Andrew Loewenstern, All Rights Reserved
# see LICENSE.txt for license information
import unittest
from airhook import *
from random import uniform as rand
from cStringIO import StringIO
if __name__ =="__main__":
tests = unittest.defaultTestLoader.loadTestsFromNames(['test_airhook'])
result = unittest.TextTestRunner().run(tests)
class Echo(protocol.Protocol):
def dataReceived(self, data):
self.transport.write(data)
class Noisy(protocol.Protocol):
def dataReceived(self, data):
print `data`
class Receiver(protocol.Protocol):
def __init__(self):
self.q = []
def dataReceived(self, data):
self.q.append(data)
class StreamReceiver(protocol.Protocol):
def __init__(self):
self.buf = ""
def dataReceived(self, data):
self.buf += data
def makeEcho(port):
f = protocol.Factory(); f.protocol = Echo
return listenAirhookStream(port, f)
def makeNoisy(port):
f = protocol.Factory(); f.protocol = Noisy
return listenAirhookStream(port, f)
def makeReceiver(port):
f = protocol.Factory(); f.protocol = Receiver
return listenAirhookStream(port, f)
def makeStreamReceiver(port):
f = protocol.Factory(); f.protocol = StreamReceiver
return listenAirhookStream(port, f)
class DummyTransport:
def __init__(self):
self.s = StringIO()
def write(self, data, addr):
self.s.write(data)
def seek(self, num):
return self.s.seek(num)
def read(self):
return self.s.read()
def test_createStartPacket():
flags = 0 | FLAG_AIRHOOK | FLAG_SESSION
packet = chr(flags) + "\xff" + "\x00\x00" + pack("!L", long(rand(0, 2**32)))
return packet
def test_createReply(session, observed, obseq, seq):
flags = 0 | FLAG_AIRHOOK | FLAG_SESSION | FLAG_OBSERVED
packet = chr(flags) + pack("!H", seq)[1] + pack("!H", obseq + 1) + pack("!L", session) + pack("!L", observed)
return packet
def pscope(msg, noisy=0):
# packet scope
str = ""
p = AirhookPacket(msg)
str += "oseq: %s seq: %s " % (p.oseq, p.seq)
if noisy:
str += "packet: %s \n" % (`p.datagram`)
flags = p.flags
str += "flags: "
if flags & FLAG_SESSION:
str += "FLAG_SESSION "
if flags & FLAG_OBSERVED:
str += "FLAG_OBSERVED "
if flags & FLAG_MISSED:
str += "FLAG_MISSED "
if flags & FLAG_NEXT:
str += "FLAG_NEXT "
str += "\n"
if p.observed != None:
str += "OBSERVED: %s\n" % p.observed
if p.session != None:
str += "SESSION: %s\n" % p.session
if p.next != None:
str += "NEXT: %s\n" % p.next
if p.missed:
if noisy:
str += "MISSED: " + `p.missed`
else:
str += "MISSED: " + `len(p.missed)`
str += "\n"
if p.msgs:
if noisy:
str += "MSGS: " + `p.msgs` + "\n"
else:
str += "MSGS: <%s> " % len(p.msgs)
str += "\n"
return str
# testing function
def swap(a, dir="", noisy=0):
msg = ""
while not msg:
a.transport.seek(0)
msg= a.transport.read()
a.transport = DummyTransport()
if not msg:
a.sendNext()
if noisy:
print 6*dir + " " + pscope(msg)
return msg
def runTillEmpty(a, b, prob=1.0, noisy=0):
msga = ''
msgb = ''
while a.omsgq or b.omsgq or a.weMissed or b.weMissed or ord(msga[0]) & (FLAG_NEXT | FLAG_MISSED) or ord(msgb[0]) & (FLAG_NEXT | FLAG_MISSED):
if rand(0,1) < prob:
msga = swap(a, '>', noisy)
b.datagramReceived(msga)
else:
msga = swap(a, '>', 0)
if rand(0,1) < prob:
msgb = swap(b, '<', noisy)
a.datagramReceived(msgb)
else:
msgb = swap(b, '<', 0)
class UstrTests(unittest.TestCase):
def u(self, seq):
return ustr("%s%s" % (pack("!H", seq), 'foobar'))
def testLT(self):
self.failUnless(self.u(0) < self.u(1))
self.failUnless(self.u(1) < self.u(2))
self.failUnless(self.u(2**16 - 1) < self.u(0))
self.failUnless(self.u(2**16 - 1) < self.u(1))
self.failIf(self.u(1) < self.u(0))
self.failIf(self.u(2) < self.u(1))
self.failIf(self.u(0) < self.u(2**16 - 1))
self.failIf(self.u(1) < self.u(2**16 - 1))
def testLTE(self):
self.failUnless(self.u(0) <= self.u(1))
self.failUnless(self.u(1) <= self.u(2))
self.failUnless(self.u(2) <= self.u(2))
self.failUnless(self.u(2**16 - 1) <= self.u(0))
self.failUnless(self.u(2**16 - 1) <= self.u(1))
self.failUnless(self.u(2**16 - 1) <= self.u(2**16))
self.failIf(self.u(1) <= self.u(0))
self.failIf(self.u(2) <= self.u(1))
self.failIf(self.u(0) <= self.u(2**16 - 1))
self.failIf(self.u(1) <= self.u(2**16 - 1))
def testGT(self):
self.failUnless(self.u(1) > self.u(0))
self.failUnless(self.u(2) > self.u(1))
self.failUnless(self.u(0) > self.u(2**16 - 1))
self.failUnless(self.u(1) > self.u(2**16 - 1))
self.failIf(self.u(0) > self.u(1))
self.failIf(self.u(1) > self.u(2))
self.failIf(self.u(2**16 - 1) > self.u(0))
self.failIf(self.u(2**16 - 1) > self.u(1))
def testGTE(self):
self.failUnless(self.u(1) >= self.u(0))
self.failUnless(self.u(2) >= self.u(1))
self.failUnless(self.u(2) >= self.u(2))
self.failUnless(self.u(0) >= self.u(0))
self.failUnless(self.u(1) >= self.u(1))
self.failUnless(self.u(2**16 - 1) >= self.u(2**16 - 1))
self.failIf(self.u(0) >= self.u(1))
self.failIf(self.u(1) >= self.u(2))
self.failIf(self.u(2**16 - 1) >= self.u(0))
self.failIf(self.u(2**16 - 1) >= self.u(1))
def testEQ(self):
self.failUnless(self.u(0) == self.u(0))
self.failUnless(self.u(1) == self.u(1))
self.failUnless(self.u(2**16 - 1) == self.u(2**16-1))
self.failIf(self.u(0) == self.u(1))
self.failIf(self.u(1) == self.u(0))
self.failIf(self.u(2**16 - 1) == self.u(0))
def testNEQ(self):
self.failUnless(self.u(1) != self.u(0))
self.failUnless(self.u(2) != self.u(1))
self.failIf(self.u(2) != self.u(2))
self.failIf(self.u(0) != self.u(0))
self.failIf(self.u(1) != self.u(1))
self.failIf(self.u(2**16 - 1) != self.u(2**16 - 1))
class SimpleTest(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = AirhookConnection()
self.a.makeConnection(DummyTransport())
self.a.addr = ('127.0.0.1', 4444)
self.b = AirhookConnection()
self.b.makeConnection(DummyTransport())
self.b.addr = ('127.0.0.1', 4444)
def testReallySimple(self):
# connect to eachother and send a few packets, observe sequence incrementing
a = self.a
b = self.b
self.assertEqual(a.state, pending)
self.assertEqual(b.state, pending)
self.assertEqual(a.outSeq, 0)
self.assertEqual(b.outSeq, 0)
self.assertEqual(a.obSeq, 0)
self.assertEqual(b.obSeq, 0)
msg = swap(a, '>', self.noisy)
self.assertEqual(a.state, sent)
self.assertEqual(a.outSeq, 1)
self.assertEqual(a.obSeq, 0)
b.datagramReceived(msg)
self.assertEqual(b.inSeq, 0)
self.assertEqual(b.obSeq, 0)
msg = swap(b, '<', self.noisy)
self.assertEqual(b.state, sent)
self.assertEqual(b.outSeq, 1)
a.datagramReceived(msg)
self.assertEqual(a.state, confirmed)
self.assertEqual(a.obSeq, 0)
self.assertEqual(a.inSeq, 0)
msg = swap(a, '>', self.noisy)
self.assertEqual(a.outSeq, 2)
b.datagramReceived(msg)
self.assertEqual(b.state, confirmed)
self.assertEqual(b.obSeq, 0)
self.assertEqual(b.inSeq, 1)
msg = swap(b, '<', self.noisy)
self.assertEqual(b.outSeq, 2)
a.datagramReceived(msg)
self.assertEqual(a.outSeq, 2)
self.assertEqual(a.inSeq, 1)
self.assertEqual(a.obSeq, 1)
class BasicTests(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = AirhookConnection()
self.a.makeConnection(DummyTransport())
self.a.addr = ('127.0.0.1', 4444)
self.b = AirhookConnection()
self.b.makeConnection(DummyTransport())
self.b.addr = ('127.0.0.1', 4444)
self.a.protocol = Receiver()
self.b.protocol = Receiver()
def testSimple(self):
a = self.a
b = self.b
TESTMSG = "Howdy, Y'All!"
a.omsgq.append(TESTMSG)
a.sendNext()
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
self.assertEqual(b.inMsg, 1)
self.assertEqual(len(b.protocol.q), 1)
self.assertEqual(b.protocol.q[0], TESTMSG)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
def testLostFirst(self):
a = self.a
b = self.b
TESTMSG = "Howdy, Y'All!"
TESTMSG2 = "Yee Haw"
a.omsgq.append(TESTMSG)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
self.assertEqual(b.state, sent)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
del(msg) # dropping first message
a.omsgq.append(TESTMSG2)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
self.assertEqual(b.state, confirmed)
self.assertEqual(len(b.protocol.q), 1)
self.assertEqual(b.protocol.q[0], TESTMSG2)
self.assertEqual(b.weMissed, [(1, 0)])
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
self.assertEqual(len(b.protocol.q), 2)
b.protocol.q.sort()
l = [TESTMSG2, TESTMSG]
l.sort()
self.assertEqual(b.protocol.q,l)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
self.assertEqual(len(b.protocol.q), 2)
b.protocol.q.sort()
l = [TESTMSG2, TESTMSG]
l.sort()
self.assertEqual(b.protocol.q,l)
def testLostSecond(self):
a = self.a
b = self.b
TESTMSG = "Howdy, Y'All!"
TESTMSG2 = "Yee Haw"
a.omsgq.append(TESTMSG)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
self.assertEqual(b.state, sent)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
a.omsgq.append(TESTMSG2)
msg2 = swap(a, '>', self.noisy)
del(msg2) # dropping second message
assert(a.outMsgs[1] != None)
b.datagramReceived(msg)
self.assertEqual(b.state, confirmed)
self.assertEqual(len(b.protocol.q), 1)
self.assertEqual(b.protocol.q[0], TESTMSG)
self.assertEqual(b.inMsg, 1)
self.assertEqual(b.weMissed, [])
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
assert(a.outMsgs[1] != None)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
self.assertEqual(b.state, confirmed)
self.assertEqual(len(b.protocol.q), 1)
self.assertEqual(b.protocol.q[0], TESTMSG)
self.assertEqual(b.weMissed, [(2, 1)])
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
self.assertEqual(len(b.protocol.q), 2)
b.protocol.q.sort()
l = [TESTMSG2, TESTMSG]
l.sort()
self.assertEqual(b.protocol.q,l)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
self.assertEqual(len(b.protocol.q), 2)
b.protocol.q.sort()
l = [TESTMSG2, TESTMSG]
l.sort()
self.assertEqual(b.protocol.q,l)
def testDoubleDouble(self):
a = self.a
b = self.b
TESTMSGA = "Howdy, Y'All!"
TESTMSGB = "Yee Haw"
TESTMSGC = "FOO BAR"
TESTMSGD = "WING WANG"
a.omsgq.append(TESTMSGA)
a.omsgq.append(TESTMSGB)
b.omsgq.append(TESTMSGC)
b.omsgq.append(TESTMSGD)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
msg = swap(b, '<', self.noisy)
self.assertEqual(b.state, sent)
a.datagramReceived(msg)
msg = swap(a, '>', self.noisy)
b.datagramReceived(msg)
self.assertEqual(len(b.protocol.q), 2)
l = [TESTMSGA, TESTMSGB]
l.sort();b.protocol.q.sort()
self.assertEqual(b.protocol.q, l)
self.assertEqual(b.inMsg, 2)
msg = swap(b, '<', self.noisy)
a.datagramReceived(msg)
self.assertEqual(len(a.protocol.q), 2)
l = [TESTMSGC, TESTMSGD]
l.sort();a.protocol.q.sort()
self.assertEqual(a.protocol.q, l)
self.assertEqual(a.inMsg, 2)
def testDoubleDoubleProb(self, prob=0.25):
a = self.a
b = self.b
TESTMSGA = "Howdy, Y'All!"
TESTMSGB = "Yee Haw"
TESTMSGC = "FOO BAR"
TESTMSGD = "WING WANG"
a.omsgq.append(TESTMSGA)
a.omsgq.append(TESTMSGB)
b.omsgq.append(TESTMSGC)
b.omsgq.append(TESTMSGD)
runTillEmpty(a, b, prob, self.noisy)
self.assertEqual(a.state, confirmed)
self.assertEqual(b.state, confirmed)
self.assertEqual(len(b.protocol.q), 2)
l = [TESTMSGA, TESTMSGB]
l.sort();b.protocol.q.sort()
self.assertEqual(b.protocol.q, l)
self.assertEqual(len(a.protocol.q), 2)
l = [TESTMSGC, TESTMSGD]
l.sort();a.protocol.q.sort()
self.assertEqual(a.protocol.q, l)
def testOneWayBlast(self, num = 2**12):
a = self.a
b = self.b
import sha
for i in xrange(num):
a.omsgq.append(sha.sha(`i`).digest())
runTillEmpty(a, b, noisy=self.noisy)
self.assertEqual(len(b.protocol.q), num)
def testTwoWayBlast(self, num = 2**12, prob=0.5):
a = self.a
b = self.b
import sha
for i in xrange(num):
a.omsgq.append(sha.sha('a' + `i`).digest())
b.omsgq.append(sha.sha('b' + `i`).digest())
runTillEmpty(a, b, prob, self.noisy)
self.assertEqual(len(a.protocol.q), num)
self.assertEqual(len(b.protocol.q), num)
def testLimitMessageNumbers(self):
a = self.a
b = self.b
import sha
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
for i in range(5000):
            a.omsgq.append(sha.sha('a' + `i`).digest())
for i in range(5000 / 255):
msg = swap(a, noisy=self.noisy)
self.assertEqual(a.obSeq, 0)
self.assertEqual(a.next, 255)
self.assertEqual(a.outMsgNums[(a.outSeq-1) % 256], 254)
def testConnectionReset(self):
self.testTwoWayBlast()
self.b.protocol.q = []
a = self.a
b = self.b
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
a.omsgq.append("TESTING")
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
self.assertEqual(b.protocol.q[0], "TESTING")
self.assertEqual(b.state, confirmed)
self.a = AirhookConnection()
self.a.makeConnection(DummyTransport())
self.a.addr = ('127.0.0.1', 4444)
a = self.a
a.omsgq.append("TESTING2")
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
self.assertEqual(len(b.protocol.q), 1)
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
self.assertEqual(len(b.protocol.q), 2)
self.assertEqual(b.protocol.q[1], "TESTING2")
def testRecipientReset(self):
self.testTwoWayBlast()
self.b.protocol.q = []
self.noisy = 0
a = self.a
b = self.b
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
a.omsgq.append("TESTING")
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
self.assertEqual(b.protocol.q[0], "TESTING")
self.assertEqual(b.state, confirmed)
self.b = AirhookConnection()
self.b.makeConnection(DummyTransport())
self.b.protocol = Receiver()
self.b.addr = ('127.0.0.1', 4444)
b = self.b
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
a.omsgq.append("TESTING2")
self.assertEqual(len(b.protocol.q), 0)
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
msg = swap(a, noisy=self.noisy)
b.datagramReceived(msg)
msg = swap(b, noisy=self.noisy)
a.datagramReceived(msg)
self.assertEqual(len(b.protocol.q), 1)
self.assertEqual(b.protocol.q[0], "TESTING2")
class StreamTests(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = StreamConnection()
self.a.makeConnection(DummyTransport())
self.a.addr = ('127.0.0.1', 4444)
self.b = StreamConnection()
self.b.makeConnection(DummyTransport())
self.b.addr = ('127.0.0.1', 4444)
self.a.protocol = StreamReceiver()
self.b.protocol = StreamReceiver()
def testStreamSimple(self, num = 2**12, prob=1.0):
f = open('/dev/urandom', 'r')
a = self.a
b = self.b
MSGA = f.read(num)
MSGB = f.read(num)
self.a.write(MSGA)
self.b.write(MSGB)
runTillEmpty(a, b, prob, self.noisy)
self.assertEqual(len(a.protocol.buf), len(MSGB))
self.assertEqual(len(b.protocol.buf), len(MSGA))
self.assertEqual(a.protocol.buf, MSGB)
self.assertEqual(b.protocol.buf, MSGA)
def testStreamLossy(self, num = 2**12, prob=0.5):
self.testStreamSimple(num, prob)
class SimpleReactor(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = makeReceiver(2020)
self.b = makeReceiver(2021)
self.ac = self.a.connectionForAddr(('127.0.0.1', 2021))
self.bc = self.b.connectionForAddr(('127.0.0.1', 2020))
self.ac.noisy = self.noisy
self.bc.noisy = self.noisy
def testSimple(self):
msg = "Testing 1, 2, 3"
self.ac.write(msg)
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEqual(self.bc.state, confirmed)
self.assertEqual(self.bc.protocol.q, [msg])
class SimpleReactorEcho(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = makeReceiver(2022)
self.b = makeEcho(2023)
self.ac = self.a.connectionForAddr(('127.0.0.1', 2023))
self.bc = self.b.connectionForAddr(('127.0.0.1', 2022))
def testSimple(self):
msg = "Testing 1, 2, 3"
self.ac.write(msg)
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEqual(self.ac.protocol.q, [msg])
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEqual(self.ac.protocol.q, [msg])
class SimpleReactorStream(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = makeStreamReceiver(2024)
self.b = makeStreamReceiver(2025)
self.ac = self.a.connectionForAddr(('127.0.0.1', 2025))
self.bc = self.b.connectionForAddr(('127.0.0.1', 2024))
def testSimple(self):
msg = "Testing 1, 2, 3"
self.ac.write(msg)
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEqual(self.bc.protocol.buf, msg)
class SimpleReactorStreamBig(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = makeStreamReceiver(2026)
self.b = makeStreamReceiver(2027)
self.ac = self.a.connectionForAddr(('127.0.0.1', 2027))
self.bc = self.b.connectionForAddr(('127.0.0.1', 2026))
def testBig(self):
msg = open('/dev/urandom').read(4096)
self.ac.write(msg)
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEqual(self.bc.protocol.buf, msg)
class EchoReactorStreamBig(unittest.TestCase):
def setUp(self):
self.noisy = 0
self.a = makeStreamReceiver(2028)
self.b = makeEcho(2029)
self.ac = self.a.connectionForAddr(('127.0.0.1', 2029))
def testBig(self):
msg = open('/dev/urandom').read(256)
self.ac.write(msg)
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEqual(self.ac.protocol.buf, msg)
| csm/khashmir | test_airhook.py | Python | mit | 23,088 |
# -*- coding: utf-8 -*-
#Copyright (C) 2011 Seán Hayes
#Django imports
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
# Create your models here.
class BattleRecord(models.Model):
attacker = models.ForeignKey(User, related_name="battlerecord_attacker_set")
attacker_health_lost = models.PositiveIntegerField(null=True)
attacker_money_lost = models.PositiveIntegerField(null=True)
attacker_experience_gained = models.PositiveIntegerField(null=True)
attacker_killed = models.NullBooleanField()
defender = models.ForeignKey(User, related_name="battlerecord_defender_set")
defender_health_lost = models.PositiveIntegerField(null=True)
defender_money_lost = models.PositiveIntegerField(null=True)
defender_experience_gained = models.PositiveIntegerField(null=True)
defender_killed = models.NullBooleanField()
attacker_won = models.NullBooleanField()
datetime = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u'%s attacked %s dealing %s damage and receiving %s damage. %s lost %s money and gained %s XP, %s lost %s money and gained %s XP.' % (
self.attacker.first_name,
self.defender.first_name,
self.defender_health_lost,
self.attacker_health_lost,
self.attacker.first_name,
self.attacker_money_lost,
self.attacker_experience_gained,
self.defender.first_name,
self.defender_money_lost,
self.defender_experience_gained,
)
class Meta:
ordering = ['datetime']
class BattleProfile(models.Model):
user = models.OneToOneField(User)
attacked_and_won = models.PositiveIntegerField(default=0)
attacked_and_lost = models.PositiveIntegerField(default=0)
defended_and_won = models.PositiveIntegerField(default=0)
defended_and_lost = models.PositiveIntegerField(default=0)
killed = models.PositiveIntegerField(default=0)
kills = models.PositiveIntegerField(default=0)
def create_profile(user):
"""
Called using a post_save trigger on User, so when a new User is added a Profile is created as well.
"""
#create a profile
profile = BattleProfile(user=user)
profile.save()
def user_save_handler(sender, instance, created, **kwargs):
if created:
create_profile(instance)
post_save.connect(user_save_handler, sender=User)
| SeanHayes/swarm-war | swarm_war/battles/models.py | Python | agpl-3.0 | 2,273 |
from context import VerdictHiveContext
__all__ = [
"VerdictHiveContext"
]
| verdictdb/verdict | python/pyverdict/__init__.py | Python | apache-2.0 | 80 |
import os
import urllib
import logging
import csv
import StringIO
from google.appengine.api import users
from google.appengine.ext import ndb
##dummy entity to use as parent
class Course(ndb.Model):
"""A main model for representing an individual coursebook entry."""
author = ndb.UserProperty(indexed=True)
name = ndb.StringProperty(indexed=True)
description = ndb.StringProperty(indexed=False)
lang = ndb.StringProperty(indexed=True)
date = ndb.DateTimeProperty(auto_now_add=True)
def All():
    #strongly consistent version NOT RECOMMENDED
    #return Course.query(ancestor=ndb.Key('Root', '01')).order(-Course.date)
    #eventually consistent version
return Course.query().order(-Course.date)
def Get(key):
    logging.info('get launched')
my_key = ndb.Key(urlsafe=key)
return my_key.get()
def Update(key, name, description, lang):
    logging.info('update launched')
user = users.get_current_user()
if user:
course = Get(key)
course.name = name
course.description = description
course.lang = lang
course.put()
return course
def Insert(name, description, lang):
user = users.get_current_user()
if user:
        #eventually consistent version
        course = Course(name=name, description=description, lang = lang, author= user)
        #strongly consistent version NOT RECOMMENDED
        #course = Course(name=name, description=description, lang = lang, author= user, parent = ndb.Key('Root', '01'))
course.put()
return course
def Delete(key):
my_key = ndb.Key(urlsafe=key)
my_key.delete()
def Import(my_csv):
user = users.get_current_user()
stringReader = csv.reader(StringIO.StringIO(my_csv))
courses = []
for row in stringReader:
        #for each row, make a new element
course = Course()
course.name = row[0].decode('latin-1')
course.description = row[1].decode('latin-1')
course.lang = row[2].decode('latin-1')
course.author = user
courses.append(course)
return ndb.put_multi(courses)
def Export(writer):
courses = Course.query()
for course in courses:
desc, lang, author = '','',''
name = course.name.encode('UTF-8')
if course.description:
desc = course.description.encode('UTF-8')
if course.lang:
lang = course.lang.encode('UTF-8')
if course.author:
author = course.author
writer.writerow([name, desc, lang, author])
#emp = FlexEmployee(name='Sandy', location='SF')
#FlexEmployee.query(ndb.GenericProperty('location') == 'SF')
##KIND OF PROPERTIES
# IntegerProperty 64-bit signed integer
# FloatProperty Double-precision floating-point number
# BooleanProperty Boolean
# StringProperty Unicode string; up to 1500 bytes, indexed
# TextProperty Unicode string; unlimited length, not indexed
# BlobProperty Uninterpreted byte string:
# if you set indexed=True, up to 1500 bytes, indexed;
# if indexed is False (the default), unlimited length, not indexed.
# Optional keyword argument: compressed.
# DateTimeProperty Date and time (see Date and Time Properties)
# DateProperty Date (see Date and Time Properties)
# TimeProperty Time (see Date and Time Properties)
# GeoPtProperty Geographical location. This is a ndb.GeoPt object. The object has attributes lat and lon, both floats. You can construct one with two floats like ndb.GeoPt(52.37, 4.88) or with a string ndb.GeoPt("52.37, 4.88"). (This is actually the same class as db.GeoPt)
# KeyProperty Datastore key
# Optional keyword argument: kind=kind, to require that keys assigned to this property always have the indicated kind. May be a string or a Model subclass.
# BlobKeyProperty Blobstore key
# Corresponds to BlobReferenceProperty in the old db API, but the property value is a BlobKey instead of a BlobInfo; you can construct a BlobInfo from it using BlobInfo(blobkey)
# UserProperty User object.
# StructuredProperty Includes one kind of model inside another, by value (see Structured Properties)
# class Contact(ndb.Model):
# name = ndb.StringProperty()
# addresses = ndb.StructuredProperty(Address, repeated=True)
# guido = Contact(name='Guido',
# addresses=[Address(type='home',
# city='Amsterdam'),
# Address(type='work',
# street='Spear St',
# city='SF')])
# LocalStructuredProperty Like StructuredProperty, but on-disk representation is an opaque blob and is not indexed (see Structured Properties).
# Optional keyword argument: compressed.
# JsonProperty Value is a Python object (such as a list or a dict or a string) that is serializable using Python's json module; the Datastore stores the JSON serialization as a blob. Unindexed by default.
# Optional keyword argument: compressed.
# PickleProperty Value is a Python object (such as a list or a dict or a string) that is serializable using Python's pickle protocol; the Datastore stores the pickle serialization as a blob. Unindexed by default.
# Optional keyword argument: compressed.
# GenericProperty Generic value
# Used mostly by the Expando class, but also usable explicitly. Its type may be any of int, long, float, bool, str, unicode, datetime, Key, BlobKey, GeoPt, User, None.
# ComputedProperty Value computed from other properties by a user-defined function. (See Computed Properties.)
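# A hypothetical model combining a few of the property kinds listed above:
# class Lesson(ndb.Model):
#     title = ndb.StringProperty()
#     notes = ndb.TextProperty()
#     created = ndb.DateTimeProperty(auto_now_add=True)
#     rating = ndb.FloatProperty()
#     course = ndb.KeyProperty(kind=Course)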
##REPEAT
#tags = ndb.StringProperty(repeated=True)
| pepetox/gae-angular-materialize | modelCourse.py | Python | mit | 5,570 |
__author__ = 'tivvit'
from protorpc import messages
class Quest_m(messages.Message):
name = messages.StringField(1)
faction = messages.StringField(2)
points = messages.IntegerField(3)
num = messages.IntegerField(4)
id = messages.IntegerField(5)
factionId = messages.IntegerField(6)
# inserted = messages.DateTimeField(5)
class QuestsCollection_m(messages.Message):
quest = messages.MessageField(Quest_m, 1, repeated=True) | gugcz/devfest-rpg | backend/cdh_m/quest_m.py | Python | mit | 457 |
# -*- coding: utf-8 -*-
#
# API configuration
#####################
DEBUG = False
# Top-level URL for deployment. Numerous other URLs depend on this.
CYCLADES_BASE_URL = "https://compute.example.synnefo.org/compute/"
# The API will return HTTP Bad Request if the ?changes-since
# parameter refers to a point in time more than POLL_LIMIT seconds ago.
POLL_LIMIT = 3600
# Astakos groups that have access to '/admin' views.
ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"]
# Enable/Disable the snapshots feature altogether at the API level.
# If set to False, Cyclades will not expose the '/snapshots' API URL
# of the 'volume' app.
CYCLADES_SNAPSHOTS_ENABLED = True
# Enable/Disable the feature of sharing a resource with the members of the
# project to which it belongs, at the API level.
CYCLADES_SHARED_RESOURCES_ENABLED = False
# Enable/Disable the feature of rescuing a Virtual Machine at the API
# level
RESCUE_ENABLED = False
#
# Network Configuration
#
# CYCLADES_DEFAULT_SERVER_NETWORKS setting contains a list of networks to
# connect a newly created server to, *if the user has not* specified them
# explicitly in the POST /server API call.
# Each member of the list may be a network UUID, a tuple of network UUIDs,
# "SNF:ANY_PUBLIC_IPV4" [any public network with an IPv4 subnet defined],
# "SNF:ANY_PUBLIC_IPV6 [any public network with only an IPV6 subnet defined],
# or "SNF:ANY_PUBLIC" [any public network].
#
# Access control and quota policy are enforced, just as if the user had
# specified the value of CYCLADES_DEFAULT_SERVER_NETWORKS in the content
# of the POST call, after processing of "SNF:*" directives.
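# e.g. (hypothetical network UUIDs):
# CYCLADES_DEFAULT_SERVER_NETWORKS = [
#     "00000000-0000-0000-0000-000000000001",
#     ("00000000-0000-0000-0000-000000000002",
#      "00000000-0000-0000-0000-000000000003"),
#     "SNF:ANY_PUBLIC_IPV4"]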
CYCLADES_DEFAULT_SERVER_NETWORKS = []
# This setting contains a list of networks which every new server
# will be forced to connect to, regardless of the contents of the POST
# /servers call, or the value of CYCLADES_DEFAULT_SERVER_NETWORKS.
# Its format is identical to that of CYCLADES_DEFAULT_SERVER_NETWORKS.
# WARNING: No access control or quota policy are enforced.
# The server will get all IPv4/IPv6 addresses needed to connect to the
# networks specified in CYCLADES_FORCED_SERVER_NETWORKS, regardless
# of the state of the floating IP pool of the user, and without
# allocating any floating IPs.
CYCLADES_FORCED_SERVER_NETWORKS = []
# Maximum allowed network size for private networks.
MAX_CIDR_BLOCK = 22
# Default settings used by network flavors
DEFAULT_MAC_PREFIX = 'aa:00:0'
DEFAULT_BRIDGE = 'br0'
# Network flavors that users are allowed to create through API requests
# Available flavors are IP_LESS_ROUTED, MAC_FILTERED, PHYSICAL_VLAN
API_ENABLED_NETWORK_FLAVORS = ['MAC_FILTERED']
# Settings for MAC_FILTERED network:
# ------------------------------------------
# All networks of this type are bridged to the same bridge. Isolation between
# networks is achieved by assigning a unique MAC-prefix to each network and
# filtering packets via ebtables.
DEFAULT_MAC_FILTERED_BRIDGE = 'prv0'
# Firewalling. Firewall tags should contain '%d' to be filled with the NIC
# ID.
GANETI_FIREWALL_ENABLED_TAG = 'synnefo:network:%s:protected'
GANETI_FIREWALL_DISABLED_TAG = 'synnefo:network:%s:unprotected'
GANETI_FIREWALL_PROTECTED_TAG = 'synnefo:network:%s:limited'
# The default firewall profile that will be in effect if no tags are defined
DEFAULT_FIREWALL_PROFILE = 'DISABLED'
# Fixed mapping of user VMs to a specific backend.
# e.g. BACKEND_PER_USER = {'example@synnefo.org': 2}
BACKEND_PER_USER = {}
# Encryption key for the instance hostname in the stat graphs URLs. Set it to
# a random string and update the STATS_SECRET_KEY setting in the snf-stats-app
# host (20-snf-stats-app-settings.conf) accordingly.
CYCLADES_STATS_SECRET_KEY = "secret_key"
# URL templates for the stat graphs.
# The API implementation replaces '%s' with the encrypted backend id.
CPU_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/cpu-bar/%s'
CPU_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/cpu-ts/%s'
NET_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/net-bar/%s'
NET_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/net-ts/%s'
# Recommended refresh period for server stats
STATS_REFRESH_PERIOD = 60
# The maximum number of file path/content pairs that can be supplied on server
# build
MAX_PERSONALITY = 5
# The maximum size, in bytes, for each personality file
MAX_PERSONALITY_SIZE = 10240
# Authentication URL of the astakos instance to be used for user management
ASTAKOS_AUTH_URL = 'https://accounts.example.synnefo.org/identity/v2.0'
# Tune the size of the Astakos http client connection pool
# This limit the number of concurrent requests to Astakos.
CYCLADES_ASTAKOSCLIENT_POOLSIZE = 50
# Key for password encryption-decryption. After changing this setting, synnefo
# will be unable to decrypt all existing Backend passwords. You will need to
# store again the new password by using 'snf-manage backend-modify'.
# SECRET_ENCRYPTION_KEY may be up to 32 bytes. Keys bigger than 32 bytes are not
# supported.
SECRET_ENCRYPTION_KEY = "Password Encryption Key"
# Astakos service token
# The token used for astakos service api calls (e.g. api to retrieve user email
# using a user uuid)
CYCLADES_SERVICE_TOKEN = ''
# Template to use to build the FQDN of VMs. The setting will be formatted with
# the id of the VM.
CYCLADES_SERVERS_FQDN = 'snf-%(id)s.vm.example.synnefo.org'
# Description of applied port forwarding rules (DNAT) for Cyclades VMs. This
# setting contains a mapping from the port of each VM to a tuple containing the
# destination IP/hostname and the new port: (host, port). Instead of a tuple a
# python callable object may be used which must return such a tuple. The caller
# will pass to the callable the following positional arguments, in the
# following order:
# * server_id: The ID of the VM in the DB
# * ip_address: The IPv4 address of the public VM NIC
# * fqdn: The FQDN of the VM
# * user: The UUID of the owner of the VM
#
# Here is an example describing the mapping of the SSH port of all VMs to
# the external address 'gate.example.synnefo.org' and port 60000+server_id.
# e.g. iptables -t nat -A prerouting -d gate.example.synnefo.org \
# --dport (61000 + $(VM_ID)) -j DNAT --to-destination $(VM_IP):22
#CYCLADES_PORT_FORWARDING = {
# 22: lambda ip_address, server_id, fqdn, user:
# ("gate.example.synnefo.org", 61000 + server_id),
#}
CYCLADES_PORT_FORWARDING = {}
# Extra configuration options required for snf-vncauthproxy (>=1.5). Each dict
# in the list describes one vncauthproxy instance.
CYCLADES_VNCAUTHPROXY_OPTS = [
{
# These values are required for VNC console support. They should match
# a user / password configured in the snf-vncauthproxy authentication /
# users file (/var/lib/vncauthproxy/users).
'auth_user': 'synnefo',
'auth_password': 'secret_password',
# server_address and server_port should reflect the --listen-address and
# --listen-port options passed to the vncauthproxy daemon
'server_address': '127.0.0.1',
'server_port': 24999,
# Set to True to enable SSL support on the control socket.
'enable_ssl': False,
# If you enabled SSL support for snf-vncauthproxy you can optionally
        # provide a path to a CA file and enable strict checking for the server
        # certificate.
'ca_cert': None,
'strict': False,
},
]
# The maximum allowed size (GB) for a Cyclades Volume
CYCLADES_VOLUME_MAX_SIZE = 200
# The maximum allowed metadata items for a Cyclades Volume
CYCLADES_VOLUME_MAX_METADATA = 10
# The volume types that Cyclades allow to be detached
CYCLADES_DETACHABLE_DISK_TEMPLATES = ("ext_archipelago", "ext_vlmc")
# The maximum number of tags allowed for a Cyclades Virtual Machine
CYCLADES_VM_MAX_TAGS = 50
# The maximum allowed metadata items for a Cyclades Virtual Machine
CYCLADES_VM_MAX_METADATA = 10
# Define cache for public stats
PUBLIC_STATS_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "publicstats",
"TIMEOUT": 300,
}
# Permit users of specific groups to override the flavor allow_create policy
CYCLADES_FLAVOR_OVERRIDE_ALLOW_CREATE = {}
# Define cache for VM password
VM_PASSWORD_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "vmpassword",
"TIMEOUT": None,
}
| grnet/synnefo | snf-cyclades-app/synnefo/app_settings/default/api.py | Python | gpl-3.0 | 8,466 |
from django.contrib import admin
from bootcamp.auth.models import Profile
admin.site.register(Profile) | maxpinto/Ptz | bootcamp/auth/admin.py | Python | mit | 104 |
import sys, os, signal
def stop(argv):
    pwd = os.getcwd()
    # if argv given, folders = [argv]; else, every folder under pwd
    folders = [argv] if argv else os.listdir(pwd)
    for folder in folders:
        # check pwd/folder/temp/pids for existing pid files
        # (assumes each pid file holds a single integer pid)
        pid_dir = os.path.join(pwd, folder, 'temp', 'pids')
        if not os.path.isdir(pid_dir):
            continue
        for pid_file in os.listdir(pid_dir):
            path = os.path.join(pid_dir, pid_file)
            # kill -15 (SIGTERM) & rm the pid file
            os.kill(int(open(path).read().strip()), signal.SIGTERM)
            os.remove(path)
def main():
print "Please don't try to run this script separately."
if __name__ == '__main__':
main() | modcracker/Tork | tork/core/manage/stop.py | Python | mit | 347 |
import os.path
from django.contrib.auth import authenticate
from django.shortcuts import render
from qmpy.models import Entry, Task, Calculation, Formation, MetaData
from .tools import get_globals
def home_page(request):
data = get_globals()
data.update(
{
"done": "{:,}".format(Formation.objects.filter(fit="standard").count()),
}
)
request.session.set_test_cookie()
return render(request, "index.html", data)
def construction_page(request):
return render(request, "construction.html", {})
def faq_view(request):
return render(request, "faq.html")
def play_view(request):
return render(request, "play.html")
def login(request):
if request.method == "POST":
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
else:
pass
else:
pass
def logout(request):
logout(request)
# redirect to success
| wolverton-research-group/qmpy | qmpy/web/views/home.py | Python | mit | 1,123 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017, 2018 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import io
from sys import exit
import uuid
import shutil
import inspect
import json
from random import random
from string import ascii_lowercase
import time
import traceback
from datetime import datetime, timedelta
import urllib3
import certifi
import hashlib
from threading import Thread
from minio import Minio, PostPolicy, CopyConditions
from minio.error import (APINotImplemented, NoSuchBucketPolicy, ResponseError,
PreconditionFailed, BucketAlreadyOwnedByYou,
BucketAlreadyExists, InvalidBucketError)
class LimitedRandomReader(object):
"""
    LimitedRandomReader is a BufferedIOBase-compatible reader that, upon
    read, returns random data but stops with EOF after *limit* bytes.
    :param limit: Trigger EOF after limit bytes.
"""
def __init__(self, limit):
self._limit = limit
self._offset_location = 0
def read(self, amt=64*1024):
"""
Similar to :meth:`io.read`, with amt option.
:param amt:
How much of the content to read.
"""
        # If the offset has reached the limit, treat it as EOF and return here.
if self._offset_location == self._limit:
# return empty bytes to indicate EOF.
return b''
        # pool of lowercase ASCII characters (as bytes) to sample from
bal = [c.encode('ascii') for c in ascii_lowercase]
amt = min(amt, self._limit - self._offset_location)
data = b''.join([bal[int(random() * 26)] for _ in range(amt)])
self._offset_location += len(data)
return data
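# Illustrative usage: LimitedRandomReader(1024).read() returns 1024 random
# lowercase ASCII bytes on the first call and b'' (EOF) on subsequent calls.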
class LogOutput(object):
"""
LogOutput is the class for log output. It is required standard for all
SDK tests controlled by mint.
Here are its attributes:
'name': name of the SDK under test, e.g. 'minio-py'
'function': name of the method/api under test with its signature
The following python code can be used to
pull args information of a <method> and to
                put it together with the method name:
<method>.__name__+'('+', '.join(args_list)+')'
e.g. 'remove_object(bucket_name, object_name)'
'args': method/api arguments with their values, in
dictionary form: {'arg1': val1, 'arg2': val2, ...}
'duration': duration of the whole test in milliseconds,
defaults to 0
    'alert': any extra information the user needs to be alerted about,
like whether this is a Blocker/Gateway/Server related
issue, etc., defaults to None
'message': descriptive error message, defaults to None
'error': stack-trace/exception message(only in case of failure),
actual low level exception/error thrown by the program,
defaults to None
'status': exit status, possible values are 'PASS', 'FAIL', 'NA',
defaults to 'PASS'
"""
PASS = 'PASS'
FAIL = 'FAIL'
NA = 'NA'
def __init__(self, meth, test_name):
self.__args_list = inspect.getargspec(meth).args[1:]
self.__name = 'minio-py:'+test_name
self.__function = meth.__name__+'('+', '.join(self.__args_list)+')'
self.__args = {}
self.__duration = 0
self.__alert = ''
self.__message = None
self.__error = None
self.__status = self.PASS
self.__start_time = time.time()
@property
def name(self): return self.__name
@property
def function(self): return self.__function
@property
def args(self): return self.__args
@name.setter
def name(self, val): self.__name = val
@function.setter
def function(self, val): self.__function = val
@args.setter
def args(self, val): self.__args = val
def json_report(self, err_msg='', alert='', status=''):
self.__args = {k: v for k, v in self.__args.items() if v and v != ''}
entry = {'name': self.__name,
'function': self.__function,
'args': self.__args,
'duration': int(round((time.time() - self.__start_time)*1000)),
'alert': str(alert),
'message': str(err_msg),
'error': traceback.format_exc() if err_msg and err_msg != '' else '',
'status': status if status and status != '' else \
self.FAIL if err_msg and err_msg != '' else self.PASS
}
return json.dumps({k: v for k, v in entry.items() if v and v != ''})
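# Illustrative json_report() output for a passing test (values are examples):
# {"name": "minio-py:test_make_bucket_default_region",
#  "function": "make_bucket(bucket_name, location)",
#  "args": {"bucket_name": "minio-py-test-...", "location": "default value ('us-east-1')"},
#  "duration": 23, "status": "PASS"}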
def generate_bucket_name():
return "minio-py-test-" + uuid.uuid4().__str__()
def is_s3(client):
return "s3.amazonaws" in client._endpoint_url
def test_make_bucket_default_region(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
# Default location
log_output.args['location'] = "default value ('us-east-1')"
try:
# Create a bucket with default bucket location
client.make_bucket(bucket_name)
# Check if bucket was created properly
log_output.function = 'bucket_exists(bucket_name)'
client.bucket_exists(bucket_name)
# Remove bucket
log_output.function = 'remove_bucket(bucket_name)'
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
log_output.function = 'make_bucket(bucket_name, location)'
print(log_output.json_report())
def test_make_bucket_with_region(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
# A non-default location
log_output.args['location'] = location = 'us-west-1'
try:
# Create a bucket with default bucket location
client.make_bucket(bucket_name, location)
# Check if bucket was created properly
log_output.function = 'bucket_exists(bucket_name)'
client.bucket_exists(bucket_name)
# Remove bucket
log_output.function = 'remove_bucket(bucket_name)'
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
log_output.function = 'make_bucket(bucket_name, location)'
print(log_output.json_report())
def test_negative_make_bucket_invalid_name(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
bucket_name = generate_bucket_name()
# Default location
log_output.args['location'] = "default value ('us-east-1')"
# Create an array of invalid bucket names to test
invalid_bucket_name_list = [bucket_name+'.', '.'+bucket_name, bucket_name+'...'+'abcd']
for name in invalid_bucket_name_list:
log_output.args['bucket_name'] = name
try:
# Create a bucket
client.make_bucket(name)
# Check if bucket was created properly
log_output.function = 'bucket_exists(bucket_name)'
client.bucket_exists(name)
# Remove bucket
log_output.function = 'remove_bucket(bucket_name)'
client.remove_bucket(name)
except InvalidBucketError as err:
pass
except Exception as err:
raise Exception(err)
# Test passes
log_output.function = 'make_bucket(bucket_name, location)'
log_output.args['bucket_name'] = invalid_bucket_name_list
print(log_output.json_report())
def test_make_bucket_recreate(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "make_bucket(bucket_name, location)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
# Amazon S3 has a bug that allows a bucket to be recreated in the
# 'us-east-1' region, as opposed to the expected failure behavior.
# Until this issue is fixed by Amazon, the following
# location manipulation is used in our testing.
location = 'us-west-1' if is_s3(client) else 'us-east-1'
failed_as_expected = False
try:
client.make_bucket(bucket_name, location)
client.make_bucket(bucket_name, location)
except BucketAlreadyOwnedByYou as err:
# Expected this exception. Test passes
failed_as_expected = True
print(log_output.json_report())
except BucketAlreadyExists as err:
# Expected this exception. Test passes
failed_as_expected = True
print(log_output.json_report())
except Exception as err:
raise Exception(err)
if not failed_as_expected:
print(log_output.json_report("Recreating the same bucket SHOULD have failed!"))
exit(1)
def test_list_buckets(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_buckets( )"
# Get a unique bucket_name
bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
# List all buckets.
buckets = client.list_buckets()
for bucket in buckets:
# bucket object should be of a valid value.
if bucket.name and bucket.creation_date:
continue
raise ValueError('list_bucket api failure')
except Exception as err:
raise Exception(err)
finally:
client.remove_bucket(bucket_name)
# Test passes
print(log_output.json_report())
def test_fput_object_small_file(client, testfile, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fput_object(bucket_name, object_name, file_path, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = testfile
log_output.args['metadata'] = metadata = {'x-amz-storage-class': 'STANDARD_IA'}
try:
client.make_bucket(bucket_name)
# upload local small file.
if is_s3(client):
client.fput_object(bucket_name, object_name+'-f', testfile,
metadata=metadata)
else:
client.fput_object(bucket_name, object_name+'-f', testfile)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+'-f')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_fput_object_large_file(client, largefile, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fput_object(bucket_name, object_name, file_path, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = largefile
log_output.args['metadata'] = metadata = {'x-amz-storage-class': 'STANDARD_IA'}
# upload local large file through multipart.
try:
client.make_bucket(bucket_name)
if is_s3(client):
client.fput_object(bucket_name, object_name+'-large', largefile,
metadata=metadata)
else:
client.fput_object(bucket_name, object_name+'-large', largefile)
client.stat_object(bucket_name, object_name+'-large')
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+'-large')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_fput_object_with_content_type(client, testfile, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fput_object(bucket_name, object_name, file_path, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = testfile
log_output.args['content_type'] = content_type = 'application/octet-stream'
log_output.args['metadata'] = metadata = {'x-amz-storage-class': 'STANDARD_IA'}
try:
client.make_bucket(bucket_name)
# upload local small file with content_type defined.
if is_s3(client):
client.fput_object(bucket_name, object_name+'-f', testfile,
content_type, metadata)
else:
client.fput_object(bucket_name, object_name+'-f', testfile,
content_type)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+'-f')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_no_copy_condition(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Perform a server side copy of an object
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source)
st_obj = client.stat_object(bucket_name, object_copy)
validate_stat_data(st_obj, KB_1, {})
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_etag_match(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Perform a server side copy of an object
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source)
# Verification
source_etag = client.stat_object(bucket_name, object_source).etag
copy_conditions = CopyConditions()
copy_conditions.set_match_etag(source_etag)
log_output.args['conditions'] = {'set_match_etag': source_etag}
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_negative_etag_match(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
try:
# Perform a server side copy of an object
# with incorrect pre-conditions and fail
etag = 'test-etag'
copy_conditions = CopyConditions()
copy_conditions.set_match_etag(etag)
log_output.args['conditions'] = {'set_match_etag': etag}
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except PreconditionFailed as err:
if err.message != 'At least one of the preconditions you specified did not hold.':
raise Exception(err)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_modified_since(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Set up the 'modified_since' copy condition
copy_conditions = CopyConditions()
t = (2014, 4, 1, 0, 0, 0, 0, 0, 0)
mod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_modified_since(mod_since)
date_pretty = mod_since.strftime('%c')
log_output.args['conditions'] = {'set_modified_since':date_pretty}
# Perform a server side copy of an object
# and expect the copy to complete successfully
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_copy_object_unmodified_since(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "copy_object(bucket_name, object_name, object_source, conditions)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_source'] = object_source = object_name+'-source'
log_output.args['object_name'] = object_copy = object_name+'-copy'
try:
client.make_bucket(bucket_name)
# Upload a streaming object of 1KiB
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
# Set up the 'unmodified_since' copy condition
copy_conditions = CopyConditions()
t = (2014, 4, 1, 0, 0, 0, 0, 0, 0)
unmod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_unmodified_since(unmod_since)
date_pretty = unmod_since.strftime('%c')
log_output.args['conditions'] = {'set_unmodified_since': date_pretty}
try:
# Perform a server side copy of an object and expect
# the copy to fail, since the creation/modification
# time is now, i.e. well after the 'unmodified_since' time of April 1st, 2014
client.copy_object(bucket_name, object_copy,
'/'+bucket_name+'/'+object_source,
copy_conditions)
except PreconditionFailed as err:
if err.message != 'At least one of the preconditions you specified did not hold.':
raise Exception(err)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_source)
client.remove_object(bucket_name, object_copy)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_put_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "put_object(bucket_name, object_name, data, length, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
# Put/Upload a streaming object of 1MiB
log_output.args['length'] = MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
log_output.args['data'] = 'LimitedRandomReader(MB_1)'
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
client.stat_object(bucket_name, object_name)
# Put/Upload a streaming object of 11MiB
log_output.args['length'] = MB_11 = 11*1024*1024 # 11MiB.
MB_11_reader = LimitedRandomReader(MB_11)
log_output.args['data'] = 'LimitedRandomReader(MB_11)'
log_output.args['metadata'] = metadata = {'x-amz-meta-testing': 'value','test-key':'value2'}
log_output.args['content_type'] = content_type='application/octet-stream'
client.put_object(bucket_name,
object_name+'-metadata',
MB_11_reader,
MB_11,
content_type,
metadata)
# Stat on the uploaded object to check if it exists
# Fetch saved stat metadata on a previously uploaded object with metadata.
st_obj = client.stat_object(bucket_name, object_name+'-metadata')
if 'X-Amz-Meta-Testing' not in st_obj.metadata:
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
value = st_obj.metadata['X-Amz-Meta-Testing']
if value != 'value':
raise ValueError('Metadata key has unexpected'
' value {0}'.format(value))
if 'X-Amz-Meta-Test-Key' not in st_obj.metadata:
raise ValueError("Metadata key 'x-amz-meta-test-key' not found")
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_object(bucket_name, object_name+'-metadata')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_negative_put_object_with_path_segment(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "put_object(bucket_name, object_name, data, length, content_type, metadata)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = "/a/b/c/" + uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
log_output.args['length'] = 0 # Keep 0 bytes body to check for error.
log_output.args['data'] = ''
client.put_object(bucket_name,
object_name,
io.BytesIO(b''), 0)
except ResponseError as err:
pass
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def validate_stat_data(st_obj, expected_size, expected_meta):
received_modification_time = st_obj.last_modified
received_etag = st_obj.etag
received_metadata = st_obj.metadata
received_content_type = st_obj.content_type
received_size = st_obj.size
received_is_dir = st_obj.is_dir
if not isinstance(received_modification_time, time.struct_time):
raise ValueError('Incorrect last_modified time type'
', received type: ', type(received_modification_time))
if not received_etag or received_etag == '':
raise ValueError('No Etag value is returned.')
if received_content_type != 'application/octet-stream':
raise ValueError('Incorrect content type. Expected: ',
"'application/octet-stream', received: ",
received_content_type)
if received_size != expected_size:
raise ValueError('Incorrect file size. Expected: ', expected_size,
', received: ', received_size)
if received_is_dir != False:
raise ValueError('Incorrect file type. Expected: is_dir=False',
', received: is_dir=', received_is_dir)
if not all(i in expected_meta.items() for i in received_metadata.items()):
raise ValueError("Metadata key 'x-amz-meta-testing' not found")
def test_stat_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "stat_object(bucket_name, object_name)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
# Put/Upload a streaming object of 1MiB
log_output.args['length'] = MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
log_output.args['data'] = 'LimitedRandomReader(MB_1)'
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
client.stat_object(bucket_name, object_name)
# Put/Upload a streaming object of 11MiB
log_output.args['length'] = MB_11 = 11*1024*1024 # 11MiB.
MB_11_reader = LimitedRandomReader(MB_11)
log_output.args['data'] = 'LimitedRandomReader(MB_11)'
log_output.args['metadata'] = metadata = {'X-Amz-Meta-Testing': 'value'}
log_output.args['content_type'] = content_type='application/octet-stream'
client.put_object(bucket_name,
object_name+'-metadata',
MB_11_reader,
MB_11,
content_type,
metadata)
# Get the stat on the uploaded object
st_obj = client.stat_object(bucket_name, object_name+'-metadata')
# Verify the collected stat data.
validate_stat_data(st_obj, MB_11, metadata)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_object(bucket_name, object_name+'-metadata')
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_remove_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_object(bucket_name, object_name)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_name, KB_1_reader, KB_1)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_object(bucket_name, object_name, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
newfile = 'newfile ุฌุฏูุฏ'
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
# Get/Download a full object, iterate on response to save to disk
object_data = client.get_object(bucket_name, object_name)
with open(newfile, 'wb') as file_data:
shutil.copyfileobj(object_data, file_data)
except Exception as err:
raise Exception(err)
finally:
try:
os.remove(newfile)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_fget_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "fget_object(bucket_name, object_name, file_path, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
log_output.args['file_path'] = newfile_f = 'newfile-f ๆฐ'
try:
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
# Get/Download a full object and save locally at path
client.fget_object(bucket_name, object_name, newfile_f)
except Exception as err:
raise Exception(err)
finally:
try:
os.remove(newfile_f)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_partial_object_with_default_length(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_partial_object(bucket_name, object_name, offset, length, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
newfile = 'newfile'
MB_1 = 1024*1024 # 1MiB.
length = 1000
log_output.args['offset'] = offset = MB_1 - length
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
# Get the last 1000 bytes of the object (from 'offset' to the end)
object_data = client.get_partial_object(bucket_name, object_name, offset)
with open(newfile, 'wb') as file_data:
for d in object_data:
file_data.write(d)
# Check if the new file is the right size
new_file_size = os.path.getsize(newfile)
if new_file_size != length:
raise ValueError('Unexpected file size after partial object download')
except Exception as err:
raise Exception(err)
finally:
try:
# os.remove(newfile)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_partial_object(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_partial_object(bucket_name, object_name, offset, length, request_headers)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
newfile = 'newfile'
MB_1 = 1024*1024 # 1MiB.
log_output.args['offset'] = offset = int(MB_1/2)
log_output.args['length'] = length = int(MB_1/2)-1000
MB_1_reader = LimitedRandomReader(MB_1)
client.make_bucket(bucket_name)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
# Get 'length' bytes of the object starting at 'offset'
object_data = client.get_partial_object(bucket_name, object_name, offset, length)
with open(newfile, 'wb') as file_data:
for d in object_data:
file_data.write(d)
# Check if the new file is the right size
new_file_size = os.path.getsize(newfile)
if new_file_size != length:
raise ValueError('Unexpected file size after partial object download')
except Exception as err:
raise Exception(err)
finally:
try:
# os.remove(newfile)
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_list_objects(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-1", MB_1_reader, MB_1)
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-2", MB_1_reader, MB_1)
# List all object paths in bucket.
log_output.args['recursive'] = is_recursive = True
objects = client.list_objects(bucket_name, None, is_recursive)
for obj in objects:
_, _, _, _, _, _ = obj.bucket_name,\
obj.object_name,\
obj.last_modified,\
obj.etag, obj.size,\
obj.content_type
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+"-1")
client.remove_object(bucket_name, object_name+"-2")
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def count_objects(objects):
no_of_files = 0
for obj in objects:
_, _, _, _, _, _ = obj.bucket_name,\
obj.object_name,\
obj.last_modified,\
obj.etag, obj.size,\
obj.content_type
no_of_files += 1
return no_of_files
def list_objects_api_test(client, bucket_name, expected_no, *argv):
# argv is composed of prefix and recursive arguments of
# list_objects api. They are both supposed to be passed as strings.
no_of_files = count_objects(client.list_objects(bucket_name, *argv) ) # expect all objects to be listed
if expected_no != no_of_files:
raise ValueError("Listed no of objects ({}), does not match the expected no of objects ({})".format(no_of_files, expected_no))
def test_list_objects_with_prefix(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
no_of_created_files = 4
path_prefix = ''
# Create files and directories
for i in range(no_of_created_files):
str_i = str(i)
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, path_prefix + str_i + '_' + object_name, MB_1_reader, MB_1)
path_prefix += str_i + '/'
# Created files and directory structure
# ._<bucket_name>/
# |___0_<object_name>
# |___0/
# |___1_<object_name>
# |___1/
# |___2_<object_name>
# |___2/
# |___3_<object_name>
#
# Test and verify list_objects api outputs
# List objects recursively with NO prefix
log_output.args['recursive'] = recursive = 'True'
log_output.args['prefix'] = prefix = '' # no prefix
list_objects_api_test(client, bucket_name,
no_of_created_files,
prefix, recursive)
# List objects at the top level with no prefix and no recursive option
# Expect only the top 2 objects to be listed
log_output.args['recursive'] = recursive = ''
log_output.args['prefix'] = prefix = ''
list_objects_api_test(client, bucket_name, 2)
# List objects for '0' directory/prefix without recursive option
# Expect 2 objects (directory '0/' and the '0_' object) to be listed
log_output.args['prefix'] = prefix = '0'
list_objects_api_test(client, bucket_name, 2, prefix)
# List objects for '0/' directory/prefix without recursive option
# Expect only 2 objects under directory '0/' to be listed, non-recursive
log_output.args['prefix'] = prefix = '0/'
list_objects_api_test(client, bucket_name, 2, prefix)
# List objects for '0/' directory/prefix, recursively
# Expect 3 objects to be listed
log_output.args['prefix'] = prefix = '0/'
log_output.args['recursive'] = recursive = 'True'
list_objects_api_test(client, bucket_name, 3, prefix, recursive)
# List object with '0/1/2/' directory/prefix, non-recursive
# Expect the single object under directory '0/1/2/' to be listed
log_output.args['prefix'] = prefix = '0/1/2/'
list_objects_api_test(client, bucket_name, 1, prefix)
except Exception as err:
raise Exception(err)
finally:
try:
path_prefix = ''
for i in range(no_of_created_files):
str_i = str(i)
client.remove_object(bucket_name, path_prefix + str_i + '_' + object_name)
path_prefix += str_i + '/'
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
log_output.args['recursive'] = 'Several prefix/recursive combinations are tested'
log_output.args['prefix'] = 'Several prefix/recursive combinations are tested'
print(log_output.json_report())
def test_list_objects_with_1001_files(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
object_name = uuid.uuid4().__str__()
log_output.args['object_name'] = object_name + '_0 ~ ' + object_name + '_1000'
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
no_of_created_files = 2000
path_prefix = ''
# Create 'no_of_created_files' 1KiB files under bucket_name at the same level
for i in range(no_of_created_files):
str_i = str(i)
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, path_prefix + object_name + '_' + str_i, KB_1_reader, KB_1)
# List objects and check that all 'no_of_created_files' files are returned
list_objects_api_test(client, bucket_name, no_of_created_files)
except Exception as err:
raise Exception(err)
finally:
try:
path_prefix = ''
for i in range(no_of_created_files):
str_i = str(i)
client.remove_object(bucket_name, path_prefix + object_name + '_' + str_i)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_list_objects_v2(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "list_objects(bucket_name, prefix, recursive)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-1", MB_1_reader, MB_1)
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name+"-2", MB_1_reader, MB_1)
# List all object paths in bucket using V2 API.
log_output.args['recursive'] = is_recursive = True
objects = client.list_objects_v2(bucket_name, None, is_recursive)
for obj in objects:
_, _, _, _, _, _ = obj.bucket_name,\
obj.object_name,\
obj.last_modified,\
obj.etag, obj.size,\
obj.content_type
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name+"-1")
client.remove_object(bucket_name, object_name+"-2")
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
# Helper method for test_list_incomplete_uploads
# and test_remove_incomplete_uploads tests
def create_upload_ids(client, b_name, o_name, n):
# Create 'n' many incomplete upload ids and
# return the list of created upload ids
upload_ids_created = []
for i in range(n):
upload_id = client._new_multipart_upload(b_name, o_name, {})
upload_ids_created.append(upload_id)
return upload_ids_created
# Helper method for test_list_incomplete_uploads
# and test_remove_incomplete_uploads tests
def collect_incomplete_upload_ids(client, b_name, o_name):
# Collect the upload ids from 'list_incomplete_uploads'
# command, and return the list of created upload ids
upload_ids_listed = []
for obj in client.list_incomplete_uploads(b_name, o_name, False):
upload_ids_listed.append(obj.upload_id)
return upload_ids_listed
def test_remove_incomplete_upload(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_incomplete_upload(bucket_name, object_name)"
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
no_of_upload_ids = 3
# Create 'no_of_upload_ids' many incomplete upload ids
create_upload_ids(client, bucket_name, object_name, no_of_upload_ids)
# Remove all of the created upload ids
client.remove_incomplete_upload(bucket_name, object_name)
# Get the list of incomplete upload ids for object_name
# using 'list_incomplete_uploads' command
upload_ids_listed = collect_incomplete_upload_ids(client,
bucket_name,
object_name)
# Verify listed/returned upload id list
if upload_ids_listed:
# The list is not empty
raise ValueError("There are still upload ids not removed")
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_get_object_default_expiry(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_get_object(bucket_name, object_name, expires, response_headers)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
MB_1 = 1024*1024 # 1MiB.
MB_1_reader = LimitedRandomReader(MB_1)
client.put_object(bucket_name, object_name, MB_1_reader, MB_1)
presigned_get_object_url = client.presigned_get_object(bucket_name,
object_name)
response = _http.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise ResponseError(response,
'GET',
bucket_name,
object_name).get_exception()
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_get_object_expiry_5sec(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_get_object(bucket_name, object_name, expires, response_headers)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_name, KB_1_reader, KB_1)
presigned_get_object_url = client.presigned_get_object(bucket_name,
object_name,
timedelta(seconds=5))
response = _http.urlopen('GET', presigned_get_object_url)
if response.status != 200:
raise ResponseError(response,
'GET',
bucket_name,
object_name).get_exception()
# Wait for 5 seconds for the presigned url to expire
time.sleep(5)
response = _http.urlopen('GET', presigned_get_object_url)
# Success with an expired url is considered to be a failure
if response.status == 200:
raise ValueError('Presigned get url failed to expire!')
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_get_object_response_headers(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_get_object(bucket_name, object_name, expires, response_headers)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
KB_1 = 1024 # 1KiB.
KB_1_reader = LimitedRandomReader(KB_1)
client.put_object(bucket_name, object_name, KB_1_reader, KB_1)
content_type = 'text/plain'
content_language = 'en_US'
response_headers = {'response-content-type': content_type,
'response-content-language': content_language}
presigned_get_object_url = client.presigned_get_object(bucket_name,
object_name,
timedelta(seconds=5),
response_headers)
response = _http.urlopen('GET', presigned_get_object_url)
returned_content_type = response.headers['Content-Type']
returned_content_language = response.headers['Content-Language']
if response.status != 200 or returned_content_type != content_type or\
returned_content_language != content_language:
raise ResponseError(response,
'GET',
bucket_name,
object_name).get_exception()
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_put_object_default_expiry(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_put_object(bucket_name, object_name, expires)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
client.make_bucket(bucket_name)
presigned_put_object_url = client.presigned_put_object(bucket_name,
object_name)
MB_1 = 1024*1024 # 1MiB.
response = _http.urlopen('PUT',
presigned_put_object_url,
LimitedRandomReader(MB_1))
if response.status != 200:
raise ResponseError(response,
'PUT',
bucket_name,
object_name).get_exception()
client.stat_object(bucket_name, object_name)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_put_object_expiry_5sec(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_put_object(bucket_name, object_name, expires)"
_http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
# Get a unique bucket_name and object_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
KB_1 = 1024 # 1KiB.
try:
client.make_bucket(bucket_name)
presigned_put_object_url = client.presigned_put_object(bucket_name,
object_name,
timedelta(seconds=5))
# Wait for 5 seconds for the presigned url to expire
time.sleep(5)
response = _http.urlopen('PUT',
presigned_put_object_url,
LimitedRandomReader(KB_1))
if response.status == 200:
raise ValueError('Presigned put url failed to expire!')
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_presigned_post_policy(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "presigned_post_policy(post_policy)"
bucket_name = generate_bucket_name()
no_of_days = 10
prefix = 'objectPrefix/'
try:
client.make_bucket(bucket_name)
# Post policy.
policy = PostPolicy()
policy.set_bucket_name(bucket_name)
policy.set_key_startswith(prefix)
expires_date = datetime.utcnow()+timedelta(days=no_of_days)
policy.set_expires(expires_date)
# The post_policy arg is a PostPolicy object. To avoid displaying a
# meaningless value for it, the policy settings are made part of the
# args for clarity and debugging purposes.
log_output.args['post_policy'] = {'bucket_name': bucket_name,
'prefix': prefix,
'expires_in_days': no_of_days}
client.presigned_post_policy(policy)
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_thread_safe(client, test_file, log_output):
# Get a unique bucket_name and object_name
no_of_threads = 5
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
log_output.args['object_name'] = object_name = uuid.uuid4().__str__()
try:
# Create sha-sum value for the user provided
# source file, 'test_file'
with open(test_file, 'rb') as f:
contents = f.read()
test_file_sha_sum = hashlib.sha256(contents).hexdigest()
# Create the bucket
client.make_bucket(bucket_name)
# Put/Upload the object 'no_of_threads' times, each upload in its own
# thread (each thread is joined immediately, so the uploads run in sequence)
for i in range(no_of_threads):
thrd = Thread(target=client.fput_object,
args=(bucket_name, object_name, test_file))
thrd.start()
thrd.join()
# A list of exceptions raised by get_object_and_check
# called in multiple threads.
exceptions = []
# get_object_and_check() downloads an object, stores it in a file
# and then calculates its checksum. In case of mismatch, a new
# exception is generated and saved in exceptions.
def get_object_and_check(client, bckt_name, obj_name, no,
expected_sha_sum):
try:
# Name the local file first, so the finally block below can
# reference it even if get_object() raises.
local_file = 'copied_file_'+str(no)
obj_data = client.get_object(bckt_name, obj_name)
# Create a file with the returned data
with open(local_file, 'wb') as file_data:
shutil.copyfileobj(obj_data, file_data)
with open(local_file, 'rb') as f:
contents = f.read()
copied_file_sha_sum = hashlib.sha256(contents).hexdigest()
# Compare sha-sum values of the source file and the copied one
if expected_sha_sum != copied_file_sha_sum:
raise ValueError(
'Sha-sum mismatch on multi-threaded put and get objects')
except Exception as err:
exceptions.append(Exception(err))
finally:
# Remove downloaded file
os.path.isfile(local_file) and os.remove(local_file)
# Get/Download 'no_of_threads' many objects
# simultaneously using multi-threading
thrd_list = []
for i in range(no_of_threads):
# Create dynamic/varying names for the threads to be created
thrd_name = 'thread_'+str(i)
vars()[thrd_name] = Thread(target=get_object_and_check,
args=(client, bucket_name,
object_name, i, test_file_sha_sum))
vars()[thrd_name].start()
thrd_list.append(vars()[thrd_name])
# Wait for all threads to finish
for t in thrd_list:
t.join()
if len(exceptions) > 0:
raise exceptions[0]
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_object(bucket_name, object_name)
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_get_bucket_policy(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "get_bucket_policy(bucket_name)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
client.get_bucket_policy(bucket_name)
except APINotImplemented:
print(log_output.json_report(alert='Not Implemented', status=LogOutput.NA))
except NoSuchBucketPolicy:
# Test passes
print(log_output.json_report())
except Exception as err:
raise Exception(err)
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
def get_policy_actions(stat):
actions = []
for s in stat:
action = s.get('Action')
if action not in actions:
actions.append(action)
# flatten nested lists in actions
flattened_actions = []
for a in actions:
if isinstance(a, list):
for aa in a:
flattened_actions.append(aa)
else:
flattened_actions.append(a)
actions = [s.replace('s3:', '') for s in flattened_actions]
return actions
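# Illustrative example (hypothetical policy statements): for
#   [{'Action': 's3:GetBucketLocation'}, {'Action': ['s3:ListBucket', 's3:GetObject']}]
# get_policy_actions returns ['GetBucketLocation', 'ListBucket', 'GetObject'].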
def policy_validated(client, bucket_name, policy):
policy_dict = json.loads(client.get_bucket_policy(bucket_name).decode("utf-8"))
actions = get_policy_actions(policy_dict.get('Statement'))
actions.sort()
expected_actions = get_policy_actions(policy.get('Statement'))
expected_actions.sort()
if expected_actions != actions:
return False
return True
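# Note: policy_validated() compares only the flattened, sorted 'Action' lists of
# the stored and expected policies; Resources and Principals are not checked.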
def test_set_bucket_policy_readonly(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "set_bucket_policy(bucket_name, policy)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
# read-only policy
policy = {
"Version":"2012-10-17",
"Statement":[
{
"Sid":"",
"Effect":"Allow",
"Principal":{"AWS":"*"},
"Action":"s3:GetBucketLocation",
"Resource":"arn:aws:s3:::"+bucket_name
},
{
"Sid":"",
"Effect":"Allow",
"Principal":{"AWS":"*"},
"Action":"s3:ListBucket",
"Resource":"arn:aws:s3:::"+bucket_name
},
{
"Sid":"",
"Effect":"Allow",
"Principal":{"AWS":"*"},
"Action":"s3:GetObject",
"Resource":"arn:aws:s3:::"+bucket_name+"/*"
}
]
}
# Set read-only policy
client.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not policy_validated(client, bucket_name, policy):
raise ValueError('Failed to set ReadOnly bucket policy')
except APINotImplemented:
print(log_output.json_report(alert='Not Implemented',
status=LogOutput.NA))
except Exception as err:
raise Exception(err)
else:
# Test passes
print(log_output.json_report())
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
def test_set_bucket_policy_readwrite(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "set_bucket_policy(bucket_name, prefix, policy_access)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
client.make_bucket(bucket_name)
# Read-write policy
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": ["s3:GetBucketLocation"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListBucket"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListBucketMultipartUploads"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name],
"Effect": "Allow",
"Principal": {"AWS": "*"}
},
{
"Action": ["s3:ListMultipartUploadParts",
"s3:GetObject",
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:PutObject"],
"Sid": "",
"Resource": ["arn:aws:s3:::"+bucket_name+"/*"],
"Effect": "Allow",
"Principal": {"AWS": "*"}
}
]
}
# Set read-write policy
client.set_bucket_policy(bucket_name, json.dumps(policy))
# Validate if the policy is set correctly
if not policy_validated(client, bucket_name, policy):
raise ValueError('Failed to set ReadWrite bucket policy')
except APINotImplemented:
print(log_output.json_report(alert='Not Implemented', status=LogOutput.NA))
except Exception as err:
raise Exception(err)
else:
# Test passes
print(log_output.json_report())
finally:
try:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
def test_remove_objects(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_objects(bucket_name, objects_iter)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
MB_1 = 1024*1024 # 1MiB.
client.make_bucket(bucket_name)
# Upload some new objects to prepare for multi-object delete test.
object_names = []
for i in range(10):
curr_object_name = "prefix"+"-{}".format(i)
client.put_object(bucket_name, curr_object_name, LimitedRandomReader(MB_1), MB_1)
object_names.append(curr_object_name)
log_output.args['objects_iter'] = objects_iter = object_names
# delete the objects in a single library call.
for del_err in client.remove_objects(bucket_name, objects_iter):
raise ValueError("Remove objects err: {}".format(del_err))
except Exception as err:
raise Exception(err)
finally:
try:
# Try to clean everything to keep our server intact
for del_err in client.remove_objects(bucket_name, objects_iter):
raise ValueError("Remove objects err: {}".format(del_err))
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def test_remove_bucket(client, log_output):
# default value for log_output.function attribute is;
# log_output.function = "remove_bucket(bucket_name)"
# Get a unique bucket_name
log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
try:
if is_s3(client):
log_output.args['location'] = location = 'us-east-1'
client.make_bucket(bucket_name+'.unique', location)
else:
client.make_bucket(bucket_name)
except Exception as err:
raise Exception(err)
finally:
try:
# Removing bucket. This operation will only work if your bucket is empty.
if is_s3(client):
client.remove_bucket(bucket_name+'.unique')
else:
client.remove_bucket(bucket_name)
except Exception as err:
raise Exception(err)
# Test passes
print(log_output.json_report())
def isFullMode():
return os.getenv("MINT_MODE") == "full"
def main():
"""
Functional testing of minio python library.
"""
try:
access_key = os.getenv('ACCESS_KEY', 'Q3AM3UQ867SPQQA43P2F')
secret_key = os.getenv('SECRET_KEY',
'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG')
server_endpoint = os.getenv('SERVER_ENDPOINT', 'play.minio.io:9000')
secure = os.getenv('ENABLE_HTTPS', '1') == '1'
if server_endpoint == 'play.minio.io:9000':
access_key = 'Q3AM3UQ867SPQQA43P2F'
secret_key = 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG'
secure = True
client = Minio(server_endpoint, access_key, secret_key, secure=secure)
# Check if we are running in the mint environment.
data_dir = os.getenv('DATA_DIR')
if data_dir is None:
os.environ['DATA_DIR'] = data_dir = '/mint/data'
is_mint_env = (os.path.exists(data_dir) and\
os.path.exists(os.path.join(data_dir, 'datafile-1-MB')) and\
os.path.exists(os.path.join(data_dir, 'datafile-11-MB')))
# Enable trace
# import sys
# client.trace_on(sys.stderr)
testfile = 'datafile-1-MB'
largefile = 'datafile-11-MB'
if is_mint_env :
## Choose data files
testfile = os.path.join(data_dir, 'datafile-1-MB')
largefile = os.path.join(data_dir, 'datafile-11-MB')
else:
with open(testfile, 'wb') as file_data:
shutil.copyfileobj(LimitedRandomReader(1024*1024), file_data)
with open(largefile, 'wb') as file_data:
shutil.copyfileobj(LimitedRandomReader(11*1024*1024), file_data)
if isFullMode():
log_output = LogOutput(client.make_bucket, 'test_make_bucket_default_region')
test_make_bucket_default_region(client, log_output)
log_output = LogOutput(client.make_bucket, 'test_make_bucket_with_region')
test_make_bucket_with_region(client, log_output)
log_output = LogOutput(client.make_bucket, 'test_negative_make_bucket_invalid_name')
test_negative_make_bucket_invalid_name(client, log_output)
log_output = LogOutput(client.make_bucket, 'test_make_bucket_recreate')
test_make_bucket_recreate(client, log_output)
log_output = LogOutput(client.list_buckets, 'test_list_buckets')
test_list_buckets(client, log_output)
log_output = LogOutput(client.fput_object, 'test_fput_object_small_file')
test_fput_object_small_file(client, testfile, log_output)
log_output = LogOutput(client.fput_object, 'test_fput_object_large_file')
test_fput_object_large_file(client, largefile, log_output)
log_output = LogOutput(client.fput_object, 'test_fput_object_with_content_type')
test_fput_object_with_content_type(client, testfile, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_no_copy_condition')
test_copy_object_no_copy_condition(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_etag_match')
test_copy_object_etag_match(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_negative_etag_match')
test_copy_object_negative_etag_match(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_modified_since')
test_copy_object_modified_since(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_unmodified_since')
test_copy_object_unmodified_since(client, log_output)
log_output = LogOutput(client.put_object, 'test_put_object')
test_put_object(client, log_output)
log_output = LogOutput(client.put_object, 'test_negative_put_object_with_path_segment')
test_negative_put_object_with_path_segment(client, log_output)
log_output = LogOutput(client.stat_object, 'test_stat_object')
test_stat_object(client, log_output)
log_output = LogOutput(client.get_object, 'test_get_object')
test_get_object(client, log_output)
log_output = LogOutput(client.fget_object, 'test_fget_object')
test_fget_object(client, log_output)
log_output = LogOutput(client.get_partial_object, 'test_get_partial_object_with_default_length')
test_get_partial_object_with_default_length(client, log_output)
log_output = LogOutput(client.get_partial_object, 'test_get_partial_object')
test_get_partial_object(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects')
test_list_objects(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects_with_prefix')
test_list_objects_with_prefix(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects_with_1001_files')
test_list_objects_with_1001_files(client, log_output)
log_output = LogOutput(client.remove_incomplete_upload, 'test_remove_incomplete_upload')
test_remove_incomplete_upload(client, log_output)
log_output = LogOutput(client.list_objects_v2, 'test_list_objects_v2')
test_list_objects_v2(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_default_expiry')
test_presigned_get_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_expiry_5sec')
test_presigned_get_object_expiry_5sec(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_response_headers')
test_presigned_get_object_response_headers(client, log_output)
log_output = LogOutput(client.presigned_put_object, 'test_presigned_put_object_default_expiry')
test_presigned_put_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_put_object, 'test_presigned_put_object_expiry_5sec')
test_presigned_put_object_expiry_5sec(client, log_output)
log_output = LogOutput(client.presigned_post_policy, 'test_presigned_post_policy')
test_presigned_post_policy(client, log_output)
log_output = LogOutput(client.put_object, 'test_thread_safe')
test_thread_safe(client, testfile, log_output)
log_output = LogOutput(client.get_bucket_policy, 'test_get_bucket_policy')
test_get_bucket_policy(client,log_output)
log_output = LogOutput(client.set_bucket_policy, 'test_set_bucket_policy_readonly')
test_set_bucket_policy_readonly(client, log_output)
log_output = LogOutput(client.set_bucket_policy, 'test_set_bucket_policy_readwrite')
test_set_bucket_policy_readwrite(client, log_output)
else:
# Quick mode tests
log_output = LogOutput(client.make_bucket, 'test_make_bucket_default_region')
test_make_bucket_default_region(client, log_output)
log_output = LogOutput(client.list_buckets, 'test_list_buckets')
test_list_buckets(client, log_output)
log_output = LogOutput(client.put_object, 'test_put_object')
test_put_object(client, log_output)
log_output = LogOutput(client.stat_object, 'test_stat_object')
test_stat_object(client, log_output)
log_output = LogOutput(client.get_object, 'test_get_object')
test_get_object(client, log_output)
log_output = LogOutput(client.list_objects, 'test_list_objects')
test_list_objects(client, log_output)
log_output = LogOutput(client.remove_incomplete_upload, 'test_remove_incomplete_upload')
test_remove_incomplete_upload(client, log_output)
log_output = LogOutput(client.presigned_get_object, 'test_presigned_get_object_default_expiry')
test_presigned_get_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_put_object, 'test_presigned_put_object_default_expiry')
test_presigned_put_object_default_expiry(client, log_output)
log_output = LogOutput(client.presigned_post_policy, 'test_presigned_post_policy')
test_presigned_post_policy(client, log_output)
log_output = LogOutput(client.copy_object, 'test_copy_object_no_copy_condition')
test_copy_object_no_copy_condition(client, log_output)
log_output = LogOutput(client.get_bucket_policy, 'test_get_bucket_policy')
test_get_bucket_policy(client,log_output)
log_output = LogOutput(client.set_bucket_policy, 'test_set_bucket_policy_readonly')
test_set_bucket_policy_readonly(client, log_output)
# Remove all objects.
log_output = LogOutput(client.remove_object, 'test_remove_object')
test_remove_object(client, log_output)
log_output = LogOutput(client.remove_objects, 'test_remove_objects')
test_remove_objects(client, log_output)
log_output = LogOutput(client.remove_bucket, 'test_remove_bucket')
test_remove_bucket(client, log_output)
# Remove temporary files.
if not is_mint_env:
os.remove(testfile)
os.remove(largefile)
except Exception as err:
print(log_output.json_report(err))
exit(1)
if __name__ == "__main__":
# Execute only if run as a script
main()
| NitishT/minio-py | tests/functional/tests.py | Python | apache-2.0 | 79,042 |
# working with files
#fr = open("./file.txt", "r") # read
fw = open("./passwd.bak", "w") # write
#fa = open("./file.txt", "a") # append
fetc = open("./passwd", "r")
# read, readline, readlines, write, writelines(list)
for line in fetc:
    # print(line)
    records = line.strip().split(":")
    print("user: %s shell: %s" % (records[0], records[6]))
    fw.write("user: %s shell: %s\n" % (records[0], records[6]))
# close() must be called with parentheses, otherwise the files are never closed
fw.close()
fetc.close()
#fr.close()
#fa.close()
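# A minimal alternative sketch using context managers ("with"), which close
# both files automatically even if an error occurs:
#
# with open("./passwd") as fetc, open("./passwd.bak", "w") as fw:
#     for line in fetc:
#         records = line.strip().split(":")
#         fw.write("user: %s shell: %s\n" % (records[0], records[6]))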
| maiklos/python-experiments | misctasks.py | Python | apache-2.0 | 516 |
# Franca parser package
# Author: Ingmar Lehmann (lehmann.ingmar@gmail.com)
__version__ = '0.1'
__all__ = ['franca_parser','franca_ast','franca_lexer']
| ingmarlehmann/franca-tools | franca_parser/franca_parser/__init__.py | Python | mpl-2.0 | 156 |
"""
Each store has slightly different semantics wrt draft v published. XML doesn't officially recognize draft
but does hold it in a subdir. Old mongo has a virtual but not physical draft for every unit in published state.
Split mongo has a physical for every unit in every state.
Given that, here's a table of semantics and behaviors where - means no record and letters indicate values.
For xml, (-, x) means the item is published and can be edited. For split, it means the item's
been deleted from draft and will be deleted from published the next time it gets published. old mongo
can't represent that virtual state (2nd row in table)
In the table body, the tuples represent virtual modulestore result. The row headers represent the pre-import
modulestore state.
Modulestore virtual | XML physical (draft, published)
(draft, published) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
----------------------+--------------------------------------------
(-, -) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
(-, a) | (-, a) | (x, a) | (x, x) | (x, y) | (-, x) : deleted from draft before import
(a, -) | (a, -) | (x, -) | (x, x) | (x, y) | (a, x)
(a, a) | (a, a) | (x, a) | (x, x) | (x, y) | (a, x)
(a, b) | (a, b) | (x, b) | (x, x) | (x, y) | (a, x)
"""
import json
import logging
import mimetypes
import os
import re
from abc import abstractmethod
import xblock
from django.utils.translation import ugettext as _
from lxml import etree
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.locator import LibraryLocator
from path import Path as path
from xblock.core import XBlockMixin
from xblock.fields import Reference, ReferenceList, ReferenceValueDict, Scope
from xblock.runtime import DictKeyValueStore, KvsFieldData
from common.djangoapps.util.monitoring import monitor_import_failure
from xmodule.assetstore import AssetMetadata
from xmodule.contentstore.content import StaticContent
from xmodule.errortracker import make_error_tracker
from xmodule.library_tools import LibraryToolsService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.mongo.base import MongoRevisionKey
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
from xmodule.modulestore.xml import ImportSystem, LibraryXMLModuleStore, XMLModuleStore
from xmodule.tabs import CourseTabList
from xmodule.util.misc import escape_invalid_characters
from xmodule.x_module import XModuleDescriptor, XModuleMixin
from .inheritance import own_metadata
from .store_utilities import rewrite_nonportable_content_links
log = logging.getLogger(__name__)
DEFAULT_STATIC_CONTENT_SUBDIR = 'static'
class LocationMixin(XBlockMixin):
"""
Adds a `location` property to an :class:`XBlock` so it is more compatible
with old-style :class:`XModule` API. This is a simplified version of
:class:`XModuleMixin`.
"""
@property
def location(self):
""" Get the UsageKey of this block. """
return self.scope_ids.usage_id
@location.setter
def location(self, value):
""" Set the UsageKey of this block. """
assert isinstance(value, UsageKey)
self.scope_ids = self.scope_ids._replace(
def_id=value,
usage_id=value,
)
class StaticContentImporter: # lint-amnesty, pylint: disable=missing-class-docstring
def __init__(self, static_content_store, course_data_path, target_id):
self.static_content_store = static_content_store
self.target_id = target_id
self.course_data_path = course_data_path
try:
with open(course_data_path / 'policies/assets.json') as f:
self.policy = json.load(f)
except (OSError, ValueError) as err: # lint-amnesty, pylint: disable=unused-variable
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
self.policy = {}
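        # Register caption/transcript extensions (.sjson, .srt) so they fall
        # back to a generic binary mimetype when guessed below.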
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
self.mimetypes_list = list(mimetypes.types_map.values())
def import_static_content_directory(self, content_subdir=DEFAULT_STATIC_CONTENT_SUBDIR, verbose=False): # lint-amnesty, pylint: disable=missing-function-docstring
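        """
        Import everything under the course's static content directory into the
        content store and return a dict mapping imported file subpaths to
        their asset keys.
        """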
remap_dict = {}
static_dir = self.course_data_path / content_subdir
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
file_path = os.path.join(dirname, filename)
if re.match(ASSET_IGNORE_REGEX, filename):
if verbose:
log.debug('skipping static content %s...', file_path)
continue
if verbose:
log.debug('importing static content %s...', file_path)
imported_file_attrs = self.import_static_file(file_path, base_dir=static_dir)
if imported_file_attrs:
# store the remapping information which will be needed
                    # to substitute in the module data
remap_dict[imported_file_attrs[0]] = imported_file_attrs[1]
return remap_dict
def import_static_file(self, full_file_path, base_dir): # lint-amnesty, pylint: disable=missing-function-docstring
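        """
        Import a single static file into the content store.
        Returns a (file_subpath, asset_key) tuple, or None if the file was
        skipped (OS X '._' companion files).
        """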
filename = os.path.basename(full_file_path)
try:
with open(full_file_path, 'rb') as f:
data = f.read()
except OSError:
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
if filename.startswith('._'):
return None
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
file_subpath = full_file_path.replace(base_dir, '')
if file_subpath.startswith('/'):
file_subpath = file_subpath[1:]
asset_key = StaticContent.compute_location(self.target_id, file_subpath)
policy_ele = self.policy.get(asset_key.path, {})
# During export display name is used to create files, strip away slashes from name
displayname = escape_invalid_characters(
name=policy_ele.get('displayname', filename),
invalid_char_list=['/', '\\']
)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in self.mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=file_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = self.static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
self.static_content_store.save(content)
except Exception as err: # lint-amnesty, pylint: disable=broad-except
msg = f'Error importing {file_subpath}, error={err}'
log.exception(f'Course import {self.target_id}: {msg}')
monitor_import_failure(self.target_id, 'Updating', exception=err)
return file_subpath, asset_key
class ImportManager:
"""
Import xml-based courselikes from data_dir into modulestore.
Returns:
list of new courselike objects
Args:
store: a modulestore implementing ModuleStoreWriteBase in which to store the imported courselikes.
data_dir: the root directory from which to find the xml courselikes.
source_dirs: If specified, the list of data_dir subdirectories to load. Otherwise, load
all dirs
target_id: is the Locator that all modules should be remapped to
after import off disk. NOTE: this only makes sense if importing only
one courselike. If there are more than one courselike loaded from data_dir/source_dirs & you
supply this id, an AssertException will be raised.
static_content_store: the static asset store
do_import_static: if True, then import the courselike's static files into static_content_store
This can be employed for courselikes which have substantial
unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
do_import_python_lib: if True, import a courselike's python lib file into static_content_store
if it exists. This can be useful if the static content import needs to be skipped
(e.g.: for performance reasons), but the python lib still needs to be imported. If static
content is imported, then the python lib file will be imported regardless of this value.
create_if_not_present: If True, then a new courselike is created if it doesn't already exist.
Otherwise, it throws an InvalidLocationError if the courselike does not exist.
static_content_subdir: The subdirectory that contains static content.
python_lib_filename: The filename of the courselike's python library. Course authors can optionally
create this file to implement custom logic in their course.
default_class, load_error_modules: are arguments for constructing the XMLModuleStore (see its doc)
"""
store_class = XMLModuleStore
def __init__(
self, store, user_id, data_dir, source_dirs=None,
default_class='xmodule.hidden_module.HiddenDescriptor',
load_error_modules=True, static_content_store=None,
target_id=None, verbose=False,
do_import_static=True, do_import_python_lib=True,
create_if_not_present=False, raise_on_failure=False,
static_content_subdir=DEFAULT_STATIC_CONTENT_SUBDIR,
python_lib_filename='python_lib.zip',
status=None
):
self.store = store
self.user_id = user_id
self.data_dir = data_dir
self.source_dirs = source_dirs
self.load_error_modules = load_error_modules
self.static_content_store = static_content_store
self.target_id = target_id
self.verbose = verbose
self.static_content_subdir = static_content_subdir
self.python_lib_filename = python_lib_filename
self.do_import_static = do_import_static
self.do_import_python_lib = do_import_python_lib
self.create_if_not_present = create_if_not_present
self.raise_on_failure = raise_on_failure
self.xml_module_store = self.store_class(
data_dir,
default_class=default_class,
source_dirs=source_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
target_course_id=target_id,
)
self.status = status
self.logger, self.errors = make_error_tracker()
def preflight(self):
"""
Perform any pre-import sanity checks.
"""
# If we're going to remap the ID, then we can only do that with
# a single target
if self.target_id:
assert len(self.xml_module_store.modules) == 1, 'Store unable to load course correctly.'
def import_static(self, data_path, dest_id):
"""
Import all static items into the content store.
"""
if self.static_content_store is None:
log.warning(
f'Course import {self.target_id}: Static content store is None. Skipping static content import.'
)
return
static_content_importer = StaticContentImporter(
self.static_content_store,
course_data_path=data_path,
target_id=dest_id
)
if self.do_import_static:
if self.verbose:
log.info(f'Course import {self.target_id}: Importing static content and python library')
# first pass to find everything in the static content directory
static_content_importer.import_static_content_directory(
content_subdir=self.static_content_subdir, verbose=self.verbose
)
elif self.do_import_python_lib and self.python_lib_filename:
if self.verbose:
log.info(
f'Course import {self.target_id}: Skipping static content import, still importing python library'
)
python_lib_dir_path = data_path / self.static_content_subdir
python_lib_full_path = python_lib_dir_path / self.python_lib_filename
if os.path.isfile(python_lib_full_path):
static_content_importer.import_static_file(
python_lib_full_path, base_dir=python_lib_dir_path
)
else:
if self.verbose:
log.info(f'Course import {self.target_id}: Skipping import of static content and python library')
# No matter what do_import_static is, import "static_import" directory.
# This is needed because the "about" pages (eg "overview") are
# loaded via load_extra_content, and do not inherit the lms
# metadata from the course module, and thus do not get
# "static_content_store" properly defined. Static content
# referenced in those extra pages thus need to come through the
# c4x:// contentstore, unfortunately. Tell users to copy that
# content into the "static_import" subdir.
simport = 'static_import'
if os.path.exists(data_path / simport):
if self.verbose:
log.info(f'Course import {self.target_id}: Importing {simport} directory')
static_content_importer.import_static_content_directory(
content_subdir=simport, verbose=self.verbose
)
def import_asset_metadata(self, data_dir, course_id):
"""
Read in assets XML file, parse it, and add all asset metadata to the modulestore.
"""
asset_dir = path(data_dir) / AssetMetadata.EXPORTED_ASSET_DIR
assets_filename = AssetMetadata.EXPORTED_ASSET_FILENAME
asset_xml_file = asset_dir / assets_filename
def make_asset_id(course_id, asset_xml):
"""
Construct an asset ID out of a complete asset XML section.
"""
asset_type = None
asset_name = None
for child in asset_xml.iterchildren():
if child.tag == AssetMetadata.ASSET_TYPE_ATTR:
asset_type = child.text
elif child.tag == AssetMetadata.ASSET_BASENAME_ATTR:
asset_name = child.text
return course_id.make_asset_key(asset_type, asset_name)
all_assets = []
try:
xml_data = etree.parse(asset_xml_file).getroot()
assert xml_data.tag == AssetMetadata.ALL_ASSETS_XML_TAG
for asset in xml_data.iterchildren():
if asset.tag == AssetMetadata.ASSET_XML_TAG:
# Construct the asset key.
asset_key = make_asset_id(course_id, asset)
asset_md = AssetMetadata(asset_key)
asset_md.from_xml(asset)
all_assets.append(asset_md)
except OSError:
# file does not exist.
logging.info(f'Course import {course_id}: No {assets_filename} file present.')
return
except Exception as exc: # pylint: disable=W0703
monitor_import_failure(course_id, 'Updating', exception=exc)
logging.exception(f'Course import {course_id}: Error while parsing {assets_filename}.')
if self.raise_on_failure: # lint-amnesty, pylint: disable=no-else-raise
if self.status:
self.status.fail(_('Error while reading {}. Check file for XML errors.').format(assets_filename))
raise
else:
return
# Now add all asset metadata to the modulestore.
if len(all_assets) > 0:
self.store.save_asset_metadata_list(all_assets, all_assets[0].edited_by, import_only=True)
def import_courselike(self, runtime, courselike_key, dest_id, source_courselike):
"""
Import the base module/block
"""
if self.verbose:
log.debug("Scanning %s for courselike module...", courselike_key)
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
course_data_path = path(self.data_dir) / source_courselike.data_dir
log.debug('======> IMPORTING courselike %s', courselike_key)
if not self.do_import_static:
# for old-style xblock where this was actually linked to kvs
source_courselike.static_asset_path = source_courselike.data_dir
source_courselike.save()
log.debug('course static_asset_path=%s', source_courselike.static_asset_path)
log.debug('course data_dir=%s', source_courselike.data_dir)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_id):
course = _update_and_import_module(
source_courselike, self.store, self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=runtime,
)
self.static_updater(course, source_courselike, courselike_key, dest_id, runtime)
self.store.update_item(course, self.user_id)
return course, course_data_path
@abstractmethod
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Updates any special static items, such as PDF coursebooks.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_dest_id(self, courselike_key):
"""
Given a courselike_key, get the version of the key that will actually be used in the modulestore
for import.
"""
raise NotImplementedError
@abstractmethod
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Given a key, a runtime, and an intended destination key, get the descriptor for the courselike
we'll be importing into.
"""
raise NotImplementedError
@abstractmethod
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
To be overloaded with a method that installs the child items into self.store.
"""
raise NotImplementedError
@abstractmethod
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
To be overloaded with a method that installs the draft items into self.store.
"""
raise NotImplementedError
def recursive_build(self, source_courselike, courselike, courselike_key, dest_id):
"""
Recursively imports all child blocks from the temporary modulestore into the
target modulestore.
"""
all_locs = set(self.xml_module_store.modules[courselike_key].keys())
all_locs.remove(source_courselike.location)
def depth_first(subtree):
"""
Import top down just so import code can make assumptions about parents always being available
"""
if subtree.has_children:
for child in subtree.get_children():
try:
all_locs.remove(child.location)
except KeyError:
# tolerate same child occurring under 2 parents such as in
# ContentStoreTest.test_image_import
pass
if self.verbose:
log.debug('importing module location %s', child.location)
try:
_update_and_import_module(
child,
self.store,
self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=courselike.runtime,
)
except Exception:
log.exception(
f'Course import {dest_id}: failed to import module location {child.location}'
)
if self.status:
self.status.fail(
_('Failed to import module: {} at location: {}').format(
child.display_name, child.location
)
)
raise
depth_first(child)
depth_first(source_courselike)
for leftover in all_locs:
if self.verbose:
log.debug('importing module location %s', leftover)
try:
_update_and_import_module(
self.xml_module_store.get_item(leftover),
self.store,
self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=courselike.runtime,
)
except Exception:
msg = f'Course import {dest_id}: failed to import module location {leftover}'
log.error(msg)
if self.status:
self.status.fail(
_('Failed to import module: {} at location: {}').format(
leftover.display_name, leftover.location
)
)
raise
def run_imports(self):
"""
Iterate over the given directories and yield courses.
"""
self.preflight()
for courselike_key in self.xml_module_store.modules.keys():
try:
dest_id, runtime = self.get_dest_id(courselike_key)
except DuplicateCourseError:
continue
# This bulk operation wraps all the operations to populate the published branch.
with self.store.bulk_operations(dest_id):
# Retrieve the course itself.
source_courselike, courselike, data_path = self.get_courselike(courselike_key, runtime, dest_id)
# Import all static pieces.
self.import_static(data_path, dest_id)
# Import asset metadata stored in XML.
self.import_asset_metadata(data_path, dest_id)
# Import all children
self.import_children(source_courselike, courselike, courselike_key, dest_id)
# This bulk operation wraps all the operations to populate the draft branch with any items
# from the /drafts subdirectory.
# Drafts must be imported in a separate bulk operation from published items to import properly,
# due to the recursive_build() above creating a draft item for each course block
# and then publishing it.
with self.store.bulk_operations(dest_id):
# Import all draft items into the courselike.
courselike = self.import_drafts(courselike, courselike_key, data_path, dest_id)
yield courselike
class CourseImportManager(ImportManager):
"""
Import manager for Courses.
"""
store_class = XMLModuleStore
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Given a key, runtime, and target key, get the version of the course
from the temporary modulestore.
"""
source_course = self.xml_module_store.get_course(courselike_key)
# STEP 1: find and import course module
course, course_data_path = self.import_courselike(
runtime, courselike_key, dest_id, source_course,
)
return source_course, course, course_data_path
def get_dest_id(self, courselike_key):
"""
Get the course key that will be used for the target modulestore.
"""
if self.target_id is not None:
dest_id = self.target_id
else:
# Note that dest_course_id will be in the format for the default modulestore.
dest_id = self.store.make_course_key(courselike_key.org, courselike_key.course, courselike_key.run)
existing_id = self.store.has_course(dest_id, ignore_case=True)
# store.has_course will return the course_key in the format for the modulestore in which it was found.
# This may be different from dest_course_id, so correct to the format found.
if existing_id:
dest_id = existing_id
runtime = None
# Creates a new course if it doesn't already exist
if self.create_if_not_present and not existing_id:
try:
new_course = self.store.create_course(
dest_id.org, dest_id.course, dest_id.run, self.user_id
)
runtime = new_course.runtime
except DuplicateCourseError:
log.debug(
"Skipping import of course with id, %s, "
"since it collides with an existing one", dest_id
)
if self.status:
self.status.fail(
_('Aborting import because a course with this id: {} already exists.').format(dest_id)
)
raise
return dest_id, runtime
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Update special static assets, such as PDF textbooks and wiki resources.
"""
for entry in course.pdf_textbooks:
for chapter in entry.get('chapters', []):
if StaticContent.is_c4x_path(chapter.get('url', '')):
asset_key = StaticContent.get_location_from_path(chapter['url'])
chapter['url'] = StaticContent.get_static_path_from_location(asset_key)
# Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
# If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
# values then remap it so that the wiki does not point to the old wiki.
if courselike_key != course.id:
original_unique_wiki_slug = '{}.{}.{}'.format(
courselike_key.org,
courselike_key.course,
courselike_key.run
)
if course.wiki_slug == original_unique_wiki_slug or course.wiki_slug == courselike_key.course:
course.wiki_slug = '{}.{}.{}'.format(
course.id.org,
course.id.course,
course.id.run,
)
# cdodge: more hacks (what else). Seems like we have a
# problem when importing a course (like 6.002) which
# does not have any tabs defined in the policy file.
# The import goes fine and then displays fine in LMS,
# but if someone tries to add a new tab in the CMS, then
# the LMS barfs because it expects that -- if there are
# *any* tabs -- then there at least needs to be
# some predefined ones
if course.tabs is None or len(course.tabs) == 0:
CourseTabList.initialize_default(course)
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
Imports all children into the desired store.
"""
# The branch setting of published_only forces an overwrite of all draft modules
# during the course import.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, dest_id):
self.recursive_build(source_courselike, courselike, courselike_key, dest_id)
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
Imports all drafts into the desired store.
"""
# Import any draft items
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_id):
_import_course_draft(
self.xml_module_store,
self.store,
self.user_id,
data_path,
courselike_key,
dest_id,
courselike.runtime
)
# Importing the drafts potentially triggered a new structure version.
# If so, the HEAD version_guid of the passed-in courselike will be out-of-date.
# Fetch the course to return the most recent course version.
return self.store.get_course(courselike.id.replace(branch=None, version_guid=None))
class LibraryImportManager(ImportManager):
"""
Import manager for Libraries
"""
store_class = LibraryXMLModuleStore
def get_dest_id(self, courselike_key):
"""
Get the LibraryLocator that will be used in the target modulestore.
"""
if self.target_id is not None:
dest_id = self.target_id
else:
dest_id = LibraryLocator(self.target_id.org, self.target_id.library)
existing_lib = self.store.get_library(dest_id, ignore_case=True)
runtime = None
if existing_lib:
dest_id = existing_lib.location.library_key
runtime = existing_lib.runtime
if self.create_if_not_present and not existing_lib:
try:
library = self.store.create_library(
org=self.target_id.org,
library=self.target_id.library,
user_id=self.user_id,
fields={"display_name": ""},
)
runtime = library.runtime
except DuplicateCourseError:
log.debug(
"Skipping import of Library with id %s, "
"since it collides with an existing one", dest_id
)
if self.status:
self.status.fail(_('Aborting import since a library with this id already exists.'))
raise
return dest_id, runtime
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Get the descriptor of the library from the XML import modulestore.
"""
source_library = self.xml_module_store.get_library(courselike_key)
library, library_data_path = self.import_courselike(
runtime, courselike_key, dest_id, source_library,
)
return source_library, library, library_data_path
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Libraries have no special static items to import.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
Imports all children into the desired store.
"""
self.recursive_build(source_courselike, courselike, courselike_key, dest_id)
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
Imports all drafts into the desired store.
"""
return courselike
def import_course_from_xml(*args, **kwargs):
"""
Thin wrapper for the Course Import Manager. See ImportManager for details.
"""
manager = CourseImportManager(*args, **kwargs)
return list(manager.run_imports())
def import_library_from_xml(*args, **kwargs):
"""
Thin wrapper for the Library Import Manager. See ImportManager for details.
"""
manager = LibraryImportManager(*args, **kwargs)
return list(manager.run_imports())
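# Illustrative sketch (not executed): a plausible invocation of the course
# importer, assuming the Django-backed `modulestore()` and `contentstore()`
# factories are configured and a valid user id is at hand; the positional
# arguments mirror ImportManager.__init__ above.
#
#   from xmodule.modulestore.django import modulestore
#   from xmodule.contentstore.django import contentstore
#
#   import_course_from_xml(
#       modulestore(), user_id, '/edx/var/edxapp/data',
#       source_dirs=['my-course-dir'],
#       static_content_store=contentstore(),
#       create_if_not_present=True,
#   )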
def _update_and_import_module(
module, store, user_id,
source_course_id, dest_course_id,
do_import_static=True, runtime=None):
"""
Update all the module reference fields to the destination course id,
then import the module into the destination course.
"""
logging.debug('processing import of module %s...', str(module.location))
def _update_module_references(module, source_course_id, dest_course_id):
"""
Move the module to a new course.
"""
def _convert_ref_fields_to_new_namespace(reference):
"""
Convert a reference to the new namespace, but only
if the original namespace matched the original course.
Otherwise, returns the input value.
"""
assert isinstance(reference, UsageKey)
if source_course_id == reference.course_key:
return reference.map_into_course(dest_course_id)
else:
return reference
fields = {}
for field_name, field in module.fields.items():
if field.scope != Scope.parent and field.is_set_on(module):
if isinstance(field, Reference):
value = field.read_from(module)
if value is None:
fields[field_name] = None
else:
fields[field_name] = _convert_ref_fields_to_new_namespace(field.read_from(module))
elif isinstance(field, ReferenceList):
references = field.read_from(module)
fields[field_name] = [_convert_ref_fields_to_new_namespace(reference) for reference in references]
elif isinstance(field, ReferenceValueDict):
reference_dict = field.read_from(module)
fields[field_name] = {
key: _convert_ref_fields_to_new_namespace(reference)
for key, reference
in reference_dict.items()
}
elif field_name == 'xml_attributes':
value = field.read_from(module)
# remove any export/import only xml_attributes
# which are used to wire together draft imports
if 'parent_url' in value:
del value['parent_url']
if 'parent_sequential_url' in value:
del value['parent_sequential_url']
if 'index_in_children_list' in value:
del value['index_in_children_list']
fields[field_name] = value
else:
fields[field_name] = field.read_from(module)
return fields
if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
# we want to convert all 'non-portable' links in the module_data
# (if it is a string) to portable strings (e.g. /static/)
module.data = rewrite_nonportable_content_links(
source_course_id,
dest_course_id,
module.data
)
fields = _update_module_references(module, source_course_id, dest_course_id)
asides = module.get_asides() if isinstance(module, XModuleMixin) else None
if module.location.block_type == 'library_content':
with store.branch_setting(branch_setting=ModuleStoreEnum.Branch.published_only):
lib_content_block_already_published = store.has_item(module.location)
block = store.import_xblock(
user_id, dest_course_id, module.location.block_type,
module.location.block_id, fields, runtime, asides=asides
)
# TODO: Move this code once the following condition is met.
# Get to the point where XML import is happening inside the
# modulestore that is eventually going to store the data.
# Ticket: https://openedx.atlassian.net/browse/PLAT-1046
# Special case handling for library content blocks. The fact that this is
# in Modulestore code is _bad_ and breaks abstraction barriers, but is too
# much work to factor out at this point.
if block.location.block_type == 'library_content':
# If library exists, update source_library_version and children
# according to this existing library and library content block.
if store.get_library(block.source_library_key):
# If the library content block is already in the course, then don't
# refresh the children when we re-import it. This lets us address
# TNL-7507 (Randomized Content Block Settings Lost in Course Import)
# while still avoiding AA-310, where the IDs of the children for an
# existing library_content block might be altered, losing student
# user state.
#
# Note that while this method is run on import, it's also run when
# adding the library content from Studio for the first time.
#
# TLDR: When importing, we only copy the default values from content
# in a library the first time that library_content block is created.
# Future imports ignore what's in the library so as not to disrupt
# course state. You _can_ still update to the library via the Studio
# UI for updating to the latest version of a library for this block.
if lib_content_block_already_published:
return block
# Update library content block's children on draft branch
with store.branch_setting(branch_setting=ModuleStoreEnum.Branch.draft_preferred):
LibraryToolsService(store, user_id).update_children(
block,
version=block.source_library_version,
)
# Publish it if importing the course for branch setting published_only.
if store.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
store.publish(block.location, user_id)
return block
def _import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
source_course_id,
target_id,
mongo_runtime
):
"""
This method will import all the content inside of the 'drafts' folder, if content exists.
NOTE: This is not a full course import! In our current application, only verticals
(and blocks beneath) can be in draft. Therefore, different call points into the import
process_xml are used as the XMLModuleStore() constructor cannot simply be called
(as is done for importing public content).
"""
draft_dir = course_data_path + "/drafts"
if not os.path.exists(draft_dir):
return
# create a new 'System' object which will manage the importing
errorlog = make_error_tracker()
# The course_dir as passed to ImportSystem is expected to just be relative, not
# the complete path including data_dir. ImportSystem will concatenate the two together.
data_dir = xml_module_store.data_dir
# Whether or not data_dir ends with a "/" differs in production vs. test.
if not data_dir.endswith("/"):
data_dir += "/"
# Remove absolute path, leaving relative <course_name>/drafts.
draft_course_dir = draft_dir.replace(data_dir, '', 1)
system = ImportSystem(
xmlstore=xml_module_store,
course_id=source_course_id,
course_dir=draft_course_dir,
error_tracker=errorlog.tracker,
load_error_modules=False,
mixins=xml_module_store.xblock_mixins,
field_data=KvsFieldData(kvs=DictKeyValueStore()),
target_course_id=target_id,
)
def _import_module(module):
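        """
        Import a single draft block into the target course under the draft
        revision, wiring it into its parent's children if needed, then recurse
        into its own children.
        """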
# IMPORTANT: Be sure to update the module location in the NEW namespace
module_location = module.location.map_into_course(target_id)
# Update the module's location to DRAFT revision
# We need to call this method (instead of updating the location directly)
# to ensure that pure XBlock field data is updated correctly.
_update_module_location(module, module_location.replace(revision=MongoRevisionKey.draft))
parent_url = get_parent_url(module)
index = index_in_children_list(module)
# make sure our parent has us in its list of children
# this is to make sure private only modules show up
# in the list of children since they would have been
# filtered out from the non-draft store export.
if parent_url is not None and index is not None:
course_key = descriptor.location.course_key
parent_location = UsageKey.from_string(parent_url).map_into_course(course_key)
# IMPORTANT: Be sure to update the parent in the NEW namespace
parent_location = parent_location.map_into_course(target_id)
parent = store.get_item(parent_location, depth=0)
non_draft_location = module.location.map_into_course(target_id)
if not any(child.block_id == module.location.block_id for child in parent.children):
parent.children.insert(index, non_draft_location)
store.update_item(parent, user_id)
_update_and_import_module(
module, store, user_id,
source_course_id,
target_id,
runtime=mongo_runtime,
)
for child in module.get_children():
_import_module(child)
# Now walk the /drafts directory.
# Each file in the directory will be a draft copy of the vertical.
# First it is necessary to order the draft items by their desired index in the child list,
# since the order in which os.walk() returns the files is not guaranteed.
drafts = []
for rootdir, __, filenames in os.walk(draft_dir):
for filename in filenames:
if filename.startswith('._'):
# Skip any OSX quarantine files, prefixed with a '._'.
continue
module_path = os.path.join(rootdir, filename)
with open(module_path, 'r') as f:
try:
xml = f.read()
# The process_xml() call below recursively processes all descendants. If
# we call this on all verticals in a course with verticals nested below
# the unit level, we try to import the same content twice, causing naming conflicts.
# Therefore only process verticals at the unit level, assuming that any other
# verticals must be descendants.
if 'index_in_children_list' in xml:
descriptor = system.process_xml(xml)
# HACK: since we are doing partial imports of drafts
# the vertical doesn't have the 'url-name' set in the
# attributes (they are normally in the parent object,
# aka sequential), so we have to replace the location.name
# with the XML filename that is part of the pack
filename, __ = os.path.splitext(filename)
descriptor.location = descriptor.location.replace(name=filename)
index = index_in_children_list(descriptor)
parent_url = get_parent_url(descriptor, xml)
draft_url = str(descriptor.location)
draft = draft_node_constructor(
module=descriptor, url=draft_url, parent_url=parent_url, index=index
)
drafts.append(draft)
except Exception: # pylint: disable=broad-except
logging.exception('Error while parsing course drafts xml.')
# Sort drafts by `index_in_children_list` attribute.
drafts.sort(key=lambda x: x.index)
for draft in get_draft_subtree_roots(drafts):
try:
_import_module(draft.module)
except Exception: # pylint: disable=broad-except
logging.exception(f'Course import {source_course_id}: while importing draft descriptor {draft.module}')
def allowed_metadata_by_category(category):
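    """
    Return the metadata field names that are editable for the given block
    category; ['*'] means all fields are allowed.
    """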
# should this be in the descriptors?!?
return {
'vertical': [],
'chapter': ['start'],
'sequential': ['due', 'format', 'start', 'graded']
}.get(category, ['*'])
def check_module_metadata_editability(module):
"""
Assert that there is no metadata within a particular module that
we can't support editing. However we always allow 'display_name'
and 'xml_attributes'
"""
allowed = allowed_metadata_by_category(module.location.block_type)
if '*' in allowed:
# everything is allowed
return 0
allowed = allowed + ['xml_attributes', 'display_name']
err_cnt = 0
illegal_keys = set(own_metadata(module).keys()) - set(allowed)
if len(illegal_keys) > 0:
err_cnt = err_cnt + 1
print(
": found non-editable metadata on {url}. "
"These metadata keys are not supported = {keys}".format(
url=str(module.location), keys=illegal_keys
)
)
return err_cnt
def get_parent_url(module, xml=None):
"""
Get the parent_url, if any, from module using xml as an alternative source. If it finds it in
xml but not on module, it modifies module so that the next call to this w/o the xml will get the parent url
"""
if hasattr(module, 'xml_attributes'):
return module.xml_attributes.get(
# handle deprecated old attr
'parent_url', module.xml_attributes.get('parent_sequential_url')
)
if xml is not None:
create_xml_attributes(module, xml)
return get_parent_url(module) # don't reparse xml b/c don't infinite recurse but retry above lines
return None
def index_in_children_list(module, xml=None):
"""
Get the index_in_children_list, if any, from module using xml
as an alternative source. If it finds it in xml but not on module,
it modifies module so that the next call to this w/o the xml
will get the field.
"""
if hasattr(module, 'xml_attributes'):
val = module.xml_attributes.get('index_in_children_list')
if val is not None:
return int(val)
return None
if xml is not None:
create_xml_attributes(module, xml)
return index_in_children_list(module) # don't reparse xml b/c don't infinite recurse but retry above lines
return None
def create_xml_attributes(module, xml):
"""
Make up for modules which don't define xml_attributes by creating them here and populating
"""
xml_attrs = {}
for attr, val in xml.attrib.items():
if attr not in module.fields:
# translate obsolete attr
if attr == 'parent_sequential_url':
attr = 'parent_url'
xml_attrs[attr] = val
# now cache it on module where it's expected
module.xml_attributes = xml_attrs
def validate_no_non_editable_metadata(module_store, course_id, category): # lint-amnesty, pylint: disable=missing-function-docstring
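    """
    Count the blocks of the given category that carry metadata which is not
    editable for that category (see allowed_metadata_by_category).
    """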
err_cnt = 0
for module_loc in module_store.modules[course_id]:
module = module_store.modules[course_id][module_loc]
if module.location.block_type == category:
err_cnt = err_cnt + check_module_metadata_editability(module)
return err_cnt
def validate_category_hierarchy( # lint-amnesty, pylint: disable=missing-function-docstring
module_store, course_id, parent_category, expected_child_category):
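    """
    Check that every child of each parent_category block has the expected
    child category; print and count any violations.
    """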
err_cnt = 0
parents = []
# get all modules of parent_category
for module in module_store.modules[course_id].values():
if module.location.block_type == parent_category:
parents.append(module)
for parent in parents:
for child_loc in parent.children:
if child_loc.block_type != expected_child_category:
err_cnt += 1
print(
"ERROR: child {child} of parent {parent} was expected to be "
"category of {expected} but was {actual}".format(
child=child_loc, parent=parent.location,
expected=expected_child_category,
actual=child_loc.block_type
)
)
return err_cnt
def validate_data_source_path_existence(path, is_err=True, extra_msg=None): # lint-amnesty, pylint: disable=missing-function-docstring, redefined-outer-name
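    """
    Print an error (or warning, if is_err is False) when the given path does
    not exist; return 1 if it is missing, 0 otherwise.
    """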
_cnt = 0
if not os.path.exists(path):
print(
"{type}: Expected folder at {path}. {extra}".format(
type='ERROR' if is_err else 'WARNING',
path=path,
extra=extra_msg or "",
)
)
_cnt = 1
return _cnt
def validate_data_source_paths(data_dir, course_dir): # lint-amnesty, pylint: disable=missing-function-docstring
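    """
    Check that the course directory contains the expected 'static' folder
    (error) and 'static/subs' folder (warning); return (err_cnt, warn_cnt).
    """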
# check that there is a '/static/' directory
course_path = data_dir / course_dir
err_cnt = 0
warn_cnt = 0
err_cnt += validate_data_source_path_existence(course_path / 'static')
warn_cnt += validate_data_source_path_existence(
course_path / 'static/subs', is_err=False,
extra_msg='Video captions (if they are used) will not work unless they are static/subs.'
)
return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
"""
Validate that the course explicitly sets values for any fields
whose defaults may have changed between the export and the import.
Does not add to error count as these are just warnings.
"""
# is there a reliable way to get the module location just given the course_id?
warn_cnt = 0
for module in module_store.modules[course_id].values():
if module.location.block_type == 'course':
if not module._field_data.has(module, 'rerandomize'): # lint-amnesty, pylint: disable=protected-access
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"rerandomize" whose default is now "never". '
'The behavior of your course may change.'
)
if not module._field_data.has(module, 'showanswer'): # lint-amnesty, pylint: disable=protected-access
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"showanswer" whose default is now "finished". '
'The behavior of your course may change.'
)
return warn_cnt
def perform_xlint( # lint-amnesty, pylint: disable=missing-function-docstring
data_dir, source_dirs,
default_class='xmodule.hidden_module.HiddenDescriptor',
load_error_modules=True,
xblock_mixins=(LocationMixin, XModuleMixin)):
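    """
    Load the given course dirs into an XMLModuleStore and run the structural,
    metadata and policy checks above, printing a validation summary and
    returning the number of errors found.
    """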
err_cnt = 0
warn_cnt = 0
module_store = XMLModuleStore(
data_dir,
default_class=default_class,
source_dirs=source_dirs,
load_error_modules=load_error_modules,
xblock_mixins=xblock_mixins
)
# check all data source path information
for course_dir in source_dirs:
_err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
err_cnt += _err_cnt
warn_cnt += _warn_cnt
# first count all errors and warnings as part of the XMLModuleStore import
for err_log in module_store._course_errors.values(): # pylint: disable=protected-access
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
# then count outright all courses that failed to load at all
for err_log in module_store.errored_courses.values():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
print(msg)
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
for course_id in module_store.modules.keys():
# constrain that courses only have 'chapter' children
err_cnt += validate_category_hierarchy(
module_store, course_id, "course", "chapter"
)
# constrain that chapters only have 'sequentials'
err_cnt += validate_category_hierarchy(
module_store, course_id, "chapter", "sequential"
)
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(
module_store, course_id, "sequential", "vertical"
)
# validate the course policy overrides any defaults
# which have changed over time
warn_cnt += validate_course_policy(module_store, course_id)
# don't allow metadata on verticals, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "vertical"
)
# don't allow metadata on chapters, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "chapter"
)
# don't allow metadata on sequences that we can't edit
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "sequential"
)
# check for a presence of a course marketing video
if not module_store.has_item(course_id.make_usage_key('about', 'video')):
print(
"WARN: Missing course marketing video. It is recommended "
"that every course have a marketing video."
)
warn_cnt += 1
print("\n")
print("------------------------------------------")
print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
err=err_cnt,
warn=warn_cnt
))
if err_cnt > 0:
print(
"This course is not suitable for importing. Please fix courseware "
"according to specifications before importing."
)
elif warn_cnt > 0:
print(
"This course can be imported, but some errors may occur "
"during the run of the course. It is recommend that you fix "
"your courseware before importing"
)
else:
print("This course can be imported successfully.")
return err_cnt
def _update_module_location(module, new_location):
"""
Update a module's location.
If the module is a pure XBlock (not an XModule), then its field data
keys will need to be updated to include the new location.
Args:
module (XModuleMixin): The module to update.
new_location (Location): The new location of the module.
Returns:
None
"""
# Retrieve the content and settings fields that have been explicitly set
# to ensure that they are properly re-keyed in the XBlock field data.
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = (
list(module.get_explicitly_set_fields_by_scope(Scope.content).keys()) +
list(module.get_explicitly_set_fields_by_scope(Scope.settings).keys()) +
list(module.get_explicitly_set_fields_by_scope(Scope.children).keys())
)
module.location = new_location
# Pure XBlocks store the field data in a key-value store
# in which one component of the key is the XBlock's location (equivalent to "scope_ids").
# Since we've changed the XBlock's location, we need to re-save
# all the XBlock's fields so they will be stored using the new location in the key.
# However, since XBlocks only save "dirty" fields, we need to call
    # XBlock's `force_save_fields` method.
if len(rekey_fields) > 0:
module.force_save_fields(rekey_fields)
| eduNEXT/edunext-platform | common/lib/xmodule/xmodule/modulestore/xml_importer.py | Python | agpl-3.0 | 56,979 |
#!/usr/bin/env python
#
# Author: Alta Fang (altafang @caltech and alta @princeton)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
example of using PowellDirectionalSolver on the rosenbrock function
"""
from mystic.models import rosen
import numpy
def constrain(x):
x[1] = x[0]
return x
if __name__=='__main__':
import time
times = []
algor = []
x0 = [0.8,1.2,0.7]
#x0 = [0.8,1.2,1.7] #... better when using "bad" range
min = [-0.999, -0.999, 0.999] #XXX: behaves badly when large range
max = [200.001, 100.001, numpy.inf] #... for >=1 x0 out of bounds; (up xtol)
# min = [-0.999, -0.999, -0.999]
# max = [200.001, 100.001, numpy.inf]
# min = [-0.999, -0.999, 0.999]
# max = [2.001, 1.001, 1.001]
print "Powell Direction Set Method"
print "==========================="
start = time.time()
from mystic.monitors import Monitor, VerboseMonitor
stepmon = VerboseMonitor(1,1)
#stepmon = Monitor() #VerboseMonitor(10)
from mystic.termination import NormalizedChangeOverGeneration as NCOG
#from mystic._scipyoptimize import fmin_powell
from mystic.solvers import fmin_powell, PowellDirectionalSolver
#print fmin_powell(rosen,x0,retall=0,full_output=0)#,maxiter=14)
solver = PowellDirectionalSolver(len(x0))
solver.SetInitialPoints(x0)
solver.SetStrictRanges(min,max)
#solver.SetEvaluationLimits(generations=13)
solver.SetGenerationMonitor(stepmon)
solver.SetConstraints(constrain)
solver.enable_signal_handler()
solver.Solve(rosen, NCOG(tolerance=1e-4), disp=1)
print solver.bestSolution
#print "Current function value: %s" % solver.bestEnergy
#print "Iterations: %s" % solver.generations
#print "Function evaluations: %s" % solver.evaluations
times.append(time.time() - start)
algor.append("Powell's Method\t")
for k in range(len(algor)):
print algor[k], "\t -- took", times[k]
# end of file
| jcfr/mystic | examples/test_rosenbrock3.py | Python | bsd-3-clause | 2,181 |
"""
pypebbleapi
------------
Pebble-api for python.
Library to ease the access to the Pebble Timeline and the creation of Pins.
"""
from setuptools import setup, find_packages
setup(
name='pypebbleapi',
version='1.0.0',
url='https://github.com/youtux/pypebbleapi',
license='MIT',
author='Alessio Bogon',
author_email='youtux@gmail.com',
description='Pebble-api for python.',
long_description=__doc__,
packages=find_packages(),
install_requires=['requests>=2.5.1,<3', 'six~=1.9', 'Cerberus~=0.9'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| youtux/pypebbleapi | setup.py | Python | mit | 1,105 |
# -----------------------------------------------------------
# demonstrates the usage of continue to skip the execution of
# statements in a loop
#
# (C) 2014 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
# define list
shoppingCart = ["banana", "apple", "grapefruit", "strawberry", "orange"]
# output list content
print (shoppingCart)
# simple version with index
# initiate index
itemIndex = 0
# loop over the list by index
while itemIndex < len(shoppingCart):
    # print even-indexed elements only, and skip the odd-indexed ones
if itemIndex % 2 == 1:
# skip
# increment itemIndex
itemIndex += 1
continue
else:
print (itemIndex, shoppingCart[itemIndex])
# increment itemIndex
itemIndex += 1
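# The same selection written as a sketch with a for loop and enumerate();
# continue again skips the remaining statements for the odd indexes.
for index, item in enumerate(shoppingCart):
    # skip the odd-indexed elements
    if index % 2 == 1:
        continue
    print (index, item)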
| plasmashadow/training-python | loops/while-continue.py | Python | gpl-2.0 | 810 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# Copyright (C) 1999-2007 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Basic Python voice answering machine. Call this from vgetty using the
following config:
[ part of voice.conf]
program vgetty
voice_shell /usr/bin/python2.4
call_program /etc/pycopia/voicemail.py
"""
import sys, os, time
from pycopia import logfile
from pycopia.audio import Vgetty
from pycopia.basicconfig import ConfigHolder
def main(argv):
cf = ConfigHolder()
cf.VOICEDIR = "/var/spool/voice"
cf.MESSAGEDIR = "messages"
cf.SPOOLDIR = "incoming"
cf.GREETING = "greeting.rmd"
cf.LOGFILE = "/var/log/voicemail/voicemail.log"
lf = logfile.open(cf.LOGFILE)
try:
rv = Vgetty.answering_machine(cf, lf)
finally:
lf.close()
return rv
sys.exit(main(sys.argv))
| xiangke/pycopia | audio/etc/voicemail.py | Python | lgpl-2.1 | 1,389 |
from __future__ import absolute_import
import pytest
from qtpy import PYQT5, PYSIDE2
@pytest.mark.skipif(not (PYQT5 or PYSIDE2), reason="Only available in Qt5 bindings")
def test_qtqml():
"""Test the qtpy.QtQml namespace"""
from qtpy import QtQml
assert QtQml.QJSEngine is not None
assert QtQml.QJSValue is not None
assert QtQml.QJSValueIterator is not None
assert QtQml.QQmlAbstractUrlInterceptor is not None
assert QtQml.QQmlApplicationEngine is not None
assert QtQml.QQmlComponent is not None
assert QtQml.QQmlContext is not None
assert QtQml.QQmlEngine is not None
assert QtQml.QQmlImageProviderBase is not None
assert QtQml.QQmlError is not None
assert QtQml.QQmlExpression is not None
assert QtQml.QQmlExtensionPlugin is not None
assert QtQml.QQmlFileSelector is not None
assert QtQml.QQmlIncubationController is not None
assert QtQml.QQmlIncubator is not None
if not PYSIDE2:
# https://wiki.qt.io/Qt_for_Python_Missing_Bindings#QtQml
assert QtQml.QQmlListProperty is not None
assert QtQml.QQmlListReference is not None
assert QtQml.QQmlNetworkAccessManagerFactory is not None
assert QtQml.QQmlParserStatus is not None
assert QtQml.QQmlProperty is not None
assert QtQml.QQmlPropertyValueSource is not None
assert QtQml.QQmlScriptString is not None
assert QtQml.QQmlPropertyMap is not None
| sserrot/champion_relationships | venv/Lib/site-packages/qtpy/tests/test_qtqml.py | Python | mit | 1,413 |
import sys
import pyaudio
import wave
#reso_destroyed = "../audio/resonator_destroyed1.wav"
#reso_deployed = "../audio/resonator_deployed1.wav"
test_file = "../audio/violin-test-PCM16-48.wav"
#test_file = "../audio/violin-test-PCM16.wav"
CHUNK = 1024 * 16
print("wave open")
wf = wave.open(test_file, 'rb')
print (" wave object: channels ",wf.getnchannels()," rate ",wf.getframerate()," samp width ",wf.getsampwidth() )
print(" pyaudio create" )
try:
aud = pyaudio.PyAudio()
except:
print(" pyaudio open failed, threw exception")
sys.exit()
print(" pyaudio open")
print(" iterate devices")
for index in range(aud.get_device_count()):
desc = aud.get_device_info_by_index(index)
print(" audio device: idx ",index," desc ",desc)
try:
stream = aud.open(format=aud.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate = wf.getframerate(),
frames_per_buffer = wf.getframerate(),
output = True )
# stream = aud.open(format=aud.get_format_from_width(wf.getsampwidth()),
# channels=wf.getnchannels(),
# rate = wf.getframerate(),
# output = True )
except Exception as e:
print("pyaudio open failed, exception ",e)
sys.exit()
print("pyaudio readframes / write")
data = wf.readframes(CHUNK)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK)
stream.stop_stream()
stream.close()
aud.terminate()
| bbulkow/MagnusFlora | samples/sound.pyaudio.py | Python | mit | 1,357 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
from spack.environment import EnvironmentModifications
class Intel(IntelPackage):
"""Intel Compilers."""
homepage = "https://software.intel.com/en-us/intel-parallel-studio-xe"
version('18.0.1', '28cb807126d713350f4aa6f9f167448a',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12381/parallel_studio_xe_2018_update1_composer_edition.tgz')
version('18.0.0', '31ba768fba6e7322957b03feaa3add28',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12067/parallel_studio_xe_2018_composer_edition.tgz')
version('17.0.4', 'd03d351809e182c481dc65e07376d9a2',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11541/parallel_studio_xe_2017_update4_composer_edition.tgz')
version('17.0.3', '52344df122c17ddff3687f84ceb21623',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11464/parallel_studio_xe_2017_update3_composer_edition.tgz')
version('17.0.2', '2891ab1ece43eb61b6ab892f07c47f01',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11302/parallel_studio_xe_2017_update2_composer_edition.tgz')
version('17.0.1', '1f31976931ed8ec424ac7c3ef56f5e85',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/10978/parallel_studio_xe_2017_update1_composer_edition.tgz')
version('17.0.0', 'b67da0065a17a05f110ed1d15c3c6312',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9656/parallel_studio_xe_2017_composer_edition.tgz')
version('16.0.4', '2bc9bfc9be9c1968a6e42efb4378f40e',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9785/parallel_studio_xe_2016_composer_edition_update4.tgz')
version('16.0.3', '3208eeabee951fc27579177b593cefe9',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9063/parallel_studio_xe_2016_composer_edition_update3.tgz')
version('16.0.2', '1133fb831312eb519f7da897fec223fa',
url='http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/8680/parallel_studio_xe_2016_composer_edition_update2.tgz')
variant('rpath', default=True, description='Add rpath to .cfg files')
components = [
# Common files
'intel-comp-',
'intel-openmp',
# C/C++
'intel-icc',
# Fortran
'intel-ifort',
]
@property
def license_files(self):
return [
'Licenses/license.lic',
join_path('compilers_and_libraries', 'linux', 'bin',
'intel64', 'license.lic')
]
@run_after('install')
def rpath_configuration(self):
if '+rpath' in self.spec:
bin_dir = join_path(self.prefix, 'compilers_and_libraries',
'linux', 'bin', 'intel64')
lib_dir = join_path(self.prefix, 'compilers_and_libraries',
'linux', 'compiler', 'lib', 'intel64_lin')
for compiler in ['icc', 'icpc', 'ifort']:
cfgfilename = join_path(bin_dir, '{0}.cfg'.format(compiler))
with open(cfgfilename, 'w') as f:
f.write('-Xlinker -rpath -Xlinker {0}\n'.format(lib_dir))
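        # Illustration (added comment, not part of the original package): with the
        # '+rpath' variant the loop above leaves each of icc.cfg / icpc.cfg /
        # ifort.cfg containing a single line of the form
        #     -Xlinker -rpath -Xlinker <prefix>/compilers_and_libraries/linux/compiler/lib/intel64_lin
        # which the compiler drivers pass to the linker, so binaries they build can
        # find the Intel runtime libraries without LD_LIBRARY_PATH being set.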
def setup_environment(self, spack_env, run_env):
"""Adds environment variables to the generated module file.
These environment variables come from running:
.. code-block:: console
$ source bin/compilervars.sh intel64
"""
# NOTE: Spack runs setup_environment twice, once pre-build to set up
# the build environment, and once post-installation to determine
# the environment variables needed at run-time to add to the module
# file. The script we need to source is only present post-installation,
# so check for its existence before sourcing.
# TODO: At some point we should split setup_environment into
# setup_build_environment and setup_run_environment to get around
# this problem.
compilervars = os.path.join(self.prefix.bin, 'compilervars.sh')
if os.path.isfile(compilervars):
run_env.extend(EnvironmentModifications.from_sourcing_file(
compilervars, 'intel64'))
| EmreAtes/spack | var/spack/repos/builtin/packages/intel/package.py | Python | lgpl-2.1 | 5,555 |
import copy
from django.conf.urls import *
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from haystack.views import SearchView
from haystack.forms import SearchForm as DefaultSearchForm
from pages.models import Page, slugify
from maps.models import MapData
from maps.widgets import InfoMap, map_options_for_region
from regions.views import RegionMixin
class WithMapSearchView(SearchView):
def get_map_options(self):
widget_options = copy.deepcopy(getattr(settings,
'OLWIDGET_DEFAULT_OPTIONS', {}))
map_opts = widget_options.get('map_options', {})
map_controls = map_opts.get('controls', [])
        # Remove the pan/zoom and touch navigation controls for the small map widget.
if 'PanZoomBar' in map_controls:
map_controls.remove('PanZoomBar')
if 'PanZoom' in map_controls:
map_controls.remove('PanZoom')
if 'TouchNavigation' in map_controls:
map_controls.remove('TouchNavigation')
widget_options['map_options'] = map_opts
widget_options['map_div_class'] = 'mapwidget small'
return widget_options
def get_map(self):
(paginator, page) = self.build_page()
result_pks = [p.pk for p in page.object_list if p]
maps = MapData.objects.filter(page__pk__in=result_pks)
if not maps:
return None
_map = InfoMap([(obj.geom, popup_html(obj)) for obj in maps],
options=self.get_map_options())
return _map
def extra_context(self):
context = super(WithMapSearchView, self).extra_context()
context['query_slug'] = Page(name=self.query).pretty_slug
context['keywords'] = self.query.split()
context['map'] = self.get_map()
return context
class GlobalSearchView(WithMapSearchView):
template = 'search/global_search.html'
class CreatePageSearchView(WithMapSearchView, RegionMixin):
def __call__(self, request, region=''):
from regions.models import Region
self.region = self.get_region(request=request, kwargs={'region': region})
return super(CreatePageSearchView, self).__call__(request)
def build_form(self, *args, **kwargs):
form = super(CreatePageSearchView, self).build_form(*args, **kwargs)
form.region = self.region
return form
def extra_context(self):
context = super(CreatePageSearchView, self).extra_context()
context['allow_page_creation'] = not Page.objects.filter(
slug=slugify(self.query), region=self.region).exists()
context['region'] = self.region
return context
class MapForInRegionSearchView(CreatePageSearchView):
template = 'search/map_for_search.html'
def get_map_options(self):
widget_options = copy.deepcopy(getattr(settings,
'OLWIDGET_DEFAULT_OPTIONS', {}))
map_opts = widget_options.get('map_options', {})
widget_options['map_options'] = map_opts
return widget_options
class MapForGlobalSearchView(GlobalSearchView):
template = 'search/map_for_search.html'
def get_map_options(self):
widget_options = copy.deepcopy(getattr(settings,
'OLWIDGET_DEFAULT_OPTIONS', {}))
map_opts = widget_options.get('map_options', {})
widget_options['map_options'] = map_opts
return widget_options
def popup_html(map_data):
page = map_data.page
return mark_safe('<a href="%s">%s</a>' %
(page.get_absolute_url(), page.name))
class SearchForm(DefaultSearchForm):
def search(self):
sqs = super(SearchForm, self).search()
cleaned_data = getattr(self, 'cleaned_data', {})
keywords = cleaned_data.get('q', '').split()
if not keywords:
return sqs
# we do __in because we want partial matches, not just exact ones.
# And by default, Haystack only searches the `document` field, so
# we need this to activate the boosts.
return sqs.filter_or(full_name__in=keywords).\
filter_or(slug__in=keywords).filter_or(name__in=keywords).\
filter_or(tags__in=keywords)
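    # Illustrative note (added comment, not in the original module): for a query
    # such as "mission district", cleaned_data['q'].split() yields
    # ['mission', 'district'], so the chain above is roughly
    #     sqs.filter_or(full_name__in=['mission', 'district'])
    #        .filter_or(slug__in=['mission', 'district'])
    #        .filter_or(name__in=['mission', 'district'])
    #        .filter_or(tags__in=['mission', 'district'])
    # i.e. a result matches if any keyword appears in any of those fields.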
class InRegionSearchForm(DefaultSearchForm):
def search(self):
sqs = super(InRegionSearchForm, self).search()
cleaned_data = getattr(self, 'cleaned_data', {})
keywords = cleaned_data.get('q', '').split()
if not keywords:
return sqs
# we do __in because we want partial matches, not just exact ones.
# And by default, Haystack only searches the `document` field, so
# we need this to activate the boosts.
return sqs.filter_or(name__in=keywords).filter_or(tags__in=keywords).filter_and(region_id=self.region.id)
def search_view_factory(view_class=SearchView, *args, **kwargs):
def search_view(request, **kwargs2):
return view_class(*args, **kwargs)(request, **kwargs2)
return search_view
haystack_search = search_view_factory(
view_class=CreatePageSearchView,
form_class=InRegionSearchForm
)
map_for_haystack_search = search_view_factory(
view_class=MapForInRegionSearchView,
form_class=InRegionSearchForm
)
global_search = search_view_factory(
view_class=GlobalSearchView,
form_class=SearchForm
)
map_for_global_search = search_view_factory(
view_class=MapForGlobalSearchView,
form_class=SearchForm
)
urlpatterns_no_region = patterns('',
url(r'^_rsearch/(?P<region>[^/]+)?/?$', haystack_search , name='haystack_search'),
url(r'^_rsearch/_map/(?P<region>[^/]+)?/?$', map_for_haystack_search , name='map_for_haystack_search'),
)
urlpatterns = urlpatterns_no_region + patterns('',
url(r'^_search/$', global_search, name='global_search'),
url(r'^_search/_map/$', map_for_global_search, name='map_for_global_search'),
)
| evangeline97/localwiki-backend-server | localwiki/search/urls.py | Python | gpl-2.0 | 5,842 |
# Written by Bram Cohen
# see LICENSE.txt for license information
from cStringIO import StringIO
from binascii import b2a_hex
from socket import error as socketerror
from urllib import quote
from traceback import print_exc
from BitTornado.BTcrypto import Crypto
try:
True
except:
True = 1
False = 0
bool = lambda x: not not x
DEBUG = False
MAX_INCOMPLETE = 8
protocol_name = 'BitTorrent protocol'
option_pattern = chr(0)*8
def toint(s):
return long(b2a_hex(s), 16)
def tobinary16(i):
return chr((i >> 8) & 0xFF) + chr(i & 0xFF)
hexchars = '0123456789ABCDEF'
hexmap = []
for i in xrange(256):
hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
def tohex(s):
r = []
for c in s:
r.append(hexmap[ord(c)])
return ''.join(r)
def make_readable(s):
if not s:
return ''
if quote(s).find('%') >= 0:
return tohex(s)
return '"'+s+'"'
class IncompleteCounter:
def __init__(self):
self.c = 0
def increment(self):
self.c += 1
def decrement(self):
self.c -= 1
def toomany(self):
return self.c >= MAX_INCOMPLETE
incompletecounter = IncompleteCounter()
# header, options, download id, my id, [length, message]
class Connection:
def __init__(self, Encoder, connection, id,
ext_handshake=False, encrypted = None, options = None):
self.Encoder = Encoder
self.connection = connection
self.connecter = Encoder.connecter
self.id = id
self.locally_initiated = (id != None)
self.readable_id = make_readable(id)
self.complete = False
self.keepalive = lambda: None
self.closed = False
self.buffer = ''
self.bufferlen = None
self.log = None
self.read = self._read
self.write = self._write
self.cryptmode = 0
self.encrypter = None
if self.locally_initiated:
incompletecounter.increment()
if encrypted:
self.encrypted = True
self.encrypter = Crypto(True)
self.write(self.encrypter.pubkey+self.encrypter.padding())
else:
self.encrypted = False
self.write(chr(len(protocol_name)) + protocol_name +
option_pattern + self.Encoder.download_id )
self.next_len, self.next_func = 1+len(protocol_name), self.read_header
elif ext_handshake:
self.Encoder.connecter.external_connection_made += 1
if encrypted: # passed an already running encrypter
self.encrypter = encrypted
self.encrypted = True
self._start_crypto()
self.next_len, self.next_func = 14, self.read_crypto_block3c
else:
self.encrypted = False
self.options = options
self.write(self.Encoder.my_id)
self.next_len, self.next_func = 20, self.read_peer_id
else:
self.encrypted = None # don't know yet
self.next_len, self.next_func = 1+len(protocol_name), self.read_header
self.Encoder.raw_server.add_task(self._auto_close, 30)
def _log_start(self): # only called with DEBUG = True
self.log = open('peerlog.'+self.get_ip()+'.txt','a')
self.log.write('connected - ')
if self.locally_initiated:
self.log.write('outgoing\n')
else:
self.log.write('incoming\n')
self._logwritefunc = self.write
self.write = self._log_write
def _log_write(self, s):
self.log.write('w:'+b2a_hex(s)+'\n')
self._logwritefunc(s)
def get_ip(self, real=False):
return self.connection.get_ip(real)
def get_id(self):
return self.id
def get_readable_id(self):
return self.readable_id
def is_locally_initiated(self):
return self.locally_initiated
def is_encrypted(self):
return bool(self.encrypted)
def is_flushed(self):
return self.connection.is_flushed()
def _read_header(self, s):
if s == chr(len(protocol_name))+protocol_name:
return 8, self.read_options
return None
def read_header(self, s):
if self._read_header(s):
if self.encrypted or self.Encoder.config['crypto_stealth']:
return None
return 8, self.read_options
if self.locally_initiated and not self.encrypted:
return None
elif not self.Encoder.config['crypto_allowed']:
return None
if not self.encrypted:
self.encrypted = True
self.encrypter = Crypto(self.locally_initiated)
self._write_buffer(s)
return self.encrypter.keylength, self.read_crypto_header
################## ENCRYPTION SUPPORT ######################
def _start_crypto(self):
self.encrypter.setrawaccess(self._read,self._write)
self.write = self.encrypter.write
self.read = self.encrypter.read
if self.buffer:
self.buffer = self.encrypter.decrypt(self.buffer)
def _end_crypto(self):
self.read = self._read
self.write = self._write
self.encrypter = None
def read_crypto_header(self, s):
self.encrypter.received_key(s)
self.encrypter.set_skey(self.Encoder.download_id)
if self.locally_initiated:
if self.Encoder.config['crypto_only']:
cryptmode = '\x00\x00\x00\x02' # full stream encryption
else:
cryptmode = '\x00\x00\x00\x03' # header or full stream
padc = self.encrypter.padding()
self.write( self.encrypter.block3a
+ self.encrypter.block3b
+ self.encrypter.encrypt(
('\x00'*8) # VC
+ cryptmode # acceptable crypto modes
+ tobinary16(len(padc))
+ padc # PadC
+ '\x00\x00' ) ) # no initial payload data
self._max_search = 520
return 1, self.read_crypto_block4a
self.write(self.encrypter.pubkey+self.encrypter.padding())
self._max_search = 520
return 0, self.read_crypto_block3a
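    # Summary added for orientation (derived from the read_crypto_* methods in
    # this section, not original commentary):
    #   outgoing side: __init__ sends our DH public key + random padding; on the
    #     peer's key (read_crypto_header) we send block3a + block3b plus an
    #     encrypted block of VC (8 zero bytes) + acceptable crypto modes + PadC,
    #     then scan for the peer's VC (block4a) and read its selected mode and
    #     PadD (block4b / pad4).
    #   incoming side: read_crypto_header replies with our own key + padding,
    #     then scans for block3a, checks block3b, enables crypto and reads
    #     VC + offered modes + PadC (+ optional initial payload), answering
    #     with VC + the chosen mode + PadD (read_crypto_pad3).
    #   cryptmode 1 = only the handshake is encrypted; 2 = full stream encryption.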
def _search_for_pattern(self, s, pat):
p = s.find(pat)
if p < 0:
if len(s) >= len(pat):
self._max_search -= len(s)+1-len(pat)
if self._max_search < 0:
self.close()
return False
self._write_buffer(s[1-len(pat):])
return False
self._write_buffer(s[p+len(pat):])
return True
### INCOMING CONNECTION ###
def read_crypto_block3a(self, s):
if not self._search_for_pattern(s,self.encrypter.block3a):
return -1, self.read_crypto_block3a # wait for more data
return len(self.encrypter.block3b), self.read_crypto_block3b
def read_crypto_block3b(self, s):
if s != self.encrypter.block3b:
return None
self.Encoder.connecter.external_connection_made += 1
self._start_crypto()
return 14, self.read_crypto_block3c
def read_crypto_block3c(self, s):
if s[:8] != ('\x00'*8): # check VC
return None
self.cryptmode = toint(s[8:12]) % 4
if self.cryptmode == 0:
return None # no encryption selected
if ( self.cryptmode == 1 # only header encryption
and self.Encoder.config['crypto_only'] ):
return None
padlen = (ord(s[12])<<8)+ord(s[13])
if padlen > 512:
return None
return padlen+2, self.read_crypto_pad3
def read_crypto_pad3(self, s):
s = s[-2:]
ialen = (ord(s[0])<<8)+ord(s[1])
if ialen > 65535:
return None
if self.cryptmode == 1:
cryptmode = '\x00\x00\x00\x01' # header only encryption
else:
cryptmode = '\x00\x00\x00\x02' # full stream encryption
padd = self.encrypter.padding()
self.write( ('\x00'*8) # VC
+ cryptmode # encryption mode
+ tobinary16(len(padd))
+ padd ) # PadD
if ialen:
return ialen, self.read_crypto_ia
return self.read_crypto_block3done()
def read_crypto_ia(self, s):
if DEBUG:
self._log_start()
self.log.write('r:'+b2a_hex(s)+'(ia)\n')
if self.buffer:
self.log.write('r:'+b2a_hex(self.buffer)+'(buffer)\n')
return self.read_crypto_block3done(s)
def read_crypto_block3done(self, ia=''):
if DEBUG:
if not self.log:
self._log_start()
if self.cryptmode == 1: # only handshake encryption
assert not self.buffer # oops; check for exceptions to this
self._end_crypto()
if ia:
self._write_buffer(ia)
return 1+len(protocol_name), self.read_encrypted_header
### OUTGOING CONNECTION ###
def read_crypto_block4a(self, s):
if not self._search_for_pattern(s,self.encrypter.VC_pattern()):
return -1, self.read_crypto_block4a # wait for more data
self._start_crypto()
return 6, self.read_crypto_block4b
def read_crypto_block4b(self, s):
self.cryptmode = toint(s[:4]) % 4
if self.cryptmode == 1: # only header encryption
if self.Encoder.config['crypto_only']:
return None
elif self.cryptmode != 2:
return None # unknown encryption
padlen = (ord(s[4])<<8)+ord(s[5])
if padlen > 512:
return None
if padlen:
return padlen, self.read_crypto_pad4
return self.read_crypto_block4done()
def read_crypto_pad4(self, s):
# discard data
return self.read_crypto_block4done()
def read_crypto_block4done(self):
if DEBUG:
self._log_start()
if self.cryptmode == 1: # only handshake encryption
if not self.buffer: # oops; check for exceptions to this
return None
self._end_crypto()
self.write(chr(len(protocol_name)) + protocol_name +
option_pattern + self.Encoder.download_id)
return 1+len(protocol_name), self.read_encrypted_header
### START PROTOCOL OVER ENCRYPTED CONNECTION ###
def read_encrypted_header(self, s):
return self._read_header(s)
################################################
def read_options(self, s):
self.options = s
return 20, self.read_download_id
def read_download_id(self, s):
if ( s != self.Encoder.download_id
or not self.Encoder.check_ip(ip=self.get_ip()) ):
return None
if not self.locally_initiated:
if not self.encrypted:
self.Encoder.connecter.external_connection_made += 1
self.write(chr(len(protocol_name)) + protocol_name +
option_pattern + self.Encoder.download_id + self.Encoder.my_id)
return 20, self.read_peer_id
def read_peer_id(self, s):
if not self.encrypted and self.Encoder.config['crypto_only']:
return None # allows older trackers to ping,
# but won't proceed w/ connections
if not self.id:
self.id = s
self.readable_id = make_readable(s)
else:
if s != self.id:
return None
self.complete = self.Encoder.got_id(self)
if not self.complete:
return None
if self.locally_initiated:
self.write(self.Encoder.my_id)
incompletecounter.decrement()
self._switch_to_read2()
c = self.Encoder.connecter.connection_made(self)
self.keepalive = c.send_keepalive
return 4, self.read_len
def read_len(self, s):
l = toint(s)
if l > self.Encoder.max_len:
return None
return l, self.read_message
def read_message(self, s):
if s != '':
self.connecter.got_message(self, s)
return 4, self.read_len
def read_dead(self, s):
return None
def _auto_close(self):
if not self.complete:
self.close()
def close(self):
if not self.closed:
self.connection.close()
self.sever()
def sever(self):
if self.log:
self.log.write('closed\n')
self.log.close()
self.closed = True
del self.Encoder.connections[self.connection]
if self.complete:
self.connecter.connection_lost(self)
elif self.locally_initiated:
incompletecounter.decrement()
def send_message_raw(self, message):
self.write(message)
def _write(self, message):
if not self.closed:
self.connection.write(message)
def data_came_in(self, connection, s):
self.read(s)
def _write_buffer(self, s):
self.buffer = s+self.buffer
def _read(self, s):
if self.log:
self.log.write('r:'+b2a_hex(s)+'\n')
self.Encoder.measurefunc(len(s))
self.buffer += s
while True:
if self.closed:
return
            # self.next_len = number of characters the next_func expects,
            # or  0 = consume everything currently in the buffer,
            # or -1 = wait for the next read, then consume the whole buffer.
            # This scheme is not compatible with keepalives, so the connection
            # switches to _read2 once all negotiation is complete.
if self.next_len <= 0:
m = self.buffer
self.buffer = ''
elif len(self.buffer) >= self.next_len:
m = self.buffer[:self.next_len]
self.buffer = self.buffer[self.next_len:]
else:
return
try:
x = self.next_func(m)
except:
self.next_len, self.next_func = 1, self.read_dead
raise
if x is None:
self.close()
return
self.next_len, self.next_func = x
if self.next_len < 0: # already checked buffer
return # wait for additional data
if self.bufferlen is not None:
self._read2('')
return
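    # Example of the (next_len, next_func) state machine driven by the loop above
    # (comment added for clarity): once negotiation is done, read_len asks for
    # 4 bytes, converts them to a message length and hands off to read_message,
    # which hands back to read_len, e.g.
    #     read_len('\x00\x00\x00\x05')    -> (5, self.read_message)
    #     read_message(<5 byte payload>)  -> (4, self.read_len)
    # Returning None from any handler makes the loop close the connection.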
def _switch_to_read2(self):
self._write_buffer = None
if self.encrypter:
self.encrypter.setrawaccess(self._read2,self._write)
else:
self.read = self._read2
self.bufferlen = len(self.buffer)
self.buffer = [self.buffer]
def _read2(self, s): # more efficient, requires buffer['',''] & bufferlen
if self.log:
self.log.write('r:'+b2a_hex(s)+'\n')
self.Encoder.measurefunc(len(s))
while True:
if self.closed:
return
p = self.next_len-self.bufferlen
if self.next_len == 0:
m = ''
elif s:
if p > len(s):
self.buffer.append(s)
self.bufferlen += len(s)
return
self.bufferlen = len(s)-p
self.buffer.append(s[:p])
m = ''.join(self.buffer)
if p == len(s):
self.buffer = []
else:
self.buffer=[s[p:]]
s = ''
elif p <= 0:
# assert len(self.buffer) == 1
s = self.buffer[0]
self.bufferlen = len(s)-self.next_len
m = s[:self.next_len]
if p == 0:
self.buffer = []
else:
self.buffer = [s[self.next_len:]]
s = ''
else:
return
try:
x = self.next_func(m)
except:
self.next_len, self.next_func = 1, self.read_dead
raise
if x is None:
self.close()
return
self.next_len, self.next_func = x
if self.next_len < 0: # already checked buffer
return # wait for additional data
def connection_flushed(self, connection):
if self.complete:
self.connecter.connection_flushed(self)
def connection_lost(self, connection):
if self.Encoder.connections.has_key(connection):
self.sever()
class _dummy_banlist:
def includes(self, x):
return False
class Encoder:
def __init__(self, connecter, raw_server, my_id, max_len,
schedulefunc, keepalive_delay, download_id,
measurefunc, config, bans=_dummy_banlist() ):
self.raw_server = raw_server
self.connecter = connecter
self.my_id = my_id
self.max_len = max_len
self.schedulefunc = schedulefunc
self.keepalive_delay = keepalive_delay
self.download_id = download_id
self.measurefunc = measurefunc
self.config = config
self.connections = {}
self.banned = {}
self.external_bans = bans
self.to_connect = []
self.paused = False
if self.config['max_connections'] == 0:
self.max_connections = 2 ** 30
else:
self.max_connections = self.config['max_connections']
schedulefunc(self.send_keepalives, keepalive_delay)
def send_keepalives(self):
self.schedulefunc(self.send_keepalives, self.keepalive_delay)
if self.paused:
return
for c in self.connections.values():
c.keepalive()
def start_connections(self, list):
if not self.to_connect:
self.raw_server.add_task(self._start_connection_from_queue)
self.to_connect = list
def _start_connection_from_queue(self):
if self.connecter.external_connection_made:
max_initiate = self.config['max_initiate']
else:
max_initiate = int(self.config['max_initiate']*1.5)
cons = len(self.connections)
if cons >= self.max_connections or cons >= max_initiate:
delay = 60
elif self.paused or incompletecounter.toomany():
delay = 1
else:
delay = 0
dns, id, encrypted = self.to_connect.pop(0)
self.start_connection(dns, id, encrypted)
if self.to_connect:
self.raw_server.add_task(self._start_connection_from_queue, delay)
def start_connection(self, dns, id, encrypted = None):
if ( self.paused
or len(self.connections) >= self.max_connections
or id == self.my_id
or not self.check_ip(ip=dns[0]) ):
return True
if self.config['crypto_only']:
if encrypted is None or encrypted: # fails on encrypted = 0
encrypted = True
else:
return True
for v in self.connections.values():
if v is None:
continue
if id and v.id == id:
return True
ip = v.get_ip(True)
if self.config['security'] and ip != 'unknown' and ip == dns[0]:
return True
try:
c = self.raw_server.start_connection(dns)
con = Connection(self, c, id, encrypted = encrypted)
self.connections[c] = con
c.set_handler(con)
except socketerror:
return False
return True
def _start_connection(self, dns, id, encrypted = None):
def foo(self=self, dns=dns, id=id, encrypted=encrypted):
self.start_connection(dns, id, encrypted)
self.schedulefunc(foo, 0)
def check_ip(self, connection=None, ip=None):
if not ip:
ip = connection.get_ip(True)
if self.config['security'] and self.banned.has_key(ip):
return False
if self.external_bans.includes(ip):
return False
return True
def got_id(self, connection):
if connection.id == self.my_id:
self.connecter.external_connection_made -= 1
return False
ip = connection.get_ip(True)
for v in self.connections.values():
if connection is not v:
if connection.id == v.id:
if ip == v.get_ip(True):
v.close()
else:
return False
if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True):
v.close()
return True
def external_connection_made(self, connection):
if self.paused or len(self.connections) >= self.max_connections:
connection.close()
return False
con = Connection(self, connection, None)
self.connections[connection] = con
connection.set_handler(con)
return True
def externally_handshaked_connection_made(self, connection, options,
already_read, encrypted = None):
if ( self.paused
or len(self.connections) >= self.max_connections
or not self.check_ip(connection=connection) ):
connection.close()
return False
con = Connection(self, connection, None,
ext_handshake = True, encrypted = encrypted, options = options)
self.connections[connection] = con
connection.set_handler(con)
if already_read:
con.data_came_in(con, already_read)
return True
def close_all(self):
for c in self.connections.values():
c.close()
self.connections = {}
def ban(self, ip):
self.banned[ip] = 1
def pause(self, flag):
self.paused = flag
| Cclleemm/FriendlyTorrent | src/tornado/BitTornado/BT1/Encrypter.py | Python | apache-2.0 | 22,211 |
# -*- coding: utf-8 -*-
#
#
# File to preform some standard tasks on a neuroConstruct project
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import time
import subprocess
from java.io import File
from ucl.physiol.neuroconstruct.cell.utils import CellTopologyHelper
from ucl.physiol.neuroconstruct.cell.compartmentalisation import GenesisCompartmentalisation
from ucl.physiol.neuroconstruct.cell.compartmentalisation import OriginalCompartmentalisation
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.neuron.NeuronSettings import DataSaveFormat
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.neuroml import NeuroMLConstants
from ucl.physiol.neuroconstruct.neuroml import LemsConstants
from ucl.physiol.neuroconstruct.project import SimPlot
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.simulation import SpikeAnalyser
from ucl.physiol.neuroconstruct.utils.units import UnitConverter
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings
from ucl.physiol.neuroconstruct.pynn.PynnFileManager import PynnSimulator
from ucl.physiol.neuroconstruct.neuroml import NeuroMLFileManager
def loadMepFile(mepFile, scale=1):
# Load an OMV mep file, see https://github.com/OpenSourceBrain/osb-model-validation
spike_times = {}
mep_file = open(mepFile)
exp_name = ""
for line in mep_file:
line = line.strip()
if line.startswith('system:'):
pass
elif line.startswith('expected:'):
pass
elif line.startswith('spike times: ['):
times = line[14:-1].split(',')
tt = []
for time in times:
tt.append(float(time.strip())*scale)
spike_times[exp_name] = tt
else:
exp_name = line[:-1]
return spike_times
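# Illustrative example (added comment, not part of the original script) of the
# layout loadMepFile() expects, based purely on the parsing above. A file such as
#
#   system: ...
#   expected:
#   Cell_0:
#   spike times: [ 10.5, 22.3, 41.0 ]
#
# parsed with loadMepFile(path, scale=1) would return
#   {'Cell_0': [10.5, 22.3, 41.0]}
# (each value multiplied by `scale` if a different scale is given).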
def generateNeuroML2(projFile,
simConfigs,
neuroConstructSeed = 1234,
seed = 1234,
verbose = True):
projectManager = ProjectManager()
project = projectManager.loadProject(projFile)
nmlfm = NeuroMLFileManager(project)
genDir = File(projFile.getParentFile(), "generatedNeuroML2")
if verbose: print("Generating NeuroML 2 files for project %s, sim configs: %s, into %s"%(project.getProjectName(), str(simConfigs), genDir.getAbsolutePath()))
for simConfigName in simConfigs:
projectManager.doGenerate(simConfigName, neuroConstructSeed)
while projectManager.isGenerating():
if verbose: print("Waiting for the project to be generated with Simulation Configuration: "+simConfigName)
time.sleep(5)
simConfig = project.simConfigInfo.getSimConfig(simConfigName)
nmlfm.generateNeuroMLFiles(simConfig,
NeuroMLConstants.NeuroMLVersion.getLatestVersion(),
LemsConstants.LemsOption.LEMS_WITHOUT_EXECUTE_MODEL,
OriginalCompartmentalisation(),
seed,
False,
True,
genDir,
"GENESIS Physiological Units",
False)
info = "These files are not the source files for the model, they have been generated from the source of the model in the neuroConstruct directory.\n"+ \
"These have been added to provide examples of valid NeuroML files for testing applications & the OSB website and may be removed at any time."
readme = open(genDir.getAbsolutePath()+'/README--GENERATED-FILES', 'w')
readme.write(info)
readme.close()
def generateNeuroML1(projFile,
simConfigs,
neuroConstructSeed = 1234,
seed = 1234,
verbose = True):
projectManager = ProjectManager()
project = projectManager.loadProject(projFile)
nmlfm = NeuroMLFileManager(project)
genDir = File(projFile.getParentFile(), "generatedNeuroML")
if verbose: print("Generating NeuroML v1.x files for project %s, sim configs: %s, into %s"%(project.getProjectName(), str(simConfigs), genDir.getAbsolutePath()))
for simConfigName in simConfigs:
projectManager.doGenerate(simConfigName, neuroConstructSeed)
while projectManager.isGenerating():
if verbose: print("Waiting for the project to be generated with Simulation Configuration: "+simConfigName)
time.sleep(5)
simConfig = project.simConfigInfo.getSimConfig(simConfigName)
nmlfm.generateNeuroMLFiles(simConfig,
NeuroMLConstants.NeuroMLVersion.NEUROML_VERSION_1,
LemsConstants.LemsOption.LEMS_WITHOUT_EXECUTE_MODEL,
OriginalCompartmentalisation(),
seed,
False,
True,
genDir,
"GENESIS Physiological Units",
False)
info = "These files are not the source files for the model, they have been generated from the source of the model in the neuroConstruct directory.\n"+ \
"These have been added to provide examples of valid NeuroML files for testing applications & the OSB website and may be removed at any time."
readme = open(genDir.getAbsolutePath()+'/README--GENERATED-FILES', 'w')
readme.write(info)
readme.close()
def getUnusedSimRef(project, simRefPrefix="P_Sim_"):
index = 0
while File( "%s/simulations/%s%i"%(project.getProjectMainDirectory().getCanonicalPath(), simRefPrefix,index)).exists():
index = index+1
simRef = "%s%i"%(simRefPrefix,index)
return simRef
def generateAndRunGenesis(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
quitAfterRun=False,
runInBackground=False,
units=-1,
symmetricComps=None):
prefix = "--- GENESIS gen: "
if verbose: print prefix+"Going to generate GENESIS files for: "+simRef
if runInBackground:
project.genesisSettings.setNoConsole()
if units == UnitConverter.GENESIS_SI_UNITS or units == UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS:
project.genesisSettings.setUnitSystemToUse(units) # else leave it as the units set in the proj
project.genesisSettings.setMooseCompatMode(False)
if symmetricComps is not None:
project.genesisSettings.setSymmetricCompartments(symmetricComps)
project.genesisFileManager.setQuitAfterRun(quitAfterRun)
compartmentalisation = GenesisCompartmentalisation()
project.genesisFileManager.generateTheGenesisFiles(simConfig,
None,
compartmentalisation,
simulatorSeed)
success = projectManager.doRunGenesis(simConfig)
if success:
print prefix+"Set running GENESIS simulation: "+simRef
else:
print prefix+"Problem running GENESIS simulation: "+simRef
return success
def generateAndRunMoose(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
quitAfterRun=False,
runInBackground=False,
units=-1):
prefix = "--- MOOSE gen: "
if verbose: print prefix+"Going to generate MOOSE files for: "+simRef
if runInBackground:
project.genesisSettings.setNoConsole()
project.genesisFileManager.setQuitAfterRun(quitAfterRun)
if units == UnitConverter.GENESIS_SI_UNITS or units == UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS:
project.genesisSettings.setUnitSystemToUse(units) # else leave it as the units set in the proj
project.genesisSettings.setMooseCompatMode(True)
compartmentalisation = GenesisCompartmentalisation()
project.genesisFileManager.generateTheGenesisFiles(simConfig,
None,
compartmentalisation,
simulatorSeed)
success = projectManager.doRunGenesis(simConfig)
if success:
print prefix+"Set running MOOSE simulation: "+simRef
else:
print prefix+"Problem running MOOSE simulation: "+simRef
return success
def generateAndRunPsics(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False):
prefix = "--- PSICS gen: "
if verbose: print prefix+"Going to generate PSICS files for: "+simRef
project.psicsFileManager.generateThePsicsFiles(simConfig,
simulatorSeed)
success = projectManager.doRunPsics(simConfig, (not runInBackground))
if success:
print prefix+"Set running PSICS simulation: "+simRef
else:
print prefix+"Problem running PSICS simulation: "+simRef
return success
def generateAndRunLems(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False,
version=NeuroMLConstants.NeuroMLVersion.getLatestVersion()):
prefix = "--- LEMS/NeuroML 2 gen: "
if verbose: print prefix+"Going to generate LEMS/NeuroML 2 files for: "+simRef
compartmentalisation = OriginalCompartmentalisation()
project.neuromlFileManager.generateNeuroMLFiles(simConfig,
version,
LemsConstants.LemsOption.EXECUTE_MODEL,
compartmentalisation,
simulatorSeed,
False,
False,
runInBackground)
return 1 # Call above will throw error if it fails
def generateAndRunPyNN(pynnSim,
project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=True,
runInBackground=False):
prefix = "--- PyNN_"+pynnSim+" gen: "
if verbose: print prefix+"Going to generate PyNN_"+pynnSim+" files for: "+simRef
pynnSimulator = None
if "NEST" in pynnSim:
pynnSimulator = PynnSimulator.NEST
elif "NEURON" in pynnSim:
pynnSimulator = PynnSimulator.NEURON
elif "BRIAN" in pynnSim:
pynnSimulator = PynnSimulator.BRIAN
else:
print pynnSim
#if verbose: print prefix+"Going to generate PyNN_"+str(pynnSimulator)+" files for: "+simRef
project.pynnFileManager.generateThePynnFiles(simConfig,
pynnSimulator,
simulatorSeed)
project.pynnFileManager.runFile(True)
return 1
def generateNeuron(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= True,
quitAfterRun= False,
runInBackground= False,
varTimestep= False,
varTimestepTolerance= None,
saveAsHdf5 = False,
runMode = NeuronFileManager.RUN_HOC):
prefix = "--- NEURON gen: "
if verbose: print prefix+"Going to generate NEURON files for simulation: "+simRef
project.neuronFileManager.setQuitAfterRun(quitAfterRun)
if runInBackground:
project.neuronSettings.setNoConsole()
if saveAsHdf5:
project.neuronSettings.setDataSaveFormat(DataSaveFormat.HDF5_NC)
else:
project.neuronSettings.setDataSaveFormat(DataSaveFormat.TEXT_NC)
project.neuronSettings.setVarTimeStep(varTimestep)
if varTimestepTolerance is not None:
project.neuronSettings.setVarTimeAbsTolerance(varTimestepTolerance)
project.neuronFileManager.generateTheNeuronFiles(simConfig,
None,
runMode,
simulatorSeed)
if verbose: print prefix+"Generated hoc files for simulation: "+simRef
compileProcManager = ProcessManager(project.neuronFileManager.getMainHocFile())
compileSuccess = compileProcManager.compileFileWithNeuron(0,0)
if verbose: print prefix+"Compiled NEURON files for: "+simRef
return compileSuccess
def generateAndRunNeuron(project,
projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= True,
quitAfterRun= False,
runInBackground= False,
varTimestep= False,
varTimestepTolerance= None,
saveAsHdf5 = False,
runMode = NeuronFileManager.RUN_HOC):
### Set simulation running
prefix = "--- NEURON gen: "
compileSuccess = generateNeuron(project, projectManager, simConfig, simRef,
simulatorSeed, verbose=verbose,
quitAfterRun=quitAfterRun,
runInBackground=runInBackground,
varTimestep=varTimestep,
varTimestepTolerance=varTimestepTolerance,
saveAsHdf5=saveAsHdf5,runMode=runMode)
if compileSuccess:
success = projectManager.doRunNeuron(simConfig)
if success:
print prefix+"Set running NEURON simulation: "+simRef
else:
print prefix+"Problem running NEURON simulation: "+simRef
return success
else:
return False
class SimulationManager():
knownSimulators = ["NEURON", "GENESIS", "GENESIS_SI", "GENESIS_PHYS", "MOOSE", "MOOSE_PHYS", "MOOSE_SI", "PSICS", "LEMS", "LEMSalpha", "PYNN_NEST", "PYNN_NEURON", "PYNN_BRIAN"]
plotFrames = {}
dataSets = {}
def __init__(self,
projFile,
numConcurrentSims = 1,
verbose = True):
self.allRunningSims = []
self.allRecentlyFinishedSims = []
self.allFinishedSims = []
self.projectManager = ProjectManager()
self.project = self.projectManager.loadProject(projFile)
self.numConcurrentSims = numConcurrentSims
self.verbose = verbose
self.printver("Starting Simulation Manager for project: "+self.project.getProjectFullFileName(), True)
self.printver("This will run up to %i simulations concurrently"%numConcurrentSims)
def printver(self, message, forcePrint=False):
if self.verbose or forcePrint:
print "--- SimMgr: "+ str(message)
def updateSimsRunning(self):
self.updateSimsRunningR(True)
def updateSimsRunningR(self, checkRemote):
remoteChecked = False
for sim in self.allRunningSims:
completed = False
timeFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
timeFile2 = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/time.txt") # for PSICS...
self.printver("Checking file: "+timeFile.getCanonicalPath() +", exists: "+ str(timeFile.exists()))
if (timeFile.exists()):
self.allFinishedSims.append(sim)
self.allRecentlyFinishedSims.append(sim)
self.allRunningSims.remove(sim)
completed = True
else:
self.printver("Checking file: "+timeFile2.getCanonicalPath() +", exists: "+ str(timeFile2.exists()))
if (timeFile2.exists()):
self.allFinishedSims.append(sim)
self.allRecentlyFinishedSims.append(sim)
self.allRunningSims.remove(sim)
completed = True
if checkRemote and not completed:
pullFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/pullsim.sh")
checkingRemoteFile = File(self.project.getProjectMainDirectory(), "simulations/"+sim+"/checkingRemote")
if pullFile.exists() and not checkingRemoteFile.exists():
pullCmd = ''+pullFile.getAbsolutePath()
self.printver("Going to run: "+pullCmd)
subprocess.call(pullCmd,shell=True)
remoteChecked = True
if remoteChecked:
self.printver("Waiting while remote simulations are checked...")
time.sleep(5)
self.updateSimsRunningR(False)
else:
self.printver("allRecentlyFinishedSims: "+str(self.allRecentlyFinishedSims))
self.printver("allFinishedSims: "+str(self.allFinishedSims))
self.printver("allRunningSims: "+str(self.allRunningSims))
def doCheckNumberSims(self):
self.printver("%i simulations out of max %s currently running: %s"%(len(self.allRunningSims), self.numConcurrentSims, str(self.allRunningSims)))
while (len(self.allRunningSims)>=self.numConcurrentSims):
self.printver("Waiting for another simulation slot to become available...")
time.sleep(4) # wait a while...
self.updateSimsRunning()
def reloadSims(self,
waitForAllSimsToFinish = True,
plotSims = True,
analyseSims = True,
plotVoltageOnly = False):
self.printver("Trying to reload simulations: "+str(self.allFinishedSims))
plottedSims = []
for simRef in self.allRecentlyFinishedSims:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+simRef)
timeFile = File(simDir, "time.dat")
timeFile2 = File(simDir,"time.txt") # for PSICS...
if timeFile.exists() or timeFile2.exists():
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
time.sleep(1) # wait a while...
try:
simData = SimulationData(simDir)
simData.initialise()
times = simData.getAllTimes()
if plotSims:
simConfigName = simData.getSimulationProperties().getProperty("Sim Config")
if simConfigName.find('(')>=0:
simConfigName = simConfigName[0:simConfigName.find('(')]
for dataStore in simData.getAllLoadedDataStores():
ds = simData.getDataSet(dataStore.getCellSegRef(), dataStore.getVariable(), False)
#self.printver("Found data store: "+str(dataStore)+", plotting volts only: "+str(plotVoltageOnly))
if not plotVoltageOnly or dataStore.getVariable() == SimPlot.VOLTAGE:
plotFrame = PlotManager.getPlotterFrame("Behaviour of "+dataStore.getVariable() \
+" for sim config: %s"%(simConfigName))
plotFrame.addDataSet(ds)
if analyseSims:
volts = ds.getYValues()
analyseStartTime = 0
analyseStopTime = times[-1]
analyseThreshold = 0 # mV
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
self.printver("Spike times in %s for sim %s: %s"%(dataStore.getCellSegRef(), simRef, str(spikeTimes)), True)
plottedSims.append(simRef)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath(), True)
self.printver(sys.exc_info(), True)
for simRef in plottedSims:
self.allRecentlyFinishedSims.remove(simRef)
if waitForAllSimsToFinish and len(self.allRunningSims)>0:
self.printver("Waiting for sims: %s to finish..."%str(self.allRunningSims))
time.sleep(2) # wait a while...
self.updateSimsRunning()
self.reloadSims(waitForAllSimsToFinish,
plotSims,
analyseSims,
plotVoltageOnly)
def checkSims(self,
spikeTimesToCheck = {},
spikeTimeAccuracy = 0.01,
threshold = 0 ): # mV
self.updateSimsRunning()
self.printver( "Trying to check simulations: %s against: %s, with a threshold: %s" % (str(self.allFinishedSims), str(spikeTimesToCheck), str(threshold)))
report = ""
numPassed = 0
numFailed = 0
checksUnused = spikeTimesToCheck.keys()
for simRef in self.allFinishedSims:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+simRef)
try:
simData = SimulationData(simDir)
simData.initialise()
times = simData.getAllTimes()
simConfigName = simData.getSimulationProperties().getProperty("Sim Config")
if simConfigName.find('(')>=0:
simConfigName = simConfigName[0:simConfigName.find('(')]
for dataStore in simData.getAllLoadedDataStores():
self.printver("Checking dataStore: "+str(dataStore)+" ("+dataStore.getCellSegRef()+")")
ds = simData.getDataSet(dataStore.getCellSegRef(), dataStore.getVariable(), False)
if dataStore.getVariable() == SimPlot.VOLTAGE:
if spikeTimesToCheck is not None:
volts = ds.getYValues()
analyseStartTime = 0
analyseStopTime = times[-1]
threshToUse = threshold
if type(threshold) is dict:
threshToUse = float(threshold[dataStore.getCellSegRef()])
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, threshToUse, analyseStartTime, analyseStopTime)
self.printver("Spike times (crossing %f) from %f to %f in %s for sim %s: %s"%(threshToUse, analyseStartTime, analyseStopTime, dataStore.getCellSegRef(), simRef, str(spikeTimes)))
if spikeTimesToCheck.has_key(dataStore.getCellSegRef()):
self.printver("Removing %s from %s"%(str(dataStore.getCellSegRef()), str(checksUnused)))
if dataStore.getCellSegRef() in checksUnused:
checksUnused.remove(dataStore.getCellSegRef())
fail = False
spikeTimesTarget = spikeTimesToCheck[dataStore.getCellSegRef()]
if len(spikeTimes) != len(spikeTimesTarget):
report = report + "ERROR: Number of spikes of %s (%i) not same as target list for %s (%i)!\n"% \
(dataStore.getCellSegRef(), len(spikeTimes), simRef, len(spikeTimesTarget))
fail = True
for spikeNum in range(0, min(len(spikeTimesTarget),len(spikeTimes))):
delta = spikeTimesTarget[spikeNum] - spikeTimes[spikeNum]
if float(abs(delta)) > float(spikeTimeAccuracy):
report = report + "ERROR: Spike time: %f not within %f of %f (delta = %f) for %s in %s!\n" % \
(spikeTimes[spikeNum], spikeTimeAccuracy, spikeTimesTarget[spikeNum], delta, dataStore.getCellSegRef(), simRef)
fail = True
if fail:
numFailed=numFailed+1
else:
numPassed=numPassed+1
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
raise
self.printver(sys.exc_info())
numFailed=numFailed+1
ignored = "" if len(checksUnused) == 0 else ", %i test conditions ignored"%(len(checksUnused))
report = report+"\n %i tests passed, %i tests failed%s!\n"%(numPassed, numFailed, ignored)
return report
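    # Hedged usage sketch (added comment, names are illustrative): after the
    # simulations launched by runMultipleSims() have finished, the recorded
    # voltage traces can be validated against expected spike times, e.g.
    #     report = simManager.checkSims(
    #         spikeTimesToCheck = {'SomeCellGroup_0': [13.8, 53.2, 92.6]},
    #         spikeTimeAccuracy = 0.5,
    #         threshold         = -20)   # may also be a dict keyed by cellSegRef
    #     print report
    # where the dict keys are the cellSegRef names used in the data stores.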
def runMultipleSims(self,
simConfigs = ["Default Simulation Configuration"],
maxElecLens = [-1],
simDt = None,
simDtOverride = None,
simDuration = None,
neuroConstructSeed = 12345,
simulatorSeed = 11111,
simulators = ["NEURON", "GENESIS_PHYS"],
runSims = True,
generateSims = True,
verboseSims = True,
runInBackground = False,
varTimestepNeuron = None,
varTimestepTolerance = None,
simRefGlobalSuffix = '',
simRefGlobalPrefix = '',
mpiConfig = MpiSettings.LOCAL_SERIAL,
mpiConfigs = [],
suggestedRemoteRunTime = -1,
saveAsHdf5 = False,
saveOnlySpikes = False,
saveAllContinuous = False,
runMode = NeuronFileManager.RUN_HOC):
for sim in simulators:
if sim not in self.knownSimulators:
print "Unknown simulator: "+sim+"!"
print "Known simulators: "+str(self.knownSimulators)
sys.exit(1)
allSimsSetRunning = []
for simConfigName in simConfigs:
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate network for Simulation Configuration: "+str(simConfig))
if saveOnlySpikes:
for simPlotName in simConfig.getPlots():
simPlot = self.project.simPlotInfo.getSimPlot(simPlotName)
if simPlot.getValuePlotted() == SimPlot.VOLTAGE:
simPlot.setValuePlotted(SimPlot.SPIKE)
if saveAllContinuous:
for simPlotName in simConfig.getPlots():
simPlot = self.project.simPlotInfo.getSimPlot(simPlotName)
#print simPlot
if SimPlot.SPIKE in simPlot.getValuePlotted():
simPlot.setValuePlotted(SimPlot.VOLTAGE)
#print simPlot
if len(mpiConfigs) == 0:
mpiConfigs = [mpiConfig]
for mpiConfigToUse in mpiConfigs:
mpiSettings = MpiSettings()
simConfig.setMpiConf(mpiSettings.getMpiConfiguration(mpiConfigToUse))
self.printver("Using Parallel Configuration: "+ str(simConfig.getMpiConf()))
if suggestedRemoteRunTime > 0:
self.project.neuronFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
self.project.genesisFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
for maxElecLen in maxElecLens:
if simDt is not None:
self.project.simulationParameters.setDt(simDt)
else:
simDt = self.project.simulationParameters.getDt() # for later if simDtOverride used...
if simDuration is not None:
simConfig.setSimDuration(simDuration)
recompSuffix = ""
if maxElecLen > 0:
cellGroup = simConfig.getCellGroups().get(0)
cell = self.project.cellManager.getCell(self.project.cellGroupsInfo.getCellType(cellGroup))
self.printver("Recompartmentalising cell in: "+cellGroup+" which is: "+str(cell))
info = CellTopologyHelper.recompartmentaliseCell(cell, maxElecLen, self.project)
self.printver("*** Recompartmentalised cell: "+info)
if len(maxElecLens) > 1 or maxElecLen > 0 : recompSuffix = "_"+str(maxElecLen)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(15)
self.printver("Generated network with %i cell(s)" % self.project.generatedCellPositions.getNumberInAllCellGroups())
simRefPrefix = (simConfigName+"_").replace(' ', '').replace(':', '')
if len(mpiConfigs) > 1:
simRefPrefix = simRefPrefix+(mpiConfigToUse+"_").replace(' ', '').replace('(', '_').replace(')', '_')
self.doCheckNumberSims()
self.printver("Going to generate for simulators: "+str(simulators))
if simulators.count("NEURON")>0:
if simDtOverride is not None:
if simDtOverride.has_key("NEURON"):
self.project.simulationParameters.setDt(simDtOverride["NEURON"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_N"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if varTimestepNeuron is None:
varTimestepNeuron = self.project.neuronSettings.isVarTimeStep()
if varTimestepTolerance is None:
varTimestepTolerance = self.project.neuronSettings.getVarTimeAbsTolerance()
if generateSims or runSims:
func = generateAndRunNeuron if runSims else generateNeuron
print("Using function %s" % str(func))
success = func(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose= verboseSims,
runInBackground= runInBackground,
varTimestep= varTimestepNeuron,
varTimestepTolerance= varTimestepTolerance,
saveAsHdf5 = saveAsHdf5,
runMode = runMode)
if success and runSims:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
if simulators.count("PSICS")>0:
if simDtOverride is not None:
if simDtOverride.has_key("PSICS"):
self.project.simulationParameters.setDt(simDtOverride["PSICS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_P"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunPsics(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
if simulators.count("LEMSalpha")>0:
if simDtOverride is not None:
if simDtOverride.has_key("LEMS"):
self.project.simulationParameters.setDt(simDtOverride["LEMS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_L"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunLems(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground,
version=NeuroMLConstants.NeuroMLVersion.NEUROML_VERSION_2_ALPHA)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
if simulators.count("LEMS")>0:
if simDtOverride is not None:
if simDtOverride.has_key("LEMS"):
self.project.simulationParameters.setDt(simDtOverride["LEMS"])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_L"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunLems(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground,
version=NeuroMLConstants.NeuroMLVersion.getLatestVersion())
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.doCheckNumberSims()
for sim in simulators:
if "PYNN_" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
pynnSim = sim[5:]
simRef = simRefGlobalPrefix + simRefPrefix+"_Py_"+pynnSim+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunPyNN(pynnSim,
self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
runInBackground=runInBackground)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.printver("Waiting a while before running next sim...")
time.sleep(2) # wait a while before running PyNN...
self.doCheckNumberSims()
for sim in simulators:
if "MOOSE" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_M"+recompSuffix + simRefGlobalSuffix
units = -1 # leave as what's set in project
if "_SI" in sim:
simRef = simRef+"_SI"
units = UnitConverter.GENESIS_SI_UNITS
if "_PHYS" in sim:
simRef = simRef+"_PHYS"
units = UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunMoose(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
units=units)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
time.sleep(1) # wait a while before running GENESIS...
self.doCheckNumberSims()
for sim in simulators:
if "GENESIS" in sim:
if simDtOverride is not None:
if simDtOverride.has_key(sim):
self.project.simulationParameters.setDt(simDtOverride[sim])
else:
self.project.simulationParameters.setDt(simDt)
simRef = simRefGlobalPrefix + simRefPrefix+"_G"+recompSuffix + simRefGlobalSuffix
units = -1 # leave as what's set in project
if "_SI" in sim:
simRef = simRef+"_SI"
units = UnitConverter.GENESIS_SI_UNITS
if "_PHYS" in sim:
simRef = simRef+"_PHYS"
units = UnitConverter.GENESIS_PHYSIOLOGICAL_UNITS
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunGenesis(self.project,
self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
units=units,
symmetricComps=None)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
time.sleep(1) # wait a while before running GENESISsym...
self.doCheckNumberSims()
if simulators.count("GENESISsym")>0:
simRef = simRefGlobalPrefix + simRefPrefix+"_Gs"+recompSuffix + simRefGlobalSuffix
self.project.simulationParameters.setReference(simRef)
if runSims:
success = generateAndRunGenesis(self.project,
                                                            self.projectManager,
simConfig,
simRef,
simulatorSeed,
verbose=verboseSims,
quitAfterRun=runInBackground,
runInBackground=runInBackground,
symmetricComps=True)
if success:
self.allRunningSims.append(simRef)
allSimsSetRunning.append(simRef)
else:
allSimsSetRunning.append(simRef)
self.updateSimsRunningR(False)
self.printver("Finished setting running all simulations for ParallelConfig: "+mpiConfigToUse)
self.printver("Finished setting running all simulations for sim config: "+simConfigName)
return allSimsSetRunning
def generateFICurve(self,
simulator,
simConfigName,
stimAmpLow,
stimAmpInc,
stimAmpHigh,
stimDel,
stimDur,
simDuration,
analyseStartTime,
analyseStopTime,
analyseThreshold,
simDt = None,
simPrefix = 'FI_',
neuroConstructSeed = 1234,
plotAllTraces = False,
verboseSims = True,
varTimestepNeuron = None,
mpiConfig = MpiSettings.LOCAL_SERIAL,
suggestedRemoteRunTime = -1):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate F-I curve on %s for sim config: %s with amplitude of stim: (%f -> %f ; %f)" % (simulator, simConfigName, stimAmpLow, stimAmpHigh, stimAmpInc))
if simConfig == None:
raise NameError('No such Simulation configuration as: '+ simConfigName+'. \nExisting sim configs: '+str(self.project.simConfigInfo.getAllSimConfigNames()))
simConfig.setSimDuration(simDuration)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(1)
numGenerated = self.project.generatedCellPositions.getNumberInAllCellGroups()
self.printver("Number of cells generated: " + str(numGenerated))
if numGenerated > 0:
self.printver("Generating scripts for simulator: %s..."%simulator)
if simulator == 'NEURON':
self.project.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.neuronSettings.setGraphicsMode(0) # 0 hides graphs during execution
if simulator.count('GENESIS')>0 or simulator.count('MOOSE')>0:
                self.project.genesisFileManager.setQuitAfterRun(1) # Remove this line to leave the GENESIS/MOOSE sim windows open after finishing
                self.project.genesisSettings.setCopySimFiles(1) # 1 copies the generated simulation files to PySim_0 etc. and will allow multiple sims to run at once
self.project.genesisSettings.setGraphicsMode(0) # 0 hides graphs during execution
stimAmp = stimAmpLow
simRefsVsStims = {}
while (stimAmp - stimAmpHigh) < (stimAmpInc/1e9): # to avoid floating point errors
######## Adjusting the amplitude of the current clamp ###############
stim = self.project.elecInputInfo.getStim(simConfig.getInputs().get(0))
if stim.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stim)+'!')
                if simConfig.getInputs().size() > 1:
for stimIndex in range(1, simConfig.getInputs().size()):
stimOther = self.project.elecInputInfo.getStim(simConfig.getInputs().get(stimIndex))
if stimOther.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stimOther)+'!')
else:
stimOther.setAmp(NumberGenerator(0))
stimOther.setDel(NumberGenerator(0))
stimOther.setDur(NumberGenerator(0))
stim.setAmp(NumberGenerator(stimAmp))
stim.setDel(NumberGenerator(stimDel))
stim.setDur(NumberGenerator(stimDur))
self.project.elecInputInfo.updateStim(stim)
self.printver("Next stim: "+ str(stim))
simRefs = self.runMultipleSims(simConfigs = [simConfig.getName()],
simulators = [simulator],
simDt = simDt,
verboseSims = verboseSims,
runInBackground = True,
simRefGlobalPrefix = simPrefix,
simRefGlobalSuffix = ("_"+str(float(stimAmp))),
varTimestepNeuron = varTimestepNeuron,
mpiConfig = mpiConfig,
suggestedRemoteRunTime = suggestedRemoteRunTime)
simRefsVsStims[simRefs[0]] = stimAmp # should be just one simRef returned...
stimAmp = stimAmp + stimAmpInc
if abs(stimAmp) < stimAmpInc/1e9: stimAmp = 0
while (len(self.allRunningSims)>0):
self.printver("Waiting for all simulations to finish...")
time.sleep(1) # wait a while...
self.updateSimsRunning()
self.printver("Going to plot traces from recorded sims: %s"%str(simRefsVsStims))
plotFrameFI = PlotManager.getPlotterFrame("F-I curve from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, 1)
plotFrameVolts = PlotManager.getPlotterFrame("Voltage traces from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, plotAllTraces)
plotFrameFI.setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = "F-I curve for Simulation Configuration: "+str(simConfig)
dataSet = DataSet(info, info, "nA", "Hz", "Current injected", "Firing frequency")
dataSet.setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
simList = simRefsVsStims.keys()
simList.sort()
for sim in simList:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+sim)
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
try:
simData = SimulationData(simDir)
simData.initialise()
self.printver("Data loaded: ")
self.printver(simData.getAllLoadedDataStores())
times = simData.getAllTimes()
cellSegmentRef = simConfig.getCellGroups().get(0)+"_0"
volts = simData.getVoltageAtAllTimes(cellSegmentRef)
traceInfo = "Voltage at: %s in simulation: %s"%(cellSegmentRef, sim)
dataSetV = DataSet(traceInfo, traceInfo, "mV", "ms", "Membrane potential", "Time")
for i in range(len(times)):
dataSetV.addPoint(times[i], volts[i])
if plotAllTraces:
plotFrameVolts.addDataSet(dataSetV)
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
stimAmp = simRefsVsStims[sim]
self.printver("Number of spikes at %f nA in sim %s: %i"%(stimAmp, sim, len(spikeTimes)))
avgFreq = 0
if len(spikeTimes)>1:
avgFreq = len(spikeTimes)/ ((analyseStopTime - analyseStartTime)/1000.0)
dataSet.addPoint(stimAmp,avgFreq)
else:
dataSet.addPoint(stimAmp,0)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
self.printver(sys.exc_info()[0])
plotFrameFI.addDataSet(dataSet)
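    # Example usage (illustrative sketch; the instance name and all stimulation
    # values below are assumptions, not taken from a real project):
    #
    #   simManager.generateFICurve("NEURON",
    #                              "Default Simulation Configuration",
    #                              stimAmpLow = 0.0,
    #                              stimAmpInc = 0.05,
    #                              stimAmpHigh = 0.5,
    #                              stimDel = 100,
    #                              stimDur = 1000,
    #                              simDuration = 1500,
    #                              analyseStartTime = 200,
    #                              analyseStopTime = 1200,
    #                              analyseThreshold = -20)
    #
    # Each amplitude step runs as a separate simulation; spikes counted between
    # analyseStartTime and analyseStopTime are converted to a firing frequency.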
def generateBatchCurve(self,
simulator,
simConfigName,
stimAmpLow,
stimAmpInc,
stimAmpHigh,
stimDel,
stimDur,
simDuration,
analyseStartTime,
analyseStopTime,
analyseThreshold,
simDt = None,
simPrefix = 'FI_',
neuroConstructSeed = 1234,
plotAllTraces = False,
verboseSims = True,
varTimestepNeuron = None,
mpiConfig = MpiSettings.LOCAL_SERIAL,
suggestedRemoteRunTime = -1,
curveType = 'F-I'):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to generate %s curve on %s for sim config: %s with amplitude of stim: (%f -> %f ; %f)" % (curveType, simulator, simConfigName, stimAmpLow, stimAmpHigh, stimAmpInc))
        # Can generate different categories of simulation curve: F-I and also SS-I
if simConfig == None:
raise NameError('No such Simulation configuration as: '+ simConfigName+'. \nExisting sim configs: '+str(self.project.simConfigInfo.getAllSimConfigNames()))
simConfig.setSimDuration(simDuration)
self.projectManager.doGenerate(simConfig.getName(), neuroConstructSeed)
while self.projectManager.isGenerating():
self.printver("Waiting for the project to be generated with Simulation Configuration: "+str(simConfig))
time.sleep(1)
numGenerated = self.project.generatedCellPositions.getNumberInAllCellGroups()
self.printver("Number of cells generated: " + str(numGenerated))
if numGenerated > 0:
self.printver("Generating scripts for simulator: %s..."%simulator)
if simulator == 'NEURON':
self.project.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
self.project.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
self.project.neuronSettings.setGraphicsMode(0) # 0 hides graphs during execution
if simulator.count('GENESIS')>0 or simulator.count('MOOSE')>0:
                self.project.genesisFileManager.setQuitAfterRun(1) # Remove this line to leave the GENESIS/MOOSE sim windows open after finishing
                self.project.genesisSettings.setCopySimFiles(1) # 1 copies simulation files to PySim_0 etc. and will allow multiple sims to run at once
self.project.genesisSettings.setGraphicsMode(0) # 0 hides graphs during execution
stimAmp = stimAmpLow
simRefsVsStims = {}
while (stimAmp - stimAmpHigh) < (stimAmpInc/1e9): # to avoid floating point errors
######## Adjusting the amplitude of the current clamp ###############
stim = self.project.elecInputInfo.getStim(simConfig.getInputs().get(0))
if stim.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stim)+'!')
                if simConfig.getInputs().size()>1:
for stimIndex in range(1, simConfig.getInputs().size()):
stimOther = self.project.elecInputInfo.getStim(simConfig.getInputs().get(stimIndex))
if stimOther.getElectricalInput().getType() != "IClamp":
raise Exception('Simulation config: '+ simConfigName+' has a non IClamp input: '+str(stimOther)+'!')
else:
stimOther.setAmp(NumberGenerator(0))
stimOther.setDel(NumberGenerator(0))
stimOther.setDur(NumberGenerator(0))
stim.setAmp(NumberGenerator(stimAmp))
stim.setDel(NumberGenerator(stimDel))
stim.setDur(NumberGenerator(stimDur))
self.project.elecInputInfo.updateStim(stim)
self.printver("Next stim: "+ str(stim))
simRefs = self.runMultipleSims(simConfigs = [simConfig.getName()],
simulators = [simulator],
simDt = simDt,
verboseSims = verboseSims,
runInBackground = True,
simRefGlobalPrefix = simPrefix,
simRefGlobalSuffix = ("_"+str(float(stimAmp))),
varTimestepNeuron = varTimestepNeuron,
mpiConfig = mpiConfig,
suggestedRemoteRunTime = suggestedRemoteRunTime)
simRefsVsStims[simRefs[0]] = stimAmp # should be just one simRef returned...
stimAmp = stimAmp + stimAmpInc
if abs(stimAmp) < stimAmpInc/1e9: stimAmp = 0
while (len(self.allRunningSims)>0):
self.printver("Waiting for all simulations to finish...")
time.sleep(1) # wait a while...
self.updateSimsRunning()
self.generatePlotAnalisys(simulator,simConfigName,analyseStartTime,analyseStopTime,analyseThreshold,plotAllTraces,curveType,simRefsVsStims)
def generatePlotAnalisys(self,
simulator,
simConfigName,
analyseStartTime,
analyseStopTime,
analyseThreshold,
plotAllTraces,
curveType,
simRefsVsStims):
simConfig = self.project.simConfigInfo.getSimConfig(simConfigName)
self.printver("Going to plot traces from recorded sims: %s"%str(simRefsVsStims))
self.plotFrames[curveType] = PlotManager.getPlotterFrame(curveType+" curve from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, 1)
self.plotFrames["Volts"] = PlotManager.getPlotterFrame("Voltage traces from project: "+str(self.project.getProjectFile())+" on "+simulator , 0, plotAllTraces)
self.plotFrames[curveType].setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW)
info = curveType+" curve for Simulation Configuration: "+str(simConfig)
if (curveType == "F-I") :
self.dataSets[curveType] = DataSet(info, info, "nA", "Hz", "Current injected", "Firing frequency")
elif (curveType == "SS-I") :
self.dataSets[curveType] = DataSet(info, info, "nA", "V", "Current injected", "Steady state Voltage")
self.dataSets[curveType].setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT)
simList = simRefsVsStims.keys()
simList.sort()
for sim in simList:
simDir = File(self.project.getProjectMainDirectory(), "/simulations/"+sim)
self.printver("--- Reloading data from simulation in directory: %s"%simDir.getCanonicalPath())
try:
simData = SimulationData(simDir)
simData.initialise()
self.printver("Data loaded: ")
self.printver(simData.getAllLoadedDataStores())
times = simData.getAllTimes()
cellSegmentRef = simConfig.getCellGroups().get(0)+"_0"
volts = simData.getVoltageAtAllTimes(cellSegmentRef)
traceInfo = "Voltage at: %s in simulation: %s"%(cellSegmentRef, sim)
self.dataSets["V"] = DataSet(traceInfo, traceInfo, "mV", "ms", "Membrane potential", "Time")
for i in range(len(times)):
self.dataSets["V"].addPoint(times[i], volts[i])
if plotAllTraces:
self.plotFrames["V"].addDataSet(self.dataSets["V"])
if (curveType == "F-I") :
spikeTimes = SpikeAnalyser.getSpikeTimes(volts, times, analyseThreshold, analyseStartTime, analyseStopTime)
stimAmp = simRefsVsStims[sim]
self.printver("Number of spikes at %f nA in sim %s: %i"%(stimAmp, sim, len(spikeTimes)))
avgFreq = 0
if len(spikeTimes)>1:
avgFreq = len(spikeTimes)/ ((analyseStopTime - analyseStartTime)/1000.0)
self.dataSets["F-I"].addPoint(stimAmp,avgFreq)
else:
self.dataSets["F-I"].addPoint(stimAmp,0)
elif (curveType == "SS-I") :
# check within analyseStartTime and analyseStopTime if we deviate by more than +/- analyseThreshold
steadyStateVoltageFound = False
stimAmp = simRefsVsStims[sim]
minVolt = 99999999
maxVolt = -99999999
for i in range(len(volts)) :
if times[i] >= analyseStartTime and times[i] <= analyseStopTime :
if steadyStateVoltageFound == False:
self.printver("Data start time found for SS-I")
minVolt = volts[i]
maxVolt = volts[i]
self.printver(" i:", i, " times_i:",times[i]," minVolt:",minVolt," maxVolt:",maxVolt," delta:",maxVolt - minVolt," threshold:",analyseThreshold)
steadyStateVoltageFound = True
if volts[i] < minVolt :
minVolt = volts[i]
elif volts[i] > maxVolt :
maxVolt = volts[i]
if (maxVolt - minVolt) > analyseThreshold :
self.printver("Data outside the threshold for steady state voltage, Error")
self.printver(" i:", i, " times_i:",times[i]," minVolt:",minVolt," maxVolt:",maxVolt," delta:",maxVolt - minVolt," threshold:",analyseThreshold)
steadyStateVoltageFound = False
break
if (steadyStateVoltageFound) :
midVoltage = (minVolt + maxVolt) / 2
self.dataSets["SS-I"].addPoint(stimAmp,midVoltage)
except:
self.printver("Error analysing simulation data from: %s"%simDir.getCanonicalPath())
self.printver(sys.exc_info()[0])
self.plotFrames[curveType].addDataSet(self.dataSets[curveType])
| rgerkin/neuroConstruct | pythonNeuroML/nCUtils/ncutils.py | Python | gpl-2.0 | 66,510 |
#!/usr/bin/python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import optparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "0.9.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""run cmd in python."""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(tarball, install_args=()):
"""install tarball."""
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
"""build egg."""
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
"""download package."""
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
"""use setuptools to do the setup."""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.VersionConflict:
error = sys.exc_info()[1]
if was_imported:
sys.stderr.writelines([
"The required version of setuptools (>=%s) is not available,",
"and can't be installed while this script is running. Please",
"install a more recent version first, using",
"'easy_install -U setuptools'.",
"",
"(Currently using %r)" % (version, error.args[0]),
"",
])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "setuptools-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
def _extractall(self, path=".", members=None):
"""Extract all members.
From the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
"""sort dir"""
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
error = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % error)
def _build_install_args(options):
"""Build install args
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
"""Parse the command line for options."""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
options, _ = parser.parse_args()
# positional arguments are ignored
return options
def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall."""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base)
return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
| baigk/compass-core | ez_setup.py | Python | apache-2.0 | 9,482 |
# Copyright 2011 The greplin-exception-catcher Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for logging exceptions to files suitable for sending to gec."""
import json
import os
import os.path
import uuid
import logging
import time
import random
class GecHandler(logging.Handler):
"""Log observer that writes exceptions to json files to be picked up by upload.py."""
def __init__(self, path, project, environment, serverName, prepareMessage=None):
self.__path = path
self.__project = project
self.__environment = environment
self.__serverName = serverName
self.__prepareMessage = prepareMessage
logging.Handler.__init__(self)
def emit(self, item):
"""Emit an error from the given event, if it was an error event."""
if item.exc_info:
formatted = self.formatException(item)
else:
formatted = self.formatLogMessage(item)
result = {
'project': self.__project,
'environment': self.__environment,
'serverName': self.__serverName,
'errorLevel': item.levelname,
}
result.update(formatted)
self.write(json.dumps(result))
def write(self, output):
"""Write an exception to disk, possibly overwriting a previous one"""
filename = os.path.join(self.__path, str(uuid.uuid4()) + '.gec.json')
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write(output)
def formatLogMessage(self, item):
"""Format a log message that got triggered without an exception"""
try:
itemMessage = item.getMessage()
except TypeError:
itemMessage = 'Error formatting message'
log = {
'type': "%s message" % item.levelname,
'message': itemMessage,
'backtrace': "%s:%d at %s" % (item.module, item.lineno, item.pathname)
}
if self.__prepareMessage:
return self.__prepareMessage(log)
return log
def formatException(self, item):
"""Format an exception"""
exception = {
'type': item.exc_info[0].__module__ + '.' + item.exc_info[0].__name__,
'message': str(item.exc_info[1]),
'logMessage': getattr(item, 'message', None) or getattr(item, 'msg', None),
'backtrace': item.exc_text,
'loggedFrom': "%s:%d at %s" % (item.module, item.lineno, item.pathname)
}
if self.__prepareMessage:
return self.__prepareMessage(exception)
return exception
def stop(self):
"""Stop observing log events."""
logging.getLogger().removeHandler(self)
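# Example usage (illustrative sketch; the spool path, project and server names
# are assumptions): attach the handler to the root logger so that ERROR records
# and logged exceptions are written as .gec.json files for upload.py to pick up.
#
#   import logging
#
#   handler = GecHandler('/var/spool/gec', 'my-project', 'prod', 'web-01')
#   handler.setLevel(logging.ERROR)
#   logging.getLogger().addHandler(handler)
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       logging.exception("Division failed")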
class GentleGecHandler(GecHandler):
"""A GEC Handler that conserves disk space by overwriting errors
"""
MAX_BASENAME = 10
MAX_ERRORS = 10000
def __init__(self, path, project, environment, serverName, prepareException=None):
GecHandler.__init__(self, path, project, environment, serverName, prepareException)
self.baseName = random.randint(0, GentleGecHandler.MAX_BASENAME)
self.errorId = random.randint(0, GentleGecHandler.MAX_ERRORS)
def write(self, output):
self.errorId = (self.errorId + 1) % GentleGecHandler.MAX_ERRORS
filename = os.path.join(self._GecHandler__path, '%d-%d.gec.json' % (self.baseName, self.errorId))
with open(filename, 'w') as f:
f.write(output)
class SpaceAwareGecHandler(GecHandler):
"""A gec log handler that will stop logging when free disk space / inodes become too low
"""
SPACE_CHECK_COUNTER_MAX = 128
LIMITED_LOGGING_PC = 0.3
NO_LOGGING_PC = 0.1
def __init__(self, path, project, environment, serverName, prepareException=None):
GecHandler.__init__(self, path, project, environment, serverName, prepareException)
self.spaceCheckCounter = 0
self.lastStatus = True
def logDiskSpaceError(self):
"""Log an error message complaining about low disk space (instead of the original message)
"""
noSpaceLeft = {'created': time.time(),
'process': os.getpid(),
'module': 'greplin.gec.logging.logHandler',
'levelno': 40,
'exc_text': None,
'lineno': 113,
'msg': 'Not enough free blocks/inodes on this disk',
'exc_info': None,
'funcName': 'checkSpace',
'levelname': 'ERROR'}
GecHandler.emit(self, logging.makeLogRecord(noSpaceLeft))
def doCheckSpace(self):
"""Check blocks/inodes and log an error if we're too low on either
"""
vfsStat = os.statvfs(self._GecHandler__path)
blocksLeft = float(vfsStat.f_bavail) / vfsStat.f_blocks
inodesLeft = float(vfsStat.f_favail) / vfsStat.f_files
if blocksLeft < self.LIMITED_LOGGING_PC or inodesLeft < self.LIMITED_LOGGING_PC:
self.lastStatus = False
if blocksLeft > self.NO_LOGGING_PC or inodesLeft > self.NO_LOGGING_PC:
self.logDiskSpaceError()
else:
self.lastStatus = True
def checkSpace(self):
"""Runs the actual disk space check only on every SPACE_CHECK_COUNTER_MAX calls
"""
self.spaceCheckCounter -= 1
if self.spaceCheckCounter < 0:
      self.spaceCheckCounter = SpaceAwareGecHandler.SPACE_CHECK_COUNTER_MAX
self.doCheckSpace()
return self.lastStatus
def emit(self, item):
"""Log a message if we have enough disk resources.
"""
if self.checkSpace():
GecHandler.emit(self, item)
| Cue/greplin-exception-catcher | python/logging/greplin/gec/logHandler.py | Python | apache-2.0 | 5,815 |
from ... import BaseProvider
class Provider(BaseProvider):
"""
Provider for Philippine IDs that are related to social security
There is no unified social security program in the Philippines. Instead, the Philippines has a messy collection of
social programs and IDs that, when put together, serves as an analogue of other countries' social security program.
The government agencies responsible for these programs have relatively poor/outdated information and documentation
    on their respective websites, so the sources section includes third party "unofficial" information.
- Social Security System (SSS) - Social insurance program for workers in private, professional, and informal sectors
- Government Service Insurance System (GSIS) - Social insurance program for government employees
- Home Development Mutual Fund (popularly known as Pag-IBIG) - Socialized financial assistance and loaning program
- Philippine Health Insurance Corporation (PhilHealth) - Social insurance program for health care
- Unified Multi-Purpose ID (UMID) - Identity card with common reference number (CRN) that serves as a link to
the four previous programs and was planned to supersede the previous IDs, but
its future is now uncertain because of the upcoming national ID system
Sources:
- https://www.sss.gov.ph/sss/DownloadContent?fileName=SSSForms_UMID_Application.pdf
- https://www.gsis.gov.ph/active-members/benefits/ecard-plus/
- https://www.pagibigfund.gov.ph/DLForms/providentrelated/PFF039_MembersDataForm_V07.pdf
- https://filipiknow.net/is-umid-and-sss-id-the-same/
- https://filipiknow.net/philhealth-number/
- https://en.wikipedia.org/wiki/Unified_Multi-Purpose_ID
"""
sss_formats = ('##-#######-#',)
gsis_formats = ('###########',)
philhealth_formats = ('##-#########-#',)
pagibig_formats = ('####-####-####',)
umid_formats = ('####-#######-#',)
def sss(self):
return self.numerify(self.random_element(self.sss_formats))
def gsis(self):
return self.numerify(self.random_element(self.gsis_formats))
def pagibig(self):
return self.numerify(self.random_element(self.pagibig_formats))
def philhealth(self):
return self.numerify(self.random_element(self.philhealth_formats))
def umid(self):
return self.numerify(self.random_element(self.umid_formats))
def ssn(self):
# Use UMID as SSN in the interim till its deprecation
return self.umid()
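# Example usage (illustrative sketch; assumes the standard Faker API with the
# en_PH locale, which picks up this provider automatically):
#
#   from faker import Faker
#
#   fake = Faker('en_PH')
#   fake.sss()         # e.g. '12-3456789-1'
#   fake.philhealth()  # e.g. '12-345678901-2'
#   fake.ssn()         # UMID-style number, e.g. '1234-5678901-2'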
| danhuss/faker | faker/providers/ssn/en_PH/__init__.py | Python | mit | 2,596 |
import sys
import time
import logging
from decimal import *
logger = logging.getLogger("faircoins")
from django.conf import settings
from django.db.models import Q
import faircoin_nrp.electrum_fair_nrp as efn
from django_rea.valueaccounting.models import EconomicAgent, EconomicEvent, EconomicResource
from django_rea.valueaccounting.lockfile import FileLock, AlreadyLocked, LockTimeout, LockFailed
#FAIRCOIN_DIVISOR = int(1000000)
def init_electrum_fair():
#import pdb; pdb.set_trace()
try:
assert(efn.daemon_is_up())
except:
#handle failure better here
msg = "Can not init Electrum Network. Exiting."
assert False, msg
def acquire_lock():
lock = FileLock("broadcast-faircoins")
logger.debug("acquiring lock...")
try:
#lock.acquire(settings.BROADCAST_FAIRCOINS_LOCK_WAIT_TIMEOUT)
lock.acquire(1)
except AlreadyLocked:
logger.warning("lock already in place. quitting.")
return False
except LockTimeout:
logger.warning("waiting for the lock timed out. quitting.")
return False
logger.debug("lock acquired.")
return lock
def create_address_for_agent(agent):
#import pdb; pdb.set_trace()
address = None
try:
address = efn.new_fair_address(
entity_id = agent.nick,
entity = agent.agent_type.name,
)
except Exception:
_, e, _ = sys.exc_info()
logger.critical("an exception occurred in creating a FairCoin address: {0}".format(e))
return address
def create_address_for_resource(resource):
agent = resource.owner()
address = create_address_for_agent(agent)
if address:
resource.digital_currency_address = address
resource.save()
return True
else:
msg = " ".join(["Failed to get a FairCoin address for", agent.name])
logger.warning(msg)
return False
def create_requested_addresses():
try:
requests = EconomicResource.objects.filter(
digital_currency_address="address_requested")
msg = " ".join(["new FairCoin address requests count:", str(requests.count())])
logger.debug(msg)
except Exception:
_, e, _ = sys.exc_info()
logger.critical("an exception occurred in retrieving FairCoin address requests: {0}".format(e))
return "failed to get FairCoin address requests"
if requests:
init_electrum_fair()
logger.debug("broadcast_tx ready to process FairCoin address requests")
for resource in requests:
result = create_address_for_resource(resource)
msg = " ".join(["created", str(requests.count()), "new faircoin addresses."])
else:
msg = "No new faircoin address requests to process."
return msg
def broadcast_tx():
#import pdb; pdb.set_trace()
try:
events = EconomicEvent.objects.filter(
digital_currency_tx_state="new").order_by('pk')
events = events.filter(
Q(event_type__name='Give')|Q(event_type__name='Distribution'))
msg = " ".join(["new FairCoin event count:", str(events.count())])
logger.debug(msg)
except Exception:
_, e, _ = sys.exc_info()
logger.critical("an exception occurred in retrieving events: {0}".format(e))
logger.warning("releasing lock because of error...")
lock.release()
logger.debug("released.")
return "failed to get events"
try:
#import pdb; pdb.set_trace()
successful_events = 0
failed_events = 0
if events:
init_electrum_fair()
logger.critical("broadcast_tx ready to process events")
for event in events:
#do we need to check for missing digital_currency_address here?
#and create them?
#fee = efn.network_fee() # In Satoshis
#fee = Decimal("%s" %fee) / FAIRCOIN_DIVISOR
if event.resource:
if event.event_type.name=="Give":
address_origin = event.resource.digital_currency_address
address_end = event.event_reference
elif event.event_type.name=="Distribution":
address_origin = event.from_agent.faircoin_address()
address_end = event.resource.digital_currency_address
amount = float(event.quantity) * 1.e6 # In satoshis
if amount < 1001:
event.digital_currency_tx_state = "broadcast"
event.digital_currency_tx_hash = "Null"
event.save()
continue
logger.critical("about to make_transaction_from_address. Amount: %d" %(int(amount)))
#import pdb; pdb.set_trace()
tx_hash = None
try:
tx_hash = efn.make_transaction_from_address(address_origin, address_end, int(amount))
except Exception:
_, e, _ = sys.exc_info()
logger.critical("an exception occurred in make_transaction_from_address: {0}".format(e))
if (tx_hash == "ERROR") or (not tx_hash):
logger.warning("ERROR tx_hash, make tx failed without raising Exception")
failed_events += 1
elif tx_hash:
successful_events += 1
event.digital_currency_tx_state = "broadcast"
event.digital_currency_tx_hash = tx_hash
event.save()
transfer = event.transfer
if transfer:
revent = transfer.receive_event()
if revent:
revent.digital_currency_tx_state = "broadcast"
revent.digital_currency_tx_hash = tx_hash
revent.save()
msg = " ".join([ "**** sent tx", tx_hash, "amount", str(amount), "from", address_origin, "to", address_end ])
logger.debug(msg)
except Exception:
_, e, _ = sys.exc_info()
logger.critical("an exception occurred in processing events: {0}".format(e))
"""
logger.warning("releasing lock because of error...")
lock.release()
logger.debug("released.")
"""
return "failed to process events"
"""
logger.debug("releasing lock normally...")
lock.release()
logger.debug("released.")
"""
if events:
msg = " ".join(["Broadcast", str(successful_events), "new faircoin tx."])
if failed_events:
msg += " ".join([ str(failed_events), "events failed."])
else:
msg = "No new faircoin tx to process."
return msg
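# Example usage (illustrative sketch): these helpers are intended to be driven
# by an external scheduler (e.g. a cron job or a management command) rather
# than run interactively; a minimal invocation could be:
#
#   print(create_requested_addresses())
#   print(broadcast_tx())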
| django-rea/nrp | django_rea/valueaccounting/process_faircoin_requests.py | Python | agpl-3.0 | 6,876 |
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from pressgang.core.exceptions import PressGangConfigurationError
# The prefix that a user must use in their settings.py file before any variables
# that directly control a PressGang setting
_SETTINGS_PREFIX = "PRESSGANG_"
def _get_app_setting(setting):
"""Get the value of the requested application setting.
This searches first in the user's settings.py file, looking for the requested
    setting name prefixed with the app identifier. If the value is not found,
    None is returned.
Arguments:
setting -- the name of the setting
Returns: the value of the application setting
"""
return getattr(settings, "%s%s" % (_SETTINGS_PREFIX, setting), None)
def _setting_name(setting):
"""Return the setting name as it would appear in a user's settings.py file.
Returns: a string of the setting name as it would be in a settings.py file.
"""
return "settings.%s%s" % (_SETTINGS_PREFIX, setting)
# The absolute path to the directory that will contain per-blog Apache config files
APACHE_CONFIGS_DIR = _get_app_setting('APACHE_CONFIGS_DIR')
if not APACHE_CONFIGS_DIR:
raise PressGangConfigurationError(_("You must provide the absolute path to the directory that will contain per-blog Apache configuration files via %(var)s.") % {'var': _setting_name('APACHE_CONFIGS_DIR')})
# The root directory from which Apache serves files
APACHE_DOCUMENT_ROOT = _get_app_setting('APACHE_DOCUMENT_ROOT')
if not APACHE_DOCUMENT_ROOT:
raise PressGangConfigurationError(_("You must provide the value of Apache's DocumentRoot via %(var)s.") % {'var': _setting_name('APACHE_DOCUMENT_ROOT')})
# The directory used for storing blog backups
BACKUPS_DIR = _get_app_setting('BACKUPS_DIR')
if not BACKUPS_DIR:
raise PressGangConfigurationError(_("You must provide the absolute path to the directory that will contain blog backups via %(var)s.") % {'var': _setting_name('BACKUPS_DIR')})
# The password to go with the DB_ADMIN_USER admin username
DB_ADMIN_PASSWORD = _get_app_setting('DB_ADMIN_PASSWORD')
if not DB_ADMIN_PASSWORD:
raise PressGangConfigurationError(_("You must provide the password of the MySQL admin user via %(var)s.") % {'var': _setting_name('DB_ADMIN_PASSWORD')})
# The username of a MySQL admin user
DB_ADMIN_USER = _get_app_setting('DB_ADMIN_USER')
if not DB_ADMIN_USER:
raise PressGangConfigurationError(_("You must provide the username of the MySQL admin user via %(var)s.") % {'var': _setting_name('DB_ADMIN_USER')})
# Installation packages that can be extended by the user
INSTALLERS = [
'pressgang.actions.install.installers.wp3',
'pressgang.actions.install.installers.wp3ms'
]
INSTALLERS.extend(_get_app_setting('INSTALLERS') or [])
# The full path to the mysql executable
MYSQL_PATH = _get_app_setting('MYSQL_PATH')
if not MYSQL_PATH:
raise PressGangConfigurationError(_("You must provide the full path to your mysql executable via %(var)s.") % {'var': _setting_name('MYSQL_PATH')})
# The full path to the mysqldump
MYSQLDUMP_PATH = _get_app_setting('MYSQLDUMP_PATH')
if not MYSQLDUMP_PATH:
raise PressGangConfigurationError(_("You must provide the full path to your mysqldump executable via %(var)s.") % {'var': _setting_name('MYSQLDUMP_PATH')})
# The base HTTP address of the server
SERVER_BASE = _get_app_setting('SERVER_BASE')
if not SERVER_BASE:
raise PressGangConfigurationError(_("You must provide the network location of your server via %(var)s.") % {'var': _setting_name('SERVER_BASE')})
| oberlin/pressgang | pressgang/settings.py | Python | bsd-3-clause | 3,526 |
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run all existing unit tests."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import glob
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from adspygoogle.adwords import LIB_SIG
from adspygoogle.common.Logger import Logger
LOG_NAME = 'adwords_api_lib'
LOGGER = Logger(LIB_SIG, os.path.join('..', '..', '..', 'logs'))
suite = unittest.TestSuite()
tests = [test[:-3] for test in glob.glob('*_test.py')]
for test in tests:
suite.addTest(unittest.TestLoader().loadTestsFromModule(__import__(test)))
if __name__ == '__main__':
LOGGER.Log(LOG_NAME, 'Start all unit tests.', log_level=Logger.DEBUG)
unittest.TextTestRunner().run(suite)
LOGGER.Log(LOG_NAME, 'End all unit tests.', log_level=Logger.DEBUG)
| donspaulding/adspygoogle | tests/adspygoogle/adwords/alltests.py | Python | apache-2.0 | 1,393 |
#!/usr/bin/python
#
# :Author: PB
# :Date: 4 Aug 2015
# :License: GPL v2
#
# motor controller for k Hats with a list of motors on each Hat
#
# Todo:
# -- add polling for sensors on ADC (see cmtd code)
# -- what process for determining presence? average last second?
# -- write poll result to sensor file
# -- switch motor action from file polling to presence calc.
# -- control illumination lights? slow up, slow down. (whiteleds.py)
#
import time
import json
import collections
import os
import atexit
import os
import random
import signal
import sys
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
sys.path.append("/home/pi/Adafruit-Raspberry-Pi-Python-Code/Adafruit_ADS1x15")
from Adafruit_ADS1x15 import ADS1x15
ADS1015 = 0x00 # 12-bit ADC
def handle_INT(signal, frame):
print("SIGINT caught, exiting gracefully.")
shutdown(quit=True)
def handle_TERM(signal, frame):
print("SIGTERM caught, exiting gracefully.")
halt()
def halt(ignore=None):
shutdown(quit=True, halt=True)
def turnon_lamp():
GPIO.output(LAMP_PIN, GPIO.HIGH)
def shutdown_lamp():
GPIO.output(LAMP_PIN, GPIO.LOW)
def shutdown_motors():
log_rec = {'timestamp': int(time.time()),
'message': "shutting down motors."}
with open('/home/pi/var/log/status.log', 'a') as logFH:
logFH.write("%s\n" % json.dumps(log_rec))
for hat in hats:
hat.shutdown()
def shutdown(quit=False, halt=False):
log_rec = {'timestamp': int(time.time()),
'message': "shutting down with quit=%s and halt=%s."}
with open('/home/pi/var/log/status.log', 'a') as logFH:
logFH.write("%s\n" % json.dumps(log_rec))
    with open('/home/pi/var/spool/ir-dutycycle', 'w') as dcFH:
dcFH.write("0\n")
for hat in hats:
hat.shutdown()
if halt:
os.system("sudo shutdown -h now")
if quit or halt:
sys.exit(0)
class Hat(object):
def __init__(self, addr, motorlist, verbose=False):
self.name = addr
self.verbose = verbose
self.hat = Adafruit_MotorHAT(int(addr, 16))
self.motors = dict([(m, self.hat.getMotor(m)) for m in motorlist])
self.until = dict([(m, None) for m in motorlist])
for motorname, motor in self.motors.items():
motor.setSpeed(150)
motor.run(Adafruit_MotorHAT.FORWARD)
motor.run(Adafruit_MotorHAT.RELEASE)
if self.verbose:
print("init hat %s motor %s" % (self.name, motorname))
def shutdown_one(self, motorname):
self.until[motorname] = None
motor = self.motors[motorname]
motor.run(Adafruit_MotorHAT.RELEASE)
if self.verbose:
print("shutdown hat %s motor %s" % (self.name, motorname))
def shutdown(self):
for motorname, motor in self.motors.items():
self.shutdown_one(motorname)
def run(self, motorname, direction, speedpct, text=""):
if speedpct < 1:
self.motors[motorname].run(Adafruit_MotorHAT.RELEASE)
if self.verbose:
print("hat %s motor %s resting %s" % (self.name, motorname, text))
else:
direction = Adafruit_MotorHAT.FORWARD if direction[0].upper() == "F" else Adafruit_MotorHAT.BACKWARD
speed = int((speedpct if speedpct < 1.0 else speedpct / 100.0) * 253)
self.motors[motorname].run(direction)
self.motors[motorname].setSpeed(speed)
if self.verbose:
print("hat %s motor %s running %s at speed %s %s" %
(self.name, motorname, direction, speed, text))
def run_for(self, motorname, direction, speedpct, runtime=10):
self.until[motorname] = time.time() + runtime
self.run(motorname, direction, speedpct, text="for %ss" % int(runtime))
def run_random(self, motorname):
direction = "F" if random.getrandbits(1) == 0 else "B"
speedpct = abs(int(random.gauss(60, 40)))
runtime = abs(random.gauss(6, 3))
speedpct = 0 if speedpct < 35 or runtime > 20 else speedpct
self.run_for(motorname, direction, speedpct, runtime)
def check_all_and_restart(self):
for motorname, until in self.until.items():
if until is None or time.time() > until:
self.run_random(motorname)
def test_on(self, secs=10):
for motorname, motor in self.motors.items():
self.run(motorname, 'F', 50,
'testing hat %s motor %s at %s' % (self.name, motorname, 50))
time.sleep(secs)
self.shutdown()
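# Example usage (illustrative sketch; the I2C address and motor numbers are
# assumptions): driving a single Hat directly, outside the main polling loop.
#
#   hat = Hat('0x60', [1, 2], verbose=True)
#   hat.run_for(1, 'F', 60, runtime=5)   # motor 1 forward at ~60% for 5 s
#   hat.check_all_and_restart()          # restart any motors whose time is up
#   hat.shutdown()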
if __name__ == "__main__":
signal.signal(signal.SIGINT, handle_INT)
signal.signal(signal.SIGTERM, handle_TERM)
verbose = True
ADS1015 = 0x00 # 12-bit ADC
adc = ADS1x15(ic=ADS1015)
V_per_mV_read = 200.8
IR_VALUE = 100 # tune this
hats = [Hat('0x67', [1,2,3,4], verbose=verbose),
Hat('0x61', [1,2,3,4], verbose=verbose),
Hat('0x60', [1,2,3,4], verbose=verbose),
Hat('0x66', [1,2,3,4], verbose=verbose)]
poll_interval = 1
running = False
sensed = False
log_rec = dict()
voltages = collections.deque(maxlen=100)
# needs to rotate the status.log to its timestamp
logpath = "/home/pi/var/log/status.log"
logtime = time.gmtime(os.stat(logpath).st_mtime)
newpath = "/home/pi/var/log/%s.log" % time.strftime("%Y-%m-%d-%H.%M.%SZ", logtime)
os.rename(logpath, newpath)
with open(logpath, 'w') as logFH:
logFH.write('# beginning log')
while True:
measured_V = adc.readADCSingleEnded(0, 4096, 250)
measured_V = round(measured_V/V_per_mV_read, 2)
voltages.append(measured_V)
mean_V = round(sum(voltages)/len(voltages), 2)
# log everything on each loop
log_rec = {'timestamp': int(time.time()),
'running': running, 'sensed': sensed,
'mean_volts': mean_V,
'measured_volts': measured_V}
with open(logpath, 'a') as logFH:
logFH.write("%s\n" % json.dumps(log_rec))
# now take action.
if mean_V < 11.1:
log_rec = {'timestamp': int(time.time()),
'message': "mean volts too low, shutting down."}
with open(logpath, 'a') as logFH:
logFH.write("%s\n" % json.dumps(log_rec))
shutdown(quit=True, halt=True)
with open("/home/pi/var/spool/run-motors", 'r') as presence:
sensed = presence.read().strip()[-1]
if sensed not in ["0", "1"] or sensed == "0":
# print(sensed)
if running:
shutdown(quit=False)
running = False
time.sleep(poll_interval/2.0)
continue
if not running:
# startup code: bring up IR light, then start motors
running = True
with open('/home/pi/var/spool/ir-dutycycle', 'w') as dcFH:
dcFH.write("%s\n" % IR_VALUE)
for hat in hats:
hat.check_all_and_restart()
time.sleep(poll_interval) # possible to go slower?
shutdown(quit=True)
| slobberchops/rop | controls/controllers.py | Python | gpl-3.0 | 6,303 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for accessing datastore usage statistics.
These entities cannot be created by users, but are populated in the
application's datastore by offline processes run by the Google Cloud team.
"""
from google.cloud.ndb import model
__all__ = [
"BaseKindStatistic",
"BaseStatistic",
"GlobalStat",
"KindCompositeIndexStat",
"KindNonRootEntityStat",
"KindPropertyNamePropertyTypeStat",
"KindPropertyNameStat",
"KindPropertyTypeStat",
"KindRootEntityStat",
"KindStat",
"NamespaceGlobalStat",
"NamespaceKindCompositeIndexStat",
"NamespaceKindNonRootEntityStat",
"NamespaceKindPropertyNamePropertyTypeStat",
"NamespaceKindPropertyNameStat",
"NamespaceKindPropertyTypeStat",
"NamespaceKindRootEntityStat",
"NamespaceKindStat",
"NamespacePropertyTypeStat",
"NamespaceStat",
"PropertyTypeStat",
]
class BaseStatistic(model.Model):
"""Base Statistic Model class.
Attributes:
bytes (int): the total number of bytes taken up in Cloud Datastore for
the statistic instance.
count (int): attribute is the total number of occurrences of the
statistic in Cloud Datastore.
timestamp (datetime.datetime): the time the statistic instance was
written to Cloud Datastore.
"""
# This is necessary for the _get_kind() classmethod override.
STORED_KIND_NAME = "__BaseStatistic__"
bytes = model.IntegerProperty()
count = model.IntegerProperty()
timestamp = model.DateTimeProperty()
@classmethod
def _get_kind(cls):
"""Kind name override."""
return cls.STORED_KIND_NAME
class BaseKindStatistic(BaseStatistic):
"""Base Statistic Model class for stats associated with kinds.
Attributes:
kind_name (str): the name of the kind associated with the statistic
instance.
entity_bytes (int): the number of bytes taken up to store the statistic
in Cloud Datastore minus the cost of storing indices.
"""
STORED_KIND_NAME = "__BaseKindStatistic__"
kind_name = model.StringProperty()
entity_bytes = model.IntegerProperty(default=0)
class GlobalStat(BaseStatistic):
"""An aggregate of all entities across the entire application.
This statistic only has a single instance in Cloud Datastore that contains
the total number of entities stored and the total number of bytes they take
up.
Attributes:
entity_bytes (int): the number of bytes taken up to store the statistic
in Cloud Datastore minus the cost of storing indices.
builtin_index_bytes (int): the number of bytes taken up to store
built-in index entries.
builtin_index_count (int): the number of built-in index entries.
composite_index_bytes (int): the number of bytes taken up to store
composite index entries.
composite_index_count (int): the number of composite index entries.
"""
STORED_KIND_NAME = "__Stat_Total__"
entity_bytes = model.IntegerProperty(default=0)
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
composite_index_bytes = model.IntegerProperty(default=0)
composite_index_count = model.IntegerProperty(default=0)
class NamespaceStat(BaseStatistic):
"""An aggregate of all entities across an entire namespace.
This statistic has one instance per namespace. The key_name is the
represented namespace. NamespaceStat entities will only be found
in the namespace "" (empty string). It contains the total
number of entities stored and the total number of bytes they take up.
Attributes:
subject_namespace (str): the namespace associated with the statistic
instance.
entity_bytes (int): the number of bytes taken up to store the statistic
in Cloud Datastore minus the cost of storing indices.
builtin_index_bytes (int): the number of bytes taken up to store
builtin-in index entries.
builtin_index_count (int): the number of built-in index entries.
composite_index_bytes (int): the number of bytes taken up to store
composite index entries.
composite_index_count (int): the number of composite index entries.
"""
STORED_KIND_NAME = "__Stat_Namespace__"
subject_namespace = model.StringProperty()
entity_bytes = model.IntegerProperty(default=0)
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
composite_index_bytes = model.IntegerProperty(default=0)
composite_index_count = model.IntegerProperty(default=0)
class KindStat(BaseKindStatistic):
"""An aggregate of all entities at the granularity of their Kind.
There is an instance of the KindStat for every Kind that is in the
application's datastore. This stat contains per-Kind statistics.
Attributes:
builtin_index_bytes (int): the number of bytes taken up to store
built-in index entries.
builtin_index_count (int): the number of built-in index entries.
composite_index_bytes (int): the number of bytes taken up to store
composite index entries.
composite_index_count (int): the number of composite index entries.
"""
STORED_KIND_NAME = "__Stat_Kind__"
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
composite_index_bytes = model.IntegerProperty(default=0)
composite_index_count = model.IntegerProperty(default=0)
class KindRootEntityStat(BaseKindStatistic):
"""Statistics of the number of root entities in Cloud Datastore by Kind.
There is an instance of the KindRootEntityState for every Kind that is in
the application's datastore and has an instance that is a root entity. This
stat contains statistics regarding these root entity instances.
"""
STORED_KIND_NAME = "__Stat_Kind_IsRootEntity__"
class KindNonRootEntityStat(BaseKindStatistic):
"""Statistics of the number of non root entities in Cloud Datastore by Kind.
There is an instance of the KindNonRootEntityStat for every Kind that is in
the application's datastore that is a not a root entity. This stat
contains statistics regarding these non root entity instances.
"""
STORED_KIND_NAME = "__Stat_Kind_NotRootEntity__"
class PropertyTypeStat(BaseStatistic):
"""An aggregate of all properties across the entire application by type.
There is an instance of the PropertyTypeStat for every property type
(google.appengine.api.datastore_types._PROPERTY_TYPES) in use by the
application in its datastore.
Attributes:
property_type (str): the property type associated with the statistic
instance.
entity_bytes (int): the number of bytes taken up to store the statistic
in Cloud Datastore minus the cost of storing indices.
builtin_index_bytes (int): the number of bytes taken up to store
built-in index entries.
builtin_index_count (int): the number of built-in index entries.
"""
STORED_KIND_NAME = "__Stat_PropertyType__"
property_type = model.StringProperty()
entity_bytes = model.IntegerProperty(default=0)
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
class KindPropertyTypeStat(BaseKindStatistic):
"""Statistics on (kind, property_type) tuples in the app's datastore.
There is an instance of the KindPropertyTypeStat for every
(kind, property_type) tuple in the application's datastore.
Attributes:
property_type (str): the property type associated with the statistic
instance.
        builtin_index_bytes (int): the number of bytes taken up to store
built-in index entries.
builtin_index_count (int): the number of built-in index entries.
"""
STORED_KIND_NAME = "__Stat_PropertyType_Kind__"
property_type = model.StringProperty()
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
class KindPropertyNameStat(BaseKindStatistic):
"""Statistics on (kind, property_name) tuples in the app's datastore.
There is an instance of the KindPropertyNameStat for every
(kind, property_name) tuple in the application's datastore.
Attributes:
property_name (str): the name of the property associated with the
statistic instance.
builtin_index_bytes (int): the number of bytes taken up to store
built-in index entries.
builtin_index_count (int): the number of built-in index entries.
"""
STORED_KIND_NAME = "__Stat_PropertyName_Kind__"
property_name = model.StringProperty()
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
class KindPropertyNamePropertyTypeStat(BaseKindStatistic):
"""Statistic on (kind, property_name, property_type) tuples in Cloud
Datastore.
There is an instance of the KindPropertyNamePropertyTypeStat for every
(kind, property_name, property_type) tuple in the application's datastore.
Attributes:
property_type (str): the property type associated with the statistic
instance.
property_name (str): the name of the property associated with the
statistic instance.
builtin_index_bytes (int): the number of bytes taken up to store
built-in index entries
builtin_index_count (int): the number of built-in index entries.
"""
STORED_KIND_NAME = "__Stat_PropertyType_PropertyName_Kind__"
property_type = model.StringProperty()
property_name = model.StringProperty()
builtin_index_bytes = model.IntegerProperty(default=0)
builtin_index_count = model.IntegerProperty(default=0)
class KindCompositeIndexStat(BaseStatistic):
"""Statistic on (kind, composite_index_id) tuples in Cloud Datastore.
There is an instance of the KindCompositeIndexStat for every unique
(kind, composite_index_id) tuple in the application's datastore indexes.
Attributes:
index_id (int): the id of the composite index associated with the
statistic instance.
kind_name (str): the name of the kind associated with the statistic
instance.
"""
STORED_KIND_NAME = "__Stat_Kind_CompositeIndex__"
index_id = model.IntegerProperty()
kind_name = model.StringProperty()
# The following specify namespace-specific stats.
# These types are specific to Cloud Datastore namespace they are located
# within. These will only be produced if datastore entities exist
# in a namespace other than the empty namespace (i.e. namespace="").
class NamespaceGlobalStat(GlobalStat):
"""GlobalStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_Total__"
class NamespaceKindStat(KindStat):
"""KindStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_Kind__"
class NamespaceKindRootEntityStat(KindRootEntityStat):
"""KindRootEntityStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_Kind_IsRootEntity__"
class NamespaceKindNonRootEntityStat(KindNonRootEntityStat):
"""KindNonRootEntityStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_Kind_NotRootEntity__"
class NamespacePropertyTypeStat(PropertyTypeStat):
"""PropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_PropertyType__"
class NamespaceKindPropertyTypeStat(KindPropertyTypeStat):
"""KindPropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_PropertyType_Kind__"
class NamespaceKindPropertyNameStat(KindPropertyNameStat):
"""KindPropertyNameStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_PropertyName_Kind__"
class NamespaceKindPropertyNamePropertyTypeStat(KindPropertyNamePropertyTypeStat):
"""KindPropertyNamePropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_PropertyType_PropertyName_Kind__"
class NamespaceKindCompositeIndexStat(KindCompositeIndexStat):
"""KindCompositeIndexStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for that
particular namespace.
"""
STORED_KIND_NAME = "__Stat_Ns_Kind_CompositeIndex__"
# Maps a datastore stat entity kind name to its respective model class.
# NOTE: Any new stats added to this module should also be added here.
_DATASTORE_STATS_CLASSES_BY_KIND = {
GlobalStat.STORED_KIND_NAME: GlobalStat,
NamespaceStat.STORED_KIND_NAME: NamespaceStat,
KindStat.STORED_KIND_NAME: KindStat,
KindRootEntityStat.STORED_KIND_NAME: KindRootEntityStat,
KindNonRootEntityStat.STORED_KIND_NAME: KindNonRootEntityStat,
PropertyTypeStat.STORED_KIND_NAME: PropertyTypeStat,
KindPropertyTypeStat.STORED_KIND_NAME: KindPropertyTypeStat,
KindPropertyNameStat.STORED_KIND_NAME: KindPropertyNameStat,
KindPropertyNamePropertyTypeStat.STORED_KIND_NAME: KindPropertyNamePropertyTypeStat, # noqa: E501
KindCompositeIndexStat.STORED_KIND_NAME: KindCompositeIndexStat,
NamespaceGlobalStat.STORED_KIND_NAME: NamespaceGlobalStat,
NamespaceKindStat.STORED_KIND_NAME: NamespaceKindStat,
NamespaceKindRootEntityStat.STORED_KIND_NAME: NamespaceKindRootEntityStat,
NamespaceKindNonRootEntityStat.STORED_KIND_NAME: NamespaceKindNonRootEntityStat, # noqa: E501
NamespacePropertyTypeStat.STORED_KIND_NAME: NamespacePropertyTypeStat,
NamespaceKindPropertyTypeStat.STORED_KIND_NAME: NamespaceKindPropertyTypeStat, # noqa: E501
NamespaceKindPropertyNameStat.STORED_KIND_NAME: NamespaceKindPropertyNameStat, # noqa: E501
NamespaceKindPropertyNamePropertyTypeStat.STORED_KIND_NAME: NamespaceKindPropertyNamePropertyTypeStat, # noqa: E501
NamespaceKindCompositeIndexStat.STORED_KIND_NAME: NamespaceKindCompositeIndexStat, # noqa: E501
}
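# Example usage (illustrative sketch; the project id is an assumption): reading
# the application-wide GlobalStat singleton with an NDB client.
#
#   from google.cloud import ndb
#
#   client = ndb.Client(project="my-project")
#   with client.context():
#       stat = GlobalStat.query().get()
#       if stat:
#           print(stat.bytes, stat.count, stat.timestamp)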
| googleapis/python-ndb | google/cloud/ndb/stats.py | Python | apache-2.0 | 15,713 |
from django.db import models
# Create your models here.
class Course(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=255)
description = models.TextField()
def __str__(self):
return self.title
class Step(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
content = models.TextField(blank=True, default='')
order = models.IntegerField(default=0)
course = models.ForeignKey(Course)
class Meta:
ordering = ['order',]
def __str__(self):
        return self.title
| davejlin/treehouse | python/django/learning_site/courses/models.py | Python | unlicense | 616 |
import pyquaternion
# Create a quaternion representing a rotation of +90 degrees about positive y axis.
my_quaternion = pyquaternion.Quaternion(axis=[0, 1, 0], degrees=90)
my_vector = [0, 0, 4]
my_rotated_vector = my_quaternion.rotate(my_vector)
print('\nBasic Rotation')
print('--------------')
print('My Vector: {}'.format(my_vector))
print('Performing rotation of {angle} deg about {axis}'.format(angle=my_quaternion.degrees, axis=my_quaternion.axis))
print('My Rotated Vector: {}'.format(my_rotated_vector))
# Create another quaternion representing no rotation at all
null_quaternion = pyquaternion.Quaternion(axis=[0, 1, 0], angle=0)
print('\nInterpolated Rotation')
print('---------------------')
# The following will create a sequence of 9 intermediate quaternion rotation objects
for q in pyquaternion.Quaternion.intermediates(null_quaternion, my_quaternion, 9, include_endpoints=True):
my_interpolated_point = q.rotate(my_vector)
print('My Interpolated Point: {point}\t(after rotation of {angle} deg about {axis})'.format(
point=my_interpolated_point, angle=round(q.degrees, 4), axis=q.axis
))
print('Done!') | KieranWynn/pyquaternion | demo/demo.py | Python | mit | 1,146 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# genedb documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import genedb
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gene Based Database'
copyright = u"2017, W. Bailey Glen Jr"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = genedb.__version__
# The full version, including alpha/beta/rc tags.
release = genedb.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'genedbdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'genedb.tex',
u'Gene Based Database Documentation',
u'W. Bailey Glen Jr', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'genedb',
u'Gene Based Database Documentation',
[u'W. Bailey Glen Jr'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'genedb',
u'Gene Based Database Documentation',
u'W. Bailey Glen Jr',
'genedb',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| BaileyGlen/genedb | docs/conf.py | Python | mit | 8,446 |
{
'repo_type' : 'git',
'url' : 'https://github.com/DeadSix27/AMF',
'rename_folder' : 'amd_media_framework_headers',
'depth_git': 1,
'needs_configure' : False,
'needs_make' : False,
'needs_make_install' : False,
'run_post_patch' : [
'if [ ! -f "already_done" ] ; then if [ ! -d "{target_prefix}/include/AMF" ]; then mkdir -p "{target_prefix}/include/AMF" ; fi ; fi',
'if [ ! -f "already_done" ] ; then pwd ; fi',
'if [ ! -f "already_done" ] ; then cp -av "amf/public/include/." "{target_prefix}/include/AMF" ; fi',
'if [ ! -f "already_done" ] ; then touch "already_done" ; fi',
],
'_info' : { 'version' : None, 'fancy_name' : 'AMF (headers)' },
} | DeadSix27/python_cross_compile_script | packages/dependencies/amf_headers.py | Python | mpl-2.0 | 664 |
#!/usr/bin/env python
#
# Python Object Transfer: transport
#
import struct
import select
import socket
import logging
import threading
import SocketServer
logger = logging.getLogger(__name__)
_DEB = logger.debug
_INF = logger.info
#
# Factory
#
def get_transport(qos={}):
return TCPTransport()
#
# URI encoding/decode
#
def encode_SAP(sap_str):
try:
ttype, sap = sap_str.split('@')
except:
raise CannotEncodeSAP(sap_str)
if ttype == 'null':
return TransportSAP()
elif ttype == 'tcp':
if sap.count(':') == 0:
return TCPSAP(address=sap)
elif sap.count(':') == 1:
address, port = sap.split(':')
return TCPSAP(address, int(port))
else:
raise CannotEncodeSAP(sap_str)
else:
raise CannotEncodeSAP(sap_str)
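# Illustrative SAP strings accepted by encode_SAP (examples only, not part of the
# original module):
#
#     encode_SAP('null@')              -> TransportSAP()
#     encode_SAP('tcp@0.0.0.0')        -> TCPSAP bound to 0.0.0.0 on a free port
#     encode_SAP('tcp@127.0.0.1:5000') -> TCPSAP(address='127.0.0.1', port=5000)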
_VMTU = 1024
#
# Interface classes
#
class TransportSAP(object):
""" This class is used to store transport configuration. """
def __str__(self):
""" String representation should be enought to create more
instances. """
return "none"
class Transport(object):
""" This class send and receive "frames" of data. """
@property
def secure(self):
""" Secure transport.
This property returns if transport is secure or not. """
return False
@property
def sap(self):
""" Service access point.
This property is used to compose endpoint string reference. """
return TransportSAP()
@property
def ready(self):
""" On line.
This property returns if transport is ready or not. """
return False
@property
def is_bind(self):
""" Return if transport has a callback or not.
Args:
none.
Returns:
True if callback is defined.
Raises:
none.
"""
raise NotImplementedError()
def bind(self, callback):
""" Set server callback.
Args:
callback: handler for received requests from remote clients.
Returns:
none.
Raises:
none.
"""
raise NotImplementedError()
def open(self, local_sap):
""" Open local SAP.
Create local service access point to allow remote
endpoints to connect and transfer data.
Args:
local_resource: a local resource used as SAP.
Returns:
none
Raises:
CannotOpenTransport: unable to open local resource.
"""
raise NotImplementedError()
def close(self):
""" Close local SAP.
Close local service access point and forbids remote
endpoints to connect.
Args:
none
Returns:
none
Raises:
CannotCloseTransport: unable to close local resource.
"""
raise NotImplementedError()
def connect(self, remote_sap):
""" Open remote SAP.
Connect to remote SAP to transfer data.
Args:
remote_resource: a remote resource used as SAP.
Returns:
none
Raises:
CannotOpenTransport: unable to open resource.
"""
raise NotImplementedError()
def disconnect(self):
""" Close remote SAP.
Close remote service access point.
Args:
none
Returns:
none
Raises:
CannotCloseTransport: unable to close remote resource.
"""
raise NotImplementedError()
def send_request(self, msg):
""" Send request.
Args:
msg: request to send.
Returns:
response to request from server.
Raises:
ConnectionLost: error writing socket.
"""
raise NotImplementedError()
def create_SAP(self, *args, **kwargs):
""" Factory of SAP objects.
Args:
attributes of given SAP.
Returns:
SAP object with desired parameters.
Raises:
none.
"""
return TransportSAP()
#
# Common errors
#
class TransportNotConnected(Exception):
def __init__(self, transport):
self.__transport = transport
def __str__(self):
return '[%s] is not connected yet!' % self.__transport
class TransportError(Exception):
def __init__(self, cause='unknown'):
self.__cause = cause
def __str__(self):
return 'Error in transport (%s)' % self.__cause
class CannotEncodeSAP(Exception):
def __init__(self, sap_str):
self.__sap_str = sap_str
def __str__(self):
return 'Unable to decode "%s" as SAP' % self.__sap_str
#
# Aux. methods
#
def __get_free_tcp4_port__():
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
def __wait_frame__(active_socket):
_INF('Waiting for frame')
frame_size = active_socket.recv(4)
if len(frame_size) != 4:
raise TransportError('Frame header must have 4 bytes')
frame_size = struct.unpack('i', frame_size)[0]
data = ''
total_received = 0
while total_received < frame_size:
partial = active_socket.recv(min(_VMTU,
frame_size - total_received))
if not partial:
break
total_received += len(partial)
data += partial
_INF('Readed frame of %s bytes' % len(data))
return data
def __send_frame__(active_socket, data):
_INF('Sending frame of %s bytes' % len(data))
header = struct.pack('i', len(data))
frame = header + data
    active_socket.sendall(frame)  # sendall() returns None; sends the whole frame
    _INF('Frame sent')
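# Sketch of the wire format used by __send_frame__/__wait_frame__ above
# (illustrative only): every frame is a 4-byte native-order length header
# followed by the payload bytes.
#
#     payload = 'hello'
#     frame = struct.pack('i', len(payload)) + payload
#     # -> '\x05\x00\x00\x00hello' on a little-endian machine
#
# __wait_frame__ reads the 4-byte header first, then loops on recv() until the
# advertised number of payload bytes has arrived.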
#
# TCP implementation
#
class TCPTransport(Transport):
class _RequestHandler(SocketServer.StreamRequestHandler):
def __init__(self, request, client_address, server):
SocketServer.BaseRequestHandler.__init__(self,
request,
client_address,
server)
def handle(self):
while True:
r, w, x = select.select([self.request], [], [])
if not r:
break
_DEB('Server waiting for frames...')
try:
request = __wait_frame__(self.request)
except TransportError:
_INF('Server disconnected from client')
break
_DEB('Server received "%s"' % repr(request))
response = self.server.request_handler(request)
_DEB('Server sends "%s"' % repr(response))
__send_frame__(self.request,
'' if response is None else response)
class _TCPBasicServer(SocketServer.ThreadingMixIn,
SocketServer.TCPServer):
def __init__(self, address, request_handler):
SocketServer.TCPServer.__init__(self,
address, request_handler)
self.callback = None
def request_handler(self, request):
if self.callback is None:
                _DEB('Request received but no callback established!')
return
return self.callback(request)
def __init__(self):
Transport.__init__(self)
self.__local = None
self.__remote = None
self.__client_socket = None
self.__server = None
self.__server_thread = None
self.__request_callback = None
@property
def client_mode(self):
return self.__client_socket is not None
@property
def server_mode(self):
return self.__server is not None
@property
def ready(self):
return self.__local or self.__remote
@property
def sap(self):
return self.__local
@property
def is_binded(self):
if self.__server is None:
return False
return self.__server.callback is not None
def bind(self, callback):
_DEB('Bind to %s' % repr(callback))
self.__request_callback = callback
if self.__server is None:
return
self.__server.callback = callback
def open(self, local_sap):
assert(isinstance(local_sap, TCPSAP))
_DEB('Create server socket...')
self.__local = local_sap
addr = self.__local.address
port = self.__local.port
_DEB('Server address=%s' % addr)
_DEB('Server port=%s' % port)
self.__server = self._TCPBasicServer((addr, port),
self._RequestHandler)
_DEB('Server created in %s:%s' % self.__server.server_address)
self.__server_thread = threading.Thread(
target = self.__server.serve_forever)
self.__server_thread.daemon=True
self.__server_thread.start()
# If bind() is called before open()
if self.__request_callback is not None:
self.__server.callback = self.__request_callback
def close(self):
_DEB('Terminate server socket...')
self.__server.shutdown()
self.__server_thread.join()
self.__server = None
self.__server_thread = None
self.__local = None
def connect(self, remote_sap):
assert(isinstance(remote_sap, TCPSAP))
_DEB('Client wants to connect to %s' % remote_sap)
self.__remote = remote_sap
self.__client_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
        # If the remote address is the wildcard 0.0.0.0, connect to 127.0.0.1 instead
        addr = '127.0.0.1' if self.__remote.address == '0.0.0.0' else self.__remote.address
        self.__client_socket.connect((addr,
                                      self.__remote.port))
_DEB('Connected to server')
def disconnect(self):
_DEB('Terminate client socket...')
self.__remote = None
try:
self.__client_socket.shutdown(socket.SHUT_RDWR)
self.__client_socket.close()
finally:
self.__client_socket = None
def send_request(self, request):
_DEB('Client wants to send "%s"' % repr(request))
if not self.client_mode:
raise TransportNotConnected(self)
__send_frame__(self.__client_socket, request)
_DEB('Client wait for response...')
response = __wait_frame__(self.__client_socket)
_DEB('Client received "%s"' % repr(response))
return response
def create_sap(self, *args, **kwargs):
address = '0.0.0.0'
port = __get_free_tcp4_port__()
# Positional arguments
for position, argument in enumerate(args):
if position == 0:
address = argument
elif position == 1:
port = argument
# Named arguments
if 'address' in kwargs.keys():
address = kwargs['address']
if 'port' in kwargs.keys():
port = kwargs['port']
return TCPSAP(address, port)
class TCPSAP(TransportSAP):
def __init__(self, address='0.0.0.0', port=None):
self.__address = address
self.__port = __get_free_tcp4_port__() if (port in [None, 0]) else port
_DEB('TCPSAP: %s, %s' % (repr(self.__address), repr(self.__port)))
@property
def address(self):
return self.__address
@property
def port(self):
return self.__port
def __str__(self):
return 'tcp@%s:%s' % (self.__address, self.__port)
| int-0/potp | src/potp/transport.py | Python | lgpl-3.0 | 11,848 |
__author__ = 'scott'
import os
from DjangoServer.settings.base import *
DEBUG = True
| Sensorica/Sensor-Network | DjangoServer/DjangoServer/settings/dev.py | Python | cc0-1.0 | 87 |
import os
try:
from PIL import Image
from PIL import ImageColor
except ModuleNotFoundError:
exit("่ซๅฎ่ฃ Python Imaging Library 7.2.0, ๆไปค: pip install Pillow")
def main():
current_dir = (os.path.abspath(os.path.dirname(__file__)))
image_types = ("png", "jpg", "gif")
default_image_size = (800, 600)
image_sizes = (
(320, 240),
(640, 480),
(800, 600),
(1024, 768),
(2048, 1536),
(640, 360),
(1280, 720),
(1920, 1080),
(2560, 1440),
(3840, 2460),
(7680, 4320),
(15360, 8640),
(1, 1),
(10, 10),
(100, 100),
(1000, 1000),
(10000, 10000),
(1, 65535),
(65535, 1),
)
""" ็ข็ PNG, JPEG, GIF ๅ็ๆชๆก """
for image_type in image_types:
output_dir = current_dir + os.sep + "output" + os.sep + image_type
os.makedirs(output_dir, exist_ok=True)
""" ็ข็ 148 ๅไธๅ้ก่ฒ็ๅ็ """
for color_name in ImageColor.colormap:
im = Image.new("RGB", default_image_size, color_name)
filename = output_dir + os.sep + color_name + "." + image_type
if not os.path.exists(filename):
im.save(filename)
""" ็ข็ไธๅๅคงๅฐ็ๅ็ """
for size in image_sizes:
im = Image.new("RGB", size, "cyan")
filename = output_dir + os.sep + "{}x{}.png".format(*size)
if not os.path.exists(filename):
im.save(filename)
if __name__ == "__main__":
main()
| t5318019/playground | tools/genImages.py | Python | mit | 1,595 |
"""
Apply a wave equation fit to the beta oscillations.
as in Zanos et al 2015
Thy use a 2ms step size
Fit wave equation over 20ms ( not even full beta period? )
they use lsqcurvefit in Matlab to fit the wave equation.
there is a python equivalent scipy.optimize.leastsq
2D wave equation is
u(x,y,t) = A(t) np.sin(Kx(t)*x+Ky(t)*y-w(t)*t+phi(t))
A(t) is time varying amplitude
Kx(t) is a time varying contribution from x spatial component
Ky(t) is a time varying contribution from y spatial component
w(t) is current wavelength in time
phi(t) is a phase parameter
We don't explicitly model time -- we take short snapshots and fit the wave
equation. So what we're actually fitting is
u(x,y,t) = A np.sin(a*x+b*y-w*t+phi)
Which has 5 free Parameters amplitude, x/y spatial wavelength,
time wavelength, and phase offset.
"""
import numpy as np
from numpy import pi, sqrt, arctan2, real
from numpy.linalg import norm
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
zscore = lambda x: (x - np.mean(x, 0)) / np.std(x, 0)
def predict(xys,times,A,B,a,b,w):
    '''
    Evaluate the plane-wave model A*sin(a*x+b*y-w*t) + B*cos(a*x+b*y-w*t)
    at every location in xys for every time in times; returns an
    Ntimes x Nxy array of predicted values.
    '''
nxy = np.shape(xys)[0]
nt = np.shape(times)[0]
predicted = np.zeros((nt,nxy))
for it in range(nt):
for ixy in range(nxy):
x,y = xys[ixy]
t = times[it]
phase = a*x+b*y-w*t
predicted[it,ixy] = A*np.sin(phase)+B*np.cos(phase)
return predicted
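# Minimal usage sketch for predict() (illustrative only, not part of the original
# module; the parameter values are arbitrary):
#
#     xys = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
#     times = np.arange(0, 0.020, 0.002)   # 20 ms window, 2 ms steps
#     frames = predict(xys, times, A=1.0, B=0.5, a=0.3, b=0.1, w=2*np.pi*20)
#     # frames has shape (len(times), len(xys)): one spatial frame per time point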
def plotdata(xys,data):
    '''
    Plot the data frame by frame as scatter plots at the electrode positions
    xys, with marker area proportional to the signed amplitude
    (blue for positive values, red for negative).
    '''
x,y = xys.T
scale = 20
for frame in data:
plt.clf()
plt.scatter(x,y,s=frame*scale,color='b')
plt.scatter(x,y,s=-frame*scale,color='r')
plt.draw()
plt.show()
def makeLSQminimizerPolar(xy,time,neuraldata):
'''
Generates a suitable function for computing an objective function for
least-squares minimization of a synchronous wave model
Parameters
----------
xy : 2D numeric array
locations in space of observations
time : 1D numeric array
time-points of observations
neuraldata:
experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
function
An objective function that can be used with the Numpy leastsq
optimizer function
'''
nxy = np.shape(xy)[0]
nt = np.shape(time)[0]
time -= np.mean(time)
xy -= np.mean(xy,0)
window = np.hanning(nt+2)[1:-1]
def getResiduals(params):
A,B,a,b,w,xo,yo = params
residuals = np.zeros((nxy,nt))
for ixy in range(nxy):
for it in range(nt):
x,y = xy[ixy]
t = time[it]
x -= xo
y -= yo
r = sqrt(x*x+y*y)
h = arctan2(y,x)
phase = a*r+b*h-w*t
prediction = A*np.sin(phase)+B*np.cos(phase)
residuals[ixy,it] = np.abs(neuraldata[it,ixy] - prediction)*window[it]
return np.ravel(residuals)
return getResiduals
def makeLSQminimizerStanding(xy,time,neuraldata):
'''
Generates a suitable function for computing an objective function for
least-squares minimization of a synchronous wave model
Parameters
----------
xy : 2D numeric array
locations in space of observations
time : 1D numeric array
time-points of observations
neuraldata:
experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
function
An objective function that can be used with the Numpy leastsq
optimizer function
'''
nxy = np.shape(xy)[0]
nt = np.shape(time)[0]
time -= np.mean(time)
xy -= np.mean(xy,0)
window = np.hanning(nt+2)[1:-1]
def getResiduals(params):
A,B,C,D,a,b,w = params
residuals = np.zeros((nxy,nt))
for ixy in range(nxy):
for it in range(nt):
x,y = xy[ixy]
t = time[it]
phase1 = a*x+b*y
phase2 = w*t
cp1 = np.cos(phase1)
sp1 = np.sin(phase1)
cp2 = np.cos(phase2)
sp2 = np.sin(phase2)
prediction = A*sp1*sp2+B*sp1*cp2+C*cp1*sp2+D*cp1*cp2
residuals[ixy,it] = np.abs(neuraldata[it,ixy] - prediction)*window[it]
return np.ravel(residuals)
return getResiduals
def makeLSQminimizerSynchronous(xy,time,neuraldata):
'''
Generates a suitable function for computing an objective function for
least-squares minimization of a synchronous wave model
Parameters
----------
xy : 2D numeric array
locations in space of observations
time : 1D numeric array
time-points of observations
neuraldata:
experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
function
An objective function that can be used with the Numpy leastsq
optimizer function
'''
nxy = np.shape(xy)[0]
nt = np.shape(time)[0]
time -= np.mean(time)
xy -= np.mean(xy,0)
window = np.hanning(nt+2)[1:-1]
def getResiduals(params):
A,B,w = params
residuals = np.zeros((nxy,nt))
for ixy in range(nxy):
for it in range(nt):
x,y = xy[ixy]
t = time[it]
phase = w*t
prediction = A*np.cos(phase)+B*np.sin(phase)
residuals[ixy,it] = np.abs(neuraldata[it,ixy] - prediction)*window[it]
return np.ravel(residuals)
return getResiduals
def makeLSQminimizerPlane(xy,time,neuraldata):
'''
Generates a suitable function for computing an objective function for
least-squares minimization of a synchronous wave model
Parameters
----------
xy : 2D numeric array
locations in space of observations
time : 1D numeric array
time-points of observations
neuraldata:
experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
function
An objective function that can be used with the Numpy leastsq
optimizer function
'''
nxy = np.shape(xy)[0]
nt = np.shape(time)[0]
time -= np.mean(time)
xy -= np.mean(xy,0)
window = np.hanning(nt+2)[1:-1]
def getResiduals(params):
A,B,a,b,w = params
residuals = np.zeros((nxy,nt))
for ixy in range(nxy):
for it in range(nt):
x,y = xy[ixy]
t = time[it]
phase = a*x+b*y-w*t
prediction = A*np.sin(phase)+B*np.cos(phase)
residuals[ixy,it] = np.abs(neuraldata[it,ixy] - prediction)*window[it]
return np.ravel(residuals)
return getResiduals
def makeLSQminimizerDoublePlane(xy,time,neuraldata):
'''
Generates a suitable function for computing an objective function for
least-squares minimization of a synchronous wave model
Parameters
----------
xy : 2D numeric array
locations in space of observations
time : 1D numeric array
time-points of observations
neuraldata:
experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
function
An objective function that can be used with the Numpy leastsq
optimizer function
'''
nxy = np.shape(xy)[0]
nt = np.shape(time)[0]
time -= np.mean(time)
xy -= np.mean(xy,0)
window = np.hanning(nt+2)[1:-1]
def getResiduals(params):
A1,B1,a1,b1,w1,A2,B2,a2,b2,w2 = params
residuals = np.zeros((nxy,nt))
for ixy in range(nxy):
for it in range(nt):
x,y = xy[ixy]
t = time[it]
phase1 = a1*x+b1*y-w1*t
phase2 = a2*x+b2*y-w2*t
prediction = A1*np.sin(phase1)+B1*np.cos(phase1)+A2*np.sin(phase2)+B2*np.cos(phase2)
residuals[ixy,it] = np.abs(neuraldata[it,ixy] - prediction)*window[it]
return np.ravel(residuals)
return getResiduals
def phase_gradient(data):
'''
Computes 1D linear phase gradient
'''
data = np.angle(data)
phase_gradient = np.diff(data)
phase_gradient = (phase_gradient+pi)%(2*pi)-pi
return phase_gradient
def heuristic_B_polar(data,xys,times):
'''
Heuristic parameter guess for the polar wave model
Parameters
----------
xys : 2D numeric array
locations in space of observations
    times : 1D numeric array
        time-points of observations
    data:
        experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
tuple
Amplitude, ?, ?, ?, frequency
'''
amplitude_guess = np.max(np.abs(data))
    frequency_guess = np.median(list(map(phase_gradient, data.T))) / np.mean(np.diff(times))
x,y = xys.T
return np.array([amplitude_guess,0,0,0,frequency_guess,np.mean(x),np.mean(y)])
def heuristic_B_planar(data,xys,times):
'''
Heuristic parameter guess for the planar wave model
Parameters
----------
xys : 2D numeric array
locations in space of observations
    times : 1D numeric array
        time-points of observations
    data:
        experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
tuple
Amplitude, ?, ?, ?, frequency
'''
amplitude_guess = np.max(np.abs(data))
    frequency_guess = np.median(list(map(phase_gradient, data.T))) / np.mean(np.diff(times))
return np.array([amplitude_guess,0,0,0,frequency_guess])
def heuristic_B_standing(data,xys,times):
'''
Heuristic parameter guess for the standing wave model
Parameters
----------
xys : 2D numeric array
locations in space of observations
    times : 1D numeric array
        time-points of observations
    data:
        experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
tuple
Amplitude, ?, ?, ?, ?, ?, frequency
'''
amplitude_guess = np.max(np.abs(data))
    frequency_guess = np.median(list(map(phase_gradient, data.T))) / np.mean(np.diff(times))
return np.array([amplitude_guess,0,0,0,0,0,frequency_guess])
def heuristic_B_synchronous(data,xys,times):
'''
Heuristic parameter guess for the spatially synchronous wave model
Parameters
----------
xys : 2D numeric array
locations in space of observations
    times : 1D numeric array
        time-points of observations
    data:
        experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
tuple
Amplitude, ?, frequency
'''
amplitude_guess = np.max(np.abs(data))
    frequency_guess = np.median(list(map(phase_gradient, data.T))) / np.mean(np.diff(times))
return np.array([amplitude_guess,0,frequency_guess])
def heuristic_B_double_planar(data,xys,times):
'''
Heuristic parameter guess for the double planar wave model
Parameters
----------
xys : 2D numeric array
locations in space of observations
    times : 1D numeric array
        time-points of observations
    data:
        experimental observations to which to fit the model
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
tuple
        Amplitude, ?, ?, ?, frequency, ?, amplitude2, 0.1, -0.1, frequency2
'''
amplitude_guess = np.max(np.abs(data))
    frequency_guess = np.median(list(map(phase_gradient, data.T))) / np.mean(np.diff(times))
return np.array([amplitude_guess,0,0,0,frequency_guess,0,amplitude_guess,0.1,-0.1,frequency_guess])
def frame_synchrony(frame):
'''
Non-Kuromoto synchrony measure
'''
return np.abs(np.mean(frame))/np.mean(np.abs(frame))
def synchrony(data):
'''
Just maps frame_synchrony(frame) over first dimention of parameter data
'''
syn = [frame_synchrony(frame) for frame in data]
return np.mean(syn)
def pairwise_phase_difference(a,b):
'''
Phase difference, compensated for wraparound
'''
return (a-b+pi)%(2*pi)-pi
def spatial_phase_gradient(arraymap,chi,frame):
'''
Computes phase gradient from electrode positions as opposed to the
array-packed representation of data?
I think?
'''
# PGD = |E(phase)|/E(|phase|)
frame = np.angle(frame)
height,width = np.shape(arraymap)
gradients = []
for y in range(height-1):
for x in range(width-1):
ch0 = arraymap[y][x]
chx = arraymap[y][x+1]
chy = arraymap[y+1][x]
ch3 = arraymap[y+1][x+1]
if ch0==0: continue
if chx==0: continue
if chy==0: continue
if ch3==0: continue
if not ch0 in chi: continue
if not chx in chi: continue
if not chy in chi: continue
if not ch3 in chi: continue
ch0 = np.where(chi==ch0)
chx = np.where(chi==chx)
chy = np.where(chi==chy)
ch3 = np.where(chi==ch3)
dx = pairwise_phase_difference(frame[ch0],frame[chx])
dy = pairwise_phase_difference(frame[ch0],frame[chy])
dx+= pairwise_phase_difference(frame[chy],frame[ch3])
dy+= pairwise_phase_difference(frame[chx],frame[ch3])
dz = (dx+1j*dy)*0.5
            gradients.append(dz)
gradients = np.array(gradients)
return gradients
def directionality_index(arraymap,chi,frame):
'''
PGD
'''
# PGD = |E(phase)|/E(|phase|)
frame = np.angle(frame)
height,width = np.shape(arraymap)
gradients = spatial_phase_gradient(arraymap,chi,frame)
return np.abs(np.mean(gradients))/np.mean(np.abs(gradients))
def phase_unwrap(x):
x = np.angle(x)
x = np.diff(x)
x = (x+pi)%(2*pi)-pi
return np.append(0,np.cumsum(x))+x[0]
def averaged_directionality_index(a,c,x):
    # note: this fails.
# meanphase = np.array([np.mean(phase_unwrap(x[:,i])) for i in xrange(np.shape(x)[1])])
# meanphase %= 2*pi
# return directionality_index(arraymap,chi,exp(1j*meanphase))
# this is better
gradients = [spatial_phase_gradient(a,c,f) for f in x]
# f = np.median(map(phase_gradient,data.T))
# f is in units of d_phase d_t, can be used to recenter gradients for averaging?
# wait... there is no need to re-center gradients.
# there is no evidence that hatsopoulos averaged PGD in time?
def heuristic_solver_double_planar(params):
'''
Heuristic fit of data to a wave solution with two plane waves.
Intended to be used with neurotools.parallel
Parameters
----------
i : integer
the job number (will be returned with the result)
xys : 2D numeric array
spatial locations of each channel
times: 1D numeric array
the time basis for the observation
data : 3D numeric array, real valued
wave data.
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
i :
job index for tracking parallel jobs
result[0] :
first element of tuple returned from leastsq. presumably the model
parameters?
error :
norm of the residuals divided by the norm of the data
'''
(i,xys,times,data) = params
objective = makeLSQminimizerDoublePlane(xys,times,real(data))
    result = leastsq(objective, heuristic_B_double_planar(data, xys, times), full_output=1)
return i,result[0],norm(result[2]['fvec'])/norm(data)
def heuristic_solver_standing(params):
'''
Heuristic fit of data to a planar standing wave solution.
Intended to be used with neurotools.parallel
Parameters
----------
i : integer
the job number (will be returned with the result)
xys : 2D numeric array
spatial locations of each channel
times: 1D numeric array
the time basis for the observation
data : 3D numeric array, real valued
wave data.
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
int
job index for tracking parallel jobs
object
first element of tuple returned from leastsq. presumably the model
parameters?
float
norm of the residuals divided by the norm of the data
'''
(i,xys,times,data) = params
objective = makeLSQminimizerStanding(xys,times,real(data))
    result = leastsq(objective, heuristic_B_standing(data, xys, times), full_output=1)
return i,result[0],norm(result[2]['fvec'])/norm(data)
def heuristic_solver_planar(params):
'''
Heuristic fit of data to a plane wave solution.
Intended to be used with neurotools.parallel
Parameters
----------
i : integer
the job number (will be returned with the result)
xys : 2D numeric array
spatial locations of each channel
times: 1D numeric array
the time basis for the observation
data : 3D numeric array, real valued
wave data.
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
int
job index for tracking parallel jobs
object
first element of tuple returned from leastsq. presumably the model
parameters?
float
norm of the residuals divided by the norm of the data
'''
(i,xys,times,data) = params
objective = makeLSQminimizerPlane(xys,times,real(data))
    result = leastsq(objective, heuristic_B_planar(data, xys, times), full_output=1)
return i,result[0],norm(result[2]['fvec'])/norm(data)
def heuristic_solver_polar(params):
'''
Heuristic fit of data to a polar wave solution.
Polar waves include radial, spiral, and pinwheel rotating waves
Intended to be used with neurotools.parallel
Parameters
----------
i : integer
the job number (will be returned with the result)
xys : 2D numeric array
spatial locations of each channel
times: 1D numeric array
the time basis for the observation
data : 3D numeric array, real valued
wave data.
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
int
job index for tracking parallel jobs
object
first element of tuple returned from leastsq. presumably the model
parameters?
float
norm of the residuals divided by the norm of the data
'''
(i,xys,times,data) = params
objective = makeLSQminimizerPolar(xys,times,real(data))
    result = leastsq(objective, heuristic_B_polar(data, xys, times), full_output=1)
return i,result[0],norm(result[2]['fvec'])/norm(data)
def heuristic_solver_synchronous(params):
'''
Heuristic fit of data to a synchronous wave solution.
Intended to be used with neurotools.parallel
Parameters
----------
i : integer
the job number (will be returned with the result)
xys : 2D numeric array
spatial locations of each channel
times: 1D numeric array
the time basis for the observation
data : 3D numeric array, real valued
wave data.
        an Ntimes x NElectrode filtered neural data snippet
Returns
-------
int
job index for tracking parallel jobs
object
first element of tuple returned from leastsq. presumably the model
parameters?
float
norm of the residuals divided by the norm of the data
'''
(i,xys,times,data) = params
objective = makeLSQminimizerSynchronous(xys,times,real(data))
    result = leastsq(objective, heuristic_B_synchronous(data, xys, times), full_output=1)
return i,result[0],norm(result[2]['fvec'])/norm(data)
| michaelerule/neurotools | spatial/wave_parametric_models.py | Python | gpl-3.0 | 19,987 |
# Copyright 2016 - 2021 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import math
import mimetypes
import os
import time
from datetime import timezone
from pathlib import Path
import jwt
from aiohttp import web
from aiohttp.web_urldispatcher import SystemRoute
AGGRESSIVE_CACHING = bool(os.environ.get('MARV_EXPERIMENTAL_AGGRESSIVE_CACHING'))
@web.middleware
async def auth_middleware(request, handler):
if (instance := getattr(handler, '__self__', None)) and isinstance(instance, SystemRoute): # pylint: disable=used-before-assignment
return await handler(request)
assert isinstance(handler.acl, set)
authorization = request.headers.get('Authorization')
if not authorization:
authorization = request.query.get('access_token')
username = 'marv:anonymous'
groups = {'__unauthenticated__'}
if authorization:
token = authorization.replace('Bearer ', '')
try:
session = jwt.decode(token, request.app['config']['SECRET_KEY'], algorithms=['HS256'])
except BaseException:
raise web.HTTPUnauthorized()
user = await request.app['site'].db.get_user_by_name(session['sub'], deep=True)
if not user or not user.active \
or user.time_updated.replace(tzinfo=timezone.utc).timestamp() > session['iat']:
raise web.HTTPUnauthorized()
username = user.name
groups = {g.name for g in user.groups}
groups.add('__authenticated__')
if '__authenticated__' not in handler.acl:
raise web.HTTPForbidden()
elif '__unauthenticated__' not in handler.acl:
raise web.HTTPUnauthorized()
request['username'] = username
request['user_groups'] = groups
return await handler(request)
def HTTPPermissionError(request): # noqa: N802 pylint: disable=invalid-name
if request['username'] == 'marv:anonymous':
return web.HTTPUnauthorized
return web.HTTPForbidden
def get_global_granted(request):
if 'admin' in request['user_groups']:
return ['admin']
return []
def get_local_granted(request):
if request['username'] == 'marv:anonymous':
if request.app['site'].config.marv.ce_anonymous_readonly_access:
return ['download_raw', 'list', 'read']
return []
if 'admin' in request['user_groups']:
return ['comment', 'delete', 'download_raw', 'list', 'read', 'tag']
return ['comment', 'download_raw', 'list', 'read', 'tag']
def generate_token(username, key):
now = math.ceil(time.time())
return jwt.encode(
{
'exp': now + 2419200, # 4 weeks expiration
'iat': now,
'sub': username,
},
key,
algorithm='HS256',
)
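# Hedged usage sketch (illustrative only, not part of the original module): tokens
# minted by generate_token() can be verified with the same secret, mirroring what
# auth_middleware does above.
#
#     token = generate_token('alice', key=app['config']['SECRET_KEY'])
#     claims = jwt.decode(token, app['config']['SECRET_KEY'], algorithms=['HS256'])
#     assert claims['sub'] == 'alice'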
def safejoin(basepath, rel):
rel = Path(rel)
if rel.anchor:
raise web.HTTPForbidden
fullpath = basepath.joinpath(rel).resolve()
if basepath.resolve() not in fullpath.parents:
raise web.HTTPForbidden
return fullpath
def sendfile(path, approot, reverse_proxy, filename=None, headers=None):
headers = headers.copy() if headers else {}
headers.setdefault('Content-Disposition', f'attachment; filename={filename or path.name}')
if AGGRESSIVE_CACHING and (
path.suffix in ('.jpg', '.json') or
(path.suffix == '.mrv' and path.stat().st_size < 20 * 10**6) or
path.name == 'default-stream'
):
headers['Cache-Control'] = 'max-age=14400'
else:
headers['Cache-Control'] = 'no-cache, no-store'
if reverse_proxy == 'nginx':
mime = mimetypes.guess_type(str(path))[0]
return web.Response(
headers={
'Content-Type': mime or 'application/octet-stream',
'X-Accel-Buffering': 'no',
'X-Accel-Redirect': f'{approot}{str(path)}',
**headers,
},
)
assert not reverse_proxy, f'Unknown reverse_proxy {reverse_proxy}'
return web.FileResponse(path, headers=headers)
class Webapi:
def __init__(self, url_prefix=''):
self.url_prefix = url_prefix
self.endpoints = []
def endpoint(self, url_rule, methods, only_anon=False, allow_anon=False):
def closure(func):
func.name = func.__name__
func.url_rule = f'{self.url_prefix}{url_rule}'
if only_anon:
func.acl = {'__unauthenticated__'}
else:
func.acl = {'__authenticated__'}
if allow_anon:
func.acl |= {'__unauthenticated__'}
func.methods = methods
self.endpoints.append(func)
return closure
def __repr__(self):
return f'<Webapi url_prefix={self.url_prefix}>'
| ternaris/marv-robotics | code/marv/marv_webapi/tooling.py | Python | agpl-3.0 | 4,756 |
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
(c) 2017 Julian Rees
License: GNU GPLv3
Description: Plot the performance of the SCF convergence in ORCA.
Run: python scfconvergence.py filename [scfnum skip]
Arguments: filename - file name with extension;
there should be at least one SCF cycle present
scfnum - optional: if more than one SCF cycle (e.g. a geometry
optimization), the desired cycle to plot
skip - optional: SCF iterations to truncate from start;
to better visualize late-stage convergence
Dependencies: matplotlib
--------------------------------------------------------------------------------
"""
print(__doc__)
import sys
import math
import matplotlib.pyplot as plt
# check for correct number of inputs
if len(sys.argv) < 2:
print(' ')
sys.exit("You must supply exactly one filename!")
elif len(sys.argv) == 2:
print(' ')
print('- - !! ONLY THE FIRST SCF WILL BE PRINTED !! - -')
elif (len(sys.argv) == 3 and sys.argv[2].isdigit() == True):
pass
elif (len(sys.argv) == 4 and sys.argv[2].isdigit() == True and
        sys.argv[3][1].isdigit() == True and sys.argv[3][0] == '-'):
pass
else:
print(' ')
str1 = 'You must supply exactly one filename, '
str2 = 'and an optional SCF number and pre-cutoff (negative)!'
sys.exit(str1 + str2)
# define search string and clear the list
searchfor = "SCF ITERATIONS"
energies = []
delta_energies = []
# optionally assign the SCF to print
if len(sys.argv) == 3:
scfnum = int(sys.argv[2])
skip = 0
elif len(sys.argv) == 4:
scfnum = int(sys.argv[2])
skip = int(float(sys.argv[3])*-1)
else:
scfnum = 1
skip = 0
# open filename
fname = str(sys.argv[1])
try:
with open(fname) as f:
# search lines for string and move down two lines to get energy
i = 1
for line in f:
if searchfor in line:
next(f)
try:
                    line = next(f)
except:
print(' ')
sys.exit('- - !! REACHED THE END OF THE OUTPUT FILE !! - -')
# check if i = scfnum
if i < scfnum:
i = i + 1
else:
# run a loop over the first SCF convergence
while "SUCCESS" not in line:
if not line.strip():
break
# check to see if the line is an iteration
elif line.split()[0].isdigit():
# get the energy as a number and add it to the list
try:
energy = float(line.split()[1])
energies.append(energy)
delta_energies.append(float(line.split()[2]))
except ValueError:
pass
try:
                                line = next(f)
except:
print(' ')
print('- - !! THE SCF IS NOT YET CONVERGED !! - -')
break
break
except IOError:
sys.exit("The specified file does not exist!")
# truncate the list if needed
if skip == 0:
pass
else:
energies[0:skip] = []
delta_energies[0:skip] = []
# plot energies
x_axis = range(1+skip, 1+len(energies)+skip)
plt.plot(x_axis, energies,'o-')
plt.title('%d SCF Iterations' %len(energies))
plt.xlabel('SCF Iteration')
plt.ylabel('SCF Energy')
plt.show()
x_axis = range(1+skip, 1+len(delta_energies)+skip)
plt.plot(x_axis, delta_energies,'o-')
plt.title('%d SCF Iterations' %len(delta_energies))
plt.xlabel('SCF Iteration')
plt.ylabel('SCF Energy Change')
plt.show()
| julianrees/scripts | python/scfconvergence.py | Python | gpl-3.0 | 3,926 |
from enum import Enum
EXIT_CODES = [
"SUCCESS",
"BUILD_FAILURE",
"PARSING_FAILURE",
"COMMAND_LINE_ERROR",
"TESTS_FAILED",
"PARTIAL_ANALYSIS_FAILURE",
"NO_TESTS_FOUND",
"RUN_FAILURE",
"ANALYSIS_FAILURE",
"INTERRUPTED",
"LOCK_HELD_NOBLOCK_FOR_LOCK",
"REMOTE_ENVIRONMENTAL_ERROR",
"OOM_ERROR",
"REMOTE_ERROR",
"LOCAL_ENVIRONMENT_ERROR",
"BLAZE_INTERNAL_ERROR",
"PUBLISH_ERROR",
"PERSISTENT_BUILD_EVENT_SERVICE_UPLOAD_ERROR"
]
class DistantEnum(Enum):
def __str__(self):
return str(self.value)
class CPU(DistantEnum):
k8 = "k8"
piii = "piii"
darwin = "darwin"
freebsd = "freebsd"
armeabi = "armeabi-v7a"
arm = "arm"
aarch64 = "aarch64"
x64_windows = "x64_windows"
x64_windows_msvc = "x64_windows_msvc"
s390x = "s390x"
ppc = "ppc"
ppc64 = "ppc64"
class CompilationMode(DistantEnum):
fastbuild = "fastbuild"
dbg = "dbg"
opt = "opt"
| antmicro/distant-bes | distantbes/enums.py | Python | apache-2.0 | 1,058 |
# -*- coding: utf-8 -*-
import flask_sqlalchemy
from flask import Response
from .base import json
def render_schema(model, schema=None):
headers = {}
if schema is None:
resp = json.dumps(model)
elif isinstance(model, flask_sqlalchemy.Pagination):
resp = schema(many=True).dumps(model.items).data
headers['X-Total'] = model.total
headers['X-Page'] = model.page
elif isinstance(model, list):
resp = schema(many=True).dumps(model).data
else:
resp = schema().dumps(model).data
return Response(response=resp, headers=headers)
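# Hedged usage sketch (illustrative only; User and UserSchema are hypothetical):
#
#     @app.route('/users')
#     def list_users():
#         page = User.query.paginate(page=1, per_page=20)
#         return render_schema(page, UserSchema)  # sets X-Total / X-Page headers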
def render_error(code, error, status=400):
message = {
'code': code,
'error': error}
return Response(
response=json.dumps(message), status=status)
| fengluo/flask-tube | flask_tube/response.py | Python | mit | 779 |