code | repo_name | path | language | license | size
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
---|---|---|---|---|---|
"""
models/
This package contains models to be used by the
application layer. All models should be either
named tuples or named tuple-like. That is,
immutable objects with appropriate named
attributes.
"""
__author__ = 'Alan Barber'
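# --- Illustrative sketch (not part of the original package) ---
# A minimal example, under the assumptions in the docstring above, of what a
# "named tuple-like" model could look like; the model name and fields here
# are hypothetical.
from collections import namedtuple

ExampleModel = namedtuple('ExampleModel', ['id', 'name'])
# Instances are immutable and expose named attributes:
#   ExampleModel(id=1, name='example').name  ->  'example'
#   assigning to .name raises AttributeError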
| alanebarber/sabroso | python/application/data_layer/models/__init__.py | Python | bsd-3-clause | 235 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest2 as unittest
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
import spatial_pooler_py_api_test
spatial_pooler_py_api_test.SpatialPooler = CPPSpatialPooler
SpatialPoolerCPPAPITest = spatial_pooler_py_api_test.SpatialPoolerAPITest
if __name__ == "__main__":
unittest.main()
| badlogicmanpreet/nupic | tests/unit/nupic/research/spatial_pooler_cpp_api_test.py | Python | agpl-3.0 | 1,297 |
"""
Public interface for student training:
* Staff can create assessments for example responses.
* Students assess an example response, then compare the scores
they gave to the instructor's assessment.
"""
import logging
from django.db import DatabaseError
from django.utils.translation import ugettext as _
from openassessment.assessment.errors import StudentTrainingInternalError, StudentTrainingRequestError
from openassessment.assessment.models import InvalidRubricSelection, StudentTrainingWorkflow
from openassessment.assessment.serializers import (InvalidRubric, InvalidTrainingExample, deserialize_training_examples,
serialize_training_example, validate_training_example_format)
from submissions import api as sub_api
logger = logging.getLogger(__name__)
def submitter_is_finished(submission_uuid, training_requirements): # pylint:disable=W0613
"""
Check whether the student has correctly assessed
all the training example responses.
Args:
submission_uuid (str): The UUID of the student's submission.
training_requirements (dict): Must contain "num_required" indicating
the number of examples the student must assess.
Returns:
bool
Raises:
StudentTrainingRequestError
"""
if training_requirements is None:
return False
try:
num_required = int(training_requirements['num_required'])
except KeyError:
raise StudentTrainingRequestError(u'Requirements dict must contain "num_required" key')
except ValueError:
raise StudentTrainingRequestError(u'Number of requirements must be an integer')
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
except StudentTrainingWorkflow.DoesNotExist:
return False
else:
return workflow.num_completed >= num_required
def on_start(submission_uuid):
"""
Creates a new student training workflow.
This function should be called to indicate that a submission has entered the
student training workflow part of the assessment process.
Args:
submission_uuid (str): The submission UUID for the student that is
initiating training.
Returns:
None
Raises:
StudentTrainingInternalError: Raised when an error occurs persisting the
Student Training Workflow
"""
try:
StudentTrainingWorkflow.create_workflow(submission_uuid)
except Exception:
msg = (
u"An internal error has occurred while creating the learner "
u"training workflow for submission UUID {}".format(submission_uuid)
)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def validate_training_examples(rubric, examples):
"""
Validate that the training examples match the rubric.
Args:
rubric (dict): Serialized rubric model.
examples (list): List of serialized training examples.
Returns:
list of errors (unicode)
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> options = [
>>> {
>>> "order_num": 0,
>>> "name": "poor",
>>> "explanation": "Poor job!",
>>> "points": 0,
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "good",
>>> "explanation": "Good job!",
>>> "points": 1,
>>> },
>>> {
>>> "order_num": 2,
>>> "name": "excellent",
>>> "explanation": "Excellent job!",
>>> "points": 2,
>>> },
>>> ]
>>>
>>> rubric = {
>>> "prompts": [{"description": "Write an essay!"}],
>>> "criteria": [
>>> {
>>> "order_num": 0,
>>> "name": "vocabulary",
>>> "prompt": "How varied is the vocabulary?",
>>> "options": options
>>> },
>>> {
>>> "order_num": 1,
>>> "name": "grammar",
>>> "prompt": "How correct is the grammar?",
>>> "options": options
>>> }
>>> ]
>>> }
>>>
>>> examples = [
>>> {
>>> 'answer': {'parts': [{'text': u'Lorem ipsum'}]},
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> },
>>> {
>>> 'answer': {'parts': [{'text': u'Doler'}]},
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> errors = validate_training_examples(rubric, examples)
"""
errors = []
# Construct a list of valid options for each criterion
try:
criteria_options = {
unicode(criterion['name']): [
unicode(option['name'])
for option in criterion['options']
]
for criterion in rubric['criteria']
}
except (ValueError, KeyError):
logger.warning("Could not parse serialized rubric", exc_info=True)
return [_(u"Could not parse serialized rubric")]
# Check that at least one criterion in the rubric has options
# If this is not the case (that is, if all rubric criteria are written feedback only),
# then it doesn't make sense to do student training.
criteria_without_options = [
criterion_name
for criterion_name, criterion_option_list in criteria_options.iteritems()
if len(criterion_option_list) == 0
]
if len(set(criteria_options) - set(criteria_without_options)) == 0:
return [_(
"If your assignment includes a learner training step, "
"the rubric must have at least one criterion, "
"and that criterion must have at least one option."
)]
# Check each example
for order_num, example_dict in enumerate(examples, start=1):
# Check the structure of the example dict
is_format_valid, format_errors = validate_training_example_format(example_dict)
if not is_format_valid:
format_errors = [
_(u"Example {example_number} has a validation error: {error}").format(
example_number=order_num, error=error
)
for error in format_errors
]
errors.extend(format_errors)
else:
# Check each selected option in the example (one per criterion)
options_selected = example_dict['options_selected']
for criterion_name, option_name in options_selected.iteritems():
if criterion_name in criteria_options:
valid_options = criteria_options[criterion_name]
if option_name not in valid_options:
msg = _(
u"Example {example_number} has an invalid option "
u"for \"{criterion_name}\": \"{option_name}\""
).format(
example_number=order_num,
criterion_name=criterion_name,
option_name=option_name
)
errors.append(msg)
else:
msg = _(
u"Example {example_number} has an extra option "
u"for \"{criterion_name}\""
).format(
example_number=order_num,
criterion_name=criterion_name
)
errors.append(msg)
# Check for missing criteria
# Ignore options
all_example_criteria = set(options_selected.keys() + criteria_without_options)
for missing_criterion in set(criteria_options.keys()) - all_example_criteria:
msg = _(
u"Example {example_number} is missing an option "
u"for \"{criterion_name}\""
).format(
example_number=order_num,
criterion_name=missing_criterion
)
errors.append(msg)
return errors
def get_num_completed(submission_uuid):
"""
Get the number of training examples the student has assessed successfully.
Args:
submission_uuid (str): The UUID of the student's submission.
Returns:
int: The number of completed training examples
Raises:
StudentTrainingInternalError
Example usage:
>>> get_num_completed("5443ebbbe2297b30f503736e26be84f6c7303c57")
2
"""
try:
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
except StudentTrainingWorkflow.DoesNotExist:
return 0
else:
return workflow.num_completed
except DatabaseError:
msg = (
u"An unexpected error occurred while "
u"retrieving the learner training workflow status for submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def get_training_example(submission_uuid, rubric, examples):
"""
Retrieve a training example for the student to assess.
This will implicitly create a workflow for the student if one does not yet exist.
NOTE: We include the rubric in the returned dictionary to handle
the case in which the instructor changes the rubric definition
while the student is assessing the training example. Once a student
starts on a training example, the student should see the same training
example consistently. However, the next training example the student
retrieves will use the updated rubric.
Args:
submission_uuid (str): The UUID of the student's submission.
rubric (dict): Serialized rubric model.
examples (list): List of serialized training examples.
Returns:
dict: The training example with keys "answer", "rubric", and "options_selected".
If no training examples are available (the student has already assessed every example,
or no examples are defined), returns None.
Raises:
StudentTrainingInternalError
Example usage:
>>> examples = [
>>> {
>>>         'answer': {
>>>             'parts': [
>>>                 {'text': 'Answer part 1'},
>>>                 {'text': 'Answer part 2'},
>>>                 {'text': 'Answer part 3'}
>>>             ]
>>>         },
>>> 'options_selected': {
>>> 'vocabulary': 'good',
>>> 'grammar': 'poor'
>>> }
>>> }
>>> ]
>>>
>>> get_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57", rubric, examples)
{
'answer': {
    'parts': [
        {'text': 'Answer part 1'},
        {'text': 'Answer part 2'},
        {'text': 'Answer part 3'}
    ]
},
'rubric': {
"prompts": [
{"description": "Prompt 1"},
{"description": "Prompt 2"},
{"description": "Prompt 3"}
],
"criteria": [
{
"order_num": 0,
"name": "vocabulary",
"prompt": "How varied is the vocabulary?",
"options": options
},
{
"order_num": 1,
"name": "grammar",
"prompt": "How correct is the grammar?",
"options": options
}
],
},
'options_selected': {
'vocabulary': 'good',
'grammar': 'excellent'
}
}
"""
try:
# Validate the training examples
errors = validate_training_examples(rubric, examples)
if len(errors) > 0:
msg = (
u"Training examples do not match the rubric (submission UUID is {uuid}): {errors}"
).format(uuid=submission_uuid, errors="\n".join(errors))
raise StudentTrainingRequestError(msg)
# Get or create the workflow
workflow = StudentTrainingWorkflow.get_workflow(submission_uuid=submission_uuid)
if not workflow:
raise StudentTrainingRequestError(
u"No learner training workflow found for submission {}".format(submission_uuid)
)
# Get or create the training examples
examples = deserialize_training_examples(examples, rubric)
# Pick a training example that the student has not yet completed
# If the student already started a training example, then return that instead.
next_example = workflow.next_training_example(examples)
return None if next_example is None else serialize_training_example(next_example)
except (InvalidRubric, InvalidRubricSelection, InvalidTrainingExample) as ex:
logger.exception(
"Could not deserialize training examples for submission UUID {}".format(submission_uuid)
)
raise StudentTrainingRequestError(ex)
except sub_api.SubmissionNotFoundError as ex:
msg = u"Could not retrieve the submission with UUID {}".format(submission_uuid)
logger.exception(msg)
raise StudentTrainingRequestError(msg)
except DatabaseError:
msg = (
u"Could not retrieve a training example "
u"for the learner with submission UUID {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
def assess_training_example(submission_uuid, options_selected, update_workflow=True):
"""
Assess a training example and update the workflow.
This must be called *after* `get_training_example()`.
Args:
submission_uuid (str): The UUID of the student's submission.
options_selected (dict): The options the student selected.
Keyword Arguments:
update_workflow (bool): If true, mark the current item complete
if the student has assessed the example correctly.
Returns:
corrections (dict): Dictionary containing the correct
options for criteria the student scored incorrectly.
Raises:
StudentTrainingRequestError
StudentTrainingInternalError
Example usage:
>>> options_selected = {
>>> 'vocabulary': 'good',
>>> 'grammar': 'excellent'
>>> }
>>> assess_training_example("5443ebbbe2297b30f503736e26be84f6c7303c57", options_selected)
{'grammar': 'poor'}
"""
# Find a workflow for the student
try:
workflow = StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
# Find the item the student is currently working on
item = workflow.current_item
if item is None:
msg = (
u"No items are available in the learner training workflow associated with "
u"submission UUID {}"
).format(submission_uuid)
raise StudentTrainingRequestError(msg)
# Check the student's scores against the staff's scores.
corrections = item.check_options(options_selected)
# Mark the item as complete if the student's selection
# matches the instructor's selection
if update_workflow and len(corrections) == 0:
item.mark_complete()
return corrections
except StudentTrainingWorkflow.DoesNotExist:
msg = u"Could not find learner training workflow for submission UUID {}".format(submission_uuid)
raise StudentTrainingRequestError(msg)
except DatabaseError:
msg = (
u"An error occurred while comparing the learner's assessment "
u"to the training example. The submission UUID for the learner is {}"
).format(submission_uuid)
logger.exception(msg)
raise StudentTrainingInternalError(msg)
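# --- Illustrative usage sketch (not part of the original module) ---
# A rough walk through the public API above, assuming a configured Django
# environment and placeholder values for submission_uuid, rubric and examples:
#
#   on_start(submission_uuid)
#   example = get_training_example(submission_uuid, rubric, examples)
#   corrections = assess_training_example(submission_uuid,
#                                          example['options_selected'])
#   if not corrections:
#       # the learner's selections matched the staff assessment
#       finished = submitter_is_finished(submission_uuid,
#                                        {'num_required': len(examples)})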
| Edraak/edx-ora2 | openassessment/assessment/api/student_training.py | Python | agpl-3.0 | 16,892 |
import csv
import six
from decimal import Decimal, InvalidOperation
if six.PY3:
from io import StringIO
else:
from StringIO import StringIO
from .base import BaseSmartCSVTestCase
from .config import COLUMNS_WITH_VALUE_TRANSFORMATIONS
import smartcsv
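# --- Illustrative sketch (the real definition lives in tests/config.py) ---
# COLUMNS_WITH_VALUE_TRANSFORMATIONS is assumed to look roughly like the list
# below: one dict per column, with an optional 'transform' callable (and, for
# 'price', the 'validator' that InvalidCSVWithValueTransformations removes).
# The exact keys and values here are hypothetical.
#
#   COLUMNS_WITH_VALUE_TRANSFORMATIONS = [
#       {'name': 'title', 'required': True},
#       {'name': 'currency', 'required': True},
#       {'name': 'price', 'required': True,
#        'validator': lambda v: Decimal(v) >= 0,
#        'transform': lambda v: Decimal(v)},
#       {'name': 'in_stock', 'required': False,
#        'transform': lambda v: v == 'yes'},
#   ]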
class ValidCSVWithValueTransformations(BaseSmartCSVTestCase):
def test_valid_and_value_transformed_with_all_data(self):
"""Should transform all values (in_stock) is not required but present"""
csv_data = """
title,currency,price,in_stock
iPhone 5c blue,USD,799,yes
iPad mini,USD,699,no
"""
reader = smartcsv.reader(
StringIO(csv_data), columns=COLUMNS_WITH_VALUE_TRANSFORMATIONS)
iphone = next(reader)
ipad = next(reader)
self.assertRaises(StopIteration, lambda: list(next(reader)))
self.assertTrue(isinstance(iphone, dict) and isinstance(ipad, dict))
self.assertModelsEquals(iphone, {
'title': 'iPhone 5c blue',
'currency': 'USD',
'price': Decimal('799'),
'in_stock': True
})
self.assertModelsEquals(ipad, {
'title': 'iPad mini',
'currency': 'USD',
'price': Decimal('699'),
'in_stock': False
})
def test_valid_and_value_transformed_with_only_required_data(self):
"""Should transform all values with only required data present"""
csv_data = """
title,currency,price,in_stock
iPhone 5c blue,USD,799,
iPad mini,USD,699,
"""
reader = smartcsv.reader(
StringIO(csv_data), columns=COLUMNS_WITH_VALUE_TRANSFORMATIONS)
iphone = next(reader)
ipad = next(reader)
self.assertRaises(StopIteration, lambda: list(next(reader)))
self.assertTrue(
isinstance(iphone, dict) and isinstance(ipad, dict))
self.assertModelsEquals(iphone, {
'title': 'iPhone 5c blue',
'currency': 'USD',
'price': Decimal('799'),
'in_stock': ''
})
self.assertModelsEquals(ipad, {
'title': 'iPad mini',
'currency': 'USD',
'price': Decimal('699'),
'in_stock': ''
})
class InvalidCSVWithValueTransformations(BaseSmartCSVTestCase):
def setUp(self):
self.columns = COLUMNS_WITH_VALUE_TRANSFORMATIONS[:]
price_without_validation = self.columns[2].copy()
del price_without_validation['validator']
self.columns[2] = price_without_validation
def test_invalid_value_causes_natural_exception(self):
"""Should raise the exception raised by the transform function"""
csv_data = """
title,currency,price,in_stock
iPhone 5c blue,USD,799,
iPad mini,USD,INVALID,
"""
reader = smartcsv.reader(StringIO(csv_data), columns=self.columns)
iphone = next(reader)
self.assertRaises(InvalidOperation, lambda: next(reader))
self.assertRaises(StopIteration, lambda: list(next(reader)))
self.assertTrue(
isinstance(iphone, dict))
self.assertModelsEquals(iphone, {
'title': 'iPhone 5c blue',
'currency': 'USD',
'price': Decimal('799'),
'in_stock': ''
})
def test_invalid_value_with_fail_fast_deactivated(self):
"""Shouldn't raise the exception raised by the transform function but save it in the errors attribute"""
invalid_row = "iPad mini,USD,INVALID,"
csv_data = """
title,currency,price,in_stock
iPhone 5c blue,USD,799,
{invalid_row}
Macbook Pro,USD,1399,yes
{invalid_row}
iPod shuffle,USD,199,
""".format(invalid_row=invalid_row)
reader = smartcsv.reader(
StringIO(csv_data), columns=self.columns, fail_fast=False)
iphone = next(reader)
mac = next(reader)
ipod = next(reader)
self.assertModelsEquals(iphone, {
'title': 'iPhone 5c blue',
'currency': 'USD',
'price': Decimal('799'),
'in_stock': ''
})
self.assertModelsEquals(mac, {
'title': 'Macbook Pro',
'currency': 'USD',
'price': Decimal('1399'),
'in_stock': True
})
self.assertModelsEquals(ipod, {
'title': 'iPod shuffle',
'currency': 'USD',
'price': Decimal('199'),
'in_stock': ''
})
self.assertTrue(reader.errors is not None)
self.assertTrue('rows' in reader.errors)
self.assertTrue(1 in reader.errors['rows'])
self.assertTrue(3 in reader.errors['rows'])
self.assertTrue('transform' in reader.errors['rows'][1]['errors'])
self.assertTrue(
'InvalidOperation' in
reader.errors['rows'][1]['errors']['transform'])
self.assertTrue('transform' in reader.errors['rows'][3]['errors'])
self.assertTrue(
'InvalidOperation' in
reader.errors['rows'][3]['errors']['transform'])
self.assertRowError(reader.errors, invalid_row, 1, 'transform')
| santiagobasulto/smartcsv | tests/test_value_transformations.py | Python | mit | 5,070 |
import os
import sys
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
# Insert parent directory into path to pick up ernest code
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from ernest.main import db, app
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
config.set_main_option('sqlalchemy.url', app.config['SQLALCHEMY_DATABASE_URI'])
target_metadata = db.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool
)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
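# --- Illustrative usage (not part of the original file) ---
# With this env.py in place, migrations are typically driven from the Alembic
# command line, for example:
#
#   alembic revision -m "describe the change"   # create a new migration script
#   alembic upgrade head                        # apply migrations ("online" mode)
#   alembic upgrade head --sql                  # emit SQL only ("offline" mode)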
| willkg/ernest | alembic/env.py | Python | mpl-2.0 | 1,873 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSecuritySettings
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3beta1_generated_SecuritySettingsService_CreateSecuritySettings_sync]
from google.cloud import dialogflowcx_v3beta1
def sample_create_security_settings():
# Create a client
client = dialogflowcx_v3beta1.SecuritySettingsServiceClient()
# Initialize request argument(s)
security_settings = dialogflowcx_v3beta1.SecuritySettings()
security_settings.retention_window_days = 2271
security_settings.display_name = "display_name_value"
request = dialogflowcx_v3beta1.CreateSecuritySettingsRequest(
parent="parent_value",
security_settings=security_settings,
)
# Make the request
response = client.create_security_settings(request=request)
# Handle the response
print(response)
# [END dialogflow_v3beta1_generated_SecuritySettingsService_CreateSecuritySettings_sync]
| googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3beta1_generated_security_settings_service_create_security_settings_sync.py | Python | apache-2.0 | 1,800 |
import os
from collections import OrderedDict
"""
This module is all about constant value
"""
## *********************************************************************************** Testing configuration ************************************************************************************
DEBUG_MODE = True
FULL_SYSTEM_TEST = False
SLURM_TEST = False
DB_TEST = False
XLS_TEST = False
TEST_PROJECT_CODE = 'b2012247'
ENV_TEST_DIR = "PYTHON_TEST_DIR"
## *********************************************************************************** General configuration ************************************************************************************
DFLT_JOB_ALLOC_TIME = "4-00:00:00"
PHENOTYPE_MISSING = 0
PHENOTYPE_UNAFFECTED = 1
PHENOTYPE_AFFECTED = 2
## ************************************************************************************* GATK Best practice (DNA seq) *************************************************************************************
DNASEQ_SLURM_MONITOR_PIPELINE_BIN = 'pyCMM-dnaseq-slurm-monitor-pipeline'
DNASEQ_PIPELINE_DESCRIPTION = "A flow to control a pipeline to process DNA sequencing data"
DNASEQ_PIPELINE_DFLT_LOG_FILE = "dnaseq_pipeline"
DNASEQ_CREATE_JOB_SETUP_FILE_DESCRIPTION = "An application to generate job setup file for DNASEQ_PIPELINE"
## ************************************************************************************************ CMM DB ************************************************************************************************
DUMMY_TABLE_ANNOVAR_BIN = 'pyCMM-dummy-table-annovar'
DUMMY_TABLE_ANNOVAR_BASH = "$PYCMM/bash/table_annovar_dummy.sh"
CMMDB_PIPELINE_DESCRIPTION = "A flow to control a pipeline to process CMM database"
CMMDB_MUTSTAT_DESCRIPTION = "A flow to control a pipeline to process mutation statistics database"
CMMDB_VCFAF2ANNOVAR_DESCRIPTION = "A flow to parse mutation statistics from vcf format into annovar format"
CMMDB_TABLEANNOVAR_DESCRIPTION = "A flow to control a pipeline to run tableannovar"
CMMDB_MUTATIONREPORTS_DESCRIPTION = "A flow to control a pipeline to generate mutation reports"
CMMDB_PIPELINE_DFLT_LOG_FILE = "cmmdb_pipeline"
CMMDB_CREATE_JOB_SETUP_FILE_DESCRIPTION = "An application to generate job setup file to process CMM database"
## ******************************************************************************************* Mutation report ********************************************************************************************
MUTREP_SLURM_MONITOR_PIPELINE_BIN = 'pyCMM-mutrep-slurm-monitor-pipeline'
MUTREP_FAMILY_REPORT_BIN = 'pyCMM-mutrep-family-report'
MUTREP_SUMMARY_REPORT_BIN = 'pyCMM-mutrep-summary-report'
MUTREP_PIPELINE_DESCRIPTION = "A flow to control a pipeline to process mutations report"
MUTREP_PIPELINE_DFLT_LOG_FILE = "mutrep_pipeline"
MUTREP_FAMILY_REPORT_DESCRIPTION = "An application to generate mutation report for a given family at given regions"
MUTREP_SUMMARY_REPORT_DESCRIPTION = "An application to generate summary report at given regions"
MUTREP_CREATE_JOB_SETUP_FILE_DESCRIPTION = "An application to generate job setup file to process mutations report"
## ******************************************************************************************* Mutation DB seq report ********************************************************************************************
MUTREPDB_SEQ_REPORT_BIN = 'pyCMM-mutrepdb-seq-report'
MUTREPDB_CREATE_JOB_SETUP_FILE_DESCRIPTION = "An application to generate job setup file to process mutations report"
MUTREPDB_SEQ_REPORT_DESCRIPTION = "An application to generate sequencing report at given regions"
MUTREPDB_CONTROLLER_DESCRIPTION = "A flow to control a pipeline to process seqeuncing report"
MUTREPDB_CONTROLLER_DFLT_LOG_FILE = "mutrepdb_controller"
## ******************************************************************************************* DBMS ********************************************************************************************
#MUTREP_SLURM_MONITOR_PIPELINE_BIN = 'pyCMM-mutrep-slurm-monitor-pipeline'
#MUTREP_FAMILY_REPORT_BIN = 'pyCMM-mutrep-family-report'
DBMS_EXECUTE_DB_JOBS_BIN = 'pyCMM-dbms-execute-db-jobs'
#
#MUTREP_PIPELINE_DESCRIPTION = "A flow to control a pipeline to process mutations report"
#MUTREP_PIPELINE_DFLT_LOG_FILE = "mutrep_pipeline"
#
#MUTREP_FAMILY_REPORT_DESCRIPTION = "An appliation to generate mutation report for a given family at given regions"
DBMS_EXECUTE_DB_JOBS_DESCRIPTION = "An application to execute jobs related to SQLite DB"
DBMS_CREATE_JOB_SETUP_FILE_DESCRIPTION = "An application to generate job setup file to handle database management"
## *********************************************************************************************** PLINK *************************************************************************************************
PLINK_SLURM_MONITOR_PIPELINE_BIN = 'pyCMM-plink-slurm-monitor-pipeline'
PLINK_PIPELINE_DESCRIPTION = "A flow to control a PLINK haplotype association study"
PLINK_MERGE_HAP_ASSOCS_BIN = 'pyCMM-plink-merge-hap-assocs'
PLINK_HAP_ASSOCS_REPORT_BIN = 'pyCMM-plink-hap-assocs-report'
PLINK_PIPELINE_DESCRIPTION = "A flow to control a PLINK haplotype association study"
PLINK_MERGE_HAP_ASSOCS_DESCRIPTION = "An application to merge raw Plink haplotype association study results"
PLINK_HAP_ASSOCS_REPORT_DESCRIPTION = "A flow to generate haplotype association study report"
#PLINK_PIPELINE_DESCRIPTION = "A flow to control a PLINK pipeline"
PLINK_PIPELINE_DFLT_LOG_FILE = "plink_pipeline"
PLINK_CREATE_JOB_SETUP_FILE_DESCRIPTION = "An application to generate job setup file to run PLINK and generate report"
## > > > > > > > > > > > > > > > > > > > > > > > > > > Annovar DB configurations < < < < < < < < < < < < < < < < < < < < < < < < < <
DFLT_ANV_DB_DIR = os.environ['ANNOVAR_HUMAN_DB_DIR']
DFLT_ANV_DB_NAMES = "refGene"
DFLT_ANV_DB_OPS = "g"
DFLT_ANV_DB_NAMES += ",cytoBand"
DFLT_ANV_DB_OPS += ",r"
#DFLT_ANV_DB_NAMES += ",genomicSuperDups"
#DFLT_ANV_DB_OPS += ",r"
DFLT_ANV_DB_NAMES += ",exac03constraint"
DFLT_ANV_DB_OPS += ",r"
DFLT_ANV_DB_NAMES += ",1000g2014oct_eur"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",1000g2014oct_all"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_Swegen_20161223"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",snp138"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",exac03"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",exac03nontcga"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",gnomad_genome"
DFLT_ANV_DB_OPS += ",f"
#DFLT_ANV_DB_NAMES += ",cg69"
#DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",cosmic70"
DFLT_ANV_DB_OPS += ",f"
#DFLT_ANV_DB_NAMES += ",nci60"
#DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",spidex"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",intervar"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",clinvar_20150330"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",clinvar_20170130"
DFLT_ANV_DB_OPS += ",f"
#DFLT_ANV_DB_NAMES += ",mitimpact2"
#DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_OAF_BrC_CRC_prostate"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_OAF_BrCs"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_OAF_CRCs"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_OAF_familial_CRCs"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_OAF_CHEK2"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_OAF_EARLYONSET"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_Axeq_chr3_6_14_18"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_Axeq_chr5_19"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",CMM_Axeq_chr9"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",200_Danes"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",249_Swedes"
DFLT_ANV_DB_OPS += ",f"
DFLT_ANV_DB_NAMES += ",ljb26_all"
DFLT_ANV_DB_OPS += ",f"
## > > > > > > > > > > > > > > > > > > > > > > > > > > Annotation columns < < < < < < < < < < < < < < < < < < < < < < < < < <
# This list shows all possible annotation columns (based on the DBs annotated by table_annovar.pl) in the INFO fields.
# Note: defining CONSTANT variables is for error-detection purposes; an exception is raised if a constant is used without a pre-defined value.
# Column group tags: tags that define column groups, used for exclusion purposes.
# Group of mitochondria-related columns
MT_COLS_TAG = "MT"
# Group of Axeq chr3, 6, 14, 18 columns
AXEQ_CHR3_6_14_18_COLS_TAG = "Axeq_chr3_6_14_18"
# Group of Axeq chr5, 19 columns
AXEQ_CHR5_19_COLS_TAG = "Axeq_chr5_19"
# Group of Axeq chr9 columns
AXEQ_CHR9_COLS_TAG = "Axeq_chr9"
# WES294_OAF_CRCs is a group of colorectal cancer samples, one family each, from a group of 294 exome sequencing samples
WES294_OAF_CRCS_COLS_TAG = "WES294_CRCS"
# WES294_OAF_BrCs is a group of breast cancer samples, one family each, from a group of 294 exome sequencing samples
WES294_OAF_BRC_COLS_TAG = "WES294_BRCS"
# WES294_BRC_CRC_PROSTATE is a group of all cancer samples, one family each, from a group of 294 exome sequencing samples
WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG = "WES294_BRC_CRC_PROSTATE"
# WES294_OAF_CHEK2 is a group of CHEK2 samples, one family each, from a group of 294 exome sequencing samples
WES294_OAF_CHEK2_COLS_TAG = "WES294_CHEK2"
# WES294_OAF_EARLYONSET is a group of early-onset colorectal samples from a group of 294 exome sequencing samples
WES294_OAF_EARLYONSET_COLS_TAG = "WES294_EARLYONSET"
# WES294_OAF_FAMILIAL_CRCs is a group of familial colorectal cancer samples from a group of 294 exome sequencing samples
WES294_OAF_FAMILIAL_CRCS_COLS_TAG = "WES294_FAMILIAL_CRCS"
# Group of columns annotating detailed information of mutation statistics
MUTSTAT_DETAILS_COLS_TAG = "Mutstat_details"
# Group of LJB score columns
LJB_SCORE_COLS_TAG = "LJB_score"
# Group of ExAC population frequencies besides ExAC_ALL and ExAC_NFE
EXAC_OTH_COLS_TAG = "ExAC_Other"
# Group of GNOMAD population frequencies besides ALL and NFE
GNOMAD_OTH_COLS_TAG = "GNOMAD_Other"
# Group of ExAC constraint indicating mutation intolerance for the gene
EXAC_CONSTRAINT_COLS_TAG = "ExAC_constraint"
# Group of other frequency-related columns from SWEGEN besides allele frequency
SWEGEN_OTH_COLS_TAG = "SWEGEN_Other"
# Group of KVOT calculation
KVOT_COLS_TAG = "KVOT"
# Group of columns that I don't really sure what it is about
UNKNOWN_COLS_TAG = "Unknown"
# Mitochondria-related columns
GENE_SYMBOL_COL_NAME = "Gene_symbol"
OXPHOS_COMPLEX_COL_NAME = "OXPHOS_Complex"
ENSEMBL_GENE_ID_COL_NAME = "Ensembl_Gene_ID"
ENSEMBL_PROTEIN_ID_COL_NAME = "Ensembl_Protein_ID"
UNIPROT_NAME_COL_NAME = "Uniprot_Name"
UNIPROT_ID_COL_NAME = "Uniprot_ID"
NCBI_GENE_ID_COL_NAME = "NCBI_Gene_ID"
NCBI_PROTEIN_ID_COL_NAME = "NCBI_Protein_ID"
GENE_POS_COL_NAME = "Gene_pos"
AA_POS_COL_NAME = "AA_pos"
AA_SUB_COL_NAME = "AA_sub"
CODON_SUB_COL_NAME = "Codon_sub"
DBSNP_ID_COL_NAME = "dbSNP_ID"
PHYLOP_46V_COL_NAME = "PhyloP_46V"
PHASTCONS_46V_COL_NAME = "PhastCons_46V"
PHYLOP_100V_COL_NAME = "PhyloP_100V"
PHASTCONS_100V_COL_NAME = "PhastCons_100V"
SITEVAR_COL_NAME = "SiteVar"
POLYPHEN2_PREDICTION_COL_NAME = "PolyPhen2_prediction"
POLYPHEN2_SCORE_COL_NAME = "PolyPhen2_score"
SIFT_PREDICTION_COL_NAME = "SIFT_prediction"
SIFT_SCORE_COL_NAME = "SIFT_score"
FATHMM_PREDICTION_COL_NAME = "FatHmm_prediction"
FATHMM_SCORE_COL_NAME = "FatHmm_score"
PROVEAN_PREDICTION_COL_NAME = "PROVEAN_prediction"
PROVEAN_SCORE_COL_NAME = "PROVEAN_score"
MUTASS_PREDICTION_COL_NAME = "MutAss_prediction"
MUTASS_SCORE_COL_NAME = "MutAss_score"
EFIN_SWISS_PROT_SCORE_COL_NAME = "EFIN_Swiss_Prot_Score"
EFIN_SWISS_PROT_PREDICTION_COL_NAME = "EFIN_Swiss_Prot_Prediction"
EFIN_HUMDIV_SCORE_COL_NAME = "EFIN_HumDiv_Score"
EFIN_HUMDIV_PREDICTION_COL_NAME = "EFIN_HumDiv_Prediction"
CADD_SCORE_COL_NAME = "CADD_score"
CADD_PHRED_SCORE_COL_NAME = "CADD_Phred_score"
CADD_PREDICTION_COL_NAME = "CADD_prediction"
CAROL_PREDICTION_COL_NAME = "Carol_prediction"
CAROL_SCORE_COL_NAME = "Carol_score"
CONDEL_SCORE_COL_NAME = "Condel_score"
CONDEL_PRED_COL_NAME = "Condel_pred"
COVEC_WMV_COL_NAME = "COVEC_WMV"
COVEC_WMV_PREDICTION_COL_NAME = "COVEC_WMV_prediction"
POLYPHEN2_SCORE_TRANSF_COL_NAME = "PolyPhen2_score_transf"
POLYPHEN2_PRED_TRANSF_COL_NAME = "PolyPhen2_pred_transf"
SIFT_SCORE_TRANSF_COL_NAME = "SIFT_score_transf"
SIFT_PRED_TRANSF_COL_NAME = "SIFT_pred_transf"
MUTASS_SCORE_TRANSF_COL_NAME = "MutAss_score_transf"
MUTASS_PRED_TRANSF_COL_NAME = "MutAss_pred_transf"
PERC_COEVO_SITES_COL_NAME = "Perc_coevo_Sites"
MEAN_MI_SCORE_COL_NAME = "Mean_MI_score"
COSMIC_ID_COL_NAME = "COSMIC_ID"
TUMOR_SITE_COL_NAME = "Tumor_site"
EXAMINED_SAMPLES_COL_NAME = "Examined_samples"
MUTATION_FREQUENCY_COL_NAME = "Mutation_frequency"
US_COL_NAME = "US"
STATUS_COL_NAME = "Status"
ASSOCIATED_DISEASE_COL_NAME = "Associated_disease"
PRESENCE_IN_TD_COL_NAME = "Presence_in_TD"
CLASS_PREDICTED_COL_NAME = "Class_predicted"
PROB_N_COL_NAME = "Prob_N"
PROB_P_COL_NAME = "Prob_P"
MT_ANNO_COLS = OrderedDict()
MT_ANNO_COLS[GENE_SYMBOL_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[OXPHOS_COMPLEX_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[ENSEMBL_GENE_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[ENSEMBL_PROTEIN_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[UNIPROT_NAME_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[UNIPROT_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[NCBI_GENE_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[NCBI_PROTEIN_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[GENE_POS_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[AA_POS_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[AA_SUB_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CODON_SUB_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[DBSNP_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PHYLOP_46V_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PHASTCONS_46V_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PHYLOP_100V_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PHASTCONS_100V_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[SITEVAR_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[POLYPHEN2_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[POLYPHEN2_SCORE_COL_NAME] = [MT_COLS_TAG]
#MT_ANNO_COLS[SIFT_PREDICTION_COL_NAME] = [MT_COLS_TAG]
#MT_ANNO_COLS[SIFT_SCORE_COL_NAME] = [MT_COLS_TAG]
#MT_ANNO_COLS[FATHMM_PREDICTION_COL_NAME] = [MT_COLS_TAG]
#MT_ANNO_COLS[FATHMM_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PROVEAN_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PROVEAN_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[MUTASS_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[MUTASS_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[EFIN_SWISS_PROT_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[EFIN_SWISS_PROT_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[EFIN_HUMDIV_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[EFIN_HUMDIV_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CADD_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CADD_PHRED_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CADD_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CAROL_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CAROL_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CONDEL_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CONDEL_PRED_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[COVEC_WMV_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[COVEC_WMV_PREDICTION_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[POLYPHEN2_SCORE_TRANSF_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[POLYPHEN2_PRED_TRANSF_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[SIFT_SCORE_TRANSF_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[SIFT_PRED_TRANSF_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[MUTASS_SCORE_TRANSF_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[MUTASS_PRED_TRANSF_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PERC_COEVO_SITES_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[MEAN_MI_SCORE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[COSMIC_ID_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[TUMOR_SITE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[EXAMINED_SAMPLES_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[MUTATION_FREQUENCY_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[US_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[STATUS_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[ASSOCIATED_DISEASE_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PRESENCE_IN_TD_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[CLASS_PREDICTED_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PROB_N_COL_NAME] = [MT_COLS_TAG]
MT_ANNO_COLS[PROB_P_COL_NAME] = [MT_COLS_TAG]
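# --- Illustrative sketch (not part of the original module) ---
# Each entry above maps a column name to a list of group tags, so a whole
# group (e.g. every mitochondria-related column) can be excluded by filtering
# on its tag. A hypothetical helper could look like:
#
#   def exclude_cols_by_tag(anno_cols, excluded_tag):
#       return OrderedDict((name, tags) for name, tags in anno_cols.items()
#                          if excluded_tag not in tags)
#
#   # exclude_cols_by_tag(MT_ANNO_COLS, MT_COLS_TAG) drops all MT columns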
# Effect predictor columns
LJB_SIFT_PREDICTION_COL_NAME = "SIFT_pred"
LJB_POLYPHEN2_HDIV_PREDICTION_COL_NAME = "Polyphen2_HDIV_pred"
LJB_POLYPHEN2_HVAR_PREDICTION_COL_NAME = "Polyphen2_HVAR_pred"
LJB_LRT_PREDICTION_COL_NAME = "LRT_pred"
LJB_MUTATIONTASTER_PREDICTION_COL_NAME = "MutationTaster_pred"
LJB_MUTATIONASSESSOR_PREDICTION_COL_NAME = "MutationAssessor_pred"
LJB_FATHMM_PREDICTION_COL_NAME = "FATHMM_pred"
LJB_RADIALSVM_PREDICTION_COL_NAME = "RadialSVM_pred"
LJB_METASVM_PREDICTION_COL_NAME = "MetaSVM_pred"
LJB_LR_PREDICTION_COL_NAME = "LR_pred"
LJB_METALR_PREDICTION_COL_NAME = "MetaLR_pred"
PATHOGENIC_COUNT_COL_NAME = "Pathogenic_count"
PREDICTION_COLS = {}
PREDICTION_COLS[LJB_SIFT_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_POLYPHEN2_HDIV_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_POLYPHEN2_HVAR_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_LRT_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_MUTATIONTASTER_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_MUTATIONASSESSOR_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_FATHMM_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_RADIALSVM_PREDICTION_COL_NAME] = []
PREDICTION_COLS[LJB_LR_PREDICTION_COL_NAME] = []
# Other columns
FUNC_REFGENE_COL_NAME = "Func.refGene"
EXONICFUNC_REFGENE_COL_NAME = "ExonicFunc.refGene"
GENE_REFGENE_COL_NAME = "Gene.refGene"
GENEDETAIL_REFGENE_COL_NAME = "GeneDetail.refGene"
SNP138_COL_NAME = "snp138"
AVSNP144_COL_NAME ="avsnp144"
MAX_REF_MAF_COL_NAME = "MAX_REF_MAF"
KG2014OCT_EUR_COL_NAME = "1000g2014oct_eur"
KG2014OCT_ALL_COL_NAME = "1000g2014oct_all"
AXEQ_CHR3_6_14_18_PF_COL_NAME = "AXEQ_CHR3_6_14_18_PF"
AXEQ_CHR3_6_14_18_GF_COL_NAME = "AXEQ_CHR3_6_14_18_GF"
AXEQ_CHR5_19_PF_COL_NAME = "AXEQ_CHR5_19_PF"
AXEQ_CHR5_19_GF_COL_NAME = "AXEQ_CHR5_19_GF"
AXEQ_CHR9_PF_COL_NAME = "AXEQ_CHR9_PF"
AXEQ_CHR9_GF_COL_NAME = "AXEQ_CHR9_GF"
SWEDES_COL_NAME = "249_SWEDES"
DANES_COL_NAME = "200_DANES"
SWEGEN_HET_COL_NAME = "SWEGEN_HET"
SWEGEN_HOM_COL_NAME = "SWEGEN_HOM"
SWEGEN_HEMI_COL_NAME = "SWEGEN_HEMI"
SWEGEN_AC_COL_NAME = "SWEGEN_AC"
SWEGEN_GT_COL_NAME = "SWEGEN_GT"
SWEGEN_GF_COL_NAME = "SWEGEN_GF"
SWEGEN_AF_COL_NAME = "SWEGEN_AF"
SWEGEN_PF_COL_NAME = "SWEGEN_PF"
EXAC_ALL_COL_NAME = "ExAC_ALL"
EXAC_AFR_COL_NAME = "ExAC_AFR"
EXAC_AMR_COL_NAME = "ExAC_AMR"
EXAC_EAS_COL_NAME = "ExAC_EAS"
EXAC_FIN_COL_NAME = "ExAC_FIN"
EXAC_NFE_COL_NAME = "ExAC_NFE"
EXAC_OTH_COL_NAME = "ExAC_OTH"
EXAC_SAS_COL_NAME = "ExAC_SAS"
EXAC_NONTCGA_ALL_COL_NAME = "ExAC_nontcga_ALL"
EXAC_NONTCGA_AFR_COL_NAME = "ExAC_nontcga_AFR"
EXAC_NONTCGA_AMR_COL_NAME = "ExAC_nontcga_AMR"
EXAC_NONTCGA_EAS_COL_NAME = "ExAC_nontcga_EAS"
EXAC_NONTCGA_FIN_COL_NAME = "ExAC_nontcga_FIN"
EXAC_NONTCGA_NFE_COL_NAME = "ExAC_nontcga_NFE"
EXAC_NONTCGA_OTH_COL_NAME = "ExAC_nontcga_OTH"
EXAC_NONTCGA_SAS_COL_NAME = "ExAC_nontcga_SAS"
EXAC03_CONSTRAINT_COL_NAME = 'exac03constraint'
GNOMAD_GENOME_ALL_COL_NAME = "gnomAD_genome_ALL"
GNOMAD_GENOME_AFR_COL_NAME = "gnomAD_genome_AFR"
GNOMAD_GENOME_AMR_COL_NAME = "gnomAD_genome_AMR"
GNOMAD_GENOME_ASJ_COL_NAME = "gnomAD_genome_ASJ"
GNOMAD_GENOME_EAS_COL_NAME = "gnomAD_genome_EAS"
GNOMAD_GENOME_FIN_COL_NAME = "gnomAD_genome_FIN"
GNOMAD_GENOME_NFE_COL_NAME = "gnomAD_genome_NFE"
GNOMAD_GENOME_OTH_COL_NAME = "gnomAD_genome_OTH"
EXAC03_CONSTRAINT_EXP_SYN_COL_NAME = "EXAC03_CONSTRAINT_EXP_SYN"
EXAC03_CONSTRAINT_N_SYN_COL_NAME = "EXAC03_CONSTRAINT_N_SYN"
EXAC03_CONSTRAINT_SYN_Z_COL_NAME = "EXAC03_CONSTRAINT_SYN_Z"
EXAC03_CONSTRAINT_EXP_MIS_COL_NAME = "EXAC03_CONSTRAINT_EXP_MIS"
EXAC03_CONSTRAINT_N_MIS_COL_NAME = "EXAC03_CONSTRAINT_N_MIS"
EXAC03_CONSTRAINT_MIS_Z_COL_NAME = "EXAC03_CONSTRAINT_MIS_Z"
EXAC03_CONSTRAINT_EXP_LOF_COL_NAME = "EXAC03_CONSTRAINT_EXP_LOF"
EXAC03_CONSTRAINT_N_LOF_COL_NAME = "EXAC03_CONSTRAINT_N_LOF"
EXAC03_CONSTRAINT_PLI_COL_NAME = "EXAC03_CONSTRAINT_PLI"
AACHANGE_REFGENE_COL_NAME = "AAChange.refGene"
CYTOBAND_COL_NAME = "cytoBand"
#GENOMICSUPERDUPS_COL_NAME = "genomicSuperDups"
#CG69_COL_NAME = "cg69"
COSMIC70_COL_NAME = "cosmic70"
#NCI60_COL_NAME = "nci60"
CLINSIG_COL_NAME = "CLINSIG"
CLNDBN_COL_NAME = "CLNDBN"
CLNACC_COL_NAME = "CLNACC"
CLNDSDB_COL_NAME = "CLNDSDB"
CLNDSDBID_COL_NAME = "CLNDSDBID"
CLINVAR_20150330_COL_NAME = "clinvar_20150330"
WES294_OAF_BRCS_WT_COL_NAME = "OAF_BRCS_WT"
WES294_OAF_BRCS_HET_COL_NAME = "OAF_BRCS_HET"
WES294_OAF_BRCS_HOM_COL_NAME = "OAF_BRCS_HOM"
WES294_OAF_BRCS_OTH_COL_NAME = "OAF_BRCS_OTH"
WES294_OAF_BRCS_NA_COL_NAME = "OAF_BRCS_NA"
WES294_OAF_BRCS_GT_COL_NAME = "OAF_BRCS_GT"
WES294_OAF_BRCS_PF_COL_NAME = "OAF_BRCS_PF"
WES294_OAF_BRCS_AF_COL_NAME = "OAF_BRCS_AF"
WES294_OAF_BRCS_GF_COL_NAME = "OAF_BRCS_GF"
WES294_OAF_CRCS_WT_COL_NAME = "OAF_CRCS_WT"
WES294_OAF_CRCS_HET_COL_NAME = "OAF_CRCS_HET"
WES294_OAF_CRCS_HOM_COL_NAME = "OAF_CRCS_HOM"
WES294_OAF_CRCS_OTH_COL_NAME = "OAF_CRCS_OTH"
WES294_OAF_CRCS_NA_COL_NAME = "OAF_CRCS_NA"
WES294_OAF_CRCS_GT_COL_NAME = "OAF_CRCS_GT"
WES294_OAF_CRCS_PF_COL_NAME = "OAF_CRCS_PF"
WES294_OAF_CRCS_AF_COL_NAME = "OAF_CRCS_AF"
WES294_OAF_CRCS_GF_COL_NAME = "OAF_CRCS_GF"
WES294_OAF_BRC_CRC_PROSTATE_WT_COL_NAME = "OAF_BRC_CRC_PROSTATE_WT"
WES294_OAF_BRC_CRC_PROSTATE_HET_COL_NAME = "OAF_BRC_CRC_PROSTATE_HET"
WES294_OAF_BRC_CRC_PROSTATE_HOM_COL_NAME = "OAF_BRC_CRC_PROSTATE_HOM"
WES294_OAF_BRC_CRC_PROSTATE_OTH_COL_NAME = "OAF_BRC_CRC_PROSTATE_OTH"
WES294_OAF_BRC_CRC_PROSTATE_NA_COL_NAME = "OAF_BRC_CRC_PROSTATE_NA"
WES294_OAF_BRC_CRC_PROSTATE_GT_COL_NAME = "OAF_BRC_CRC_PROSTATE_GT"
WES294_OAF_BRC_CRC_PROSTATE_PF_COL_NAME = "OAF_BRC_CRC_PROSTATE_PF"
WES294_OAF_BRC_CRC_PROSTATE_AF_COL_NAME = "OAF_BRC_CRC_PROSTATE_AF"
WES294_OAF_BRC_CRC_PROSTATE_GF_COL_NAME = "OAF_BRC_CRC_PROSTATE_GF"
WES294_OAF_CHEK2_WT_COL_NAME = "OAF_CHEK2_WT"
WES294_OAF_CHEK2_HET_COL_NAME = "OAF_CHEK2_HET"
WES294_OAF_CHEK2_HOM_COL_NAME = "OAF_CHEK2_HOM"
WES294_OAF_CHEK2_OTH_COL_NAME = "OAF_CHEK2_OTH"
WES294_OAF_CHEK2_NA_COL_NAME = "OAF_CHEK2_NA"
WES294_OAF_CHEK2_GT_COL_NAME = "OAF_CHEK2_GT"
WES294_OAF_CHEK2_PF_COL_NAME = "OAF_CHEK2_PF"
WES294_OAF_CHEK2_AF_COL_NAME = "OAF_CHEK2_AF"
WES294_OAF_CHEK2_GF_COL_NAME = "OAF_CHEK2_GF"
WES294_OAF_EARLYONSET_WT_COL_NAME = "OAF_EARLYONSET_WT"
WES294_OAF_EARLYONSET_HET_COL_NAME = "OAF_EARLYONSET_HET"
WES294_OAF_EARLYONSET_HOM_COL_NAME = "OAF_EARLYONSET_HOM"
WES294_OAF_EARLYONSET_OTH_COL_NAME = "OAF_EARLYONSET_OTH"
WES294_OAF_EARLYONSET_NA_COL_NAME = "OAF_EARLYONSET_NA"
WES294_OAF_EARLYONSET_GT_COL_NAME = "OAF_EARLYONSET_GT"
WES294_OAF_EARLYONSET_PF_COL_NAME = "OAF_EARLYONSET_PF"
WES294_OAF_FAMILIAL_CRCS_WT_COL_NAME = "OAF_FAMILIAL_CRCS_WT"
WES294_OAF_FAMILIAL_CRCS_HET_COL_NAME = "OAF_FAMILIAL_CRCS_HET"
WES294_OAF_FAMILIAL_CRCS_HOM_COL_NAME = "OAF_FAMILIAL_CRCS_HOM"
WES294_OAF_FAMILIAL_CRCS_OTH_COL_NAME = "OAF_FAMILIAL_CRCS_OTH"
WES294_OAF_FAMILIAL_CRCS_NA_COL_NAME = "OAF_FAMILIAL_CRCS_NA"
WES294_OAF_FAMILIAL_CRCS_GT_COL_NAME = "OAF_FAMILIAL_CRCS_GT"
WES294_OAF_FAMILIAL_CRCS_PF_COL_NAME = "OAF_FAMILIAL_CRCS_PF"
WES294_OAF_FAMILIAL_CRCS_AF_COL_NAME = "OAF_FAMILIAL_CRCS_AF"
WES294_OAF_FAMILIAL_CRCS_GF_COL_NAME = "OAF_FAMILIAL_CRCS_GF"
EST_KVOT_EARLYONSET_VS_BRC_COL_NAME = "KVOT_EARLYONSET_VS_BRC_ESTIMATED"
EST_KVOT_EARLYONSET_VS_EXAC_NFE_COL_NAME = "KVOT_EARLYONSET_VS_EXAC_NFE_ESTIMATED"
EST_KVOT_EARLYONSET_VS_KG_EUR_COL_NAME = "KVOT_EARLYONSET_VS_1000G_EUR_ESTIMATED"
EST_KVOT_EARLYONSET_VS_SWEGEN_COL_NAME = "KVOT_EARLYONSET_VS_SWEGEN_ESTIMATED"
WES294_OAF_EARLYONSET_AF_COL_NAME = "OAF_EARLYONSET_AF"
WES294_OAF_EARLYONSET_GF_COL_NAME = "OAF_EARLYONSET_GF"
AXEQ_CHR3_6_14_18_WT_COL_NAME = "AXEQ_CHR3_6_14_18_WT"
AXEQ_CHR3_6_14_18_HET_COL_NAME = "AXEQ_CHR3_6_14_18_HET"
AXEQ_CHR3_6_14_18_HOM_COL_NAME = "AXEQ_CHR3_6_14_18_HOM"
AXEQ_CHR3_6_14_18_OTH_COL_NAME = "AXEQ_CHR3_6_14_18_OTH"
AXEQ_CHR3_6_14_18_NA_COL_NAME = "AXEQ_CHR3_6_14_18_NA"
AXEQ_CHR3_6_14_18_GT_COL_NAME = "AXEQ_CHR3_6_14_18_GT"
AXEQ_CHR3_6_14_18_AF_COL_NAME = "AXEQ_CHR3_6_14_18_AF"
AXEQ_CHR5_19_WT_COL_NAME = "AXEQ_CHR5_19_WT"
AXEQ_CHR5_19_HET_COL_NAME = "AXEQ_CHR5_19_HET"
AXEQ_CHR5_19_HOM_COL_NAME = "AXEQ_CHR5_19_HOM"
AXEQ_CHR5_19_OTH_COL_NAME = "AXEQ_CHR5_19_OTH"
AXEQ_CHR5_19_NA_COL_NAME = "AXEQ_CHR5_19_NA"
AXEQ_CHR5_19_GT_COL_NAME = "AXEQ_CHR5_19_GT"
AXEQ_CHR5_19_AF_COL_NAME = "AXEQ_CHR5_19_AF"
AXEQ_CHR9_WT_COL_NAME = "AXEQ_CHR9_WT"
AXEQ_CHR9_HET_COL_NAME = "AXEQ_CHR9_HET"
AXEQ_CHR9_HOM_COL_NAME = "AXEQ_CHR9_HOM"
AXEQ_CHR9_OTH_COL_NAME = "AXEQ_CHR9_OTH"
AXEQ_CHR9_NA_COL_NAME = "AXEQ_CHR9_NA"
AXEQ_CHR9_GT_COL_NAME = "AXEQ_CHR9_GT"
AXEQ_CHR9_AF_COL_NAME = "AXEQ_CHR9_AF"
SPIDEX_DPSI_MAX_TISSUE_COL_NAME = "dpsi_max_tissue"
SPIDEX_DPSI_ZSCORE_COL_NAME = "dpsi_zscore"
INTERVAR_CLASS_COL_NAME = "InterVar_class"
INTERVAR_PVS1_EVIDENCE_COL_NAME = "InterVar_PVS1_evidence"
INTERVAR_PS1_EVIDENCE_COL_NAME = "InterVar_PS1_evidence"
INTERVAR_PS2_EVIDENCE_COL_NAME = "InterVar_PS2_evidence"
INTERVAR_PS3_EVIDENCE_COL_NAME = "InterVar_PS3_evidence"
INTERVAR_PS4_EVIDENCE_COL_NAME = "InterVar_PS4_evidence"
INTERVAR_PM1_EVIDENCE_COL_NAME = "InterVar_PM1_evidence"
INTERVAR_PM2_EVIDENCE_COL_NAME = "InterVar_PM2_evidence"
INTERVAR_PM3_EVIDENCE_COL_NAME = "InterVar_PM3_evidence"
INTERVAR_PM4_EVIDENCE_COL_NAME = "InterVar_PM4_evidence"
INTERVAR_PM5_EVIDENCE_COL_NAME = "InterVar_PM5_evidence"
INTERVAR_PM6_EVIDENCE_COL_NAME = "InterVar_PM6_evidence"
INTERVAR_PP1_EVIDENCE_COL_NAME = "InterVar_PP1_evidence"
INTERVAR_PP2_EVIDENCE_COL_NAME = "InterVar_PP2_evidence"
INTERVAR_PP3_EVIDENCE_COL_NAME = "InterVar_PP3_evidence"
INTERVAR_PP4_EVIDENCE_COL_NAME = "InterVar_PP4_evidence"
INTERVAR_PP5_EVIDENCE_COL_NAME = "InterVar_PP5_evidence"
INTERVAR_BA1_EVIDENCE_COL_NAME = "InterVar_BA1_evidence"
INTERVAR_BS1_EVIDENCE_COL_NAME = "InterVar_BS1_evidence"
INTERVAR_BS2_EVIDENCE_COL_NAME = "InterVar_BS2_evidence"
INTERVAR_BS3_EVIDENCE_COL_NAME = "InterVar_BS3_evidence"
INTERVAR_BS4_EVIDENCE_COL_NAME = "InterVar_BS4_evidence"
INTERVAR_BP1_EVIDENCE_COL_NAME = "InterVar_BP1_evidence"
INTERVAR_BP2_EVIDENCE_COL_NAME = "InterVar_BP2_evidence"
INTERVAR_BP3_EVIDENCE_COL_NAME = "InterVar_BP3_evidence"
INTERVAR_BP4_EVIDENCE_COL_NAME = "InterVar_BP4_evidence"
INTERVAR_BP5_EVIDENCE_COL_NAME = "InterVar_BP5_evidence"
INTERVAR_BP6_EVIDENCE_COL_NAME = "InterVar_BP6_evidence"
INTERVAR_BP7_EVIDENCE_COL_NAME = "InterVar_BP7_evidence"
SIFT_SCORE_COL_NAME = "SIFT_score"
POLYPHEN2_HDIV_SCORE_COL_NAME = "Polyphen2_HDIV_score"
POLYPHEN2_HVAR_SCORE_COL_NAME = "Polyphen2_HVAR_score"
LRT_SCORE_COL_NAME = "LRT_score"
MUTATIONTASTER_SCORE_COL_NAME = "MutationTaster_score"
MUTATIONASSESSOR_SCORE_COL_NAME = "MutationAssessor_score"
FATHMM_SCORE_COL_NAME = "FATHMM_score"
RADIALSVM_SCORE_COL_NAME = "RadialSVM_score"
LR_SCORE_COL_NAME = "LR_score"
VEST3_SCORE_COL_NAME = "VEST3_score"
CADD_RAW_COL_NAME = "CADD_raw"
CADD_PHRED_COL_NAME = "CADD_phred"
GERP_RS_COL_NAME = "GERP++_RS"
PHYLOP46WAY_PLACENTAL_COL_NAME = "phyloP46way_placental"
PHYLOP100WAY_VERTEBRATE_COL_NAME = "phyloP100way_vertebrate"
SIPHY_29WAY_LOGODDS_COL_NAME = "SiPhy_29way_logOdds"
TARGETSCANS_COL_NAME = 'targetScanS'
WGRNA_COL_NAME = 'wgRna'
TFBSCONSSITES_COL_NAME = 'tfbsConsSites'
COMPOUND_HETEROZYGOTE_AFFECTED_COUNT_COL_NAME = "compound_heterozygote_affected_count"
COMPOUND_HETEROZYGOTE_UNAFFECTED_COUNT_COL_NAME = "compound_heterozygote_unaffected_count"
COMPOUND_HETEROZYGOTE_FREQ_RATIO_COL_NAME = "compound_heterozygote_frequency_ratio"
HOMOZYGOTE_AFFECTED_COUNT_COL_NAME = "homozygote_affected_count"
HOMOZYGOTE_UNAFFECTED_COUNT_COL_NAME = "homozygote_unaffected_count"
HOMOZYGOTE_FREQ_RATIO_COL_NAME = "homozygote_frequency_ratio"
# This will be obsolete in mutrepdb
INTERVAR_AND_EVIDENCE_COL_NAME = "InterVar:InterVarAndEvidence"
INTERVAR_EVIDENCE_COL_NAME = "InterVar_evidence"
RECESSIVE_STUDY_COL_NAMES = []
RECESSIVE_STUDY_COL_NAMES.append(COMPOUND_HETEROZYGOTE_AFFECTED_COUNT_COL_NAME)
RECESSIVE_STUDY_COL_NAMES.append(COMPOUND_HETEROZYGOTE_UNAFFECTED_COUNT_COL_NAME)
RECESSIVE_STUDY_COL_NAMES.append(COMPOUND_HETEROZYGOTE_FREQ_RATIO_COL_NAME)
RECESSIVE_STUDY_COL_NAMES.append(HOMOZYGOTE_AFFECTED_COUNT_COL_NAME)
RECESSIVE_STUDY_COL_NAMES.append(HOMOZYGOTE_UNAFFECTED_COUNT_COL_NAME)
RECESSIVE_STUDY_COL_NAMES.append(HOMOZYGOTE_FREQ_RATIO_COL_NAME)
PRIMARY_MAF_VAR = KG2014OCT_EUR_COL_NAME
DFLT_MUTREP_FREQ_RATIOS = PRIMARY_MAF_VAR + ":0.2"
FUNC_REFGENE_VAR = FUNC_REFGENE_COL_NAME
EXONICFUNC_REFGENE_VAR = EXONICFUNC_REFGENE_COL_NAME
REF_MAF_COL_NAMES = OrderedDict()
REF_MAF_COL_NAMES[SWEGEN_AF_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_NFE_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_ALL_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_AFR_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_AMR_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_EAS_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_FIN_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_OTH_COL_NAME] = []
REF_MAF_COL_NAMES[EXAC_SAS_COL_NAME] = []
REF_MAF_COL_NAMES[KG2014OCT_EUR_COL_NAME] = []
REF_MAF_COL_NAMES[KG2014OCT_ALL_COL_NAME] = []
REF_MAF_COL_NAMES[SWEDES_COL_NAME] = []
REF_MAF_COL_NAMES[DANES_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_ALL_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_AFR_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_AMR_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_ASJ_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_EAS_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_FIN_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_NFE_COL_NAME] = []
REF_MAF_COL_NAMES[GNOMAD_GENOME_OTH_COL_NAME] = []
# Defining all annotation columns
# They are assigned manually because not all columns have their own tags
# This is the place to define the column order
ALL_MUTREP_ANNO_COLS = OrderedDict()
ALL_MUTREP_ANNO_COLS[FUNC_REFGENE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXONICFUNC_REFGENE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SNP138_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AVSNP144_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GENE_REFGENE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GENEDETAIL_REFGENE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AACHANGE_REFGENE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CYTOBAND_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[MAX_REF_MAF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EST_KVOT_EARLYONSET_VS_SWEGEN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EST_KVOT_EARLYONSET_VS_EXAC_NFE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NFE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_ALL_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_NFE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_ALL_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EST_KVOT_EARLYONSET_VS_KG_EUR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[KG2014OCT_EUR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[KG2014OCT_ALL_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EST_KVOT_EARLYONSET_VS_BRC_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_ALL_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_NFE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_AF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_GF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEDES_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[DANES_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_HEMI_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_AC_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SWEGEN_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_AFR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_AMR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_EAS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_FIN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_SAS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_AFR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_AMR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_EAS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_FIN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC_NONTCGA_SAS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_AFR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_AMR_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_ASJ_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_EAS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_FIN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GNOMAD_GENOME_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_EXP_SYN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_N_SYN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_SYN_Z_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_EXP_MIS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_N_MIS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_MIS_Z_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_EXP_LOF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_N_LOF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[EXAC03_CONSTRAINT_PLI_COL_NAME] = []
#ALL_MUTREP_ANNO_COLS[GENOMICSUPERDUPS_COL_NAME] = []
#ALL_MUTREP_ANNO_COLS[CG69_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[COSMIC70_COL_NAME] = []
#ALL_MUTREP_ANNO_COLS[NCI60_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CLINVAR_20150330_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CLINSIG_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CLNDBN_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CLNACC_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CLNDSDB_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CLNDSDBID_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SPIDEX_DPSI_MAX_TISSUE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SPIDEX_DPSI_ZSCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[INTERVAR_CLASS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[INTERVAR_EVIDENCE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[TARGETSCANS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WGRNA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[TFBSCONSSITES_COL_NAME] = []
#ALL_MUTREP_ANNO_COLS.update(MT_ANNO_COLS)
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR3_6_14_18_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR5_19_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[AXEQ_CHR9_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CRCS_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRCS_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_BRC_CRC_PROSTATE_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_CHEK2_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_EARLYONSET_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_HET_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_HOM_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_OTH_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_NA_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_GT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_PF_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[WES294_OAF_FAMILIAL_CRCS_WT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[PATHOGENIC_COUNT_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[SIFT_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_SIFT_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[POLYPHEN2_HDIV_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_POLYPHEN2_HDIV_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[POLYPHEN2_HVAR_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_POLYPHEN2_HVAR_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LRT_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_LRT_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[MUTATIONTASTER_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_MUTATIONTASTER_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[MUTATIONASSESSOR_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_MUTATIONASSESSOR_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[FATHMM_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_FATHMM_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[RADIALSVM_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_RADIALSVM_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_METASVM_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LR_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_LR_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[LJB_METALR_PREDICTION_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[VEST3_SCORE_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CADD_RAW_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[CADD_PHRED_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[GERP_RS_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[PHYLOP46WAY_PLACENTAL_COL_NAME] = []
ALL_MUTREP_ANNO_COLS[PHYLOP100WAY_VERTEBRATE_COL_NAME] = []
FORMAT_COL_FLOAT = "fmt_float"
FORMAT_COL_INT = "fmt_int"
FORMAT_COLS = {}
FORMAT_COLS[SPIDEX_DPSI_MAX_TISSUE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[SPIDEX_DPSI_ZSCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC03_CONSTRAINT_EXP_SYN_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC03_CONSTRAINT_N_SYN_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[EXAC03_CONSTRAINT_SYN_Z_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC03_CONSTRAINT_EXP_MIS_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC03_CONSTRAINT_N_MIS_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[EXAC03_CONSTRAINT_MIS_Z_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC03_CONSTRAINT_EXP_LOF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC03_CONSTRAINT_N_LOF_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[EXAC03_CONSTRAINT_PLI_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_BRCS_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRCS_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRCS_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRCS_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRCS_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRCS_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRCS_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_BRCS_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_BRCS_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_CRCS_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CRCS_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CRCS_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CRCS_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CRCS_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CRCS_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CRCS_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_CRCS_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_CRCS_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_BRC_CRC_PROSTATE_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_CHEK2_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CHEK2_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CHEK2_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CHEK2_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CHEK2_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CHEK2_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_CHEK2_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_CHEK2_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_CHEK2_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_EARLYONSET_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_EARLYONSET_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_EARLYONSET_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_EARLYONSET_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_EARLYONSET_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_EARLYONSET_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_EARLYONSET_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_EARLYONSET_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_EARLYONSET_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[WES294_OAF_FAMILIAL_CRCS_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EST_KVOT_EARLYONSET_VS_BRC_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EST_KVOT_EARLYONSET_VS_EXAC_NFE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EST_KVOT_EARLYONSET_VS_SWEGEN_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EST_KVOT_EARLYONSET_VS_KG_EUR_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR3_6_14_18_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR3_6_14_18_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR3_6_14_18_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR3_6_14_18_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR3_6_14_18_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR3_6_14_18_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR3_6_14_18_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR3_6_14_18_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR3_6_14_18_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR5_19_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR5_19_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR5_19_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR5_19_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR5_19_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR5_19_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR5_19_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR5_19_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR5_19_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR9_WT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR9_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR9_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR9_OTH_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR9_NA_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR9_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[AXEQ_CHR9_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR9_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[AXEQ_CHR9_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[SWEDES_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[DANES_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[SWEGEN_HET_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[SWEGEN_HOM_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[SWEGEN_HEMI_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[SWEGEN_AC_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[SWEGEN_GT_COL_NAME] = FORMAT_COL_INT
FORMAT_COLS[SWEGEN_GF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[SWEGEN_AF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[SWEGEN_PF_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_ALL_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_AFR_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_AMR_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_EAS_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_FIN_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_NFE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_OTH_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[EXAC_SAS_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[CADD_RAW_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[SIFT_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[POLYPHEN2_HDIV_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[POLYPHEN2_HVAR_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[LRT_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[MUTATIONTASTER_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[MUTATIONASSESSOR_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[FATHMM_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[RADIALSVM_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[LR_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[VEST3_SCORE_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[CADD_PHRED_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[GERP_RS_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[PHYLOP46WAY_PLACENTAL_COL_NAME] = FORMAT_COL_FLOAT
FORMAT_COLS[PHYLOP100WAY_VERTEBRATE_COL_NAME] = FORMAT_COL_FLOAT
# set tags
cols_tags = {}
# set columns associated with AXEQ_CHR3_6_14_18_COLS_TAG
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG] = []
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_WT_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_HET_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_HOM_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_OTH_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_NA_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_GT_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_AF_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_PF_COL_NAME)
cols_tags[AXEQ_CHR3_6_14_18_COLS_TAG].append(AXEQ_CHR3_6_14_18_GF_COL_NAME)
# set columns associated with AXEQ_CHR5_19_COLS_TAG
cols_tags[AXEQ_CHR5_19_COLS_TAG] = []
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_WT_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_HET_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_HOM_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_OTH_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_NA_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_GT_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_AF_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_PF_COL_NAME)
cols_tags[AXEQ_CHR5_19_COLS_TAG].append(AXEQ_CHR5_19_GF_COL_NAME)
# set columns associated with AXEQ_CHR9_COLS_TAG
cols_tags[AXEQ_CHR9_COLS_TAG] = []
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_WT_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_HET_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_HOM_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_OTH_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_NA_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_GT_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_AF_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_PF_COL_NAME)
cols_tags[AXEQ_CHR9_COLS_TAG].append(AXEQ_CHR9_GF_COL_NAME)
# set columns associated with WES294_OAF_CRCS_COLS_TAG
cols_tags[WES294_OAF_CRCS_COLS_TAG] = []
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_WT_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_HET_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_HOM_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_OTH_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_NA_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_GT_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_AF_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_PF_COL_NAME)
cols_tags[WES294_OAF_CRCS_COLS_TAG].append(WES294_OAF_CRCS_GF_COL_NAME)
# set columns associated with WES294_OAF_BRC_COLS_TAG
cols_tags[WES294_OAF_BRC_COLS_TAG] = []
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_WT_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_HET_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_HOM_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_OTH_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_NA_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_GT_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_AF_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_PF_COL_NAME)
cols_tags[WES294_OAF_BRC_COLS_TAG].append(WES294_OAF_BRCS_GF_COL_NAME)
# set columns associated with WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG] = []
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_WT_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_HET_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_HOM_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_OTH_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_NA_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_GT_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_AF_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_PF_COL_NAME)
cols_tags[WES294_OAF_BRC_CRC_PROSTATE_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_GF_COL_NAME)
# set columns associated with WES294_OAF_CHEK2_COLS_TAG
cols_tags[WES294_OAF_CHEK2_COLS_TAG] = []
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_WT_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_HET_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_HOM_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_OTH_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_NA_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_GT_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_AF_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_PF_COL_NAME)
cols_tags[WES294_OAF_CHEK2_COLS_TAG].append(WES294_OAF_CHEK2_GF_COL_NAME)
# set columns associated with WES294_OAF_EARLYONSET_COLS_TAG
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG] = []
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_WT_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_HET_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_HOM_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_OTH_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_NA_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_GT_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_AF_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_BRC_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_EXAC_NFE_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_KG_EUR_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_SWEGEN_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_PF_COL_NAME)
cols_tags[WES294_OAF_EARLYONSET_COLS_TAG].append(WES294_OAF_EARLYONSET_GF_COL_NAME)
# set columns associated with WES294_OAF_FAMILIAL_CRCS_COLS_TAG
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG] = []
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_WT_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_HET_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_HOM_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_OTH_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_NA_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_GT_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_AF_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_PF_COL_NAME)
cols_tags[WES294_OAF_FAMILIAL_CRCS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_GF_COL_NAME)
# set columns associated with MUTSTAT_DETAILS_COLS_TAG
cols_tags[MUTSTAT_DETAILS_COLS_TAG] = []
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR3_6_14_18_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR5_19_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(AXEQ_CHR9_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CRCS_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRCS_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_BRC_CRC_PROSTATE_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_CHEK2_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_EARLYONSET_PF_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_WT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_HET_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_HOM_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_OTH_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_NA_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_GT_COL_NAME)
cols_tags[MUTSTAT_DETAILS_COLS_TAG].append(WES294_OAF_FAMILIAL_CRCS_PF_COL_NAME)
# set columns associated with LJB_SCORE_COLS_TAG
cols_tags[LJB_SCORE_COLS_TAG] = []
cols_tags[LJB_SCORE_COLS_TAG].append(CADD_RAW_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(SIFT_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(POLYPHEN2_HDIV_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(POLYPHEN2_HVAR_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(LRT_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(MUTATIONTASTER_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(MUTATIONASSESSOR_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(FATHMM_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(RADIALSVM_SCORE_COL_NAME)
cols_tags[LJB_SCORE_COLS_TAG].append(LR_SCORE_COL_NAME)
# set columns associated with EXAC_OTH_COLS_TAG
cols_tags[EXAC_OTH_COLS_TAG] = []
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_AFR_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_AMR_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_EAS_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_FIN_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_OTH_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_SAS_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_NONTCGA_AFR_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_NONTCGA_AMR_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_NONTCGA_EAS_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_NONTCGA_FIN_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_NONTCGA_OTH_COL_NAME)
cols_tags[EXAC_OTH_COLS_TAG].append(EXAC_NONTCGA_SAS_COL_NAME)
# set columns associated with GNOMAD_OTH_COLS_TAG
cols_tags[GNOMAD_OTH_COLS_TAG] = []
cols_tags[GNOMAD_OTH_COLS_TAG].append(GNOMAD_GENOME_AFR_COL_NAME)
cols_tags[GNOMAD_OTH_COLS_TAG].append(GNOMAD_GENOME_AMR_COL_NAME)
cols_tags[GNOMAD_OTH_COLS_TAG].append(GNOMAD_GENOME_ASJ_COL_NAME)
cols_tags[GNOMAD_OTH_COLS_TAG].append(GNOMAD_GENOME_EAS_COL_NAME)
cols_tags[GNOMAD_OTH_COLS_TAG].append(GNOMAD_GENOME_FIN_COL_NAME)
cols_tags[GNOMAD_OTH_COLS_TAG].append(GNOMAD_GENOME_OTH_COL_NAME)
# set columns associated with SWEGEN_OTH_COLS_TAG
cols_tags[SWEGEN_OTH_COLS_TAG] = []
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_HET_COL_NAME)
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_HOM_COL_NAME)
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_HEMI_COL_NAME)
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_AC_COL_NAME)
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_GT_COL_NAME)
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_GF_COL_NAME)
cols_tags[SWEGEN_OTH_COLS_TAG].append(SWEGEN_PF_COL_NAME)
# set columns associated with EXAC_CONSTRAINT_COLS_TAG
cols_tags[EXAC_CONSTRAINT_COLS_TAG] = []
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_EXP_SYN_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_N_SYN_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_SYN_Z_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_EXP_MIS_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_N_MIS_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_MIS_Z_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_EXP_LOF_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_N_LOF_COL_NAME)
cols_tags[EXAC_CONSTRAINT_COLS_TAG].append(EXAC03_CONSTRAINT_PLI_COL_NAME)
EXAC03_CONSTRAINT_COL_NAMES = cols_tags[EXAC_CONSTRAINT_COLS_TAG]
cols_tags[KVOT_COLS_TAG] = []
cols_tags[KVOT_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_BRC_COL_NAME)
cols_tags[KVOT_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_SWEGEN_COL_NAME)
cols_tags[KVOT_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_EXAC_NFE_COL_NAME)
cols_tags[KVOT_COLS_TAG].append(EST_KVOT_EARLYONSET_VS_KG_EUR_COL_NAME)
EST_KVOT_COLS = cols_tags[KVOT_COLS_TAG]
# set columns associated with UNKNOWN_COLS_TAG
cols_tags[UNKNOWN_COLS_TAG] = []
#cols_tags[UNKNOWN_COLS_TAG].append(GENOMICSUPERDUPS_COL_NAME)
#cols_tags[UNKNOWN_COLS_TAG].append(CG69_COL_NAME)
#cols_tags[UNKNOWN_COLS_TAG].append(NCI60_COL_NAME)
cols_tags[UNKNOWN_COLS_TAG].append(VEST3_SCORE_COL_NAME)
cols_tags[UNKNOWN_COLS_TAG].append(GERP_RS_COL_NAME)
cols_tags[UNKNOWN_COLS_TAG].append(PHYLOP46WAY_PLACENTAL_COL_NAME)
cols_tags[UNKNOWN_COLS_TAG].append(PHYLOP100WAY_VERTEBRATE_COL_NAME)
# assign all tags to all related columns
for cols_tag in cols_tags:
col_names = cols_tags[cols_tag]
for col_name in col_names:
ALL_MUTREP_ANNO_COLS[col_name].append(cols_tag)
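# Note: the loop above inverts the cols_tags mapping, so ALL_MUTREP_ANNO_COLS ends
# up mapping every annotation column name to the list of tags that reference it.
# Sketch with hypothetical names:
#     cols_tags["some_tag"] = ["col_a", "col_b"]
#     => ALL_MUTREP_ANNO_COLS["col_a"] contains "some_tag" (one entry per tag)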
TMP_HEADER_CORRECTIONS = []
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_WT_COL_NAME + ":" + "BRCS_WT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_HET_COL_NAME + ":" + "BRCS_HET")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_HOM_COL_NAME + ":" + "BRCS_HOM")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_OTH_COL_NAME + ":" + "BRCS_OTH")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_NA_COL_NAME + ":" + "BRCS_NA")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_GT_COL_NAME + ":" + "BRCS_GT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_PF_COL_NAME + ":" + "BRCS_PF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_AF_COL_NAME + ":" + "BRCS_AF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRCS_GF_COL_NAME + ":" + "BRCS_GF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_WT_COL_NAME + ":" + "CRCS_WT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_HET_COL_NAME + ":" + "CRCS_HET")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_HOM_COL_NAME + ":" + "CRCS_HOM")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_OTH_COL_NAME + ":" + "CRCS_OTH")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_NA_COL_NAME + ":" + "CRCS_NA")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_GT_COL_NAME + ":" + "CRCS_GT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_PF_COL_NAME + ":" + "CRCS_PF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_AF_COL_NAME + ":" + "CRCS_AF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CRCS_GF_COL_NAME + ":" + "CRCS_GF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_WT_COL_NAME + ":" + "ALL_EXOME_WT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_HET_COL_NAME + ":" + "ALL_EXOME_HET")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_HOM_COL_NAME + ":" + "ALL_EXOME_HOM")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_OTH_COL_NAME + ":" + "ALL_EXOME_OTH")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_NA_COL_NAME + ":" + "ALL_EXOME_NA")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_GT_COL_NAME + ":" + "ALL_EXOME_GT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_PF_COL_NAME + ":" + "ALL_EXOME_PF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_AF_COL_NAME + ":" + "ALL_EXOME_AF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_BRC_CRC_PROSTATE_GF_COL_NAME + ":" + "ALL_EXOME_GF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_WT_COL_NAME + ":" + "CHEK2_WT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_HET_COL_NAME + ":" + "CHEK2_HET")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_HOM_COL_NAME + ":" + "CHEK2_HOM")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_OTH_COL_NAME + ":" + "CHEK2_OTH")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_NA_COL_NAME + ":" + "CHEK2_NA")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_GT_COL_NAME + ":" + "CHEK2_GT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_PF_COL_NAME + ":" + "CHEK2_PF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_AF_COL_NAME + ":" + "CHEK2_AF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_CHEK2_GF_COL_NAME + ":" + "CHEK2_GF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_WT_COL_NAME + ":" + "EARLYONSET_WT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_HET_COL_NAME + ":" + "EARLYONSET_HET")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_HOM_COL_NAME + ":" + "EARLYONSET_HOM")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_OTH_COL_NAME + ":" + "EARLYONSET_OTH")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_NA_COL_NAME + ":" + "EARLYONSET_NA")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_GT_COL_NAME + ":" + "EARLYONSET_GT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_PF_COL_NAME + ":" + "EARLYONSET_PF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_AF_COL_NAME + ":" + "EARLYONSET_AF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_EARLYONSET_GF_COL_NAME + ":" + "EARLYONSET_GF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_WT_COL_NAME + ":" + "FAMILIAL_CRCS_WT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_HET_COL_NAME + ":" + "FAMILIAL_CRCS_HET")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_HOM_COL_NAME + ":" + "FAMILIAL_CRCS_HOM")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_OTH_COL_NAME + ":" + "FAMILIAL_CRCS_OTH")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_NA_COL_NAME + ":" + "FAMILIAL_CRCS_NA")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_GT_COL_NAME + ":" + "FAMILIAL_CRCS_GT")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_PF_COL_NAME + ":" + "FAMILIAL_CRCS_PF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_AF_COL_NAME + ":" + "FAMILIAL_CRCS_AF")
TMP_HEADER_CORRECTIONS.append(WES294_OAF_FAMILIAL_CRCS_GF_COL_NAME + ":" + "FAMILIAL_CRCS_GF")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_EXP_SYN_COL_NAME + ":" + "Exp Syn(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_N_SYN_COL_NAME + ":" + "Obs Syn(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_SYN_Z_COL_NAME + ":" + "Syn z(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_EXP_MIS_COL_NAME + ":" + "Exp Mis(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_N_MIS_COL_NAME + ":" + "Obs Mis(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_MIS_Z_COL_NAME + ":" + "Mis Z(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_EXP_LOF_COL_NAME + ":" + "Exp LoF(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_N_LOF_COL_NAME + ":" + "Obs LoF(ExAC)")
TMP_HEADER_CORRECTIONS.append(EXAC03_CONSTRAINT_PLI_COL_NAME + ":" + "PLI(ExAC)")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_ALL_COL_NAME + ":" + "gnomAD_ALL")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_AFR_COL_NAME + ":" + "gnomAD_AFR")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_AMR_COL_NAME + ":" + "gnomAD_AMR")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_ASJ_COL_NAME + ":" + "gnomAD_ASJ")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_EAS_COL_NAME + ":" + "gnomAD_EAS")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_FIN_COL_NAME + ":" + "gnomAD_FIN")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_NFE_COL_NAME + ":" + "gnomAD_NFE")
TMP_HEADER_CORRECTIONS.append(GNOMAD_GENOME_OTH_COL_NAME + ":" + "gnomAD_OTH")
DFLT_HEADER_CORRECTIONS = ",".join(TMP_HEADER_CORRECTIONS)
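# Note: DFLT_HEADER_CORRECTIONS is a single comma-separated string of
# "<column name>:<display name>" pairs, presumably consumed downstream to rename
# report headers, e.g. (with hypothetical constant values)
#     "OAF_BRCS_WT:BRCS_WT,OAF_BRCS_HET:BRCS_HET,..."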
| jessada/pyCMM | pycmm/settings.py | Python | gpl-2.0 | 70,066 |
import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
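        # e.g. a 4-point line yields ['M', 'L', 'L', 'L']: move to the first vertex,
        # then draw straight segments to each remaining vertex.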
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if len(path_transforms) == 0:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
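        # Each element list is cycled, so a single style entry (e.g. one facecolor)
        # is reused for every path while the offsets advance; iteration stops after
        # N items.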
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
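        # Each data point becomes one draw_path() call: the marker path is drawn in
        # "points" coordinates and offset to the data position.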
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
| azjps/bokeh | bokeh/core/compat/mplexporter/renderers/base.py | Python | bsd-3-clause | 14,360 |
"""Coordinate Point Extractor for KIT system."""
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from os import SEEK_CUR, path as op
import pickle
import re
from struct import unpack
import numpy as np
from .constants import KIT
from .._digitization import _read_dig_points
def read_mrk(fname):
r"""Marker Point Extraction in MEG space directly from sqd.
Parameters
----------
fname : str
Absolute path to Marker file.
File formats allowed: \*.sqd, \*.mrk, \*.txt, \*.pickled.
Returns
-------
mrk_points : ndarray, shape (n_points, 3)
Marker points in MEG space [m].
"""
ext = op.splitext(fname)[-1]
if ext in ('.sqd', '.mrk'):
with open(fname, 'rb', buffering=0) as fid:
fid.seek(192)
mrk_offset = unpack('i', fid.read(KIT.INT))[0]
fid.seek(mrk_offset)
# skips match_done, meg_to_mri and mri_to_meg
fid.seek(KIT.INT + (2 * KIT.DOUBLE * 4 ** 2), SEEK_CUR)
mrk_count = unpack('i', fid.read(KIT.INT))[0]
pts = []
for _ in range(mrk_count):
# skips mri/meg mrk_type and done, mri_marker
fid.seek(KIT.INT * 4 + (KIT.DOUBLE * 3), SEEK_CUR)
pts.append(np.fromfile(fid, dtype='d', count=3))
mrk_points = np.array(pts)
elif ext == '.txt':
mrk_points = _read_dig_points(fname, unit='m')
elif ext == '.pickled':
with open(fname, 'rb') as fid:
food = pickle.load(fid)
try:
mrk_points = food['mrk']
except Exception:
err = ("%r does not contain marker points." % fname)
raise ValueError(err)
else:
raise ValueError('KIT marker file must be *.sqd, *.mrk, *.txt or '
'*.pickled, *%s is not supported.' % ext)
# check output
mrk_points = np.asarray(mrk_points)
if mrk_points.shape != (5, 3):
err = ("%r is no marker file, shape is "
"%s" % (fname, mrk_points.shape))
raise ValueError(err)
return mrk_points
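# Example usage (hypothetical file path):
#     mrk = read_mrk('subject1_markers.sqd')   # -> (5, 3) array of positions in meters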
def read_sns(fname):
"""Sensor coordinate extraction in MEG space.
Parameters
----------
fname : str
Absolute path to sensor definition file.
Returns
-------
    locs : numpy.array, shape = (n_points, 5)
Sensor coil location.
"""
p = re.compile(r'\d,[A-Za-z]*,([\.\-0-9]+),' +
r'([\.\-0-9]+),([\.\-0-9]+),' +
r'([\.\-0-9]+),([\.\-0-9]+)')
with open(fname) as fid:
locs = np.array(p.findall(fid.read()), dtype=float)
return locs
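# The regular expression keeps five numeric fields per matched sensor line, so a
# line shaped like "1,NAME,1.0,2.0,3.0,0.1,0.2" (hypothetical format) yields one row.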
| Teekuningas/mne-python | mne/io/kit/coreg.py | Python | bsd-3-clause | 2,665 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import numpy as np
from scipy import odr
try:
from modefit.baseobjects import BaseModel, BaseFitter
except ImportError:
    raise ImportError("install modefit (pip install modefit) to be able to access ADRFitter")
from .adr import ADR
""" Tools to fit ADR parameters """
__all__ = ["ADRFitter"]
class ADRFitter( BaseFitter ):
""" """
PROPERTIES = ["adr","lbda",
"x", "y", "dx","dy"]
DERIVED_PROPERTIES = []
def __init__(self, adr, lbdaref=7000, base_parangle=0, unit=1):
""" """
self._properties['adr'] = adr
if lbdaref is not None:
self.adr.set(lbdaref=lbdaref)
self.set_model( ADRModel(self.adr, base_parangle=base_parangle, unit=unit))
def get_fitted_rotation(self):
""" dictionary containing the effective rotatio (paramgle=base_parangle+fitted_rotation) + details:
Returns
-------
dict:
{"parangle":self.fitvalues["parangle"]+self.model.base_parangle,
"base_parangle":self.model.base_parangle,
"fitted_addition_parangle":self.fitvalues["parangle"]}
"""
return {"parangle":self.fitvalues["parangle"]+self.model.base_parangle,
"base_parangle":self.model.base_parangle,
"fitted_addition_parangle":self.fitvalues["parangle"]
}
def set_data(self, lbda, x, y, dx, dy):
""" set the fundatemental properties of the object.
These that will be used to the fit """
self._properties['x'] = np.asarray(x)
self._properties['y'] = np.asarray(y)
self._properties['dx'] = np.asarray(dx)
self._properties['dy'] = np.asarray(dy)
self._properties['lbda'] = np.asarray(lbda)
indexref = np.argmin(np.abs(self.lbda-self.adr.lbdaref))
# - Initial Guess
self.model.set_reference(self.adr.lbdaref, self.x[indexref], self.y[indexref])
def _get_model_args_(self):
""" see model.get_loglikelihood"""
return self.x, self.y, self.lbda, self.dx, self.dy
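    # Typical usage (sketch; fit() comes from modefit's BaseFitter, whose exact
    # keyword names are assumed here):
    #     fitter = ADRFitter(ADR(), lbdaref=7000)
    #     fitter.set_data(lbda, x, y, dx, dy)
    #     fitter.fit(airmass_guess=1.2)   # hypothetical guess keyword
    #     fitter.show()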
# ---------- #
# PLOTTER #
# ---------- #
def show(self, ax=None, savefile=None, show=True, cmap=None,
show_colorbar=True, clabel="Wavelength [A]",
labelkey=None, guess_airmass=None,**kwargs):
""" Plotting method for the ADR fit.
Parameters
----------
Returns
-------
"""
import matplotlib.pyplot as mpl
from .tools import figout, insert_ax, colorbar
if ax is None:
fig = mpl.figure(figsize=[5.5,4])
ax = fig.add_axes([0.14,0.13,0.76,0.75])
ax.set_xlabel("spaxels x-axis", fontsize="medium")
ax.set_ylabel("spaxels y-axis", fontsize="medium")
else:
fig = ax.figure
# - Colors
if cmap is None:
cmap = mpl.cm.viridis
vmin, vmax = np.nanmin(self.lbda),np.nanmax(self.lbda)
colors = cmap( (self.lbda-vmin)/(vmax-vmin) )
# - data
scd = ax.scatter(self.x, self.y, facecolors=colors, edgecolors="None",
lw=1., label="data", **kwargs)
# - error
if self.dx is not None or self.dy is not None:
ax.errorscatter(self.x, self.y, dx=self.dx, dy=self.dy,
ecolor="0.7", zorder=0)
# - model
xmodel, ymodel = self.model.get_model(self.lbda)
scm = ax.scatter(xmodel, ymodel, edgecolors=colors, facecolors="None",
lw=2., label="model", **kwargs)
ax.legend(loc="best", frameon=True, ncol=2)
if labelkey is None:
textlabel = " ; ".join(["%s: %.2f"%(k,self.fitvalues[k]) for k in self.model.FREEPARAMETERS]) + "\n"+" %s: %.1f"%("lbdaref",self.model.adr.lbdaref) + " | unit: %.2f"%self.model._unit
else:
textlabel = " ; ".join(["%s: %.2f"%(k,self.fitvalues[k]) for k in labelkey])
if guess_airmass is not None:
textlabel += " (input airmass: %.2f)"%guess_airmass
ax.text(0.5,1.01, textlabel, fontsize="small", transform=ax.transAxes, va="bottom", ha="center")
if show_colorbar:
axc = ax.insert_ax("right", shrunk=0.89)
axc.colorbar(cmap, vmin=vmin, vmax=vmax,
label=clabel, fontsize="medium")
fig.figout(savefile=savefile, show=show)
return {"ax":ax, "fig":fig, "plot":[scd,scm]}
# ================= #
# Properties #
# ================= #
@property
def adr(self):
""" """
return self._properties['adr']
@property
def x(self):
""" x-positions """
return self._properties['x']
@property
def y(self):
""" y-positions """
return self._properties['y']
@property
def dx(self):
""" x-position errors """
return self._properties['dx']
@property
def dy(self):
""" y-position errors """
return self._properties['dy']
@property
def lbda(self):
""" wavelength [A] """
return self._properties['lbda']
@property
def npoints(self):
""" number of data point """
return len(self.x)
class ADRModel( BaseModel):
""" """
PROPERTIES = ["adr", "lbdaref"]
SIDE_PROPERTIES = ["base_parangle"] # could be moved to parameters
FREEPARAMETERS = ["parangle", "airmass", "xref", "yref"]
parangle_boundaries = [-180, 180]
def __init__(self, adr, xref=0, yref=0, base_parangle=0, unit=1.):
""" """
self.set_adr(adr)
self._side_properties['xref'] = xref
self._side_properties['yref'] = yref
self._side_properties['base_parangle'] = base_parangle
self._unit = unit
def setup(self, parameters):
""" """
self._properties["parameters"] = np.asarray(parameters)
for i,p in enumerate(self.FREEPARAMETERS):
if p == "unit":
self._unit = parameters[i]
elif p== "xref":
self._side_properties['xref'] = parameters[i]
elif p== "yref":
self._side_properties['yref'] = parameters[i]
elif p=="parangle":
self.adr.set(**{p:(parameters[i]+self.base_parangle)%360})
else:
self.adr.set(**{p:parameters[i]})
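        # The incoming parameter vector follows FREEPARAMETERS order, e.g.
        #     model.setup([45., 1.3, 0.5, -0.2])
        # sets parangle=45 (plus base_parangle, modulo 360), airmass=1.3,
        # xref=0.5 and yref=-0.2.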
def set_reference(self, lbdaref, xref=0, yref=0):
""" use 'lbdaref=None' to avoid changing lbdaref """
if lbdaref is not None:
self.adr.set(lbdaref=lbdaref)
self._side_properties['xref'] = xref
self._side_properties['yref'] = yref
def get_model(self, lbda):
""" return the model for the given data.
The modelization is based on legendre polynomes that expect x to be between -1 and 1.
This will create a reshaped copy of x to scale it between -1 and 1 but
if x is already as such, save time by setting reshapex to False
Returns
-------
array (size of x)
"""
return self.adr.refract(self.xref, self.yref, lbda, unit=self._unit)
def get_loglikelihood(self, x, y, lbda, dx=None, dy=None):
""" Measure the likelihood to find the data given the model's parameters.
Set pdf to True to have the array prior sum of the logs (array not in log=pdf).
In the Fitter define _get_model_args_() that should return the input of this
"""
if dx is None: dx = 1
if dy is None: dy = 1
xadr, yadr = self.get_model(lbda)
point_distance = ((x-xadr)/dx)**2 + ((y-yadr)/dy)**2
return -0.5 * np.sum(point_distance)
# ================= #
# Properties #
# ================= #
def set_adr(self, adr):
""" """
if self._properties['lbdaref'] is not None:
            adr.set(lbdaref=self._properties['lbdaref'])
self._properties['adr'] = adr
@property
def adr(self):
""" ADR object """
if self._properties['adr'] is None:
self.set_adr( ADR() )
return self._properties['adr']
@property
def lbdaref(self):
""" reference wavelength of the ADR """
return self._properties['lbdaref'] if self._properties['lbdaref'] is not None\
else self.adr.lbdaref
# - side properties
@property
def xref(self):
""" x-position at the reference wavelength (lbdaref)"""
return self._side_properties['xref']
@property
def yref(self):
""" y-position at the reference wavelength (lbdaref)"""
return self._side_properties['yref']
@property
def base_parangle(self):
""" the parangle is the additional rotation on top of this """
return self._side_properties["base_parangle"]
| MickaelRigault/pyifu | pyifu/adrfit.py | Python | apache-2.0 | 9,121 |
from __future__ import with_statement
from distutils.version import StrictVersion
from itertools import chain
from select import select
import os
import socket
import sys
import threading
import warnings
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
unquote)
from redis.exceptions import (
RedisError,
ConnectionError,
TimeoutError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
ReadOnlyError
)
from redis.utils import HIREDIS_AVAILABLE
if HIREDIS_AVAILABLE:
import hiredis
hiredis_version = StrictVersion(hiredis.__version__)
HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
hiredis_version >= StrictVersion('0.1.3')
HIREDIS_SUPPORTS_BYTE_BUFFER = \
hiredis_version >= StrictVersion('0.1.4')
if not HIREDIS_SUPPORTS_BYTE_BUFFER:
msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
"hiredis %s. Please consider upgrading." % hiredis.__version__)
warnings.warn(msg)
HIREDIS_USE_BYTE_BUFFER = True
# only use byte buffer if hiredis supports it and the Python version
# is >= 2.7
if not HIREDIS_SUPPORTS_BYTE_BUFFER or (
sys.version_info[0] == 2 and sys.version_info[1] < 7):
HIREDIS_USE_BYTE_BUFFER = False
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
"""
Literal strings in Redis commands, such as the command names and any
hard-coded arguments are wrapped in this class so we know not to apply
    any encoding rules to them.
"""
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': ResponseError,
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
'READONLY': ReadOnlyError,
}
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
return self.EXCEPTION_CLASSES[error_code](response)
return ResponseError(response)
class SocketBuffer(object):
def __init__(self, socket, socket_read_size):
self._sock = socket
self.socket_read_size = socket_read_size
self._buffer = BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def _read_from_socket(self, length=None):
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
try:
while True:
data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shut down the socket
if isinstance(data, bytes) and len(data) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
break
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
def read(self, length):
length = length + 2 # make sure to read the \r\n terminator
# make sure we've read enough data from the socket
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
# there's more data in the socket that we need
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
self.purge()
self._buffer.close()
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
"Plain Python parsing class"
encoding = None
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock, self.socket_read_size)
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
"Called when the socket disconnects"
if self._sock is not None:
self._sock.close()
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoding = None
def can_read(self):
return self._buffer and bool(self._buffer.length)
def read_response(self):
response = self._buffer.readline()
if not response:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = byte_to_chr(response[0]), response[1:]
if byte not in ('-', '+', ':', '$', '*'):
raise InvalidResponse("Protocol Error: %s, %s" %
(str(byte), str(response)))
# server returned an error
if byte == '-':
response = nativestr(response)
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == '+':
pass
# int value
elif byte == ':':
response = long(response)
# bulk response
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
# multi-bulk response
elif byte == '*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for i in xrange(length)]
if isinstance(response, bytes) and self.encoding:
response = response.decode(self.encoding)
return response
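# For reference, a minimal sketch of how raw RESP replies map to what
# read_response() returns (values are bytes unless decode_responses is set;
# the byte strings below are standard RESP examples):
#
#   "+OK\r\n"                          -> 'OK'            (status reply)
#   ":1000\r\n"                        -> 1000            (integer reply)
#   "$3\r\nfoo\r\n"                    -> 'foo'           (bulk reply)
#   "$-1\r\n"                          -> None            (null bulk reply)
#   "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n" -> ['foo', 'bar']  (multi-bulk reply)
#   "-ERR unknown command\r\n"         -> ResponseError instance (returned, not raised)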
class HiredisParser(BaseParser):
"Parser class for connections using Hiredis"
def __init__(self, socket_read_size):
if not HIREDIS_AVAILABLE:
raise RedisError("Hiredis is not installed")
self.socket_read_size = socket_read_size
if HIREDIS_USE_BYTE_BUFFER:
self._buffer = bytearray(socket_read_size)
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
self._sock = connection._sock
kwargs = {
'protocolError': InvalidResponse,
'replyError': self.parse_error,
}
# hiredis < 0.1.3 doesn't support functions that create exceptions
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
kwargs['replyError'] = ResponseError
if connection.decode_responses:
kwargs['encoding'] = connection.encoding
self._reader = hiredis.Reader(**kwargs)
self._next_response = False
def on_disconnect(self):
self._sock = None
self._reader = None
self._next_response = False
def can_read(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
if self._next_response is False:
self._next_response = self._reader.gets()
return self._next_response is not False
def read_response(self):
if not self._reader:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
# _next_response might be cached from a can_read() call
if self._next_response is not False:
response = self._next_response
self._next_response = False
return response
response = self._reader.gets()
socket_read_size = self.socket_read_size
while response is False:
try:
if HIREDIS_USE_BYTE_BUFFER:
bufflen = self._sock.recv_into(self._buffer)
if bufflen == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
else:
buffer = self._sock.recv(socket_read_size)
                    # an empty string indicates the server shut down the socket
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
if HIREDIS_USE_BYTE_BUFFER:
self._reader.feed(self._buffer, 0, bufflen)
else:
self._reader.feed(buffer)
# proactively, but not conclusively, check if more data is in the
# buffer. if the data received doesn't end with \r\n, there's more.
if HIREDIS_USE_BYTE_BUFFER:
if bufflen > 2 and \
self._buffer[bufflen - 2:bufflen] != SYM_CRLF:
continue
else:
if not buffer.endswith(SYM_CRLF):
continue
response = self._reader.gets()
# if an older version of hiredis is installed, we need to attempt
# to convert ResponseErrors to their appropriate types.
if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
if isinstance(response, ResponseError):
response = self.parse_error(response.args[0])
elif isinstance(response, list) and response and \
isinstance(response[0], ResponseError):
response[0] = self.parse_error(response[0].args[0])
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
# happened
if isinstance(response, ConnectionError):
raise response
elif isinstance(response, list) and response and \
isinstance(response[0], ConnectionError):
raise response[0]
return response
if HIREDIS_AVAILABLE:
DefaultParser = HiredisParser
else:
DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Redis server"
description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, host='localhost', port=6379, db=0, password=None,
socket_timeout=None, socket_connect_timeout=None,
socket_keepalive=False, socket_keepalive_options=None,
retry_on_timeout=False, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'host': self.host,
'port': self.port,
'db': self.db,
}
self._connect_callbacks = []
def __repr__(self):
return self.description_format % self._description_args
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
callback(self)
def _connect(self):
"Create a TCP socket connection"
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if a password is specified, authenticate
if self.password:
self.send_command('AUTH', self.password)
if nativestr(self.read_response()) != 'OK':
raise AuthenticationError('Invalid Password')
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Redis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
try:
if isinstance(command, str):
command = [command]
for item in command:
self._sock.sendall(item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except socket.error:
e = sys.exc_info()[1]
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." %
(_errno, errmsg))
except:
self.disconnect()
raise
def send_command(self, *args):
"Pack and send a command to the Redis server"
self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0):
"Poll the socket to see if there's data that can be read."
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
"Return a bytestring representation of the value"
if isinstance(value, Token):
return b(value.value)
elif isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = b(str(value))
elif isinstance(value, float):
value = b(repr(value))
elif not isinstance(value, basestring):
value = unicode(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def pack_command(self, *args):
"Pack a series of arguments into the Redis protocol"
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
        # manually. All of these arguments get wrapped in the Token class
# to prevent them from being encoded.
command = args[0]
if ' ' in command:
args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
else:
args = (Token(command),) + args[1:]
buff = SYM_EMPTY.join(
(SYM_STAR, b(str(len(args))), SYM_CRLF))
for arg in imap(self.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values
if len(buff) > 6000 or len(arg) > 6000:
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
SYM_CRLF, arg, SYM_CRLF))
output.append(buff)
return output
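    # For example, pack_command('SET', 'foo', 'bar') yields the RESP frame below
    # (returned as a single-element list for small commands like this one):
    #
    #   *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n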
def pack_commands(self, commands):
"Pack multiple commands into the Redis protocol"
output = []
pieces = []
buffer_length = 0
for cmd in commands:
for chunk in self.pack_command(*cmd):
pieces.append(chunk)
buffer_length += len(chunk)
if buffer_length > 6000:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class SSLConnection(Connection):
description_format = "SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, **kwargs):
if not ssl_available:
raise RedisError("Python wasn't built with SSL support")
super(SSLConnection, self).__init__(**kwargs)
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
if ssl_cert_reqs is None:
ssl_cert_reqs = ssl.CERT_NONE
elif isinstance(ssl_cert_reqs, basestring):
CERT_REQS = {
'none': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" %
ssl_cert_reqs)
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
def _connect(self):
"Wrap the socket with SSL support"
sock = super(SSLConnection, self)._connect()
sock = ssl.wrap_socket(sock,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
return sock
class UnixDomainSocketConnection(Connection):
description_format = "UnixDomainSocketConnection<path=%(path)s,db=%(db)s>"
def __init__(self, path='', db=0, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
retry_on_timeout=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.path = path
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'path': self.path,
'db': self.db,
}
self._connect_callbacks = []
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
class ConnectionPool(object):
"Generic connection pool"
@classmethod
def from_url(cls, url, db=None, decode_components=False, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
redis:// creates a normal TCP socket connection
rediss:// creates a SSL wrapped TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
url_string = url
url = urlparse(url)
qs = ''
        # in python2.6, custom URL schemes don't recognize querystring values;
# they're left as part of the url.path.
if '?' in url.path and not url.query:
# chop the querystring including the ? off the end of the url
# and reparse it.
qs = url.path.split('?', 1)[1]
url = urlparse(url_string[:-(len(qs) + 1)])
else:
qs = url.query
url_options = {}
for name, value in iteritems(parse_qs(qs)):
if value and len(value) > 0:
url_options[name] = value[0]
if decode_components:
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
password = url.password
path = url.path
hostname = url.hostname
# We only support redis:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
else:
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
# backwards compatability
if 'charset' in kwargs:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
kwargs['encoding'] = kwargs.pop('charset')
if 'errors' in kwargs:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
kwargs['encoding_errors'] = kwargs.pop('errors')
return cls(**kwargs)
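    # Illustrative from_url() calls (hosts, ports, passwords and paths are placeholders):
    #
    #   pool = ConnectionPool.from_url('redis://:secret@localhost:6379/0')
    #   pool = ConnectionPool.from_url('rediss://localhost:6379/0')          # SSL
    #   pool = ConnectionPool.from_url('unix://@/path/to/redis.sock?db=2')   # unix socket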
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
"""
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
        By default, TCP connections are created unless connection_class is specified.
Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
"""
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, (int, long)) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
self.reset()
def __repr__(self):
return "%s<%s>" % (
type(self).__name__,
self.connection_class.description_format % self.connection_kwargs,
)
def reset(self):
self.pid = os.getpid()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
self._check_lock = threading.Lock()
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
# another thread already did the work while we waited
# on the lock.
return
self.disconnect()
self.reset()
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
if connection.pid != self.pid:
return
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"Disconnects all connections in the pool"
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from redis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
    connection from the pool when all of the connections are in use, rather than
raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20,
connection_class=Connection, queue_class=LifoQueue,
**connection_kwargs):
self.queue_class = queue_class
self.timeout = timeout
super(BlockingConnectionPool, self).__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
        If the connection returned is ``None``, a new connection is created.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True, timeout=self.timeout)
except Empty:
# Note that this is not caught by the redis client and will be
# raised unless handled by application code. If you want never to
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
if connection.pid != self.pid:
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
def disconnect(self):
"Disconnects all connections in the pool."
for connection in self._connections:
connection.disconnect()
| sigma-random/redis-py | redis/connection.py | Python | mit | 37,068 |
import unittest
from matrix import Matrix
class MatrixTest(unittest.TestCase):
def test_extract_a_row(self):
matrix = Matrix("1 2\n10 20")
self.assertEqual([1, 2], matrix.rows[0])
def test_extract_same_row_again(self):
matrix = Matrix("9 7\n8 6")
self.assertEqual([9, 7], matrix.rows[0])
def test_extract_other_row(self):
matrix = Matrix("9 8 7\n19 18 17")
self.assertEqual([19, 18, 17], matrix.rows[1])
def test_extract_other_row_again(self):
matrix = Matrix("1 4 9\n16 25 36")
self.assertEqual([16, 25, 36], matrix.rows[1])
def test_extract_a_column(self):
matrix = Matrix("1 2 3\n4 5 6\n7 8 9\n8 7 6")
self.assertEqual([1, 4, 7, 8], matrix.columns[0])
def test_extract_another_column(self):
matrix = Matrix("89 1903 3\n18 3 1\n9 4 800")
self.assertEqual([1903, 3, 4], matrix.columns[1])
if __name__ == '__main__':
unittest.main(verbosity=2)
| SteffenBauer/Katas | Exercism.io/python/matrix/matrix_test.py | Python | mit | 978 |
# Django imports
from django.views.generic import ListView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
# Local Django imports
from user.decorators import is_patient
from prescription.models import PatientPrescription
class ListPatientPrescription(ListView):
'''
A list of all patient prescriptions.
'''
template_name = 'list_patient_prescription.html'
context_object_name = 'list_patient_prescription'
model = PatientPrescription
paginate_by = 20
ordering = ['-date_created']
@method_decorator(login_required)
@method_decorator(is_patient)
def dispatch(self, *args, **kwargs):
return super(ListPatientPrescription, self).dispatch(*args, **kwargs)
def get_queryset(self):
return self.model.objects.filter(patient=self.request.user)
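# A hypothetical URLconf entry wiring up this view (the pattern, import path and
# URL name are assumptions, not taken from this project):
#
#   from django.conf.urls import url
#   from prescription.views import ListPatientPrescription
#
#   urlpatterns = [
#       url(r'^prescriptions/$',
#           ListPatientPrescription.as_view(),
#           name='list_patient_prescription'),
#   ]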
| fga-gpp-mds/2017.2-Receituario-Medico | medical_prescription/prescription/views/listprescriptionpatient.py | Python | mit | 871 |
# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2015
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitly stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding modifications, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
class sexuGenreScreen(MPScreen):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel
}, -1)
self['title'] = Label("Sexu.com")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.suchString = ''
self.genreliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.keyLocked = True
url = "http://sexu.com/"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.genreData).addErrback(self.dataError)
def genreData(self, data):
parse = re.search('listTags4">(.*?)allTags-->', data, re.S)
Cats = re.findall('href="(.*?)".*?</i>(.*?)</a>', parse.group(1), re.S)
if Cats:
for (Url, Title) in Cats:
Url = "http://sexu.com" + Url + '/'
self.genreliste.append((Title.title(), Url))
self.genreliste.sort()
self.genreliste.insert(0, ("Most Popular", "http://sexu.com/top/all/"))
self.genreliste.insert(0, ("Newest", "http://sexu.com/top/recent/"))
self.genreliste.insert(0, ("--- Search ---", "callSuchen"))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
self.keyLocked = False
def keyOK(self):
if self.keyLocked:
return
Name = self['liste'].getCurrent()[0][0]
if Name == "--- Search ---":
self.suchen()
else:
Link = self['liste'].getCurrent()[0][1]
self.session.open(sexuFilmScreen, Link, Name)
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback.replace(' ', '+')
Name = "--- Search ---"
Link = '%s' % (self.suchString)
self.session.open(sexuFilmScreen, Link, Name)
class sexuFilmScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label("Sexu.com")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.lastpage = 1
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
if re.match(".*?Search", self.Name):
url = "http://sexu.com/search?q=%s&p=%s" % (self.Link, str(self.page))
else:
url = "%s%s" % (self.Link, str(self.page))
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
self.getLastPage(data, 'class="pagination">(.*?)</div>', '.*[>|=|\/](\d+)[<|"]')
Movies = re.findall('class="thumb-item".*?href="(.*?)"\stitle="(.*?)".*?img\sclass.*?data-original="(.*?)".*?timeVideo">(.*?)</span', data, re.S)
if Movies:
for (Url, Title, Image, Runtime) in Movies:
Url = "http://sexu.com" + Url
Title = Title.strip('.').replace("\\'","'")
self.filmliste.append((decodeHtml(Title), Url, Image, Runtime))
if len(self.filmliste) == 0:
self.filmliste.append((_('No movies found!'), None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
self.showInfos()
def showInfos(self):
Url = self['liste'].getCurrent()[0][1]
if Url == None:
return
title = self['liste'].getCurrent()[0][0]
pic = self['liste'].getCurrent()[0][2]
runtime = self['liste'].getCurrent()[0][3]
self['name'].setText(title)
self['handlung'].setText("Runtime: %s" % runtime)
CoverHelper(self['coverArt']).getCover(pic)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][1]
if Link == None:
return
getPage(Link, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getVideoData).addErrback(self.dataError)
def getVideoData(self, data):
videoData = re.findall("data:\s\{v_id:\s(.*?),\sbitrate:\s'(.*?)'\}", data, re.S)
if videoData:
Link = "http://sexu.com/v.php?v_id=%s&bitrate=%s" % (videoData[0][0],videoData[0][1])
getPage(Link, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getVideoPage).addErrback(self.dataError)
def getVideoPage(self, data):
videoPage = re.findall('"url":"(.*?)"', data, re.S)
if videoPage:
url = videoPage[-1].replace('\/','/')
title = self['liste'].getCurrent()[0][0]
            self.session.open(SimplePlayer, [(title, url)], showPlaylist=False, ltype='sexu')
 | n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/additions/porn/sexu.py | Python | gpl-2.0 | 7,787
#!/usr/bin/env python
import array
import binascii
import glob
import itertools
import json
import logging
import os
import serial
import signal
import stm32_crc
import struct
import threading
import time
import traceback
import uuid
import zipfile
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict
from LightBluePebble import LightBluePebble
from struct import pack, unpack
log = logging.getLogger()
logging.basicConfig(format='[%(levelname)-8s] %(message)s')
log.setLevel(logging.DEBUG)
DEFAULT_PEBBLE_ID = None #Triggers autodetection on unix-like systems
DEBUG_PROTOCOL = False
class PebbleBundle(object):
MANIFEST_FILENAME = 'manifest.json'
STRUCT_DEFINITION = [
'8s', # header
'2B', # struct version
'2B', # sdk version
'2B', # app version
'H', # size
'I', # offset
'I', # crc
'32s', # app name
'32s', # company name
'I', # icon resource id
'I', # symbol table address
'I', # flags
'I', # relocation list start
'I', # num relocation list entries
'16s' # uuid
]
def __init__(self, bundle_path):
bundle_abs_path = os.path.abspath(bundle_path)
if not os.path.exists(bundle_abs_path):
raise Exception("Bundle does not exist: " + bundle_path)
self.zip = zipfile.ZipFile(bundle_abs_path)
self.path = bundle_abs_path
self.manifest = None
self.header = None
self.app_metadata_struct = struct.Struct(''.join(self.STRUCT_DEFINITION))
self.app_metadata_length_bytes = self.app_metadata_struct.size
def get_manifest(self):
if (self.manifest):
return self.manifest
if self.MANIFEST_FILENAME not in self.zip.namelist():
raise Exception("Could not find {}; are you sure this is a PebbleBundle?".format(self.MANIFEST_FILENAME))
self.manifest = json.loads(self.zip.read(self.MANIFEST_FILENAME))
return self.manifest
def get_app_metadata(self):
if (self.header):
return self.header
app_manifest = self.get_manifest()['application']
app_bin = self.zip.open(app_manifest['name']).read()
header = app_bin[0:self.app_metadata_length_bytes]
values = self.app_metadata_struct.unpack(header)
self.header = {
'sentinel' : values[0],
'struct_version_major' : values[1],
'struct_version_minor' : values[2],
'sdk_version_major' : values[3],
'sdk_version_minor' : values[4],
'app_version_major' : values[5],
'app_version_minor' : values[6],
'app_size' : values[7],
'offset' : values[8],
'crc' : values[9],
'app_name' : values[10].rstrip('\0'),
'company_name' : values[11].rstrip('\0'),
'icon_resource_id' : values[12],
'symbol_table_addr' : values[13],
'flags' : values[14],
'relocation_list_index' : values[15],
'num_relocation_entries' : values[16],
'uuid' : uuid.UUID(bytes=values[17])
}
return self.header
def close(self):
self.zip.close()
def is_firmware_bundle(self):
return 'firmware' in self.get_manifest()
def is_app_bundle(self):
return 'application' in self.get_manifest()
def has_resources(self):
return 'resources' in self.get_manifest()
def get_firmware_info(self):
if not self.is_firmware_bundle():
return None
return self.get_manifest()['firmware']
def get_application_info(self):
if not self.is_app_bundle():
return None
return self.get_manifest()['application']
def get_resources_info(self):
if not self.has_resources():
return None
return self.get_manifest()['resources']
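# Illustrative use of PebbleBundle (the .pbw path is a placeholder):
#
#   bundle = PebbleBundle("my_watchapp.pbw")
#   if bundle.is_app_bundle():
#       meta = bundle.get_app_metadata()
#       print meta['app_name'], meta['uuid'], "has resources:", bundle.has_resources()
#   bundle.close()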
class EndpointSync():
timeout = 10
def __init__(self, pebble, endpoint):
pebble.register_endpoint(endpoint, self.callback)
self.marker = threading.Event()
def callback(self, *args):
self.data = args
self.marker.set()
def get_data(self):
try:
self.marker.wait(timeout=self.timeout)
return self.data[1]
except:
return False
class PebbleError(Exception):
def __init__(self, id, message):
self._id = id
self._message = message
def __str__(self):
return "%s (ID:%s)" % (self._message, self._id)
class Pebble(object):
"""
A connection to a Pebble watch; data and commands may be sent
to the watch through an instance of this class.
"""
endpoints = {
"TIME": 11,
"VERSION": 16,
"PHONE_VERSION": 17,
"SYSTEM_MESSAGE": 18,
"MUSIC_CONTROL": 32,
"PHONE_CONTROL": 33,
"APPLICATION_MESSAGE": 48,
"LAUNCHER": 49,
"LOGS": 2000,
"PING": 2001,
"LOG_DUMP": 2002,
"RESET": 2003,
"APP": 2004,
"APP_LOGS": 2006,
"NOTIFICATION": 3000,
"RESOURCE": 4000,
"APP_MANAGER": 6000,
"PUTBYTES": 48879
}
log_levels = {
0: "*",
1: "E",
50: "W",
100: "I",
200: "D",
250: "V"
}
bridges = {}
@staticmethod
def AutodetectDevice():
if os.name != "posix": #i.e. Windows
raise NotImplementedError("Autodetection is only implemented on UNIX-like systems.")
pebbles = glob.glob("/dev/tty.Pebble????-SerialPortSe")
if len(pebbles) == 0:
raise PebbleError(None, "Autodetection could not find any Pebble devices")
elif len(pebbles) > 1:
log.warn("Autodetect found %d Pebbles; using most recent" % len(pebbles))
#NOTE: Not entirely sure if this is the correct approach
pebbles.sort(key=lambda x: os.stat(x).st_mtime, reverse=True)
id = pebbles[0][15:19]
log.info("Autodetect found a Pebble with ID %s" % id)
return id
def __init__(self, id = None, using_lightblue = True, pair_first = False, locationSource = None):
if id is None and not using_lightblue:
id = Pebble.AutodetectDevice()
self.id = id
self.using_lightblue = using_lightblue
self._locationSource = locationSource
self._alive = True
self._endpoint_handlers = {}
self._internal_endpoint_handlers = {
self.endpoints["TIME"]: self._get_time_response,
self.endpoints["VERSION"]: self._version_response,
self.endpoints["PHONE_VERSION"]: self._phone_version_response,
self.endpoints["SYSTEM_MESSAGE"]: self._system_message_response,
self.endpoints["MUSIC_CONTROL"]: self._music_control_response,
self.endpoints["APPLICATION_MESSAGE"]: self._application_message_response,
self.endpoints["LAUNCHER"]: self._application_message_response,
self.endpoints["LOGS"]: self._log_response,
self.endpoints["PING"]: self._ping_response,
self.endpoints["APP_LOGS"]: self._app_log_response,
self.endpoints["APP_MANAGER"]: self._appbank_status_response
}
try:
if using_lightblue:
self._ser = LightBluePebble(self.id, pair_first)
else:
devicefile = "/dev/tty.Pebble"+id+"-SerialPortSe"
log.debug("Attempting to open %s as Pebble device %s" % (devicefile, id))
self._ser = serial.Serial(devicefile, 115200, timeout=1)
log.debug("Initializing reader thread")
self._read_thread = threading.Thread(target=self._reader)
self._read_thread.setDaemon(True)
self._read_thread.start()
log.debug("Reader thread loaded on tid %s" % self._read_thread.name)
except PebbleError:
raise PebbleError(id, "Failed to connect to Pebble")
except:
raise
def _exit_signal_handler(self, signum, frame):
self.disconnect()
time.sleep(1)
os._exit(0)
def __del__(self):
try:
self._ser.close()
except:
pass
def _reader(self):
try:
while self._alive:
endpoint, resp = self._recv_message()
if resp == None:
continue
if endpoint in self._internal_endpoint_handlers:
resp = self._internal_endpoint_handlers[endpoint](endpoint, resp)
if endpoint in self._endpoint_handlers and resp:
self._endpoint_handlers[endpoint](endpoint, resp)
except:
#traceback.print_exc()
raise PebbleError(self.id, "Lost connection to Pebble")
self._alive = False
def _pack_message_data(self, lead, parts):
pascal = map(lambda x: x[:255], parts)
d = pack("b" + reduce(lambda x,y: str(x) + "p" + str(y), map(lambda x: len(x) + 1, pascal)) + "p", lead, *pascal)
return d
def _build_message(self, endpoint, data):
return pack("!HH", len(data), endpoint)+data
def _send_message(self, endpoint, data, callback = None):
if endpoint not in self.endpoints:
raise PebbleError(self.id, "Invalid endpoint specified")
msg = self._build_message(self.endpoints[endpoint], data)
if DEBUG_PROTOCOL:
log.debug('>>> ' + msg.encode('hex'))
self._ser.write(msg)
def _recv_message(self):
if self.using_lightblue:
try:
endpoint, resp, data = self._ser.read()
if resp is None:
return None, None
except TypeError:
# the lightblue process has likely shutdown and cannot be read from
self.alive = False
return None, None
else:
data = self._ser.read(4)
if len(data) == 0:
return (None, None)
elif len(data) < 4:
raise PebbleError(self.id, "Malformed response with length "+str(len(data)))
size, endpoint = unpack("!HH", data)
resp = self._ser.read(size)
if DEBUG_PROTOCOL:
log.debug("Got message for endpoint %s of length %d" % (endpoint, len(resp)))
log.debug('<<< ' + (data + resp).encode('hex'))
return (endpoint, resp)
def register_endpoint(self, endpoint_name, func):
if endpoint_name not in self.endpoints:
raise PebbleError(self.id, "Invalid endpoint specified")
endpoint = self.endpoints[endpoint_name]
self._endpoint_handlers[endpoint] = func
def notification_sms(self, sender, body):
"""Send a 'SMS Notification' to the displayed on the watch."""
ts = str(int(time.time())*1000)
parts = [sender, body, ts]
self._send_message("NOTIFICATION", self._pack_message_data(1, parts))
def notification_email(self, sender, subject, body):
"""Send an 'Email Notification' to the displayed on the watch."""
ts = str(int(time.time())*1000)
parts = [sender, body, ts, subject]
self._send_message("NOTIFICATION", self._pack_message_data(0, parts))
def set_nowplaying_metadata(self, track, album, artist):
"""Update the song metadata displayed in Pebble's music app."""
parts = [artist[:30], album[:30], track[:30]]
self._send_message("MUSIC_CONTROL", self._pack_message_data(16, parts))
def get_versions(self, async = False):
"""
Retrieve a summary of version information for various software
(firmware, bootloader, etc) running on the watch.
"""
self._send_message("VERSION", "\x00")
if not async:
return EndpointSync(self, "VERSION").get_data()
def get_appbank_status(self, async = False):
"""
Retrieve a list of all installed watch-apps.
This is particularly useful when trying to locate a
free app-bank to use when installing a new watch-app.
"""
self._send_message("APP_MANAGER", "\x01")
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def remove_app(self, appid, index, async=False):
"""Remove an installed application from the target app-bank."""
data = pack("!bII", 2, appid, index)
self._send_message("APP_MANAGER", data)
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def remove_app_by_uuid(self, uuid_to_remove, uuid_is_string=True, async = False):
"""Remove an installed application by UUID."""
if uuid_is_string:
uuid_to_remove = uuid_to_remove.decode('hex')
elif type(uuid_to_remove) is uuid.UUID:
uuid_to_remove = uuid_to_remove.bytes
# else, assume it's a byte array
data = pack("b", 0x02) + str(uuid_to_remove)
self._send_message("APP_MANAGER", data)
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def get_time(self, async = False):
"""Retrieve the time from the Pebble's RTC."""
self._send_message("TIME", "\x00")
if not async:
return EndpointSync(self, "TIME").get_data()
def set_time(self, timestamp):
"""Set the time stored in the target Pebble's RTC."""
data = pack("!bL", 2, timestamp)
self._send_message("TIME", data)
def reinstall_app(self, pbz_path, launch_on_install=True):
"""
        A convenience method to uninstall and then reinstall an app.
        If removal by UUID fails, the app name from the bundle metadata will be used.
"""
def endpoint_check(result, pbz_path):
if result == 'app removed':
return True
else:
if DEBUG_PROTOCOL:
log.warn("Failed to remove supplied app, app manager message was: " + result)
return False
# get the bundle's metadata to identify the app being replaced
bundle = PebbleBundle(pbz_path)
if not bundle.is_app_bundle():
raise PebbleError(self.id, "This is not an app bundle")
app_metadata = bundle.get_app_metadata()
# attempt to remove an app by its UUID
result_uuid = self.remove_app_by_uuid(app_metadata['uuid'].bytes, uuid_is_string=False)
if endpoint_check(result_uuid, pbz_path):
return self.install_app(pbz_path, launch_on_install)
if DEBUG_PROTOCOL:
log.warn("UUID removal failure, attempting to remove existing app by app name")
# attempt to remove an app by its name
apps = self.get_appbank_status()
for app in apps["apps"]:
if app["name"] == app_metadata['app_name']:
result_name = self.remove_app(app["id"], app["index"])
if endpoint_check(result_name, pbz_path):
return self.install_app(pbz_path, launch_on_install)
log.warn("Unable to locate previous instance of supplied application")
def reinstall_app_by_uuid(self, uuid, pbz_path):
"""
A convenience method to uninstall and install an app by UUID.
Must supply app UUID from source. ex: '54D3008F0E46462C995C0D0B4E01148C'
"""
self.remove_app_by_uuid(uuid)
self.install_app(pbz_path)
def install_app(self, pbz_path, launch_on_install=True):
"""
Install an app bundle (*.pbw) to the target Pebble.
This will pick the first free app-bank available.
"""
bundle = PebbleBundle(pbz_path)
if not bundle.is_app_bundle():
raise PebbleError(self.id, "This is not an app bundle")
app_metadata = bundle.get_app_metadata()
binary = bundle.zip.read(bundle.get_application_info()['name'])
if bundle.has_resources():
resources = bundle.zip.read(bundle.get_resources_info()['name'])
else:
resources = None
apps = self.get_appbank_status()
if not apps:
raise PebbleError(self.id, "could not obtain app list; try again")
first_free = 1
for app in apps["apps"]:
if app["index"] == first_free:
first_free += 1
if first_free == apps["banks"]:
raise PebbleError(self.id, "All %d app banks are full, you'll need to delete an existing app or watchface to make more space." % apps["banks"])
log.debug("Attempting to add app to bank %d of %d" % (first_free, apps["banks"]))
client = PutBytesClient(self, first_free, "BINARY", binary)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send application binary %s/pebble-app.bin" % pbz_path)
if resources:
client = PutBytesClient(self, first_free, "RESOURCES", resources)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send application resources %s/app_resources.pbpack" % pbz_path)
time.sleep(2)
self._add_app(first_free)
time.sleep(2)
if launch_on_install:
self.launcher_message(app_metadata['uuid'].bytes, "RUNNING", uuid_is_string=False)
def install_firmware(self, pbz_path, recovery=False):
"""Install a firmware bundle to the target watch."""
resources = None
pbz = zipfile.ZipFile(pbz_path)
binary = pbz.read("tintin_fw.bin")
# Calculate CRC in advance to avoid timeout on slow hardware
bincrc = stm32_crc.crc32(binary)
if not recovery:
resources = pbz.read("system_resources.pbpack")
if resources:
rescrc = stm32_crc.crc32(resources)
self.system_message("FIRMWARE_START")
time.sleep(2)
if resources:
client = PutBytesClient(self, 0, "SYS_RESOURCES", resources, rescrc)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
time.sleep(0.2)
if client._error:
raise PebbleError(self.id, "Failed to send firmware resources %s/system_resources.pbpack" % pbz_path)
client = PutBytesClient(self, 0, "RECOVERY" if recovery else "FIRMWARE", binary, bincrc)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
time.sleep(0.2)
if client._error:
raise PebbleError(self.id, "Failed to send firmware binary %s/tintin_fw.bin" % pbz_path)
self.system_message("FIRMWARE_COMPLETE")
def launcher_message(self, app_uuid, key_value, uuid_is_string = True, async = False):
""" send an appication message to launch or kill a specified application"""
launcher_keys = {
"RUN_STATE_KEY": 1,
}
launcher_key_values = {
"NOT_RUNNING": b'\x00',
"RUNNING": b'\x01'
}
if key_value not in launcher_key_values:
raise PebbleError(self.id, "not a valid application message")
if uuid_is_string:
app_uuid = app_uuid.decode('hex')
elif type(app_uuid) is uuid.UUID:
app_uuid = app_uuid.bytes
#else we can assume it's a byte array
# build and send a single tuple-sized launcher command
app_message_tuple = AppMessage.build_tuple(launcher_keys["RUN_STATE_KEY"], "UINT", launcher_key_values[key_value])
app_message_dict = AppMessage.build_dict(app_message_tuple)
packed_message = AppMessage.build_message(app_message_dict, "PUSH", app_uuid)
self._send_message("LAUNCHER", packed_message)
# wait for either ACK or NACK response
if not async:
return EndpointSync(self, "LAUNCHER").get_data()
def app_message_send_tuple(self, app_uuid, key, tuple_datatype, tuple_data):
""" Send a Dictionary with a single tuple to the app corresponding to UUID """
app_uuid = app_uuid.decode('hex')
app_message_tuple = AppMessage.build_tuple(key, tuple_datatype, tuple_data)
app_message_dict = AppMessage.build_dict(app_message_tuple)
packed_message = AppMessage.build_message(app_message_dict, "PUSH", app_uuid)
self._send_message("APPLICATION_MESSAGE", packed_message)
def app_message_send_string(self, app_uuid, key, string):
""" Send a Dictionary with a single tuple of type CSTRING to the app corresponding to UUID """
# NULL terminate and pack
string = string + '\0'
fmt = '<' + str(len(string)) + 's'
string = pack(fmt, string);
self.app_message_send_tuple(app_uuid, key, "CSTRING", string)
def app_message_send_uint(self, app_uuid, key, tuple_uint):
""" Send a Dictionary with a single tuple of type UINT to the app corresponding to UUID """
fmt = '<' + str(tuple_uint.bit_length() / 8 + 1) + 'B'
tuple_uint = pack(fmt, tuple_uint)
self.app_message_send_tuple(app_uuid, key, "UINT", tuple_uint)
def app_message_send_int(self, app_uuid, key, tuple_int):
""" Send a Dictionary with a single tuple of type INT to the app corresponding to UUID """
fmt = '<' + str(tuple_int.bit_length() / 8 + 1) + 'b'
tuple_int = pack(fmt, tuple_int)
self.app_message_send_tuple(app_uuid, key, "INT", tuple_int)
def app_message_send_byte_array(self, app_uuid, key, tuple_byte_array):
""" Send a Dictionary with a single tuple of type BYTE_ARRAY to the app corresponding to UUID """
# Already packed, fix endianness
tuple_byte_array = tuple_byte_array[::-1]
self.app_message_send_tuple(app_uuid, key, "BYTE_ARRAY", tuple_byte_array)
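    # Illustrative single-tuple app messages (the app UUID and keys are placeholders):
    #
    #   pebble.app_message_send_string("0123456789abcdef0123456789abcdef", 0, "hello")
    #   pebble.app_message_send_uint("0123456789abcdef0123456789abcdef", 1, 42)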
def system_message(self, command):
"""
Send a 'system message' to the watch.
These messages are used to signal important events/state-changes to the watch firmware.
"""
commands = {
"FIRMWARE_AVAILABLE": 0,
"FIRMWARE_START": 1,
"FIRMWARE_COMPLETE": 2,
"FIRMWARE_FAIL": 3,
"FIRMWARE_UP_TO_DATE": 4,
"FIRMWARE_OUT_OF_DATE": 5,
"BLUETOOTH_START_DISCOVERABLE": 6,
"BLUETOOTH_END_DISCOVERABLE": 7
}
if command not in commands:
raise PebbleError(self.id, "Invalid command \"%s\"" % command)
data = pack("!bb", 0, commands[command])
log.debug("Sending command %s (code %d)" % (command, commands[command]))
self._send_message("SYSTEM_MESSAGE", data)
def ping(self, cookie = 0xDEC0DE, async = False):
"""Send a 'ping' to the watch to test connectivity."""
data = pack("!bL", 0, cookie)
self._send_message("PING", data)
if not async:
return EndpointSync(self, "PING").get_data()
def reset(self):
"""Reset the watch remotely."""
self._send_message("RESET", "\x00")
def disconnect(self):
"""Disconnect from the target Pebble."""
self._alive = False
self._ser.close()
def is_alive(self):
if not self._alive:
return False
return self._ser.is_alive()
def _add_app(self, index):
data = pack("!bI", 3, index)
self._send_message("APP_MANAGER", data)
def _ping_response(self, endpoint, data):
restype, retcookie = unpack("!bL", data)
return retcookie
def _get_time_response(self, endpoint, data):
restype, timestamp = unpack("!bL", data)
return timestamp
def _system_message_response(self, endpoint, data):
if len(data) == 2:
log.info("Got system message %s" % repr(unpack('!bb', data)))
else:
log.info("Got 'unknown' system message...")
def _log_response(self, endpoint, data):
if (len(data) < 8):
log.warn("Unable to decode log message (length %d is less than 8)" % len(data))
return
timestamp, level, msgsize, linenumber = unpack("!IBBH", data[:8])
filename = data[8:24].decode('utf-8')
message = data[24:24+msgsize].decode('utf-8')
str_level = self.log_levels[level] if level in self.log_levels else "?"
def _app_log_response(self, endpoint, data):
if (len(data) < 8):
log.warn("Unable to decode log message (length %d is less than 8)" % len(data))
return
app_uuid = uuid.UUID(bytes=data[0:16])
timestamp, level, msgsize, linenumber = unpack("!IBBH", data[16:24])
filename = data[24:40].decode('utf-8')
message = data[40:40+msgsize].decode('utf-8')
str_level = self.log_levels[level] if level in self.log_levels else "?"
def _appbank_status_response(self, endpoint, data):
apps = {}
restype, = unpack("!b", data[0])
app_install_message = {
0: "app available",
1: "app removed",
2: "app updated"
}
if restype == 1:
apps["banks"], apps_installed = unpack("!II", data[1:9])
apps["apps"] = []
appinfo_size = 78
offset = 9
for i in xrange(apps_installed):
app = {}
try:
app["id"], app["index"], app["name"], app["company"], app["flags"], app["version"] = \
unpack("!II32s32sIH", data[offset:offset+appinfo_size])
app["name"] = app["name"].replace("\x00", "")
app["company"] = app["company"].replace("\x00", "")
apps["apps"] += [app]
except:
if offset+appinfo_size > len(data):
log.warn("Couldn't load bank %d; remaining data = %s" % (i,repr(data[offset:])))
else:
raise
offset += appinfo_size
return apps
elif restype == 2:
            message_id, = unpack("!I", data[1:])
return app_install_message[message_id]
def _version_response(self, endpoint, data):
fw_names = {
0: "normal_fw",
1: "recovery_fw"
}
resp = {}
for i in xrange(2):
fwver_size = 47
offset = i*fwver_size+1
fw = {}
fw["timestamp"],fw["version"],fw["commit"],fw["is_recovery"], \
fw["hardware_platform"],fw["metadata_ver"] = \
unpack("!i32s8s?bb", data[offset:offset+fwver_size])
fw["version"] = fw["version"].replace("\x00", "")
fw["commit"] = fw["commit"].replace("\x00", "")
fw_name = fw_names[i]
resp[fw_name] = fw
resp["bootloader_timestamp"],resp["hw_version"],resp["serial"] = \
unpack("!L9s12s", data[95:120])
resp["hw_version"] = resp["hw_version"].replace("\x00","")
btmac_hex = binascii.hexlify(data[120:126])
resp["btmac"] = ":".join([btmac_hex[i:i+2].upper() for i in reversed(xrange(0, 12, 2))])
return resp
def install_bridge(self, bridge):
assert "process" in dir(bridge) #TODO: Proper parentage check
self.bridges[bridge.UUID] = bridge(self, self._locationSource)
log.info("Installed %s as a bridge on UUID %s" % (bridge, bridge.UUID))
def _application_message_response(self, endpoint, data):
command = data[0]
if command == b'\x01': #PUSH
(command, transaction, app_uuid, msg_dict) = AppMessage.read_message(data)
log.debug("ACKing transaction %x" % ord(transaction))
self._send_message("APPLICATION_MESSAGE", "\xFF%s" % transaction)
if app_uuid in self.bridges:
reply = self.bridges[app_uuid].process(msg_dict)
if reply is not None:
msg = AppMessage.construct_message(reply, "PUSH", app_uuid.bytes, transaction)
self._send_message("APPLICATION_MESSAGE", msg)
else:
# No app is registered to handle this, let HTTPebble have a go
# (some custom apps are really just HTTPebble implementations
# with their own UUIDs)
log.warn("Got app message for %s and no bridge was found, attempt HTTPebble" % app_uuid)
http_uuid = uuid.UUID("9141b628-bc89-498e-b147-049f49c099ad")
reply = self.bridges[http_uuid].process(msg_dict)
if reply is not None:
msg = AppMessage.construct_message(reply, "PUSH", app_uuid.bytes, transaction)
self._send_message("APPLICATION_MESSAGE", msg)
elif command == b'\x02': #REQUEST:
log.warn("Got app request; not yet implemented; NACKing")
transaction = data[1]
self._send_message("APPLICATION_MESSAGE", "\x7F%s" % transaction)
elif command == b'\x7F': #NACK
transaction = data[1]
log.warn("Pebble NACKed transaction %x" % ord(transaction))
elif command == b'\xFF': #ACK
transaction = data[1]
log.debug("Pebble ACKed transaction %x" % ord(transaction))
else:
log.error("Unknown command type %x" % ord(command))
#TODO: Old, untouched, code. Remove?
if len(data) > 1:
rest = data[1:]
else:
rest = ''
if data[0] in AppMessage.app_messages:
return AppMessage.app_messages[data[0]] + rest
def _phone_version_response(self, endpoint, data):
session_cap = {
"GAMMA_RAY" : 0x80000000,
}
remote_cap = {
"TELEPHONY" : 16,
"SMS" : 32,
"GPS" : 64,
"BTLE" : 128,
"CAMERA_REAR" : 256,
"ACCEL" : 512,
"GYRO" : 1024,
"COMPASS" : 2048,
}
os = {
"UNKNOWN" : 0,
"IOS" : 1,
"ANDROID" : 2,
"OSX" : 3,
"LINUX" : 4,
"WINDOWS" : 5,
}
# Then session capabilities, android adds GAMMA_RAY and it's
# the only session flag so far
session = session_cap["GAMMA_RAY"]
# Then phone capabilities, android app adds TELEPHONY and SMS,
# and the phone type (we know android works for now)
remote = remote_cap["TELEPHONY"] | remote_cap["SMS"] | os["ANDROID"]
msg = pack("!biII", 1, -1, session, remote)
self._send_message("PHONE_VERSION", msg);
def _music_control_response(self, endpoint, data):
event, = unpack("!b", data)
event_names = {
1: "PLAYPAUSE",
4: "NEXT",
5: "PREVIOUS",
}
return event_names[event] if event in event_names else None
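# --- Illustrative sketch, not part of the original library ---
# Minimal example of the messaging helpers defined above, assuming `pebble` is an
# already-connected Pebble instance and `app_uuid` is the UUID of an installed
# watchapp; the key (0), cookie and payload below are placeholder values.
def _example_send_app_message(pebble, app_uuid):
    # Confirm the transport is alive (blocks until the PING reply arrives).
    pebble.ping(cookie=0xBEEF)
    # Push a single CSTRING tuple under key 0 to the target watchapp.
    pebble.app_message_send_string(app_uuid, 0, "hello from the phone")
    # Tell the watch firmware that no update is pending.
    pebble.system_message("FIRMWARE_UP_TO_DATE")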
class AppMessage(object):
# tools to build a valid app message
#TODO: Refactor this in to a clean object representation instead of static utility functions.
tuple_datatypes = {
"BYTE_ARRAY": b'\x00',
"CSTRING": b'\x01',
"UINT": b'\x02',
"INT": b'\x03'
}
struct_to_tuple_type = {
'P':'BYTE_ARRAY',
's':'CSTRING',
'b':'INT',
'h':'INT',
'i':'INT',
'q':'INT',
'B':'UINT',
'H':'UINT',
'I':'UINT',
'Q':'UINT',
}
app_messages = {
"PUSH": b'\x01',
"REQUEST": b'\x02',
"ACK": b'\xFF',
"NACK": b'\x7F'
}
def read_byte_array(v_type, v_len, data):
return (array.array('B',data), "%sP" % v_len)
def read_cstring(v_type, v_len, data):
#TODO: This seems kludgy.
n = data.find("\x00")
if n != -1:
data = data[:n]
return (data, "%ss" % v_len)
def read_uint(v_type, v_len, data):
types = {
1:"B",
2:"H",
4:"I",
8:"Q"
}
return (unpack("<%s" % types[v_len], data)[0], types[v_len])
def read_int(v_type, v_len, data):
types = {
1:"b",
2:"h",
4:"i",
8:"q"
}
return (unpack("<%s" % types[v_len], data)[0], types[v_len])
tuple_readers = {
0:read_byte_array,
1:read_cstring,
2:read_uint,
3:read_int
}
@staticmethod
def read_dict(data):
count = ord(data[0])
data = data[1:]
tuples = []
while len(data):
(k,t,l) = unpack("<LBH", data[0:7])
v = data[7:7+l]
p = AppMessage.tuple_readers[t](t,l,v)
tuples.append((k,p))
data = data[7+l:]
return OrderedDict(tuples)
@staticmethod
def read_message(data):
return (data[0], data[1], uuid.UUID(bytes=data[2:18]), AppMessage.read_dict(data[18:]))
#NOTE: The "construct" methods should replace the "build" methods at some point.
@staticmethod
def construct_tuple(key, data_type, data):
t = array.array('B')
t.fromstring(pack('<L', key))
t.fromstring(AppMessage.tuple_datatypes[data_type])
t.fromstring(pack("<H", len(data)))
t.fromstring(data)
return t
@staticmethod
def construct_dict(tuples):
count = len(tuples)
out = array.array('B')
out.fromstring(pack('<B', count))
#TODO: Re-solve this using byte arrays
for v in tuples:
out.extend(v)
return out
@staticmethod
def construct_message(packed_dict, command, uuid, transaction_id):
m = array.array('B')
m.fromstring(AppMessage.app_messages[command])
m.fromstring(transaction_id)
m.fromstring(uuid)
m.extend(packed_dict)
return m.tostring()
@staticmethod
def build_tuple(key, data_type, data):
""" make a single app_message tuple"""
# build the message_tuple
app_message_tuple = OrderedDict([
("KEY", pack('<L', key)),
("TYPE", AppMessage.tuple_datatypes[data_type]),
("LENGTH", pack('<H', len(data))),
("DATA", data)
])
return app_message_tuple
@staticmethod
def build_dict(tuple_of_tuples):
""" make a dictionary from a list of app_message tuples"""
# note that "TUPLE" can refer to 0 or more tuples. Tuples must be correct endian-ness already
tuple_count = len(tuple_of_tuples)
# make the bytearray from the flattened tuples
tuple_total_bytes = ''.join(item for item in itertools.chain(*tuple_of_tuples.values()))
# now build the dict
app_message_dict = OrderedDict([
("TUPLECOUNT", pack('B', tuple_count)),
("TUPLE", tuple_total_bytes)
])
return app_message_dict
@staticmethod
def build_message(dict_of_tuples, command, uuid, transaction_id=b'\x00'):
""" build the app_message intended for app with matching uuid"""
# NOTE: uuid must be a byte array
# finally build the entire message
app_message = OrderedDict([
("COMMAND", AppMessage.app_messages[command]),
("TRANSACTIONID", transaction_id),
("UUID", uuid),
("DICT", ''.join(dict_of_tuples.values()))
])
return ''.join(app_message.values())
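# --- Illustrative sketch, not part of the original library ---
# How the build_* helpers above fit together; this mirrors what
# Pebble.app_message_send_tuple() does. The key (42), value and UUID bytes are
# placeholders.
def _example_build_push_message(app_uuid_bytes):
    value = pack('<I', 1234)                                    # little-endian UINT payload
    message_tuple = AppMessage.build_tuple(42, "UINT", value)   # key/type/length/data
    message_dict = AppMessage.build_dict(message_tuple)         # tuple count + flattened tuples
    return AppMessage.build_message(message_dict, "PUSH", app_uuid_bytes)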
class PutBytesClient(object):
states = {
"NOT_STARTED": 0,
"WAIT_FOR_TOKEN": 1,
"IN_PROGRESS": 2,
"COMMIT": 3,
"COMPLETE": 4,
"FAILED": 5
}
transfer_types = {
"FIRMWARE": 1,
"RECOVERY": 2,
"SYS_RESOURCES": 3,
"RESOURCES": 4,
"BINARY": 5
}
def __init__(self, pebble, index, transfer_type, buffer, crc=None):
self._pebble = pebble
self._state = self.states["NOT_STARTED"]
self._transfer_type = self.transfer_types[transfer_type]
self._buffer = buffer
self._index = index
self._done = False
self._error = False
self._crc = None
def init(self):
data = pack("!bIbb", 1, len(self._buffer), self._transfer_type, self._index)
self._pebble._send_message("PUTBYTES", data)
self._state = self.states["WAIT_FOR_TOKEN"]
def wait_for_token(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
log.error("init failed with code %d" % res)
self._error = True
return
self._token, = unpack("!I", resp[1:])
self._left = len(self._buffer)
self._state = self.states["IN_PROGRESS"]
self.send()
def in_progress(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
if self._left > 0:
self.send()
log.debug("Sent %d of %d bytes" % (len(self._buffer)-self._left, len(self._buffer)))
else:
self._state = self.states["COMMIT"]
self.commit()
def commit(self):
if self._crc:
crc = self._crc
else:
crc = stm32_crc.crc32(self._buffer)
data = pack("!bII", 3, self._token & 0xFFFFFFFF, crc)
self._pebble._send_message("PUTBYTES", data)
def handle_commit(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
self._state = self.states["COMPLETE"]
self.complete()
def complete(self):
data = pack("!bI", 5, self._token & 0xFFFFFFFF)
self._pebble._send_message("PUTBYTES", data)
def handle_complete(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
self._done = True
def abort(self):
msgdata = pack("!bI", 4, self._token & 0xFFFFFFFF)
self._pebble.send_message("PUTBYTES", msgdata)
self._error = True
def send(self):
datalen = min(self._left, 2000)
rg = len(self._buffer)-self._left
msgdata = pack("!bII", 2, self._token & 0xFFFFFFFF, datalen)
msgdata += self._buffer[rg:rg+datalen]
self._pebble._send_message("PUTBYTES", msgdata)
self._left -= datalen
def handle_message(self, endpoint, resp):
if self._state == self.states["WAIT_FOR_TOKEN"]:
self.wait_for_token(resp)
elif self._state == self.states["IN_PROGRESS"]:
self.in_progress(resp)
elif self._state == self.states["COMMIT"]:
self.handle_commit(resp)
elif self._state == self.states["COMPLETE"]:
self.handle_complete(resp)
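# --- Illustrative sketch, not part of the original library ---
# PutBytesClient walks the watch's PUTBYTES state machine:
#   init() -> WAIT_FOR_TOKEN -> IN_PROGRESS (repeated send()) -> COMMIT -> COMPLETE.
# Hypothetical driver; it assumes the Pebble instance routes incoming PUTBYTES
# responses to client.handle_message(endpoint, resp) until _done or _error is set.
def _example_put_bytes(pebble, payload):
    client = PutBytesClient(pebble, index=1, transfer_type="BINARY", buffer=payload)
    client.init()
    return client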
| Elleo/rockwatch | pebble/pebble.py | Python | gpl-3.0 | 33,192 |
"""
Django base settings for Waldur Core.
"""
from datetime import timedelta
import locale
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import warnings
from waldur_core.core import WaldurExtension
from waldur_core.core.metadata import WaldurConfiguration
from waldur_core.server.admin.settings import * # noqa: F403
encoding = locale.getpreferredencoding()
if encoding.lower() != 'utf-8':
raise Exception("""Your system's preferred encoding is `{}`, but Waldur requires `UTF-8`.
Fix it by setting the LC_* and LANG environment settings. Example:
LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
""".format(encoding))
ADMINS = ()
BASE_DIR = os.path.abspath(os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..'), '..'))
DEBUG = False
MEDIA_ROOT = '/media_root/'
MEDIA_URL = '/media/'
ALLOWED_HOSTS = []
SITE_ID = 1
DBTEMPLATES_USE_REVERSION = True
DBTEMPLATES_USE_CODEMIRROR = True
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.humanize',
'django.contrib.staticfiles',
'django.contrib.sites',
'waldur_core.landing',
'waldur_core.logging',
'waldur_core.core',
'waldur_core.quotas',
'waldur_core.structure',
'waldur_core.users',
'waldur_core.media',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'django_filters',
'axes',
'django_fsm',
'reversion',
'taggit',
'jsoneditor',
'modeltranslation',
'import_export',
'health_check',
'health_check.db',
'health_check.cache',
'health_check.storage',
'health_check.contrib.migrations',
'health_check.contrib.celery_ping',
'dbtemplates',
'binary_database_files',
)
INSTALLED_APPS += ADMIN_INSTALLED_APPS # noqa: F405
MIDDLEWARE = (
'waldur_core.server.middleware.cors_middleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'waldur_core.logging.middleware.CaptureEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'axes.middleware.AxesMiddleware'
)
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_AUTHENTICATION_CLASSES': (
'waldur_core.core.authentication.TokenAuthentication',
'waldur_core.core.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'waldur_core.core.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PAGINATION_CLASS': 'waldur_core.core.pagination.LinkHeaderPagination',
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'PAGE_SIZE': 10,
'EXCEPTION_HANDLER': 'waldur_core.core.views.exception_handler',
# Return native `Date` and `Time` objects in `serializer.data`
'DATETIME_FORMAT': None,
'DATE_FORMAT': None,
'TIME_FORMAT': None,
'ORDERING_PARAM': 'o'
}
AUTHENTICATION_BACKENDS = (
'axes.backends.AxesBackend',
'django.contrib.auth.backends.ModelBackend',
'waldur_core.core.authentication.AuthenticationBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
ANONYMOUS_USER_ID = None
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (os.path.join(BASE_DIR, 'src', 'waldur_core', 'templates'),),
'OPTIONS': {
'context_processors': (
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
),
'loaders': ADMIN_TEMPLATE_LOADERS + (
'dbtemplates.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
), # noqa: F405
},
},
]
ROOT_URLCONF = 'waldur_core.server.urls'
AUTH_USER_MODEL = 'core.User'
# Session
# https://docs.djangoproject.com/en/2.2/ref/settings/#sessions
SESSION_COOKIE_AGE = 3600
SESSION_SAVE_EVERY_REQUEST = True
WSGI_APPLICATION = 'waldur_core.server.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'src', 'waldur_core', 'locale'),
)
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Celery
CELERY_BROKER_URL = 'redis://localhost'
CELERY_RESULT_BACKEND = 'redis://localhost'
CELERY_TASK_QUEUES = {
'tasks': {'exchange': 'tasks'},
'heavy': {'exchange': 'heavy'},
'background': {'exchange': 'background'},
}
CELERY_TASK_DEFAULT_QUEUE = 'tasks'
CELERY_TASK_ROUTES = ('waldur_core.server.celery.PriorityRouter',)
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'redis://localhost',
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser',
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'PICKLE_VERSION': -1,
},
},
}
# Regular tasks
CELERY_BEAT_SCHEDULE = {
'pull-service-properties': {
'task': 'waldur_core.structure.ServicePropertiesListPullTask',
'schedule': timedelta(hours=24),
'args': (),
},
'pull-service-resources': {
'task': 'waldur_core.structure.ServiceResourcesListPullTask',
'schedule': timedelta(hours=1),
'args': (),
},
'pull-service-subresources': {
'task': 'waldur_core.structure.ServiceSubResourcesListPullTask',
'schedule': timedelta(hours=2),
'args': (),
},
'check-expired-permissions': {
'task': 'waldur_core.structure.check_expired_permissions',
'schedule': timedelta(hours=24),
'args': (),
},
'cancel-expired-invitations': {
'task': 'waldur_core.users.cancel_expired_invitations',
'schedule': timedelta(hours=24),
'args': (),
},
'structure-set-erred-stuck-resources': {
'task': 'waldur_core.structure.SetErredStuckResources',
'schedule': timedelta(hours=1),
'args': (),
},
'create_customer_permission_reviews': {
'task': 'waldur_core.structure.create_customer_permission_reviews',
'schedule': timedelta(hours=24),
'args': (),
},
}
globals().update(WaldurConfiguration().dict())
for ext in WaldurExtension.get_extensions():
INSTALLED_APPS += (ext.django_app(),)
for name, task in ext.celery_tasks().items():
if name in CELERY_BEAT_SCHEDULE:
warnings.warn(
"Celery beat task %s from Waldur extension %s "
"is overlapping with primary tasks definition" % (name, ext.django_app()))
else:
CELERY_BEAT_SCHEDULE[name] = task
for key, val in ext.Settings.__dict__.items():
if not key.startswith('_'):
globals()[key] = val
ext.update_settings(globals())
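# Illustrative sketch (an assumption, not an extension shipped with this file): an
# extension feeding the loop above would typically expose a Django app label,
# optional periodic tasks and default settings, roughly like:
#   class ExampleExtension(WaldurExtension):
#       class Settings:
#           EXAMPLE_FEATURE_ENABLED = True
#       @staticmethod
#       def django_app():
#           return 'waldur_example'
#       @staticmethod
#       def celery_tasks():
#           return {'waldur-example-sync': {'task': 'waldur_example.tasks.sync',
#                                           'schedule': timedelta(hours=1),
#                                           'args': ()}}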
# Swagger
SWAGGER_SETTINGS = {
# USE_SESSION_AUTH parameter should be equal to DEBUG parameter.
# If it is True, LOGIN_URL and LOGOUT_URL must be specified.
'USE_SESSION_AUTH': False,
'APIS_SORTER': 'alpha',
'JSON_EDITOR': True,
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
},
},
}
AXES_ONLY_USER_FAILURES = True
AXES_COOLOFF_TIME = timedelta(minutes=10)
AXES_FAILURE_LIMIT = 5
# Django File Storage API
DEFAULT_FILE_STORAGE = 'binary_database_files.storage.DatabaseStorage'
DB_FILES_AUTO_EXPORT_DB_TO_FS = False
DATABASE_FILES_URL_METHOD = 'URL_METHOD_2'
# Disable excessive xmlschema and django-axes logging
import logging
logging.getLogger("xmlschema").propagate = False
logging.getLogger("axes").propagate = False
| opennode/nodeconductor-assembly-waldur | src/waldur_core/server/base_settings.py | Python | mit | 9,157 |
#===============================================================================
# This file is part of TEMPy.
#
# TEMPy is a software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2015 Birkbeck College University of London.
#
# Authors: Maya Topf, Daven Vasishtan, Arun Prasad Pandurangan,
# Irene Farabella, Agnel-Praveen Joseph, Harpal Sahota
#
# This software is made available under GPL V3 license
# http://www.gnu.org/licenses/gpl-3.0.html
#
#
# Please cite your use of TEMPy in published work:
#
# Farabella, I., Vasishtan, D., Joseph, A.P., Pandurangan, A.P., Sahota, H. & Topf, M. (2015). J. Appl. Cryst. 48.
#
#===============================================================================
from TEMPy.StructureBlurrer import StructureBlurrer
from TEMPy.ScoringFunctions import ScoringFunctions
from numpy import zeros
import sys
class Cluster:
"""A class to clustering an ensemble of structure instance"""
def __init__(self):
pass
def _print_results_cluster(self,models,class_num,number_top_mod,score,write=False):
"""
private function used in Cluster_Ensemble
"""
out_list=[]
if write==True:
outp = open("top"+str(number_top_mod)+str(score)+"_classes.txt", "w")
outp.write("pdb_name\tscore\tlrms\tclass\n")
for i in range(1,class_num+1):
# print the fits of each class ordered by the highest score
for ipdb in models:
if (ipdb[-1] == i):
out_list.append([ipdb[0],ipdb[2],ipdb[3],ipdb[4]])
outp.write("%s\t%.5f\t%.3f\t%d\n" %(ipdb[0],ipdb[2],ipdb[3],ipdb[4]))
outp.close()
else:
for i in range(1,class_num+1):
for ipdb in models:
if (ipdb[-1] == i):
out_list.append([ipdb[0],ipdb[2],ipdb[3],ipdb[4]])
return out_list
def _print_results_cluster2(self,models,write=True):
"""
private function used in Cluster_Ensemble
"""
out_list=[]
if write==True:
outp = open("top_rank.txt", "w")
outp.write("pdb_name\tscore\tlrms\tclass\n")
for i in models:
#[name_mod,mod,score_mod,int(0),int(0)]
# print the fits of each class ordered by the highest score
outp.write("%s\t%.5f\n" %(i[0],i[2]))
outp.close()
else:
print('this is for print!!!')
def cluster_fit_ensemble_top_fit(self,ensemble_list,score,rms_cutoff,res_target_map,sigma_coeff,number_top_mod=0,write=False,targetMap=False):
"""
RMSD clustering of the multiple "fits" starting from the best scoring model accordingly with a chosen score.
Cluster the fits based on Calpha RMSD (starting from the best scoring model)
Arguments:
*ensemble_list*
Input list of Structure Instances.
*targetMap*
Target Map Instance.
*score*
Scoring function to use.
See ScoringFunctions class for a list of the available Scoring Function.
E.g. set score='CCC' to use the Cross-correlation coefficient.
Score option are:
i 'CCC' - Cross-correlation coefficient;
ii 'LAP' - Laplacian-filtered cross-correlation coefficient: useful for maps with resolutions worse than 10-15 A;
iii 'MI' - Mutual information score: a good and robust score but relatively slow to calculate;
iv 'ENV' - Envelope score: the fastest score to calculate due to binarisation of the map.
v-vii 'NV','NV_Sobel','NV_Laplace'- Normal vector score: a vector-based surface superimposition score with or without Sobel/Laplace filter.
viii 'CD' - Chamfer Distance: a score used in computer vision algorithms as a fast similarity metric
*rms_cutoff*
float, the Calpha RMSD cutoff based on which you want to cluster the solutions. For example 3.5 (for 3.5 A).
*res_target_map*
the resolution, in Angstroms, of the target Map.
*sigma_coeff*
the sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default values is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.225R which makes the Fourier transform of the distribution fall to 1/e of its maximum value at wavenumber 1/resolution, the default in Chimera (Petterson et al, 2004)
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, an option in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
1R where the sigma value simply equal to the resolution, as used by NMFF (Tama et al, 2004).
*number_top_mod*
Number of Fits to cluster. Default is all.
*write*
True will write out a file that contains the list of the structure instances representing different fits scored and clustered.
note the lrms column is the Calpha RMSD of each fit from the first fit in its class
"""
blurrer = StructureBlurrer()
scorer = ScoringFunctions()
cluster=Cluster()
count=0
dict_ensembl={}
list_ordered=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=0,write=False,targetMap=targetMap.copy())
#cluster fits by local rmsd
if number_top_mod==0:
ini_num = 0
end_num = len(list_ordered)
fit_class = 0
for ipdb in list_ordered:
print("model num %d: %s\n", list_ordered.index(ipdb)+1, ipdb[0])
ini_num1 = list_ordered.index(ipdb)
mod1=ipdb[1]
print('next index ' + str(ini_num1))
if ipdb[-1] == 0:
fit_class+=1
for ipdb1 in list_ordered[ini_num1 : end_num]:
mod2=ipdb1[1]
if ipdb1[-1] == 0:
rmsd_val=float(mod1.RMSD_from_same_structure(mod2,CA=True))
ipdb1[3]=rmsd_val
print("rmsd of %s from best local fit (%s)= %.2f", ipdb1[0], ipdb[0], rmsd_val)
if rmsd_val < rms_cutoff:
ipdb1[-1] = fit_class
print('class= ' + str(ipdb1[-1]))
else: continue
else: continue
return cluster._print_results_cluster(list_ordered,fit_class,number_top_mod,score,write)
else:
x=int(number_top_mod)
ini_num = 0
end_num = len(list_ordered[:x])
fit_class = 0
for ipdb in list_ordered[:x]:
print("model num %d: %s\n", list_ordered.index(ipdb)+1, ipdb[0])
ini_num1 = list_ordered.index(ipdb)
mod1=ipdb[1]
print('next index ' + str(ini_num1))
if ipdb[-1] == 0:
fit_class+=1
for ipdb1 in list_ordered[ini_num1 : end_num]:
mod2=ipdb1[1]
if ipdb1[-1] == 0:
rmsd_val=float(mod1.RMSD_from_same_structure(mod2,CA=True))
print("rms of %s from best local fit (%s)= %.2f", ipdb1[0], ipdb[0], rmsd_val)
ipdb1[3]=rmsd_val
if rmsd_val < rms_cutoff:
ipdb1[-1] = fit_class
print('class= ' + str(ipdb1[-1]))
else: continue
else: continue
return cluster._print_results_cluster(list_ordered[:x],fit_class,number_top_mod,score,write)
def RMSD_ensemble(self,rank_fit_ensemble,ensemble_list,CA=True):
"""
Calculates the pairwise RMSD matrix for all Structure Instance in the ensemble.
Arguments:
*rank_fit_ensemble*
Ensemble of Structure Instance ranked using cluster.rank_fit_ensemble
*ensemble_list*
Input list of Structure Instances
*CA is set to True if only CA-RMSD is needed*
Return:
A numpy array
"""
list_rotate_models_dict={}
for i in ensemble_list:
list_rotate_models_dict[i[0]]=i[1]
sorted_rank=rank_fit_ensemble
mxRMSD = zeros(shape=(len(sorted_rank),len(sorted_rank)))
for mod1 in sorted_rank:
for mod2 in sorted_rank:
print(mod1[0],mod2[0])
rmsd_val=float(list_rotate_models_dict[mod1[0]].RMSD_from_same_structure(list_rotate_models_dict[mod2[0]],CA=CA))
m1=sorted_rank.index(mod1)
m2=sorted_rank.index(mod2)
mxRMSD[m1][m2]=rmsd_val
return mxRMSD
def rank_fit_ensemble(self,ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=0,\
write=False,targetMap=False,cont_targetMap=None):
"""
RMSD clustering of the multiple "fits" accordingly with a chosen score.
Cluster the fits based on Calpha RMSD (starting from the best scoring model)
Arguments:
*ensemble_list*
Input list of Structure Instances.
*targetMap*
Target Map Instance.
*score*
Scoring function to use.
See ScoringFunctions class for a list of the available Scoring Function.
E.g. set score='CCC' to use the Cross-correlation coefficient.
Score option are:
i 'CCC' - Cross-correlation coefficient;
ii 'LAP' - Laplacian-filtered cross-correlation coefficient: useful for maps with resolutions worse than 10-15 A;
iii 'MI' - Mutual information score: a good and robust score but relatively slow to calculate;
iv 'ENV' - Envelope score: the fastest score to calculate due to binarisation of the map.
v-vii 'NV','NV_Sobel','NV_Laplace'- Normal vector score: a vector-based surface superimposition score with or without Sobel/Laplace filter.
viii 'CD' - Chamfer Distance: a score used in computer vision algorithms as a fast similarity metric
*rms_cutoff*
float, the Calpha RMSD cutoff based on which you want to cluster the solutions. For example 3.5 (for 3.5 A).
*res_target_map*
the resolution, in Angstroms, of the target Map.
*sigma_coeff*
the sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default values is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.225R which makes the Fourier transform of the distribution fall to 1/e of its maximum value at wavenumber 1/resolution, the default in Chimera (Petterson et al, 2004)
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, an option in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
1R where the sigma value simply equal to the resolution, as used by NMFF (Tama et al, 2004).
*number_top_mod*
Number of Fits to cluster. Default is all.
*write*
True will write out a file that contains the list of the structure instances representing different fits scored and clustered.
note the lrms column is the Calpha RMSD of each fit from the first fit in its class
"""
blurrer = StructureBlurrer()
scorer = ScoringFunctions()
cluster=Cluster()
count=0
dict_ensembl={}
list_to_order=[]
#print targetMap
if targetMap==False:
#targetMap = self.protMap(prot, min(resolution/4., 3.5), resolution)
print("WARNING:Need target map")
sys.exit()
if score not in ['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']:
            print('Incorrect Scoring Function: %s' % score)
            print('Please select from one of the following scoring functions: %s' % ', '.join(['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']))
sys.exit()
targetMap=targetMap.copy()
if score=='CCC':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.CCC_map(sim_map,targetMap,0.5*sim_map.fullMap.std(),cont_targetMap,2,True)[0]#CCC(sim_map,targetMap)
else: score_mod=scorer.CCC_map(sim_map,targetMap,0.0,0.0,True)[0]
#else: score_mod=scorer.CCC(sim_map,targetMap)
#'name_file','structure_instance','score','lrmsd','class'
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='LAP':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
score_mod=scorer.laplace_CCC(sim_map,targetMap)
#'name_file','structure_instance','score','lrmsd','class'
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='MI':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.MI(sim_map,targetMap,0.5*sim_map.fullMap.std(),cont_targetMap,1)
else: score_mod=scorer.MI(sim_map,targetMap)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='NV':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
#These two values should be calculated for the experimental map, and only
#need to be calculated once, at the beginning
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.normal_vector_score(targetMap,sim_map, cont_targetMap-(0.1*targetMap.std()), cont_targetMap+(0.1*targetMap.std()),Filter=None)
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.normal_vector_score(targetMap,sim_map, min_thr, max_thr,Filter=None)
score_mod = 1 - (score_mod/3.14)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='NV_Sobel':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.normal_vector_score(targetMap,sim_map, cont_targetMap-(0.1*targetMap.std()), cont_targetMap+(0.1*targetMap.std()),Filter='Sobel')
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.normal_vector_score(targetMap,sim_map, min_thr, max_thr,Filter='Sobel')
score_mod = 1 - (score_mod/3.14)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='NV_Laplace':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.normal_vector_score(targetMap,sim_map, cont_targetMap-(0.1*targetMap.std()), cont_targetMap+(0.1*targetMap.std()),Filter='Laplace')
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.normal_vector_score(targetMap,sim_map, min_thr, max_thr,Filter='Laplace')
score_mod = 1 - (score_mod/3.14)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='ENV':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
score_mod=scorer.envelope_score(targetMap,min_thr,mod)
#'name_file','structure_instance','score','lrmsd','class'
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='CD':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None:
score_mod=scorer._surface_distance_score(sim_map,targetMap,0.5*sim_map.fullMap.std(),cont_targetMap,'Minimum')
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.chamfer_distance(sim_map,targetMap, min_thr, max_thr, kdtree=None)
score_mod = 1/score_mod
list_to_order.append([name_mod,mod,score_mod,0,0])
if score in ['NV','NV_Sobel','NV_Laplace']:
list_ordered=sorted(list_to_order, key=lambda x: x[2],reverse=True)#was false when NV was negative
else:
list_ordered=sorted(list_to_order, key=lambda x: x[2],reverse=True)
if number_top_mod==0:
if write==True:
return cluster._print_results_cluster2(list_ordered,write)
return list_ordered
else:
x=int(number_top_mod)
if write==True:
return cluster._print_results_cluster2(list_ordered[:x],write)
return list_ordered[:x]
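# --- Illustrative sketch, not part of the original module ---
# Typical driver for the two public methods above: `structures` is a list of
# (name, structure_instance) pairs and `target_map` an EM map instance; the 10 A
# resolution, 0.356 sigma coefficient and 3.5 A RMSD cutoff are placeholder values.
def _example_rank_and_cluster(structures, target_map):
    cl = Cluster()
    ranked = cl.rank_fit_ensemble(structures, 'CCC', 10.0, 0.356,
                                  number_top_mod=0, write=False, targetMap=target_map)
    classes = cl.cluster_fit_ensemble_top_fit(structures, 'CCC', 3.5, 10.0, 0.356,
                                              number_top_mod=0, write=False, targetMap=target_map)
    return ranked, classes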
| OniDaito/ChimeraXTempy | TEMPy/Cluster.py | Python | mit | 22,226 |
from skimage import img_as_float
import math
import numpy as np
def rgChromaticity(rgb):
"""
Converting an RGB image into normalized RGB removes the effect
of any intensity variations.
rg Chromaticity
http://en.wikipedia.org/wiki/Rg_chromaticity
Also know as normalised RGB as per paper:
Color-based object recognition, Theo Gevers and Arnold W.M. Smeulders,
Pattern Recognition,number 3, pages 453-464, volume 32, 1999.
"""
rgChrom = img_as_float(rgb)
    r = rgb[:, :, 0] + 0.00000000001
    g = rgb[:, :, 1] + 0.00000000001
    b = rgb[:, :, 2] + 0.00000000001
    divisor = r + g + b
    rgChrom[:, :, 0] = r / divisor
    rgChrom[:, :, 1] = g / divisor
    rgChrom[:, :, 2] = b / divisor
return rgChrom
def normalisedRGB(rgb):
"""
Converting an RGB image into normalized RGB removes the effect
of any intensity variations.
L2 Norm (Euclidean norm)
"""
norm = img_as_float(rgb)
r = rgb[:, :, 0] + 0.00000000001
g = rgb[:, :, 1] + 0.00000000001
b = rgb[:, :, 2] + 0.00000000001
divisor = np.sqrt(np.square(r) + np.square(g) + np.square(b))
    norm[:, :, 0] = r / divisor
    norm[:, :, 1] = g / divisor
norm[:, :, 2] = b / divisor
return norm
def linear_normalization(arr):
"""
Converting an RGB image into normalized RGB removes the effect
of any intensity variations.
Linear normalization
http://en.wikipedia.org/wiki/Normalization_%28image_processing%29
"""
arr = arr.astype('float')
# Do not touch the alpha channel
for i in range(3):
minval = arr[..., i].min()
maxval = arr[..., i].max()
if minval != maxval:
arr[..., i] -= minval
arr[..., i] *= (255.0 / (maxval - minval))
return arr
def normalisedRGB_simple(image):
r = image[:,:,0]
g = image[:,:,1]
b = image[:,:,2]
rn = r / (r+g+b)
gn = g / (r+g+b)
bn = b / (r+g+b)
return np.array((rn,gn,bn))
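# Illustrative sketch, not part of the original module: the helpers above all take
# an RGB array, so they can be compared directly on a small random image.
def _example_compare_normalisations():
    rgb = np.random.rand(4, 4, 3)
    return rgChromaticity(rgb), normalisedRGB(rgb), linear_normalization(rgb * 255.0)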
| michaelborck/ipfe | ipfe/colour.py | Python | bsd-3-clause | 1,979 |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"TanhActivation", "SigmoidActivation", "SoftmaxActivation",
"IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation',
'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation",
"STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation",
"LogActivation", "SqrtActivation", "ReciprocalActivation"
]
class BaseActivation(object):
"""
A mark for activation class.
Each activation inherit BaseActivation, which has two parameters.
:param name: activation name in paddle config.
:type name: basestring
:param support_hppl: True if supported by hppl. HPPL is a library used by paddle
internally. Currently, lstm layer can only use activations
supported by hppl.
:type support_hppl: bool
"""
def __init__(self, name, support_hppl):
self.name = name
self.support_hppl = support_hppl
def __repr__(self):
return self.name
class TanhActivation(BaseActivation):
"""
Tanh activation.
.. math::
f(z)=tanh(z)=\\frac{e^z-e^{-z}}{e^z+e^{-z}}
"""
def __init__(self):
BaseActivation.__init__(self, 'tanh', True)
class SigmoidActivation(BaseActivation):
"""
Sigmoid activation.
.. math::
f(z) = \\frac{1}{1+exp(-z)}
"""
def __init__(self):
BaseActivation.__init__(self, 'sigmoid', True)
class SoftmaxActivation(BaseActivation):
"""
Softmax activation for simple input
.. math::
P(y=j|x) = \\frac{e^{x_j}} {\\sum^K_{k=1} e^{x_j} }
"""
def __init__(self):
BaseActivation.__init__(self, 'softmax', False)
class SequenceSoftmaxActivation(BaseActivation):
"""
Softmax activation for one sequence. The dimension of input feature must be
1 and a sequence.
.. code:: python
result = softmax(for each_feature_vector[0] in input_feature)
for i, each_time_step_output in enumerate(output):
each_time_step_output = result[i]
"""
def __init__(self):
BaseActivation.__init__(self, 'sequence_softmax', False)
class IdentityActivation(BaseActivation):
"""
Identity Activation.
Just do nothing for output both forward/backward.
"""
def __init__(self):
BaseActivation.__init__(self, '', False)
LinearActivation = IdentityActivation
class ReluActivation(BaseActivation):
"""
Relu activation.
forward. :math:`y = max(0, z)`
derivative:
.. math::
1 &\\quad if z > 0 \\\\
0 &\\quad\\mathrm{otherwize}
"""
def __init__(self):
BaseActivation.__init__(self, 'relu', True)
class BReluActivation(BaseActivation):
"""
BRelu Activation.
forward. :math:`y = min(24, max(0, z))`
derivative:
.. math::
1 &\\quad if 0 < z < 24 \\\\
0 &\\quad \\mathrm{otherwise}
"""
def __init__(self):
BaseActivation.__init__(self, 'brelu', False)
class SoftReluActivation(BaseActivation):
"""
SoftRelu Activation.
"""
def __init__(self):
BaseActivation.__init__(self, 'softrelu', False)
class STanhActivation(BaseActivation):
"""
Scaled Tanh Activation.
.. math::
f(z) = 1.7159 * tanh(2/3*z)
"""
def __init__(self):
BaseActivation.__init__(self, 'stanh', False)
class AbsActivation(BaseActivation):
"""
Abs Activation.
Forward: :math:`f(z) = abs(z)`
Derivative:
.. math::
1 &\\quad if \\quad z > 0 \\\\
-1 &\\quad if \\quad z < 0 \\\\
0 &\\quad if \\quad z = 0
"""
def __init__(self):
BaseActivation.__init__(self, 'abs', False)
class SquareActivation(BaseActivation):
"""
Square Activation.
.. math::
f(z) = z^2.
"""
def __init__(self):
BaseActivation.__init__(self, 'square', False)
class ExpActivation(BaseActivation):
"""
Exponential Activation.
.. math::
f(z) = e^z.
"""
def __init__(self):
BaseActivation.__init__(self, 'exponential', False)
class LogActivation(BaseActivation):
"""
Logarithm Activation.
.. math::
f(z) = log(z)
"""
def __init__(self):
BaseActivation.__init__(self, 'log', False)
class SqrtActivation(BaseActivation):
"""
Square Root Activation.
.. math::
f(z) = sqrt(z)
"""
def __init__(self):
BaseActivation.__init__(self, 'sqrt', False)
class ReciprocalActivation(BaseActivation):
"""
Reciprocal Activation.
.. math::
f(z) = 1/z
"""
def __init__(self):
BaseActivation.__init__(self, 'reciprocal', False)
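# Illustrative note, not part of the original module: these classes are passed as
# the `act` argument of the layer helpers, e.g. (assuming fc_layer from
# paddle.trainer_config_helpers.layers):
#   hidden = fc_layer(input=prev_layer, size=128, act=ReluActivation())
#   output = fc_layer(input=hidden, size=10, act=SoftmaxActivation())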
| lispc/Paddle | python/paddle/trainer_config_helpers/activations.py | Python | apache-2.0 | 5,336 |
'''
Copyright 2012 Will Snook (http://willsnook.com)
MIT License
Generate a NEC2 card stack file for a 2m folded dipole
'''
from nec2utils import *
# =======================================================================================================
# Plan for a 2m folded dipole
# =======================================================================================================
'''
Notes:
'+' marks a boundary between wire elements
'X' marks the feedpoint
Material is 1/8" diameter bare copper wire
Total length of wire is 1 wavelength of center frequency
Wavelength = (300 * velocity factor of 1/8" bare copper) / (design frequency)
A = (1/2 wavelength) - (pi * rb)
a1 A a2
,-+---------------------------------------------------------------------------+-,
/ \
D |rd d b rb| B
\ /
`-+-------------------------------------X-------------------------------------+-'
c2 C c1
'''
targetMHz = 146.310
correctionFactor = 0.932 # Trial and error correction constant to account for velocity factor, etc.
wavelength = m((300.0 * correctionFactor) / targetMHz)
radiusB = inch(0.5)
radiusD = radiusB
A = (0.5 * wavelength) - (math.pi*radiusB)
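# Worked numbers (approximate, assuming m() and inch() both convert to metres):
# wavelength = 300 * 0.932 / 146.310 ~= 1.911 m, so each straight section is
# A = 1.911 / 2 - pi * 0.0127 ~= 0.916 m (about 36.0 in).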
C = A
Y0 = inch(5.0 + (2.0/8.0))
Z0 = inch(36.0)
comments = 'CM -------------------------------------------------------------------\n'
comments += 'CM NEC2 model for simulating a folded dipole built from copper wire.\n'
comments += 'CM Geometry is tuned for min SWR at {:.3f} MHz\n'.format(targetMHz)
comments += 'CM \n'
comments += 'CM Wire length before bends = {: >6.3f} in\n'.format(mToIn(wavelength))
comments += 'CM Radius of bends = {: >6.3f} in\n'.format(mToIn(radiusB))
comments += 'CM -------------------------------------------------------------------\n'
comments += 'CE'
a1 = Point(A/2.0, Y0, Z0)
a2 = Point(-a1.x, Y0, Z0)
b = Point( a2.x, Y0, Z0-radiusB)
c1 = Point( a2.x, Y0, Z0-(2.0*radiusB))
c2 = Point( a1.x, Y0, c1.z)
d = Point( a1.x, Y0, b.z)
wireRadius = inch(1.0/16.0) # radius for a 1/8" wire
segs = 51
arcSegs = 15
arcStart = deg(90)
arcEnd = deg(270)
m = Model(wireRadius)
m.addWire(segs, a1, a2)
m.addArc(arcSegs, radiusB, arcStart, arcEnd, rotate=Rotation(deg(0),deg(0),deg(0)), translate=b)
m.addWire(segs, c1, c2).feedAtMiddle()
m.addArc(arcSegs, radiusD, arcStart, arcEnd, rotate=Rotation(deg(0),deg(0),deg(180)), translate=d)
steps = (148.0 - 144.0) / 0.1
cardStack = m.getText(start=144.000, stepSize=0.1, stepCount=steps)
# =======================================================================================================
# Write the file
# =======================================================================================================
fileName = '2m-folded-dipole.nec'
writeCardsToFile(fileName, comments, cardStack)
copyCardFileToConsole(fileName)
| ckuethe/nec2-toys | 2m-folded-dipole/2m-folded-dipole.py | Python | mit | 3,244 |
# manufac - a commandline tool for step-by-step instructions
# Copyright (C) 2014 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
import hashlib
from os.path import join, exists
from os import lstat, utime, makedirs
from codecs import open
from shutil import rmtree
class FileCache:
"""
Context manager to allow caching
"""
def __init__(self,path):
self.path = path
self.dependencies = None
def __enter__(self):
#load cached dependency data
dep_path = join(self.path,'.deps')
if not exists(dep_path):
self.dependencies = {}
else:
with open(dep_path,'r','utf8') as fid:
self.dependencies = json.loads(fid.read())
return self
def __exit__(self,exc_type,exc_val,exc_tb):
#write dependency data
with open(join(self.path,'.deps'),'w','utf8') as fid:
fid.write(json.dumps(self.dependencies))
self.dependencies = None
return False
def clear(self):
"""
Clear the cache and all files in it
"""
rmtree(self.path)
makedirs(self.path)
self.dependencies = {}
def process(self,callback,**kwargs):
"""
        Calls the callable ``callback`` to create a file and returns its path.
        The callback must return a list of file paths the created file depends
        on; if any of them is modified, the callback is executed again.
        Alternatively it may return None to force a retry on the next run.
        Additional keyword arguments can be used to pass information to the
        callback; the id of the calling importer and an extension for the
        resulting file are required:
callback(target_filename,importer='imp_id',extension='.png',**kwargs)
"""
assert 'importer' in kwargs
assert 'extension' in kwargs
#calculate target file id
m = hashlib.sha512()
for k in sorted(kwargs.keys()):
m.update(k)
m.update(str(kwargs[k]))
target_id = m.hexdigest()
        # check if update is necessary
update = False
target_path = join(self.path,target_id + kwargs["extension"])
if not exists(target_path):
# print "Target path for %s does not exist" % target_id
update = True
elif not target_id in self.dependencies:
# print "Dependencies for %s do not exist" % target_id
update = True
elif self.dependencies[target_id] is None:
#The callback requested to try again next time
update = True
else:
target_time = lstat(target_path).st_mtime
for dep in self.dependencies[target_id]:
if not exists(dep):
# print "Dependency %s for %s does not exist" % (dep,target_id)
update = True
break
elif lstat(dep).st_mtime > target_time:
# print "Dependency %s for %s is outdated" % (dep,target_id)
# print target_time, lstat(dep).st_mtime
update = True
break
if update:
# print "Updating %s" % target_id
self.dependencies[target_id] = callback(target_path,**kwargs)
utime(target_path,None)
return target_path, self.dependencies[target_id]
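# --- Illustrative sketch, not part of the original module ---
# Minimal callback obeying the contract documented in FileCache.process(): it
# writes the target file and returns the list of source files it depends on.
# The 'source' keyword argument and the paths in the commented usage are
# placeholders.
def _example_render(target_path, importer=None, extension=None, source=None):
    with open(target_path, 'w', 'utf8') as out:
        with open(source, 'r', 'utf8') as inp:
            out.write(inp.read().upper())
    return [source]
# Usage sketch (the cache directory must already exist):
#   with FileCache('cache') as cache:
#       path, deps = cache.process(_example_render, importer='demo',
#                                  extension='.txt', source='input.txt')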
| jreinhardt/manufac | src/manufac/utils.py | Python | gpl-2.0 | 4,121 |
# -*- coding: utf-8 -*-
# Gitless - a version control system built on top of Git
# Licensed under MIT
"""gl resolve - Mark a file with conflicts as resolved."""
from . import file_cmd
parser = file_cmd.parser('mark files with conflicts as resolved', 'resolve', ['rs'])
| sdg-mit/gitless | gitless/cli/gl_resolve.py | Python | mit | 274 |
import math
import sys
EPSILON = sys.float_info.epsilon
def float_equal(a, b, epsilon=EPSILON):
""" Compares to floats with a given epsilon. Normally you should use the
epsilon constant EPSILON in this module (default value).
Test:
>>> float_equal(0, 0)
True
>>> float_equal(0.0000, 0.0000)
True
>>> float_equal(1, 0)
False
>>> float_equal(0.0, 0.00001)
False
>>> float_equal(4.00001, 4.00001)
True
>>> float_equal(125352.00001, 125352.00001)
True
"""
# Shortcut, handles infinities.
if a == b:
return True
else:
diff = math.fabs(a - b)
# One or both are zero.
if a * b == 0:
# Relative error is not meaningful here.
return diff < (epsilon * epsilon)
# Use relative error.
else:
abs_a = math.fabs(a)
abs_b = math.fabs(b)
return diff / (abs_a + abs_b) < epsilon
def string_is_float(string):
""" Checks if a string is a float.
Test:
>>> string_is_float('1')
True
>>> string_is_float('1427')
True
>>> string_is_float('-1')
True
>>> string_is_float('-1427')
True
>>> string_is_float('1.0')
True
>>> string_is_float('1337.536')
True
>>> string_is_float('-153.0563')
True
>>> string_is_float('abc')
False
>>> string_is_float('1a')
False
>>> string_is_float('1,31434')
False
>>> string_is_float('1.341a')
False
>>> string_is_float('1314.142.')
False
"""
try:
float(string)
return True
except ValueError:
return False
def string_is_int(string):
""" Checks if a string is a integer.
Test:
>>> string_is_int('1')
True
>>> string_is_int('1427')
True
>>> string_is_int('-1')
True
>>> string_is_int('-1427')
True
>>> string_is_int('1.0')
False
>>> string_is_int('-1.0')
False
>>> string_is_int('abc')
False
>>> string_is_int('1a')
False
"""
try:
int(string)
return True
except ValueError:
return False
if __name__ == '__main__':
print('Executing doctest.')
import doctest
doctest.testmod() | tea2code/gamemath | gamemath/comparison.py | Python | mit | 2,241 |
# -*- coding: utf-8 -*-
"""
Sop.libs.baidu
~~~~~~~~~~~~~~
util tool.
:copyright: (c) 2017 by 陶先森.
:license: MIT, see LICENSE for more details.
"""
from .base import ServiceBase
from utils.tool import logger
from bs4 import BeautifulSoup
from urllib import urlencode
import requests
class BaiduIncludedCheckUtil(ServiceBase):
""" 百度收录查询 """
QueueKey = "Sop:BaiduIncludedCheck_sq"
def _put(self, value):
""" 向队列写数据 """
return self.redis.rpush(self.QueueKey, value)
def _get(self, value):
""" 查询value是否在队列中 """
#_queue_length = self.redis.llen(self.QueueKey)
_queue_data = self.redis.lrange(self.QueueKey, 0, -1)
return True if value in _queue_data else False
def check(self, url):
""" 百度收录查询入口 """
if self._get(url):
return True
else:
            # Set a UA to mimic a real browser; several UAs could be rotated to improve the success rate
headers = {'User-Agent': 'Mozilla/4.0+(compatible;+MSIE+8.0;+Windows+NT+5.1;+Trident/4.0;+GTB7.1;+.NET+CLR+2.0.50727)'}
            # Build the Baidu search URL. Since we only check inclusion, only the first result is requested; rn controls the number of results, but more results are slower because every result URL has to be resolved.
b_url = 'http://www.baidu.com/s?{}'.format(urlencode(dict(wd="{}".format(url), rn=1)))
logger.debug("search url is: {}".format(b_url))
            # Initialise BeautifulSoup
soup = BeautifulSoup(requests.get(b_url, headers=headers, timeout=self.timeout).content, "html.parser")
            # Result URLs are identified by class="t"
b_links = [tag.a['href'] for tag in soup.find_all('h3', {'class': 't'})]
logger.debug("search result: {}".format(b_links))
            # Resolve the real URL behind each search result with requests instead of using the cached snapshot URL
for link in b_links:
try:
r = requests.get(link, headers=headers, timeout=self.timeout)
except Exception as e:
pass
else:
                        # If the URL under test matches a real result URL it is indexed; if the loop finishes without a match, return False
curl = url.split("://")[-1] if "://" in url else url
if curl in r.url.split("://")[-1]:
self._put(url)
return True
return False | staugur/Sop | src/libs/baidu.py | Python | mit | 2,667 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicationController(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1ReplicationController - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ReplicationControllerSpec',
'status': 'V1ReplicationControllerStatus'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._spec = spec
self._status = status
@property
def api_version(self):
"""
Gets the api_version of this V1ReplicationController.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1ReplicationController.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1ReplicationController.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1ReplicationController.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1ReplicationController.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1ReplicationController.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1ReplicationController.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1ReplicationController.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1ReplicationController.
If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:return: The metadata of this V1ReplicationController.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1ReplicationController.
If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1ReplicationController.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1ReplicationController.
Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:return: The spec of this V1ReplicationController.
:rtype: V1ReplicationControllerSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1ReplicationController.
Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:param spec: The spec of this V1ReplicationController.
:type: V1ReplicationControllerSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1ReplicationController.
Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:return: The status of this V1ReplicationController.
:rtype: V1ReplicationControllerStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1ReplicationController.
Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
:param status: The status of this V1ReplicationController.
:type: V1ReplicationControllerStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
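# --- Hedged usage sketch (illustrative only; not part of the generated client). ---
# It assumes the generated constructor accepts no arguments and only exercises the
# plain attribute setters plus the to_dict()/to_str() helpers defined above.
if __name__ == "__main__":
    rc = V1ReplicationController()
    rc.api_version = "v1"
    rc.kind = "ReplicationController"
    print(rc.to_str())  # pretty-printed dict produced by to_dict()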
| skuda/client-python | kubernetes/client/models/v1_replication_controller.py | Python | apache-2.0 | 7,930 |
from argparse import ArgumentParser
from gettext import gettext as _
from hashlib import sha1
from os import environ, mkdir
from shutil import rmtree
from subprocess import Popen
from sys import argv
from django.core.management import execute_from_command_line
from .apps.settings.config import create_default_config
from . import VERSION_STRING
def build_parser():
parser = ArgumentParser(prog="teamvault")
parser.add_argument(
"--version",
action='version',
version=VERSION_STRING,
)
subparsers = parser.add_subparsers(
title=_("subcommands"),
help=_("use 'teamvault <subcommand> --help' for more info"),
)
# teamvault plumbing
parser_plumbing = subparsers.add_parser("plumbing")
parser_plumbing.add_argument('plumbing_command', nargs='+')
parser_plumbing.set_defaults(func=plumbing)
# teamvault run
parser_run = subparsers.add_parser("run")
parser_run.add_argument('--bind', nargs='?', help='define bind, default is 127.0.0.1:8000')
parser_run.set_defaults(func=run)
# teamvault setup
parser_setup = subparsers.add_parser("setup")
parser_setup.set_defaults(func=setup)
# teamvault upgrade
parser_upgrade = subparsers.add_parser("upgrade")
parser_upgrade.set_defaults(func=upgrade)
return parser
def main(*args):
"""
Entry point for the 'teamvault' command line utility.
args: used for integration tests
"""
if not args:
args = argv[1:]
parser = build_parser()
pargs = parser.parse_args(args)
if not hasattr(pargs, 'func'):
parser.print_help()
exit(2)
pargs.func(pargs)
def plumbing(pargs):
environ['DJANGO_SETTINGS_MODULE'] = 'teamvault.settings'
environ.setdefault("TEAMVAULT_CONFIG_FILE", "/etc/teamvault.cfg")
execute_from_command_line([""] + pargs.plumbing_command[0].split(" "))
def run(pargs):
cmd = "gunicorn --preload teamvault.wsgi:application"
if pargs.bind:
cmd += ' -b ' + pargs.bind
gunicorn = Popen(
cmd,
shell=True,
)
gunicorn.communicate()
def setup(pargs):
environ.setdefault("TEAMVAULT_CONFIG_FILE", "/etc/teamvault.cfg")
create_default_config(environ['TEAMVAULT_CONFIG_FILE'])
def upgrade(pargs):
environ['DJANGO_SETTINGS_MODULE'] = 'teamvault.settings'
environ.setdefault("TEAMVAULT_CONFIG_FILE", "/etc/teamvault.cfg")
print("\n### Running migrations...\n")
execute_from_command_line(["", "migrate", "--noinput", "-v", "3", "--traceback"])
from django.conf import settings
from .apps.settings.models import Setting
if Setting.get("fernet_key_hash", default=None) is None:
print("\n### Storing fernet_key hash in database...\n")
key_hash = sha1(settings.TEAMVAULT_SECRET_KEY.encode('utf-8')).hexdigest()
Setting.set("fernet_key_hash", key_hash)
print("\n### Gathering static files...\n")
try:
rmtree(settings.STATIC_ROOT)
except FileNotFoundError:
pass
mkdir(settings.STATIC_ROOT)
execute_from_command_line(["", "collectstatic", "--noinput"])
print("\n### Updating search index...\n")
execute_from_command_line(["", "update_search_index"])
| trehn/teamvault | teamvault/cli.py | Python | gpl-3.0 | 3,230 |
# -*- coding: utf-8 -*-
#
# mididings
#
# Copyright (C) 2008-2014 Dominic Sacré <dominic.sacre@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
from tests.helpers import *
from mididings import *
class EngineTestCase(MididingsTestCase):
@data_offsets
def testSanitize(self, off):
def foo(ev):
ev.port = off(42)
def bar(ev):
self.fail()
p = Process(foo) >> Sanitize() >> Process(bar)
self.run_patch(p, self.make_event(port=off(42)))
p = Velocity(+666) >> Sanitize()
r = self.run_patch(p, self.make_event(NOTEON, velocity=42))
self.assertEqual(len(r), 1)
self.assertEqual(r[0].data2, 127)
@data_offsets
def testSceneSwitch(self, off):
config(silent=True)
p = {
off(0): Split({
PROGRAM: SceneSwitch(),
~PROGRAM: Channel(off(7)),
}),
off(1): Channel(off(13)),
}
events = (
self.make_event(NOTEON, off(0), off(0), 69, 123),
self.make_event(PROGRAM, off(0), off(0), 0, 1), # no data offset!
self.make_event(NOTEON, off(0), off(0), 23, 42),
self.make_event(NOTEOFF, off(0), off(0), 69, 0),
)
results = [
self.make_event(NOTEON, off(0), off(7), 69, 123),
self.make_event(NOTEON, off(0), off(13), 23, 42),
self.make_event(NOTEOFF, off(0), off(7), 69, 0),
]
self.check_scenes(p, {
events: results,
})
| dsacre/mididings | tests/units/test_engine.py | Python | gpl-2.0 | 1,779 |
"""demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from soft_drf.routing import urls as api_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(api_urls, namespace="api")),
]
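# Hedged illustration of the "Including another URLconf" recipe from the module
# docstring; `blog` is a hypothetical app that does not exist in this demo project:
#
#     from django.conf.urls import include, url
#     urlpatterns += [url(r'^blog/', include('blog.urls'))]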
| angellagunas/drf-scaffolding | demo/demo/urls.py | Python | gpl-3.0 | 872 |
#!/usr/bin/python
import bluetooth
from subprocess import Popen, PIPE
import sys
class BT(object):
def __init__(self, receiveSize=1024):
self.btSocket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self._ReceiveSize = receiveSize
def __exit__(self):
self.Disconnect()
def Connect(self, mac, port= 3333):
self.btSocket.connect((mac, port))
def Disconnect(self):
try:
self.btSocket.close()
except Exception:
pass
def Discover(self):
btDevices = bluetooth.discover_devices(lookup_names = True)
if (len(btDevices) > 0):
return btDevices
else:
raise Exception('no BT device!')
def DumpDevices(self, btDeviceList):
for mac, name in btDeviceList:
print("BT device name: {0}, MAC: {1}".format(name, mac))
def BindListen(self, mac, port=3333, backlog=1):
self.btSocket.bind((mac, port))
self.btSocket.listen(backlog)
def Accept(self):
client, clientInfo = self.btSocket.accept()
return client, clientInfo
def Send(self, data):
self.btSocket.send(data)
def Receive(self):
return self.btSocket.recv(self._ReceiveSize)
def GetReceiveSize(self):
return self._ReceiveSize
def StartBTClient():
cli = BT()
print('BT Discovery...')
btDeviceList = cli.Discover()
cli.DumpDevices(btDeviceList)
mac = btDeviceList[0][0]
name = btDeviceList[0][1]
print('Connecting to first BT device found: {0}, MAC: {1}'.format(name, mac))
cli.Connect(mac)
print('Connected... Enter data or \'exit\' to terminate the connection.')
while True:
data = raw_input()
if (data == 'exit'):
break
try:
cli.Send(data)
except Exception as e:
print(e.__str__())
break
cli.Disconnect()
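# GetFirstMAC() below scrapes the address of the first adapter from `hcitool dev`,
# whose typical output looks like this (MAC shown is illustrative):
#
#   Devices:
#           hci0    00:11:22:33:44:55
#
# i.e. a "Devices:" header followed by tab-separated "<iface>\t<MAC>" entries.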
def GetFirstMAC():
proc = Popen(['hcitool', 'dev'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = proc.communicate()
if (proc.returncode == 0):
lines = output.split('\r')
for line in lines:
if ('hci0' in line):
temp = line.split('\t')
temp = temp[2].strip('\n')
return temp
raise Exception('MAC not found')
else:
        raise Exception('Command: hcitool dev returned with error: {0}'.format(error))
def StartBTServer():
srv = BT()
mac = GetFirstMAC()
srv.BindListen(mac)
print('Listening for connections on: {0}'.format(mac))
while True:
client, clientInfo = srv.Accept()
print('Connected to: {0}, port: {1}'.format(clientInfo[0], clientInfo[1]))
try:
while True:
data = client.recv(srv.GetReceiveSize())
if (data is not None):
print(data)
client.send(data)
except:
print("Closing client socket")
client.close()
srv.Disconnect()
if __name__ == '__main__':
cmd = sys.argv[1]
if (cmd == 'server'):
StartBTServer()
elif (cmd == 'client'):
StartBTClient()
else:
print("Bluetooth RFCOMM client/server demo")
print("Copyright 2014 Nwazet, LLC.")
print("Please specify 'client' or 'server'")
print("This demo assumes a single Bluetooth interface per machine.")
| fabienroyer/Bluetooth | bt.py | Python | lgpl-3.0 | 3,465 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import note
| addition-it-solutions/project-all | addons/note/__init__.py | Python | agpl-3.0 | 991 |
#!/usr/bin/python
import sys
import os
import subprocess
import threading
import urllib2
from xml.dom import minidom
import time
import logging
from logging.handlers import RotatingFileHandler
import signal
import cPickle
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
TVHPORT = ':9981/status.xml'
MKV_EXT = ".mkv"
TS_EXT = ".ts"
#VIDEOEXT = ".mkv|.ts"
VIDEOEXT2 = [MKV_EXT, TS_EXT] #VIDEOEXT.decode('utf-8').split('|')
EDL_EXT = ".edl"
LOG_EXT = ".log"
LOGO_EXT = ".logo.txt"
TXT_EXT = ".txt"
COMSKIPEXT2 = [EDL_EXT,LOG_EXT,LOGO_EXT,TXT_EXT]
DBFILE = "/hts_skipper.db"
DEBUGFILE = "./test2.xml"
DEBUG = False
#Status
IDLE = 0
QUEUED = 1
SKIPPING = 2
FINISHED = 3
RECORDING = 99
############################
class Recording:
Start = 0 # Real start unix time
Stop = 0 # Real stop unix time
Title = ''
Status = 0
############################
class DataBaseItem:
FileName = None
Recording = None
PID = 0
Skipper = None
Status = IDLE
############################
class DiskDataBaseItem:
FileName = None
Recording = None
PID = 0
Status = IDLE
############################
class logger(threading.Thread):
def __init__(self, Settings):
threading.Thread.__init__(self)
self.daemon = False
self.fdRead, self.fdWrite = os.pipe()
self.pipeReader = os.fdopen(self.fdRead)
self.logger = None
self.InitLogger(Settings)
self.daemon = True
self.start()
# wrap over original logging.logger attributes
def __getattr__(self,attr):
orig_attr = self.logger.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
if result == self.logger:
return self
return result
return hooked
else:
return orig_attr
def InitLogger(self,Settings):
self.logger = logging.getLogger("hts_skipper")
if (DEBUG):
handler=logging.handlers.RotatingFileHandler("./hts_skipper.log", maxBytes=200000, backupCount=3)
else:
handler=logging.handlers.RotatingFileHandler(Settings.GetSetting("logpath"), maxBytes=200000, backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)s:%(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.DEBUG)
def fileno(self):
return self.fdWrite
def run(self):
for line in iter(self.pipeReader.readline, ''):
self.logger.info(line.strip('\n'))
self.pipeReader.close()
def close(self):
os.close(self.fdWrite)
############################
class Settings(object):
def __init__(self):
self.settings = []
self.defaults = [[u'server', u'http://localhost'], [u'userpass', u'xbmc:xbmc'], [u'maxattempts', u'10'], [u'sleepbetweenattempts', u'5'], [u'recordingpath', u'/kodi/Recorded TV'], [u'logpath', u'/var/log/comskip'], [u'comskiplocation', u'/usr/local/bin/comskip'],[u'inilocation', u''], [u'simultaneousskippers', u'1'], [u'updateinterval', u'20'], [u'logskipperdata', u'True'], [u'logskippersettings', u'False'], [u'delete_edl', u'True'], [u'delete_log', u'True'], [u'delete_logo', u'True'], [u'delete_txt', u'False'], [u'storedb', u'True'], [u'dblocation', u'/etc/comskip']]
self.GetSettingsHtsSkipperXml()
def GetSettingsHtsSkipperXml(self):
path = "./hts_skipper.xml"
# find file and get settings
if not os.path.isfile(path):
path = "~/hts_skipper.xml"
if not os.path.isfile(path):
path = "/etc/comskip/hts_skipper.xml"
if not os.path.isfile(path):
print "Settingsfile does not exist: %s" % path
try:
__xml = minidom.parse(path)
nodes = __xml.getElementsByTagName("settings")
if nodes:
for node in nodes:
asettings=node.getElementsByTagName('setting')
for a in asettings:
self.settings.append([a.getAttribute("id"),a.getAttribute("value")])
del asettings
del nodes
__xml.unlink()
except Exception, e:
print "Error reading from settingsfile: %s" % path
return
def GetSetting(self, search):
for a in self.settings:
if (a[0].lower() == search.lower()):
return a[1]
for a in self.defaults: # if not found, search in defaults
if (a[0].lower() == search.lower()):
return a[1]
return None
def GetUserPassword(self, userpass):
return userpass.split(':', 1);
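# Hedged example of the hts_skipper.xml layout that GetSettingsHtsSkipperXml()
# expects -- a <settings> root with <setting id="..." value="..."/> children.
# The ids come from self.defaults above; the values shown are illustrative only:
#
#   <settings>
#     <setting id="server" value="http://localhost"/>
#     <setting id="userpass" value="xbmc:xbmc"/>
#     <setting id="recordingpath" value="/kodi/Recorded TV"/>
#     <setting id="comskiplocation" value="/usr/local/bin/comskip"/>
#     <setting id="simultaneousskippers" value="1"/>
#   </settings>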
###########################
class ComSkipper(object):
def __init__(self, Settings, logger):
self.__settings = Settings
self.__logger = logger
self.p = None
def Start(self, filename, endtime):
if (self.__settings.GetSetting("logskippersettings").lower()=="true"):
            _stdout = self.__logger
else:
_stdout=DEVNULL
if (self.__settings.GetSetting("logskipperdata").lower()=="true"):
            _stderr = self.__logger
else:
_stderr=DEVNULL
if self.__settings.GetSetting("inilocation") == '':
process = [self.__settings.GetSetting("comskiplocation"),filename]
else:
process = [self.__settings.GetSetting("comskiplocation"),"--ini=%s"%(self.__settings.GetSetting("inilocation")),filename]
self.p = subprocess.Popen(process, stdout=_stdout, stderr=_stderr)
def Busy(self):
return (self.p.poll() <= -15)
def GetPID(self):
return (self.p.pid)
def Kill(self):
if (self.Busy()):
self.p.kill()
###########################
class HTS(object):
def __init__(self, Settings, logger, Test):
self.__settings = Settings
self.__logger = logger
self.__conn_established = None
#self.__xml = None
self.__maxattempts = 10
if not Test:
self.establishConn()
def establishConn(self):
if (DEBUG):
self.__conn_established = True
self.__logger.info('Connection to %s established' % 'DEBUG')
else:
self.__conn_established = False
self.__maxattempts = int(Settings.GetSetting('maxattempts'))
while self.__maxattempts > 0:
try:
pwd_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
upass=self.__settings.GetUserPassword(self.__settings.GetSetting('userpass'))
pwd_mgr.add_password(None, self.__settings.GetSetting('server') + TVHPORT, upass[0], upass[1])
handle = urllib2.HTTPDigestAuthHandler(pwd_mgr)
opener = urllib2.build_opener(handle)
opener.open(self.__settings.GetSetting('server') + TVHPORT)
urllib2.install_opener(opener)
self.__conn_established = True
self.__logger.info('Connection to %s established' % (self.__settings.GetSetting('server')))
break
except Exception, e:
print('%s' % (e))
self.__maxattempts -= 1
self.__logger.warning('Remaining connection attempts to %s: %s' % (self.__settings.GetSetting('server'), self.__maxattempts))
time.sleep(5)
continue
if not self.__conn_established:
self.__logger.error("Error establishing connection")
time.sleep(6)
def readXMLbyTag(self, xmlnode):
nodedata = []
while self.__conn_established:
try:
if (DEBUG):
__f = open(DEBUGFILE,"r") #, timeout=mytimeout
else:
__f = urllib2.urlopen(self.__settings.GetSetting('server') + TVHPORT) #, timeout=mytimeout
__xmlfile = __f.read()
__xml = minidom.parseString(__xmlfile)
__f.close()
nodes = __xml.getElementsByTagName(xmlnode)
if nodes:
for node in nodes:
nodedata.append(node.childNodes[0].data)
del nodes
break
except Exception, e:
self.__logger.error("Could not read from %s" % (self.__settings.GetSetting('server')))
time.sleep(5)
self.establishConn()
return nodedata
def getTestRecording(self,FileName):
Recordings = []
if os.path.isfile(FileName):
Rec = Recording()
Rec.Start=int(time.time()) # just now
Rec.Stop=Rec.Start+2*3600 # make it 2 hours
Rec.Title=os.path.splitext(os.path.basename(FileName))[0]
Rec.Status=RECORDING
Recordings.append(Rec)
return Recordings
def readXMLRecordings(self):
Recordings = []
while self.__conn_established:
try:
if (DEBUG):
__f = open(DEBUGFILE,"r") #, timeout=mytimeout
else:
__f = urllib2.urlopen(self.__settings.GetSetting('server') + TVHPORT) #, timeout=mytimeout
__xmlfile = __f.read()
__xml = minidom.parseString(__xmlfile)
__f.close()
nodes = __xml.getElementsByTagName('recording')
if nodes:
for node in nodes:
Rec = Recording()
start=node.getElementsByTagName('start')[0]
unixtime=start.getElementsByTagName('unixtime')[0]
extra=start.getElementsByTagName('extra_start')[0]
Rec.Start=int(unixtime.firstChild.data)-(int(extra.firstChild.data)*60)
stop=node.getElementsByTagName('stop')[0]
unixtime=stop.getElementsByTagName('unixtime')[0]
extra=stop.getElementsByTagName('extra_stop')[0]
Rec.Stop=int(unixtime.firstChild.data)+(int(extra.firstChild.data)*60)
Rec.Title=node.getElementsByTagName('title')[0].firstChild.data
Rec.Status=RECORDING if (node.getElementsByTagName('status')[0].firstChild.data=="Recording") else IDLE
Recordings.append(Rec)
del nodes
__xml.unlink()
break
except Exception, e:
self.__logger.error("Could not read from %s" % (self.__settings.GetSetting('server')))
time.sleep(5)
self.establishConn()
return Recordings
###########################
class Database(object):
def __init__(self, Settings, logger):
self.__settings = Settings
self.__logger = logger
self.DataBase = []
self.InitDBFromDisk()
def LoadDBFromDisk(self):
if (self.__settings.GetSetting('storedb').lower()=="true"):
if (DEBUG):
path = "."+DBFILE
else:
path = self.__settings.GetSetting('dblocation')+DBFILE
try:
if os.path.isfile(path):
if (os.path.getsize(path) > 0):
with open(path, "rb") as input:
self.DiskDB2DB(cPickle.load(input))
except Exception, e:
self.__logger.error("Error reading from dbfile: %s" % (path))
def SaveDBToDisk(self):
if (self.__settings.GetSetting('storedb').lower()=="true"):
if (DEBUG):
path1 = "."
else:
path1 = self.__settings.GetSetting('dblocation')
path = path1 + DBFILE
try:
if os.path.isdir(path1):
DiskDB = self.DB2DiskDB()
with open(path, "wb") as output:
cPickle.dump(DiskDB, output, cPickle.HIGHEST_PROTOCOL)
del DiskDB
except Exception, e:
self.__logger.error("Error writing to dbfile: %s" % (path))
def DB2DiskDB(self):
DiskDB = []
for dbitem in self.DataBase:
ddbitem=DiskDataBaseItem()
ddbitem.FileName = dbitem.FileName
ddbitem.Recording = dbitem.Recording
ddbitem.PID = dbitem.PID
ddbitem.Status = dbitem.Status
DiskDB.append(ddbitem)
return DiskDB
def DiskDB2DB(self, DiskDB):
del self.DataBase
self.DataBase = []
for ddbitem in DiskDB:
dbitem=DataBaseItem()
dbitem.FileName = ddbitem.FileName
dbitem.Recording = ddbitem.Recording
dbitem.PID = ddbitem.PID
dbitem.Status = ddbitem.Status
self.DataBase.append(dbitem)
def InitDBFromDisk(self):
self.LoadDBFromDisk()
#self.PrintDB()
# check for finished items and remove them
changeditems=0
CheckDB = True
while (CheckDB):
CheckDB = False
for dbitem in self.DataBase:
if (dbitem.Status == QUEUED):
#Check beyond endtime and no filename
if (dbitem.FileName == None):
dbitem.FileName = self.GetRecordingName(dbitem.Recording.Title)
elif not os.path.isfile(dbitem.FileName):
#File deleted
dbitem.FileName = None
if (dbitem.FileName == None):
curtime=int(time.time())
if (curtime > dbitem.Recording.Stop):
#Recording finished and still no filename, delete from database
changeditems+=1
self.__logger.info("DB: Init - %s queued, but no file found beyond finish time" % dbitem.Recording.Title)
self.__logger.info("DB: Init - %s recording probably failed, so removing it from db" % dbitem.Recording.Title)
if dbitem.Skipper != None:
del dbitem.Skipper
if dbitem.Recording != None:
del dbitem.Recording
dbitem.Skipper = None
dbitem.Recording = None
self.DataBase.remove(dbitem)
CheckDB = True
else:
self.__logger.info("DB: Init - %s queued, but no file to skip found (yet)" % dbitem.Recording.Title)
else:
self.__logger.info("DB: Init - %s queued, re-queue to restart" % dbitem.Recording.Title)
if (dbitem.Status == FINISHED):
changeditems+=1
self.__logger.info("DB: Init - %s finished, so removing it from db" % dbitem.Recording.Title)
if dbitem.Skipper != None:
del dbitem.Skipper
if dbitem.Recording != None:
del dbitem.Recording
dbitem.Skipper = None
dbitem.Recording = None
self.DataBase.remove(dbitem)
CheckDB = True
break;
if (dbitem.Status == SKIPPING):
changeditems+=1
self.__logger.info("DB: Init - %s was skipping during shutdown, queue to restart" % dbitem.Recording.Title)
dbitem.Status = QUEUED;
if (changeditems>0):
self.SaveDBToDisk()
#self.PrintDB()
def CleanupDeletedRecordings(self):
files = []
files = self.GetFiles(self.__settings.GetSetting('recordingpath'), "", files, self.IsComSkipFile)
prevnumber = 0
for csfile in files:
name, ext = os.path.splitext(csfile)
fexists = False
for ext in VIDEOEXT2:
destination = name + ext
if os.path.isfile(destination):
fexists = True
if not fexists:
self.__logger.info("DB: Cleanup - no video file found for %s, so removing this file" % csfile)
os.remove(csfile)
def Update(self, Recordings):
# Check database for new entry (s)
# Check database for finished entry (s) --> I Think we do not need this one, only add new items, don't do anything if finished
# Check comskipper finished and delete required files
# Check start of new comskipper
newitems = self.CheckForNewItems(Recordings)
finishedskippers = self.CheckComskipperFinished()
startskippers = self.CheckStartComskipper()
if (newitems + finishedskippers + startskippers > 0): #Database has changed, so save it
self.SaveDBToDisk()
for rec in Recordings:
del rec
del Recordings
return []
def CheckForNewItems(self, Recordings):
newitems = 0
for rec in Recordings:
newitem = True
for dbitem in self.DataBase:
if self.CompareRecording(dbitem.Recording, rec):
newitem = False
if newitem:
self.__logger.info("DB: Recording - %s started, added to db" % rec.Title)
self.AddItem(rec, QUEUED)
newitems += 1
return newitems
def CheckForFinishedItems(self, Recordings):
finisheditems = 0
for dbitem in self.DataBase:
finisheditem = True
for rec in Recordings:
if self.CompareRecording(dbitem.Recording, rec):
finisheditem = False
if finisheditem:
                self.__logger.info("DB: Recording - %s finished" % dbitem.Recording.Title)
finisheditems += 1
return finisheditems
def CheckComskipperFinished(self):
readyitems = 0
for dbitem in self.DataBase:
if (dbitem.Status == SKIPPING):
if (dbitem.Skipper == None):
self.__logger.error("DB: Lost Skipper information - %s, set to finished" % dbitem.Recording.Title)
dbitem.Status = FINISHED
readyitems += 1
elif not dbitem.Skipper.Busy():
self.__logger.info("DB: Skipping - %s finished" % dbitem.Recording.Title)
dbitem.Status = FINISHED
dbitem.PID=0
self.DeleteUnnecessaryFiles(dbitem)
if dbitem.Skipper != None:
del dbitem.Skipper
dbitem.Skipper = None
readyitems += 1
return readyitems
def DeleteUnnecessaryFiles(self, dbitem):
if (dbitem.FileName != None):
name, ext = os.path.splitext(dbitem.FileName)
try:
if (self.__settings.GetSetting('delete_edl').lower()=="true"):
destination = name + EDL_EXT
if os.path.isfile(destination):
os.remove(destination)
if (self.__settings.GetSetting('delete_log').lower()=="true"):
destination = name + LOG_EXT
if os.path.isfile(destination):
os.remove(destination)
if (self.__settings.GetSetting('delete_logo').lower()=="true"):
destination = name + LOGO_EXT
if os.path.isfile(destination):
os.remove(destination)
if (self.__settings.GetSetting('delete_txt').lower()=="true"):
destination = name + TXT_EXT
if os.path.isfile(destination):
os.remove(destination)
except IOError, e:
self.__logger.error("DB: IOError Removing file - %s" % destination)
return
def IsVideoFile(self, path, title):
head, tail = os.path.split(path)
title2=title.replace(' ','-')
if (title.lower() in tail.lower()) or (title2.lower() in tail.lower()):
name, ext = os.path.splitext(tail)
return ext.lower() in VIDEOEXT2
return False
def IsComSkipFile(self, path, title):
head, tail = os.path.split(path)
name, ext = os.path.splitext(tail)
return ext.lower() in COMSKIPEXT2
def GetFiles(self, folder, title, files, TestFunction):
if os.path.isdir(folder):
for item in os.listdir(folder):
itempath = os.path.join(folder, item)
if os.path.isfile(itempath):
if TestFunction(itempath, title):
files.append(itempath)
elif os.path.isdir(itempath):
files = self.GetFiles(itempath, title, files, TestFunction)
return files
def GetRecordingName(self, Title):
recordingname = None
files = []
files = self.GetFiles(self.__settings.GetSetting('recordingpath'), Title, files, self.IsVideoFile)
prevnumber = 0
for vfile in files:
name, ext = os.path.splitext(vfile)
k = name.rfind("-")
try:
number = int(name[k+1:])
except:
number = 0
if (number >= prevnumber):
recordingname = vfile
prevnumber = number
del files
return recordingname
def CheckStartComskipper(self):
startitems = 0
nskippers = 0
maxskippers = int(self.__settings.GetSetting('simultaneousskippers'))
for dbitem in self.DataBase:
if (dbitem.Status == SKIPPING):
nskippers += 1
elif (dbitem.Status == QUEUED):
if dbitem.FileName == None:
dbitem.FileName = self.GetRecordingName(dbitem.Recording.Title)
if (nskippers < maxskippers):
for dbitem in self.DataBase:
if (dbitem.Status == QUEUED):
dbitem.Skipper = ComSkipper(self.__settings, self.__logger)
if dbitem.FileName != None:
dbitem.Skipper.Start(dbitem.FileName, dbitem.Recording.Stop)
self.__logger.info("DB: Skipping - %s started" % dbitem.Recording.Title)
dbitem.Status = SKIPPING
dbitem.PID = dbitem.Skipper.GetPID()
nskippers += 1
startitems += 1
else:
curtime=int(time.time())
if (curtime > dbitem.Recording.Stop):
#Recording finished and still no filename, delete from database
startitems += 1
self.__logger.info("DB: Recording - %s started, but no file found beyond finish time" % dbitem.Recording.Title)
self.__logger.info("DB: Recording - %s recording probably failed, so removing it from db" % dbitem.Recording.Title)
if dbitem.Skipper != None:
del dbitem.Skipper
if dbitem.Recording != None:
del dbitem.Recording
dbitem.Skipper = None
dbitem.Recording = None
self.DataBase.remove(dbitem)
else:
self.__logger.info("DB: Recording - %s started, but no file to skip found (yet)" % dbitem.Recording.Title)
if (nskippers >= maxskippers):
break
return startitems
def CompareRecording(self, rec1, rec2):
return True if (rec1.Start == rec2.Start) and (rec1.Stop == rec2.Stop) and (rec1.Title == rec2.Title) else False
def AddItem(self, Recording, Status = IDLE):
Item = DataBaseItem()
Item.Recording = Recording
Item.Status = Status
self.DataBase.append(Item)
return len(self.DataBase)-1
def PrintDB(self):
item = 0
for dbitem in self.DataBase:
self.__logger.info("DB: Item %d" % item)
self.__logger.info("DB: Status %d" % dbitem.Status)
self.__logger.info("DB: PID %d" % dbitem.PID)
self.__logger.info("DB: Skipper %s" % dbitem.Skipper)
self.__logger.info("DB: Recording: Start %d" % dbitem.Recording.Start)
self.__logger.info("DB: Recording: Stop %d" % dbitem.Recording.Stop)
self.__logger.info("DB: Recording: Title %s" % dbitem.Recording.Title)
self.__logger.info("DB: Recording: Status %d" % dbitem.Recording.Status)
item += 1
#########################################################
# Main #
#########################################################
###
Running = True
Test = False
def sigterm_handler(signum, frame):
global Running
Running = False
### MAIN ###
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
#Read settings file, no logger location yet, refer to defaults if fails
Settings = Settings()
#init logger
logger=logger(Settings)
logger.info("Comskip HTS Started ...")
if len(sys.argv) > 1:
FileName = sys.argv[1]
logger.info("Test mode, no daemon on File:%s"%FileName)
Test = True
else:
FileName = None
#init HTS
TVH = HTS(Settings, logger, Test)
#init Database
DB = Database(Settings, logger)
#cleanup deleted recordings
DB.CleanupDeletedRecordings()
#Get recording in test mode
Recordings = []
if (FileName != None):
Recordings = TVH.getTestRecording(FileName)
looptime = 1
while (Running):
time.sleep(1)
if looptime <= 1:
# Check recordings
if (FileName == None):
Recordings = TVH.readXMLRecordings()
if (DEBUG):
for rec in Recordings:
logger.info("start:%d, stop:%d, title:%s, status:%d" % (rec.Start, rec.Stop, rec.Title, rec.Status))
Recordings = DB.Update(Recordings)
looptime = int(Settings.GetSetting('updateinterval'))
else:
looptime -= 1
del DB
del TVH
del Settings
logger.info("Comskip HTS Ready ...");
logger.close()
del logger
| Helly1206/comskipper | script/hts_skipper.py | Python | gpl-2.0 | 26,814 |
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.part_offer_data import PartOfferData
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class PartOffer(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, packaging_unit=None, quantity=None, min_order_quantity=None, unit_price=None, available_stock=None, packaging=None, currency=None, sku=None, updated=None):
"""
PartOffer - a model defined in Swagger
:param packaging_unit: The packaging_unit of this PartOffer.
:type packaging_unit: int
:param quantity: The quantity of this PartOffer.
:type quantity: int
:param min_order_quantity: The min_order_quantity of this PartOffer.
:type min_order_quantity: int
:param unit_price: The unit_price of this PartOffer.
:type unit_price: float
:param available_stock: The available_stock of this PartOffer.
:type available_stock: int
:param packaging: The packaging of this PartOffer.
:type packaging: str
:param currency: The currency of this PartOffer.
:type currency: str
:param sku: The sku of this PartOffer.
:type sku: str
:param updated: The updated of this PartOffer.
:type updated: str
"""
self.swagger_types = {
'packaging_unit': int,
'quantity': int,
'min_order_quantity': int,
'unit_price': float,
'available_stock': int,
'packaging': str,
'currency': str,
'sku': str,
'updated': str
}
self.attribute_map = {
'packaging_unit': 'packaging_unit',
'quantity': 'quantity',
'min_order_quantity': 'min_order_quantity',
'unit_price': 'unit_price',
'available_stock': 'available_stock',
'packaging': 'packaging',
'currency': 'currency',
'sku': 'sku',
'updated': 'updated'
}
self._packaging_unit = packaging_unit
self._quantity = quantity
self._min_order_quantity = min_order_quantity
self._unit_price = unit_price
self._available_stock = available_stock
self._packaging = packaging
self._currency = currency
self._sku = sku
self._updated = updated
@classmethod
def from_dict(cls, dikt):
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The PartOffer of this PartOffer.
:rtype: PartOffer
"""
return deserialize_model(dikt, cls)
@property
def packaging_unit(self):
"""
Gets the packaging_unit of this PartOffer.
:return: The packaging_unit of this PartOffer.
:rtype: int
"""
return self._packaging_unit
@packaging_unit.setter
def packaging_unit(self, packaging_unit):
"""
Sets the packaging_unit of this PartOffer.
:param packaging_unit: The packaging_unit of this PartOffer.
:type packaging_unit: int
"""
self._packaging_unit = packaging_unit
@property
def quantity(self):
"""
Gets the quantity of this PartOffer.
:return: The quantity of this PartOffer.
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""
Sets the quantity of this PartOffer.
:param quantity: The quantity of this PartOffer.
:type quantity: int
"""
self._quantity = quantity
@property
def min_order_quantity(self):
"""
Gets the min_order_quantity of this PartOffer.
:return: The min_order_quantity of this PartOffer.
:rtype: int
"""
return self._min_order_quantity
@min_order_quantity.setter
def min_order_quantity(self, min_order_quantity):
"""
Sets the min_order_quantity of this PartOffer.
:param min_order_quantity: The min_order_quantity of this PartOffer.
:type min_order_quantity: int
"""
self._min_order_quantity = min_order_quantity
@property
def unit_price(self):
"""
Gets the unit_price of this PartOffer.
:return: The unit_price of this PartOffer.
:rtype: float
"""
return self._unit_price
@unit_price.setter
def unit_price(self, unit_price):
"""
Sets the unit_price of this PartOffer.
:param unit_price: The unit_price of this PartOffer.
:type unit_price: float
"""
self._unit_price = unit_price
@property
def available_stock(self):
"""
Gets the available_stock of this PartOffer.
:return: The available_stock of this PartOffer.
:rtype: int
"""
return self._available_stock
@available_stock.setter
def available_stock(self, available_stock):
"""
Sets the available_stock of this PartOffer.
:param available_stock: The available_stock of this PartOffer.
:type available_stock: int
"""
self._available_stock = available_stock
@property
def packaging(self):
"""
Gets the packaging of this PartOffer.
:return: The packaging of this PartOffer.
:rtype: str
"""
return self._packaging
@packaging.setter
def packaging(self, packaging):
"""
Sets the packaging of this PartOffer.
:param packaging: The packaging of this PartOffer.
:type packaging: str
"""
self._packaging = packaging
@property
def currency(self):
"""
Gets the currency of this PartOffer.
:return: The currency of this PartOffer.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""
Sets the currency of this PartOffer.
:param currency: The currency of this PartOffer.
:type currency: str
"""
self._currency = currency
@property
def sku(self):
"""
Gets the sku of this PartOffer.
:return: The sku of this PartOffer.
:rtype: str
"""
return self._sku
@sku.setter
def sku(self, sku):
"""
Sets the sku of this PartOffer.
:param sku: The sku of this PartOffer.
:type sku: str
"""
self._sku = sku
@property
def updated(self):
"""
Gets the updated of this PartOffer.
:return: The updated of this PartOffer.
:rtype: str
"""
return self._updated
@updated.setter
def updated(self, updated):
"""
Sets the updated of this PartOffer.
:param updated: The updated of this PartOffer.
:type updated: str
"""
self._updated = updated
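# --- Hedged usage sketch (illustrative; not part of the generated server code). ---
# Keyword names follow the constructor signature above; the figures are made up,
# and to_dict() is assumed to come from the shared Model base class.
if __name__ == "__main__":
    offer = PartOffer(quantity=100, min_order_quantity=10, unit_price=0.42, currency="EUR")
    print(offer.to_dict())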
| turdusmerula/kipartman | kipartbase/swagger_server/models/part_offer.py | Python | gpl-3.0 | 7,192 |
#!/usr/bin/env python3
import os, sys, logging, urllib, time, string, json, argparse, collections, datetime, re, bz2, math
from concurrent.futures import ThreadPoolExecutor, wait
import lz4
pool = ThreadPoolExecutor(max_workers=16)
logging.basicConfig(level=logging.DEBUG)
sys.path.append(os.path.join(os.path.dirname(__file__), "lib", "python"))
from carta import (logger, POI)
from mongoengine import *
connect('carta')
zoomspacing = [round(0.0001*(1.6**n), 4) for n in range(21, 1, -1)]
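# zoomspacing[z] is the minimum lon/lat separation (in degrees) two POIs need in
# order to both be visible at zoom level z; it shrinks geometrically from roughly
# 1.9 degrees at z=0 down to about 0.0003 at z=19, so compute_occlusions() below
# pushes the lower-ranked of two nearby points to the first zoom level at which
# the pair is at least that far apart.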
def compute_occlusions(box):
SW, NE = box
points = list(POI.objects(at__geo_within_box=(SW, NE)))
print("Starting", SW, NE, len(points))
for i, p1 in enumerate(points):
for j, p2 in enumerate(points[i+1:]):
coords1, coords2 = p1.at['coordinates'], p2.at['coordinates']
dist = math.sqrt(abs(coords1[0]-coords2[0])**2 + abs(coords1[1]-coords2[1])**2)
occluded_point = p1 if p1.rank < p2.rank else p2
for zoom, spacing in enumerate(zoomspacing):
if dist < spacing:
continue
break
occluded_point.min_zoom = max(occluded_point.min_zoom, zoom)
p1.save()
print("Finished", SW, NE, len(points))
step = 2
boxes = []
for lat in range(-90, 90, step):
for lng in range(-180, 180, step):
boxes.append([(lng, lat), (lng+step, lat+step)])
for result in pool.map(compute_occlusions, boxes):
pass
# docs_by_rank = sorted(POI.objects(at__geo_within_center=(doc.at['coordinates'], spacing)),
# key=lambda point: point.rank or 0,
# reverse=True)
# for doc in POI.objects(at__geo_within_center=(doc.at['coordinates'], 1), min_zoom__gt=0).order_by('-rank'):
# for doc2 in POI.objects(at__geo_within_center=(doc.at['coordinates'], zoomspacing[doc.min_zoom]), min_zoom__lte=doc.min_zoom).order_by('-rank'):
| kislyuk/cartographer | postproc_db.py | Python | agpl-3.0 | 1,920 |
#!/usr/bin/env python
# Copyright 2012-2014 Keith Fancher
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import tempfile
import unittest
from list import TodoTxtList
class TestTodoTxtList(unittest.TestCase):
def test_init_from_text(self):
todo_text = "(A) Item one\n(Z) Item two\nx Item three\n\n \n"
test_list = TodoTxtList(None, todo_text)
self.assertEqual(3, test_list.num_items())
self.assertEqual('Item one', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Item two', test_list.items[1].text)
self.assertEqual('Z', test_list.items[1].priority)
self.assertFalse(test_list.items[1].is_completed)
self.assertEqual('Item three', test_list.items[2].text)
self.assertEqual(None, test_list.items[2].priority)
self.assertTrue(test_list.items[2].is_completed)
def test_init_from_file(self):
file_name = 'sample-todo.txt'
test_list = TodoTxtList(file_name)
self.assertEqual(8, test_list.num_items())
self.assertEqual('Do that really important thing', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Summon AppIndicator documentation from my ass', test_list.items[1].text)
self.assertEqual('D', test_list.items[1].priority)
self.assertFalse(test_list.items[1].is_completed)
self.assertEqual('This other important thing', test_list.items[2].text)
self.assertEqual('A', test_list.items[2].priority)
self.assertFalse(test_list.items[2].is_completed)
self.assertEqual('Walk the cat', test_list.items[3].text)
self.assertEqual('B', test_list.items[3].priority)
self.assertFalse(test_list.items[3].is_completed)
self.assertEqual('Something with no priority!', test_list.items[4].text)
self.assertEqual(None, test_list.items[4].priority)
self.assertFalse(test_list.items[4].is_completed)
self.assertEqual('Cook the dog', test_list.items[5].text)
self.assertEqual('C', test_list.items[5].priority)
self.assertFalse(test_list.items[5].is_completed)
self.assertEqual('Be annoyed at GTK3 docs', test_list.items[6].text)
self.assertEqual(None, test_list.items[6].priority)
self.assertTrue(test_list.items[6].is_completed)
self.assertEqual('Something I already did', test_list.items[7].text)
self.assertEqual(None, test_list.items[7].priority)
self.assertTrue(test_list.items[7].is_completed)
def test_reload_from_file(self):
test_list = TodoTxtList() # Start with an empty list
test_list.reload_from_file() # Should do nothing
test_list.todo_filename = 'sample-todo.txt'
test_list.reload_from_file()
self.assertEqual(8, test_list.num_items())
self.assertEqual('Do that really important thing', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Summon AppIndicator documentation from my ass', test_list.items[1].text)
self.assertEqual('D', test_list.items[1].priority)
self.assertFalse(test_list.items[1].is_completed)
self.assertEqual('This other important thing', test_list.items[2].text)
self.assertEqual('A', test_list.items[2].priority)
self.assertFalse(test_list.items[2].is_completed)
self.assertEqual('Walk the cat', test_list.items[3].text)
self.assertEqual('B', test_list.items[3].priority)
self.assertFalse(test_list.items[3].is_completed)
self.assertEqual('Something with no priority!', test_list.items[4].text)
self.assertEqual(None, test_list.items[4].priority)
self.assertFalse(test_list.items[4].is_completed)
self.assertEqual('Cook the dog', test_list.items[5].text)
self.assertEqual('C', test_list.items[5].priority)
self.assertFalse(test_list.items[5].is_completed)
self.assertEqual('Be annoyed at GTK3 docs', test_list.items[6].text)
self.assertEqual(None, test_list.items[6].priority)
self.assertTrue(test_list.items[6].is_completed)
self.assertEqual('Something I already did', test_list.items[7].text)
self.assertEqual(None, test_list.items[7].priority)
self.assertTrue(test_list.items[7].is_completed)
def test_has_items(self):
test_list = TodoTxtList()
self.assertFalse(test_list.has_items())
test_list = TodoTxtList(None, 'An item')
self.assertTrue(test_list.has_items())
def test_remove_item(self):
todo_text = "(A) Item one\n(Z) Item two\nx Item three\n\n \n"
test_list = TodoTxtList(None, todo_text)
self.assertEqual(3, test_list.num_items())
test_list.remove_item('Item two')
self.assertEqual(2, test_list.num_items())
self.assertEqual('Item one', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Item three', test_list.items[1].text)
self.assertEqual(None, test_list.items[1].priority)
self.assertTrue(test_list.items[1].is_completed)
def test_remove_completed_items(self):
todo_text = "(A) Item one\n(Z) Item two\nx Item three\n\n \n"
test_list = TodoTxtList(None, todo_text)
self.assertEqual(3, test_list.num_items())
test_list.remove_completed_items()
self.assertEqual(2, test_list.num_items())
self.assertEqual('Item one', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Item two', test_list.items[1].text)
self.assertEqual('Z', test_list.items[1].priority)
self.assertFalse(test_list.items[1].is_completed)
def test_mark_item_completed(self):
todo_text = "(A) Item one\n(Z) Item two\nx Item three\n\n \n"
test_list = TodoTxtList(None, todo_text)
test_list.mark_item_completed('Item two')
self.assertEqual('Item one', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Item two', test_list.items[1].text)
self.assertEqual('Z', test_list.items[1].priority)
self.assertTrue(test_list.items[1].is_completed)
self.assertEqual('Item three', test_list.items[2].text)
self.assertEqual(None, test_list.items[2].priority)
self.assertTrue(test_list.items[2].is_completed)
def test_mark_item_completed_with_full_text(self):
todo_text = "(A) Item one\n(Z) Item two\nx Item three\n\n \n"
test_list = TodoTxtList(None, todo_text)
test_list.mark_item_completed_with_full_text('(Z) Item two')
self.assertEqual('Item one', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('Item two', test_list.items[1].text)
self.assertEqual('Z', test_list.items[1].priority)
self.assertTrue(test_list.items[1].is_completed)
self.assertEqual('Item three', test_list.items[2].text)
self.assertEqual(None, test_list.items[2].priority)
self.assertTrue(test_list.items[2].is_completed)
def test_sort_list(self):
todo_text = "x (C) No biggie\n(Z) aaaaa\nNothing\n(B) hey hey\n(Z) bbbbb\n(A) aaaaa\nx Item three\n\nx (B) Done it\n"
test_list = TodoTxtList(None, todo_text)
test_list.sort_list()
self.assertEqual(8, test_list.num_items())
self.assertEqual('aaaaa', test_list.items[0].text)
self.assertEqual('A', test_list.items[0].priority)
self.assertFalse(test_list.items[0].is_completed)
self.assertEqual('hey hey', test_list.items[1].text)
self.assertEqual('B', test_list.items[1].priority)
self.assertFalse(test_list.items[1].is_completed)
self.assertEqual('aaaaa', test_list.items[2].text)
self.assertEqual('Z', test_list.items[2].priority)
self.assertFalse(test_list.items[2].is_completed)
self.assertEqual('bbbbb', test_list.items[3].text)
self.assertEqual('Z', test_list.items[3].priority)
self.assertFalse(test_list.items[3].is_completed)
self.assertEqual('Nothing', test_list.items[4].text)
self.assertEqual(None, test_list.items[4].priority)
self.assertFalse(test_list.items[4].is_completed)
self.assertEqual('Done it', test_list.items[5].text)
self.assertEqual('B', test_list.items[5].priority)
self.assertTrue(test_list.items[5].is_completed)
self.assertEqual('No biggie', test_list.items[6].text)
self.assertEqual('C', test_list.items[6].priority)
self.assertTrue(test_list.items[6].is_completed)
self.assertEqual('Item three', test_list.items[7].text)
self.assertEqual(None, test_list.items[7].priority)
self.assertTrue(test_list.items[7].is_completed)
def test_to_text(self):
test_list = TodoTxtList()
# Empty list yields empty string:
self.assertEqual('', str(test_list))
todo_text = "(A) Do one thing\n (B) Do another thing\n x One last thing"
expected_output = "(A) Do one thing\n(B) Do another thing\nx One last thing"
test_list.init_from_text(todo_text)
self.assertEqual(expected_output, str(test_list))
def test_write_to_file(self):
todo_text = "(A) Do one thing\n (B) Do another thing\n x One last thing"
expected_output = "(A) Do one thing\n(B) Do another thing\nx One last thing"
test_list = TodoTxtList(None, todo_text)
# Write to a temporary output file:
output_file = tempfile.NamedTemporaryFile(mode='w+')
test_list.todo_filename = output_file.name
test_list.write_to_file()
# Now read the file in and see that it all matches up:
self.assertEqual(expected_output, output_file.read())
if __name__ == '__main__':
unittest.main()
| keithfancher/Todo-Indicator | todotxt/test_list.py | Python | gpl-3.0 | 11,017 |
from paddle.trainer_config_helpers import *
settings(
batch_size=1000,
learning_rate=1e-5
)
data = data_layer(name='data', size=2304)
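# Hedged shape bookkeeping: the 2304 inputs are treated as a 48x48 single-channel
# image; the 16-filter 3x3 conv (padding=1) keeps 48x48, the groups=2 maxout halves
# it to 8 channels, and the 2x2/stride-2 max pool leaves 8 x 24 x 24 = 4608
# activations feeding the 384-unit fully connected layer.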
conv = img_conv_layer(input=data,
filter_size = 3,
num_channels=1,
num_filters=16,
padding=1,
act=LinearActivation(),
bias_attr=True)
maxout = maxout_layer(input=conv,
num_channels=16,
groups=2)
pool = img_pool_layer(input=maxout,
num_channels=8,
pool_size=2,
stride=2,
pool_type=MaxPooling())
fc = fc_layer(input=pool, size=384, bias_attr=False)
outputs(fc)
| zuowang/Paddle | python/paddle/trainer_config_helpers/tests/configs/test_maxout.py | Python | apache-2.0 | 772 |
#!/usr/bin/env python
# Copyright (c) 2013 Turbulenz Limited.
# Released under "Modified BSD License". See COPYING for full text.
from __future__ import print_function
import os
import glob
import sys
def find(dirname, pattern):
for root, dirs, files in os.walk(dirname):
found = glob.glob(os.path.join(root, pattern))
for f in found:
print(f)
return 0
def usage():
print("Usage: %s [<dir>] [options]" % sys.argv[0])
print("")
print("Options:")
print(" -iname '<pattern>' - find all files that match pattern,")
print(" e.g. '*.js'")
print("")
if "__main__" == __name__:
dirname = "."
pattern = "*"
args = sys.argv[1:]
while len(args) > 0:
a = args.pop(0)
if '-iname' == a:
pattern = args.pop(0)
else:
dirname = a
exit(find(dirname, pattern))
| turbulenz/turbulenz_build | commands/find.py | Python | bsd-3-clause | 911 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Optiongroup.form_type'
db.add_column('budgetcalc_optiongroup', 'form_type', self.gf('django.db.models.fields.CharField')(default='r', max_length=1), keep_default=False)
def backwards(self, orm):
# Deleting field 'Optiongroup.form_type'
db.delete_column('budgetcalc_optiongroup', 'form_type')
models = {
'budgetcalc.category': {
'Meta': {'ordering': "['order']", 'object_name': 'Category'},
'cat_type': ('django.db.models.fields.CharField', [], {'default': "'r'", 'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'budgetcalc.option': {
'Meta': {'ordering': "['order']", 'object_name': 'Option'},
'amount': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['budgetcalc.Category']"}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'optiongroup': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['budgetcalc.Optiongroup']", 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'ridership': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'budgetcalc.optiongroup': {
'Meta': {'object_name': 'Optiongroup'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'default': "'r'", 'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'budgetcalc.submission': {
'Meta': {'object_name': 'Submission'},
'budget': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'selected_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['budgetcalc.Option']"})
}
}
complete_apps = ['budgetcalc']
| MAPC/MBTA | budgetcalc/migrations/0007_auto__add_field_optiongroup_form_type.py | Python | bsd-3-clause | 3,090 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opengain.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| null-none/OpenGain | manage.py | Python | gpl-2.0 | 251 |
"""
The MIT License (MIT)
Copyright (c) 2014 Chris Wimbrow
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
| cwimbrow/veganeyes-api | app/api_1_0/errors.py | Python | mit | 1,089 |
#!/usr/bin/env python
# encoding: utf-8
from nose.tools import * # noqa
from framework import sessions
from framework.flask import request
from website.models import Session
from website.addons.osfstorage.tests import factories
from website.addons.osfstorage import utils
from website.addons.osfstorage.tests.utils import StorageTestCase
class TestSerializeRevision(StorageTestCase):
def setUp(self):
super(TestSerializeRevision, self).setUp()
self.path = 'kind-of-magic.webm'
self.record = self.node_settings.root_node.append_file(self.path)
self.versions = [
factories.FileVersionFactory(creator=self.user)
for __ in range(3)
]
self.record.versions = self.versions
self.record.save()
def test_serialize_revision(self):
sessions.sessions[request._get_current_object()] = Session()
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 2)
expected = {
'index': 1,
'user': {
'name': self.user.fullname,
'url': self.user.url,
},
'date': self.versions[0].date_created.isoformat(),
'downloads': 2,
'md5': None,
'sha256': None,
}
observed = utils.serialize_revision(
self.project,
self.record,
self.versions[0],
0,
)
assert_equal(expected, observed)
assert_equal(self.record.get_download_count(), 3)
assert_equal(self.record.get_download_count(version=2), 1)
assert_equal(self.record.get_download_count(version=0), 2)
def test_anon_revisions(self):
sessions.sessions[request._get_current_object()] = Session()
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 0)
utils.update_analytics(self.project, self.record._id, 2)
expected = {
'index': 2,
'user': None,
'date': self.versions[0].date_created.isoformat(),
'downloads': 0,
'md5': None,
'sha256': None,
}
observed = utils.serialize_revision(
self.project,
self.record,
self.versions[0],
1,
anon=True
)
assert_equal(expected, observed)
| jmcarp/osf.io | website/addons/osfstorage/tests/test_utils.py | Python | apache-2.0 | 2,524 |
from __future__ import unicode_literals
from django.db import models
import datetime
from django.db.models.signals import pre_save
from django.urls import reverse
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from source_utils.starters import CommonInfo, GenericCategory
from versatileimagefield.fields import (
VersatileImageField,
PPOIField
)
def upload_location(instance, filename):
return "%s/%s" %(instance.slug, filename)
ASSESEMENT = (
('units', 'Per unit'),
('square feet', 'Square foot'),
('linear feet', 'Linear foot'),
('square meters', 'Square meter'),
('linear meters', 'Linear meter'),
)
class Base(GenericCategory):
"""
This model represents the general type of product category offered.
"""
class Meta:
verbose_name = _('Product Category')
verbose_name_plural = _('Product Categories')
ordering = ["category"]
def get_success_url(self):
return reverse("product:company_list")
def get_absolute_url(self):
return reverse(
"product:base_product_detail",
kwargs={'slug': self.slug}
)
def pre_save_category(sender, instance, *args, **kwargs):
instance.slug = slugify(instance.category)
pre_save.connect(pre_save_category, sender=Base)
class Product(CommonInfo):
"""
This model describes the specific product related to the category.
"""
base = models.ForeignKey(
Base,
on_delete=models.CASCADE
)
supplier = models.ForeignKey(
'company.Company',
on_delete=models.CASCADE
)
item = models.CharField(
max_length=30,
unique=True
)
admin_time = models.DecimalField(
default=0,
max_digits=4,
decimal_places=2
)
prep_time = models.DecimalField(
default=0,
max_digits=4,
decimal_places=2
)
field_time = models.DecimalField(
default=0,
max_digits=4,
decimal_places=2
)
admin_material = models.DecimalField(
default=0,
max_digits=8,
decimal_places=2
)
prep_material = models.DecimalField(
default=0,
max_digits=8,
decimal_places=2
)
field_material = models.DecimalField(
default=0,
max_digits=8,
decimal_places=2
)
quantity_assesement = models.CharField(
max_length=12,
        verbose_name=_("Quantity assessment method"),
choices=ASSESEMENT
)
order_if_below = models.SmallIntegerField()
discontinued = models.DateField(
null=True,
blank=True
)
order_now = models.BooleanField(
default=False
)
units_damaged_or_lost = models.SmallIntegerField(
default=0
)
quantity = models.SmallIntegerField(
"Usable quantity",
default=0,
null=True,
blank=True
)
quantity_called_for = models.SmallIntegerField(
default=0,
null=True,
blank=True
)
image = VersatileImageField(
'Image',
upload_to='images/product/',
null=True, blank=True,
width_field='width',
height_field='height',
ppoi_field='ppoi'
)
height = models.PositiveIntegerField(
'Image Height',
blank=True,
null=True
)
width = models.PositiveIntegerField(
'Image Width',
blank=True,
null=True
)
ppoi = PPOIField(
'Image PPOI'
)
no_longer_available = models.BooleanField(default=False)
class Meta:
ordering= ['item']
def __str__(self):
return self.item
def get_time(self):
return self.admin_time + self.prep_time + self.field_time
def get_cost(self):
return self.admin_material + self.prep_material + self.field_material
def get_usable_quantity(self):
return self.quantity - self.units_damaged_or_lost - self.quantity_called_for
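    # Illustrative note (added for clarity, not in the original module): these
    # helpers are plain sums and differences, e.g. with admin_time=1.5,
    # prep_time=2.0 and field_time=0.5, get_time() returns 4.0, and
    # get_usable_quantity() subtracts damaged/lost and called-for units from
    # the stocked quantity.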
def get_success_url(self):
return reverse("product:category_item_list", kwargs={'slug': self.base.slug})
def get_absolute_url(self):
return reverse("product:item_detail", kwargs={'slug': self.slug})
def pre_save_product(sender, instance, *args, **kwargs):
if not instance.no_longer_available:
instance.discontinued = None
    elif instance.no_longer_available and instance.discontinued is None:
instance.discontinued = datetime.date.today()
if (
instance.quantity -
instance.units_damaged_or_lost -
instance.quantity_called_for
) < instance.order_if_below:
instance.order_now = True
else:
instance.order_now = False
instance.slug = slugify(instance.item)
pre_save.connect(pre_save_product, sender=Product) | michealcarrerweb/LHVent_app | stock/models.py | Python | mit | 4,911 |
from common_helper_files import get_dir_of_file
from common_helper_passwords import get_merged_password_set
import logging
import os
NAME = 'blacklist'
BLACKLIST = list()
def filter_function(file_meta, file_cache=None):
_get_blacklist()
return file_meta['uid'] in BLACKLIST
def setup(app):
app.register_plugin(NAME, filter_function)
def _get_blacklist():
global BLACKLIST
if len(BLACKLIST) == 0:
blacklist_dir = _get_blacklist_dir()
BLACKLIST = get_merged_password_set(blacklist_dir)
logging.debug('blacklist with {} entries loaded'.format(len(BLACKLIST)))
def _get_blacklist_dir():
return os.path.join(get_dir_of_file(__file__), '../../blacklist')
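# Illustrative usage sketch (hypothetical host application, not part of this
# plugin): the host is expected to call setup() with an object exposing
# register_plugin(name, func) and later invoke the stored filter with a
# file_meta dict carrying a 'uid' key, e.g.
#
#     class App:
#         def __init__(self):
#             self.plugins = {}
#         def register_plugin(self, name, func):
#             self.plugins[name] = func
#
#     app = App()
#     setup(app)
#     app.plugins[NAME]({'uid': 'deadbeef'})  # True if the uid is blacklisted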
| weidenba/recovery_sort | filter_plugins/ignore/blacklist.py | Python | gpl-3.0 | 707 |
""" watch for modifications to ./docs folder """
from twisted.internet import inotify
from twisted.internet import task
from twisted.python import filepath
from twisted.python import log
from twisted.application.service import IService
from zope.interface import implements
from txbitwrap.event import HANDLERS
class SessionWatch(object):
""" observer websocket subscribers """
implements(IService)
interval = 60 * 60 * 24 # 1 day
""" looping call interval in seconds """
def __init__(self):
self.name = __name__
self.loop = task.LoopingCall(self.session_sweep)
def privilegedStartService(self):
""" required by IService """
def startService(self):
""" start service """
log.msg('__SERVICE__ => ' + self.name)
self.loop.start(self.interval)
def stopService(self):
""" stop service """
log.msg('stopping %s' % self.name)
def session_sweep(self):
""" sweep websocket sessions """
# FIXME: integrate w/ Auth + Sessions
# websocket handlers should expire
        # KLUDGE: purge all subscribers/handlers by clearing the shared dict
        # in place; rebinding HANDLERS to a new dict here would only create a
        # local name and leave the module-level registry untouched
        HANDLERS.clear()
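# Illustrative wiring sketch (hypothetical, not part of this module): the
# hosting application is expected to start the service, after which the
# LoopingCall runs session_sweep once per `interval` seconds, e.g.
#
#     watcher = SessionWatch()
#     watcher.startService()  # begins the daily sweep loop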
| stackdump/txbitwrap | txbitwrap/session_watch.py | Python | mit | 1,150 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for binary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
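# Illustrative note (added for clarity, not part of the original test): for
# x = np.array([[0.2, 0.9], [0.7, 0.1]]) and thresh=0.5, _sparsify zeroes the
# entries below 0.5 and returns a SparseTensor with indices [[0, 1], [1, 0]]
# and values [0.9, 0.7], together with the dense values array [0.9, 0.7].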
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class BinaryOpTest(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
# Test that the op takes precedence over numpy operators.
np_left = tf_func(x, iny).eval()
np_right = tf_func(inx, y).eval()
if also_compare_variables:
var_x = variables.Variable(x)
var_y = variables.Variable(y)
variables.global_variables_initializer().run()
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = tf_func(x, var_y).eval()
np_var_right = tf_func(var_x, y).eval()
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {
dtypes_lib.float16: 1e-3,
dtypes_lib.float32: 1e-3,
dtypes_lib.complex64: 1e-2,
dtypes_lib.float64: 1e-5,
dtypes_lib.complex128: 1e-4
}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, ys, outf, zs, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.zeta,
math_ops.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (math_ops.zeta, math_ops.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(5, 6).astype(np.float32)
x2 = np.random.randn(5, 6).astype(np.float32)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.05] = 0.05 * np.sign(x1[np.abs(x1) < 0.05])
x2[np.abs(x2) < 0.05] = 0.05 * np.sign(x2[np.abs(x2) < 0.05])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
# Need x > 1
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta,
math_ops.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma,
math_ops.polygamma)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
s = math_ops.reduce_sum(inx * iny)
gx, gy = sess.run(gradients_impl.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx,
np.array([1, 1, 2, 2]).reshape(2, 2).astype(np.float32))
# gy is x's column summed up
self.assertAllEqual(gy, np.array([3, 7]).reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = variables.Variable(x)
var_y = variables.Variable(y)
with self.cached_session() as sess:
sess.run([var_x.initializer, var_y.initializer])
left_result = (var_x * y).eval()
right_result = (x * var_y).eval()
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(7, 4).astype(np.float64)
x2 = np.random.randn(7, 4).astype(np.float64)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.5] = 0.5 * np.sign(x1[np.abs(x1) < 0.5])
x2[np.abs(x2) < 0.5] = 0.5 * np.sign(x2[np.abs(x2) < 0.5])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
def testUint8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint8)
self._compareBoth(x, y, np.add, math_ops.add)
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testUint16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
cmp_eq = math_ops.equal(x, y)
cmp_not_eq = math_ops.not_equal(x, y)
values = sess.run([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"], ["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]],
dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"], ["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]],
dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
# TODO(aselle): Make the test work for dtypes:
# (np.complex64, np.complex128).
if tf_func not in (_FLOORDIV, math_ops.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float)
self._compareGradientY(x, y, np_func, tf_func, np.float)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, math_ops.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, math_ops.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, math_ops.subtract),
(np.subtract, _SUB),
(np.power, math_ops.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, math_ops.multiply),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, math_ops.truediv),
(np.floor_divide, math_ops.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
def testMismatchedDimensions(self):
for func in [
math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.div, _ADD,
_SUB, _MUL, _TRUEDIV, _FLOORDIV
]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
func(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testZeroPowGrad(self):
with self.cached_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
x = constant_op.constant(0.0, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
def testComplexPowGrad(self):
with self.cached_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
x = constant_op.constant(base, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
def testAtan2SpecialValues(self):
x1l, x2l = zip((+0.0, +0.0), (+0.0, -0.0), (-0.0, +0.0), (-0.0, -0.0),
(1.2345, float("inf")), (1.2345, -float("inf")),
(-4.321, float("inf")), (-4.125, -float("inf")),
(float("inf"), float("inf")), (float("inf"), -float("inf")),
(-float("inf"), float("inf")),
(-float("inf"), -float("inf")))
for dtype in np.float32, np.float64:
x1 = np.array(x1l).astype(dtype)
x2 = np.array(x2l).astype(dtype)
self._compareCpu(x1, x2, np.arctan2, math_ops.atan2)
self._compareGpu(x1, x2, np.arctan2, math_ops.atan2)
def testPowNegativeExponent(self):
for dtype in [np.int32, np.int64]:
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([-2, 3]).astype(dtype)
sess.run(math_ops.pow(x, y))
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([2, -3]).astype(dtype)
sess.run(math_ops.pow(x, y))
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = -3
sess.run(math_ops.pow(x, y))
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with self.test_session(force_gpu=test_util.is_gpu_available()):
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x.astype(t), y.astype(t))
if __name__ == "__main__":
test.main()
| hehongliang/tensorflow | tensorflow/python/kernel_tests/cwise_ops_binary_test.py | Python | apache-2.0 | 32,577 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgets/signsandsymbols.py
# signsandsymbols.py
# A collection of new widgets
# author: John Precedo (johnp@reportlab.com)
__version__=''' $Id: signsandsymbols.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""This file is a collection of widgets to produce some common signs and symbols.
Widgets include:
- ETriangle (an equilateral triangle),
- RTriangle (a right angled triangle),
- Octagon,
- Crossbox,
- Tickbox,
- SmileyFace,
- StopSign,
- NoEntry,
- NotAllowed (the red roundel from 'no smoking' signs),
- NoSmoking,
- DangerSign (a black exclamation point in a yellow triangle),
- YesNo (returns a tickbox or a crossbox depending on a testvalue),
- FloppyDisk,
- ArrowOne, and
- ArrowTwo
"""
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.graphics import shapes
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics import renderPDF
class _Symbol(Widget):
"""Abstract base widget
possible attributes:
'x', 'y', 'size', 'fillColor', 'strokeColor'
"""
_nodoc = 1
_attrMap = AttrMap(
x = AttrMapValue(isNumber,desc='symbol x coordinate'),
y = AttrMapValue(isNumber,desc='symbol y coordinate'),
dx = AttrMapValue(isNumber,desc='symbol x coordinate adjustment'),
        dy = AttrMapValue(isNumber,desc='symbol y coordinate adjustment'),
size = AttrMapValue(isNumber),
fillColor = AttrMapValue(isColorOrNone),
strokeColor = AttrMapValue(isColorOrNone),
strokeWidth = AttrMapValue(isNumber),
)
def __init__(self):
assert self.__class__.__name__!='_Symbol', 'Abstract class _Symbol instantiated'
self.x = self.y = self.dx = self.dy = 0
self.size = 100
self.fillColor = colors.red
self.strokeColor = None
self.strokeWidth = 0.1
def demo(self):
D = shapes.Drawing(200, 100)
s = float(self.size)
ob = self.__class__()
ob.x=50
ob.y=0
ob.draw()
D.add(ob)
D.add(shapes.String(ob.x+(s/2),(ob.y-12),
ob.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=10))
return D
class ETriangle(_Symbol):
"""This draws an equilateral triangle."""
def __init__(self):
        _Symbol.__init__(self)  # populate the default x, y, size and colours
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# Triangle specific bits
ae = s*0.125 #(ae = 'an eighth')
triangle = shapes.Polygon(points = [
self.x, self.y,
self.x+s, self.y,
self.x+(s/2),self.y+s],
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=s/50.)
g.add(triangle)
return g
class RTriangle(_Symbol):
"""This draws a right-angled triangle.
possible attributes:
'x', 'y', 'size', 'fillColor', 'strokeColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.green
self.strokeColor = None
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# Triangle specific bits
ae = s*0.125 #(ae = 'an eighth')
triangle = shapes.Polygon(points = [
self.x, self.y,
self.x+s, self.y,
self.x,self.y+s],
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=s/50.)
g.add(triangle)
return g
class Octagon(_Symbol):
"""This widget draws an Octagon.
possible attributes:
'x', 'y', 'size', 'fillColor', 'strokeColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.yellow
self.strokeColor = None
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# Octagon specific bits
athird=s/3
octagon = shapes.Polygon(points=[self.x+athird, self.y,
self.x, self.y+athird,
self.x, self.y+(athird*2),
self.x+athird, self.y+s,
self.x+(athird*2), self.y+s,
self.x+s, self.y+(athird*2),
self.x+s, self.y+athird,
self.x+(athird*2), self.y],
strokeColor = self.strokeColor,
fillColor = self.fillColor,
strokeWidth=10)
g.add(octagon)
return g
class Crossbox(_Symbol):
"""This draws a black box with a red cross in it - a 'checkbox'.
possible attributes:
'x', 'y', 'size', 'crossColor', 'strokeColor', 'crosswidth'
"""
_attrMap = AttrMap(BASE=_Symbol,
crossColor = AttrMapValue(isColorOrNone),
crosswidth = AttrMapValue(isNumber),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.white
self.crossColor = colors.red
self.strokeColor = colors.black
self.crosswidth = 10
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# crossbox specific bits
box = shapes.Rect(self.x+1, self.y+1, s-2, s-2,
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=2)
g.add(box)
crossLine1 = shapes.Line(self.x+(s*0.15), self.y+(s*0.15), self.x+(s*0.85), self.y+(s*0.85),
fillColor = self.crossColor,
strokeColor = self.crossColor,
strokeWidth = self.crosswidth)
g.add(crossLine1)
crossLine2 = shapes.Line(self.x+(s*0.15), self.y+(s*0.85), self.x+(s*0.85) ,self.y+(s*0.15),
fillColor = self.crossColor,
strokeColor = self.crossColor,
strokeWidth = self.crosswidth)
g.add(crossLine2)
return g
class Tickbox(_Symbol):
"""This draws a black box with a red tick in it - another 'checkbox'.
possible attributes:
'x', 'y', 'size', 'tickColor', 'strokeColor', 'tickwidth'
"""
_attrMap = AttrMap(BASE=_Symbol,
tickColor = AttrMapValue(isColorOrNone),
tickwidth = AttrMapValue(isNumber),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.tickColor = colors.red
self.strokeColor = colors.black
self.fillColor = colors.white
self.tickwidth = 10
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# tickbox specific bits
box = shapes.Rect(self.x+1, self.y+1, s-2, s-2,
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=2)
g.add(box)
tickLine = shapes.PolyLine(points = [self.x+(s*0.15), self.y+(s*0.35), self.x+(s*0.35), self.y+(s*0.15),
self.x+(s*0.35), self.y+(s*0.15), self.x+(s*0.85) ,self.y+(s*0.85)],
fillColor = self.tickColor,
strokeColor = self.tickColor,
strokeWidth = self.tickwidth)
g.add(tickLine)
return g
class SmileyFace(_Symbol):
"""This draws a classic smiley face.
possible attributes:
'x', 'y', 'size', 'fillColor'
"""
def __init__(self):
_Symbol.__init__(self)
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.yellow
self.strokeColor = colors.black
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# SmileyFace specific bits
g.add(shapes.Circle(cx=self.x+(s/2), cy=self.y+(s/2), r=s/2,
fillColor=self.fillColor, strokeColor=self.strokeColor,
strokeWidth=max(s/38.,self.strokeWidth)))
for i in (1,2):
g.add(shapes.Ellipse(self.x+(s/3)*i,self.y+(s/3)*2, s/30, s/10,
fillColor=self.strokeColor, strokeColor = self.strokeColor,
strokeWidth=max(s/38.,self.strokeWidth)))
# calculate a pointslist for the mouth
# THIS IS A HACK! - don't use if there is a 'shapes.Arc'
centerx=self.x+(s/2)
centery=self.y+(s/2)
radius=s/3
yradius = radius
xradius = radius
startangledegrees=200
endangledegrees=340
degreedelta = 1
pointslist = []
a = pointslist.append
from math import sin, cos, pi
degreestoradians = pi/180.0
radiansdelta = degreedelta*degreestoradians
startangle = startangledegrees*degreestoradians
endangle = endangledegrees*degreestoradians
while endangle<startangle:
endangle = endangle+2*pi
angle = startangle
while angle<endangle:
x = centerx + cos(angle)*radius
y = centery + sin(angle)*yradius
a(x); a(y)
angle = angle+radiansdelta
# make the mouth
smile = shapes.PolyLine(pointslist,
fillColor = self.strokeColor,
strokeColor = self.strokeColor,
strokeWidth = max(s/38.,self.strokeWidth))
g.add(smile)
return g
class StopSign(_Symbol):
"""This draws a (British) stop sign.
possible attributes:
'x', 'y', 'size'
"""
_attrMap = AttrMap(BASE=_Symbol,
stopColor = AttrMapValue(isColorOrNone,desc='color of the word stop'),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.black
self.fillColor = colors.orangered
self.stopColor = colors.ghostwhite
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# stop-sign specific bits
athird=s/3
outerOctagon = shapes.Polygon(points=[self.x+athird, self.y,
self.x, self.y+athird,
self.x, self.y+(athird*2),
self.x+athird, self.y+s,
self.x+(athird*2), self.y+s,
self.x+s, self.y+(athird*2),
self.x+s, self.y+athird,
self.x+(athird*2), self.y],
strokeColor = self.strokeColor,
fillColor = None,
strokeWidth=1)
g.add(outerOctagon)
innerOctagon = shapes.Polygon(points=[self.x+athird+(s/75), self.y+(s/75),
self.x+(s/75), self.y+athird+(s/75),
self.x+(s/75), self.y+(athird*2)-(s/75),
self.x+athird+(s/75), self.y+s-(s/75),
self.x+(athird*2)-(s/75), (self.y+s)-(s/75),
(self.x+s)-(s/75), self.y+(athird*2)-(s/75),
(self.x+s)-(s/75), self.y+athird+(s/75),
self.x+(athird*2)-(s/75), self.y+(s/75)],
strokeColor = None,
fillColor = self.fillColor,
strokeWidth=0)
g.add(innerOctagon)
if self.stopColor:
g.add(shapes.String(self.x+(s*0.5),self.y+(s*0.4),
'STOP', fillColor=self.stopColor, textAnchor='middle',
fontSize=s/3, fontName="Helvetica-Bold"))
return g
class NoEntry(_Symbol):
"""This draws a (British) No Entry sign - a red circle with a white line on it.
possible attributes:
'x', 'y', 'size'
"""
_attrMap = AttrMap(BASE=_Symbol,
innerBarColor = AttrMapValue(isColorOrNone,desc='color of the inner bar'),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.black
self.fillColor = colors.orangered
self.innerBarColor = colors.ghostwhite
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# no-entry-sign specific bits
if self.strokeColor:
g.add(shapes.Circle(cx = (self.x+(s/2)), cy = (self.y+(s/2)), r = s/2, fillColor = None, strokeColor = self.strokeColor, strokeWidth=1))
if self.fillColor:
g.add(shapes.Circle(cx = (self.x+(s/2)), cy =(self.y+(s/2)), r = ((s/2)-(s/50)), fillColor = self.fillColor, strokeColor = None, strokeWidth=0))
innerBarColor = self.innerBarColor
if innerBarColor:
g.add(shapes.Rect(self.x+(s*0.1), self.y+(s*0.4), width=s*0.8, height=s*0.2, fillColor = innerBarColor, strokeColor = innerBarColor, strokeLineCap = 1, strokeWidth = 0))
return g
class NotAllowed(_Symbol):
"""This draws a 'forbidden' roundel (as used in the no-smoking sign).
possible attributes:
'x', 'y', 'size'
"""
_attrMap = AttrMap(BASE=_Symbol,
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.red
self.fillColor = colors.white
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
strokeColor = self.strokeColor
# not=allowed specific bits
outerCircle = shapes.Circle(cx = (self.x+(s/2)), cy = (self.y+(s/2)), r = (s/2)-(s/10), fillColor = self.fillColor, strokeColor = strokeColor, strokeWidth=s/10.)
g.add(outerCircle)
centerx=self.x+s
centery=self.y+(s/2)-(s/6)
radius=s-(s/6)
yradius = radius/2
xradius = radius/2
startangledegrees=100
endangledegrees=-80
degreedelta = 90
pointslist = []
a = pointslist.append
from math import sin, cos, pi
degreestoradians = pi/180.0
radiansdelta = degreedelta*degreestoradians
startangle = startangledegrees*degreestoradians
endangle = endangledegrees*degreestoradians
while endangle<startangle:
endangle = endangle+2*pi
angle = startangle
while angle<endangle:
x = centerx + cos(angle)*radius
y = centery + sin(angle)*yradius
a(x); a(y)
angle = angle+radiansdelta
crossbar = shapes.PolyLine(pointslist, fillColor = strokeColor, strokeColor = strokeColor, strokeWidth = s/10.)
g.add(crossbar)
return g
class NoSmoking(NotAllowed):
"""This draws a no-smoking sign.
possible attributes:
'x', 'y', 'size'
"""
def __init__(self):
NotAllowed.__init__(self)
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = NotAllowed.draw(self)
# no-smoking-sign specific bits
newx = self.x+(s/2)-(s/3.5)
newy = self.y+(s/2)-(s/32)
cigarrette1 = shapes.Rect(x = newx, y = newy, width = (s/2), height =(s/16),
fillColor = colors.ghostwhite, strokeColor = colors.gray, strokeWidth=0)
newx=newx+(s/2)+(s/64)
g.insert(-1,cigarrette1)
cigarrette2 = shapes.Rect(x = newx, y = newy, width = (s/80), height =(s/16),
fillColor = colors.orangered, strokeColor = None, strokeWidth=0)
newx= newx+(s/35)
g.insert(-1,cigarrette2)
cigarrette3 = shapes.Rect(x = newx, y = newy, width = (s/80), height =(s/16),
fillColor = colors.orangered, strokeColor = None, strokeWidth=0)
newx= newx+(s/35)
g.insert(-1,cigarrette3)
cigarrette4 = shapes.Rect(x = newx, y = newy, width = (s/80), height =(s/16),
fillColor = colors.orangered, strokeColor = None, strokeWidth=0)
newx= newx+(s/35)
g.insert(-1,cigarrette4)
return g
class DangerSign(_Symbol):
"""This draws a 'danger' sign: a yellow box with a black exclamation point.
possible attributes:
'x', 'y', 'size', 'strokeColor', 'fillColor', 'strokeWidth'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.black
self.fillColor = colors.gold
self.strokeWidth = self.size*0.125
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
        # danger sign specific bits
        ew = self.strokeWidth
        ae = s*0.125 #(ae = 'an eighth')
outerTriangle = shapes.Polygon(points = [
self.x, self.y,
self.x+s, self.y,
self.x+(s/2),self.y+s],
fillColor = None,
strokeColor = self.strokeColor,
strokeWidth=0)
g.add(outerTriangle)
innerTriangle = shapes.Polygon(points = [
self.x+(s/50), self.y+(s/75),
(self.x+s)-(s/50), self.y+(s/75),
self.x+(s/2),(self.y+s)-(s/50)],
fillColor = self.fillColor,
strokeColor = None,
strokeWidth=0)
g.add(innerTriangle)
exmark = shapes.Polygon(points=[
((self.x+s/2)-ew/2), self.y+ae*2.5,
((self.x+s/2)+ew/2), self.y+ae*2.5,
((self.x+s/2)+((ew/2))+(ew/6)), self.y+ae*5.5,
((self.x+s/2)-((ew/2))-(ew/6)), self.y+ae*5.5],
fillColor = self.strokeColor,
strokeColor = None)
g.add(exmark)
exdot = shapes.Polygon(points=[
((self.x+s/2)-ew/2), self.y+ae,
((self.x+s/2)+ew/2), self.y+ae,
((self.x+s/2)+ew/2), self.y+ae*2,
((self.x+s/2)-ew/2), self.y+ae*2],
fillColor = self.strokeColor,
strokeColor = None)
g.add(exdot)
return g
class YesNo(_Symbol):
"""This widget draw a tickbox or crossbox depending on 'testValue'.
If this widget is supplied with a 'True' or 1 as a value for
testValue, it will use the tickbox widget. Otherwise, it will
produce a crossbox.
possible attributes:
'x', 'y', 'size', 'tickcolor', 'crosscolor', 'testValue'
"""
_attrMap = AttrMap(BASE=_Symbol,
tickcolor = AttrMapValue(isColor),
crosscolor = AttrMapValue(isColor),
testValue = AttrMapValue(isBoolean),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.tickcolor = colors.green
self.crosscolor = colors.red
self.testValue = 1
def draw(self):
if self.testValue:
yn=Tickbox()
yn.tickColor=self.tickcolor
else:
yn=Crossbox()
yn.crossColor=self.crosscolor
yn.x=self.x
yn.y=self.y
yn.size=self.size
yn.draw()
return yn
def demo(self):
D = shapes.Drawing(200, 100)
yn = YesNo()
yn.x = 15
yn.y = 25
yn.size = 70
yn.testValue = 0
yn.draw()
D.add(yn)
yn2 = YesNo()
yn2.x = 120
yn2.y = 25
yn2.size = 70
yn2.testValue = 1
yn2.draw()
D.add(yn2)
labelFontSize = 8
D.add(shapes.String(yn.x+(yn.size/2),(yn.y-(1.2*labelFontSize)),
'testValue=0', fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
D.add(shapes.String(yn2.x+(yn2.size/2),(yn2.y-(1.2*labelFontSize)),
'testValue=1', fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
labelFontSize = 10
D.add(shapes.String(yn.x+85,(yn.y-20),
self.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
return D
class FloppyDisk(_Symbol):
"""This widget draws an icon of a floppy disk.
possible attributes:
'x', 'y', 'size', 'diskcolor'
"""
_attrMap = AttrMap(BASE=_Symbol,
diskColor = AttrMapValue(isColor),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.diskColor = colors.black
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# floppy disk specific bits
diskBody = shapes.Rect(x=self.x, y=self.y+(s/100), width=s, height=s-(s/100),
fillColor = self.diskColor,
strokeColor = None,
strokeWidth=0)
g.add(diskBody)
label = shapes.Rect(x=self.x+(s*0.1), y=(self.y+s)-(s*0.5), width=s*0.8, height=s*0.48,
fillColor = colors.whitesmoke,
strokeColor = None,
strokeWidth=0)
g.add(label)
labelsplash = shapes.Rect(x=self.x+(s*0.1), y=(self.y+s)-(s*0.1), width=s*0.8, height=s*0.08,
fillColor = colors.royalblue,
strokeColor = None,
strokeWidth=0)
g.add(labelsplash)
line1 = shapes.Line(x1=self.x+(s*0.15), y1=self.y+(0.6*s), x2=self.x+(s*0.85), y2=self.y+(0.6*s),
fillColor = colors.black,
strokeColor = colors.black,
strokeWidth=0)
g.add(line1)
line2 = shapes.Line(x1=self.x+(s*0.15), y1=self.y+(0.7*s), x2=self.x+(s*0.85), y2=self.y+(0.7*s),
fillColor = colors.black,
strokeColor = colors.black,
strokeWidth=0)
g.add(line2)
line3 = shapes.Line(x1=self.x+(s*0.15), y1=self.y+(0.8*s), x2=self.x+(s*0.85), y2=self.y+(0.8*s),
fillColor = colors.black,
strokeColor = colors.black,
strokeWidth=0)
g.add(line3)
metalcover = shapes.Rect(x=self.x+(s*0.2), y=(self.y), width=s*0.5, height=s*0.35,
fillColor = colors.silver,
strokeColor = None,
strokeWidth=0)
g.add(metalcover)
coverslot = shapes.Rect(x=self.x+(s*0.28), y=(self.y)+(s*0.035), width=s*0.12, height=s*0.28,
fillColor = self.diskColor,
strokeColor = None,
strokeWidth=0)
g.add(coverslot)
return g
class ArrowOne(_Symbol):
"""This widget draws an arrow (style one).
possible attributes:
'x', 'y', 'size', 'fillColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.red
self.strokeWidth = 0
self.strokeColor = None
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
x = self.x
y = self.y
s2 = s/2
s3 = s/3
s5 = s/5
g.add(shapes.Polygon(points = [
x,y+s3,
x,y+2*s3,
x+s2,y+2*s3,
x+s2,y+4*s5,
x+s,y+s2,
x+s2,y+s5,
x+s2,y+s3,
],
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth = self.strokeWidth,
)
)
return g
class ArrowTwo(ArrowOne):
"""This widget draws an arrow (style two).
possible attributes:
'x', 'y', 'size', 'fillColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.blue
self.strokeWidth = 0
self.strokeColor = None
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# arrow specific bits
x = self.x
y = self.y
s2 = s/2
s3 = s/3
s5 = s/5
s24 = s/24
g.add(shapes.Polygon(
points = [
x,y+11*s24,
x,y+13*s24,
x+18.75*s24, y+13*s24,
x+2*s3, y+2*s3,
x+s, y+s2,
x+2*s3, y+s3,
x+18.75*s24, y+11*s24,
],
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth = self.strokeWidth)
)
return g
def test():
"""This function produces a pdf with examples of all the signs and symbols from this file.
"""
labelFontSize = 10
D = shapes.Drawing(450,650)
cb = Crossbox()
cb.x = 20
cb.y = 530
D.add(cb)
D.add(shapes.String(cb.x+(cb.size/2),(cb.y-(1.2*labelFontSize)),
cb.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
tb = Tickbox()
tb.x = 170
tb.y = 530
D.add(tb)
D.add(shapes.String(tb.x+(tb.size/2),(tb.y-(1.2*labelFontSize)),
tb.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
yn = YesNo()
yn.x = 320
yn.y = 530
D.add(yn)
tempstring = yn.__class__.__name__ + '*'
    D.add(shapes.String(yn.x+(yn.size/2),(yn.y-(1.2*labelFontSize)),
tempstring, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
D.add(shapes.String(130,6,
"(The 'YesNo' widget returns a tickbox if testvalue=1, and a crossbox if testvalue=0)", fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize*0.75))
ss = StopSign()
ss.x = 20
ss.y = 400
D.add(ss)
D.add(shapes.String(ss.x+(ss.size/2), ss.y-(1.2*labelFontSize),
ss.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
ne = NoEntry()
ne.x = 170
ne.y = 400
D.add(ne)
D.add(shapes.String(ne.x+(ne.size/2),(ne.y-(1.2*labelFontSize)),
ne.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
sf = SmileyFace()
sf.x = 320
sf.y = 400
D.add(sf)
D.add(shapes.String(sf.x+(sf.size/2),(sf.y-(1.2*labelFontSize)),
sf.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
ds = DangerSign()
ds.x = 20
ds.y = 270
D.add(ds)
D.add(shapes.String(ds.x+(ds.size/2),(ds.y-(1.2*labelFontSize)),
ds.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
na = NotAllowed()
na.x = 170
na.y = 270
D.add(na)
D.add(shapes.String(na.x+(na.size/2),(na.y-(1.2*labelFontSize)),
na.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
ns = NoSmoking()
ns.x = 320
ns.y = 270
D.add(ns)
D.add(shapes.String(ns.x+(ns.size/2),(ns.y-(1.2*labelFontSize)),
ns.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
a1 = ArrowOne()
a1.x = 20
a1.y = 140
D.add(a1)
D.add(shapes.String(a1.x+(a1.size/2),(a1.y-(1.2*labelFontSize)),
a1.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
a2 = ArrowTwo()
a2.x = 170
a2.y = 140
D.add(a2)
D.add(shapes.String(a2.x+(a2.size/2),(a2.y-(1.2*labelFontSize)),
a2.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
fd = FloppyDisk()
fd.x = 320
fd.y = 140
D.add(fd)
D.add(shapes.String(fd.x+(fd.size/2),(fd.y-(1.2*labelFontSize)),
fd.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
renderPDF.drawToFile(D, 'signsandsymbols.pdf', 'signsandsymbols.py')
print('wrote file: signsandsymbols.pdf')
if __name__=='__main__':
test()
| nakagami/reportlab | src/reportlab/graphics/widgets/signsandsymbols.py | Python | bsd-3-clause | 30,281 |
from setuptools import setup, find_namespace_packages
setup(name='PeriPyDIC',
version='0.3',
      description='Peridynamics (PD) computations for state-based PD in 1D and 2D for elastic and viscoelastic materials. Digital Image Correlation results can also be imported to compute PD forces for each pixel as a node.',
author='Patrick Diehl, Rolland Delorme, Ilyass Tabiai',
author_email='patrickdiehl@lsu.edu, rolland.delorme@polymtl.ca, ilyass.tabiai@polymtl.ca',
url='https://github.com/lm2-poly/PeriPyDIC',
keywords='material science, peridynamics, digital image correlation',
license='GPL-3.0',
packages=find_namespace_packages(where="./"),
zip_safe=False)
| lm2-poly/PeriPyDIC | setup.py | Python | gpl-3.0 | 713 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-03 02:32
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('reports', '0020_auto_20170116_1410'),
]
operations = [
migrations.CreateModel(
name='CloudProjectFaculty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_id', models.CharField(blank=True, max_length=32, null=True, unique=True)),
('contact_email', models.CharField(blank=True, max_length=75, null=True)),
('name', models.CharField(blank=True, max_length=64, null=True)),
('faculty_abbreviation', models.CharField(blank=True, max_length=7, null=True)),
],
options={
'db_table': 'cloud_project_faculty',
'managed': False,
},
),
]
| MartinPaulo/ReportsAlpha | reports/migrations/0021_auto_20170503_1232.py | Python | gpl-3.0 | 1,079 |
# -*- coding: utf-8 -*-
"""
Unit tests for the elephant.statistics module.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import neo
import numpy as np
from numpy.testing.utils import assert_array_almost_equal, assert_array_equal
import quantities as pq
import elephant.statistics as es
class isi_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
[0.02, 0.71, 1.82, 8.46],
[0.03, 0.14, 0.15, 0.92]])
self.targ_array_2d_0 = np.array([[-0.28, 0.15, 0.95, 7.23],
[0.01, -0.57, -1.67, -7.54]])
self.targ_array_2d_1 = np.array([[0.26, 0.31, 0.36],
[0.69, 1.11, 6.64],
[0.11, 0.01, 0.77]])
self.targ_array_2d_default = self.targ_array_2d_1
self.test_array_1d = self.test_array_2d[0, :]
self.targ_array_1d = self.targ_array_2d_1[0, :]
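        # Note (added for clarity, not part of the original test): the target
        # arrays are simply first differences of test_array_2d, i.e.
        # np.diff(test_array_2d, axis=0) and np.diff(test_array_2d, axis=1),
        # which is what elephant.statistics.isi computes.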
def test_isi_with_spiketrain(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
target = pq.Quantity(self.targ_array_1d, 'ms')
res = es.isi(st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_quantities_1d(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(self.targ_array_1d, 'ms')
res = es.isi(st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_1d(self):
st = self.test_array_1d
target = self.targ_array_1d
res = es.isi(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_default(self):
st = self.test_array_2d
target = self.targ_array_2d_default
res = es.isi(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_0(self):
st = self.test_array_2d
target = self.targ_array_2d_0
res = es.isi(st, axis=0)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_2d_1(self):
st = self.test_array_2d
target = self.targ_array_2d_1
res = es.isi(st, axis=1)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
class isi_cv_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_regular = np.arange(1, 6)
def test_cv_isi_regular_spiketrain_is_zero(self):
st = neo.SpikeTrain(self.test_array_regular, units='ms', t_stop=10.0)
targ = 0.0
res = es.cv(es.isi(st))
self.assertEqual(res, targ)
def test_cv_isi_regular_array_is_zero(self):
st = self.test_array_regular
targ = 0.0
res = es.cv(es.isi(st))
self.assertEqual(res, targ)
class mean_firing_rate_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_3d = np.ones([5, 7, 13])
self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
[0.02, 0.71, 1.82, 8.46],
[0.03, 0.14, 0.15, 0.92]])
self.targ_array_2d_0 = np.array([3, 3, 3, 3])
self.targ_array_2d_1 = np.array([4, 4, 4])
self.targ_array_2d_None = 12
self.targ_array_2d_default = self.targ_array_2d_None
self.max_array_2d_0 = np.array([0.3, 0.71, 1.82, 8.46])
self.max_array_2d_1 = np.array([1.23, 8.46, 0.92])
self.max_array_2d_None = 8.46
self.max_array_2d_default = self.max_array_2d_None
self.test_array_1d = self.test_array_2d[0, :]
self.targ_array_1d = self.targ_array_2d_1[0]
self.max_array_1d = self.max_array_2d_1[0]
def test_mean_firing_rate_with_spiketrain(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
target = pq.Quantity(self.targ_array_1d/10., '1/ms')
res = es.mean_firing_rate(st)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_spiketrain_set_ends(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
target = pq.Quantity(2/0.5, '1/ms')
res = es.mean_firing_rate(st, t_start=0.4, t_stop=0.9)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_quantities_1d(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(self.targ_array_1d/self.max_array_1d, '1/ms')
res = es.mean_firing_rate(st)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_quantities_1d_set_ends(self):
st = pq.Quantity(self.test_array_1d, units='ms')
target = pq.Quantity(2/0.6, '1/ms')
res = es.mean_firing_rate(st, t_start=400*pq.us, t_stop=1.)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_1d(self):
st = self.test_array_1d
target = self.targ_array_1d/self.max_array_1d
res = es.mean_firing_rate(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_1d_set_ends(self):
st = self.test_array_1d
target = self.targ_array_1d/(1.23-0.3)
res = es.mean_firing_rate(st, t_start=0.3, t_stop=1.23)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_default(self):
st = self.test_array_2d
target = self.targ_array_2d_default/self.max_array_2d_default
res = es.mean_firing_rate(st)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_0(self):
st = self.test_array_2d
target = self.targ_array_2d_0/self.max_array_2d_0
res = es.mean_firing_rate(st, axis=0)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_1(self):
st = self.test_array_2d
target = self.targ_array_2d_1/self.max_array_2d_1
res = es.mean_firing_rate(st, axis=1)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_None(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, None)/5.
res = es.mean_firing_rate(st, axis=None, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_0(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 0)/5.
res = es.mean_firing_rate(st, axis=0, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_1(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 1)/5.
res = es.mean_firing_rate(st, axis=1, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_3d_2(self):
st = self.test_array_3d
target = np.sum(self.test_array_3d, 2)/5.
res = es.mean_firing_rate(st, axis=2, t_stop=5.)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_1_set_ends(self):
st = self.test_array_2d
target = np.array([4, 1, 3])/(1.23-0.14)
res = es.mean_firing_rate(st, axis=1, t_start=0.14, t_stop=1.23)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_2d_None(self):
st = self.test_array_2d
target = self.targ_array_2d_None/self.max_array_2d_None
res = es.mean_firing_rate(st, axis=None)
assert not isinstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_mean_firing_rate_with_plain_array_and_units_start_stop_typeerror(self):
st = self.test_array_2d
self.assertRaises(TypeError, es.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'))
self.assertRaises(TypeError, es.mean_firing_rate, st,
t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(TypeError, es.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=pq.Quantity(10, 'ms'))
self.assertRaises(TypeError, es.mean_firing_rate, st,
t_start=pq.Quantity(0, 'ms'),
t_stop=10.)
self.assertRaises(TypeError, es.mean_firing_rate, st,
t_start=0.,
t_stop=pq.Quantity(10, 'ms'))
class FanoFactorTestCase(unittest.TestCase):
def setUp(self):
np.random.seed(100)
num_st = 300
self.test_spiketrains = []
self.test_array = []
self.test_quantity = []
self.test_list = []
self.sp_counts = np.zeros(num_st)
for i in range(num_st):
r = np.random.rand(np.random.randint(20) + 1)
st = neo.core.SpikeTrain(r * pq.ms,
t_start=0.0 * pq.ms,
t_stop=20.0 * pq.ms)
self.test_spiketrains.append(st)
self.test_array.append(r)
self.test_quantity.append(r * pq.ms)
self.test_list.append(list(r))
# for cross-validation
self.sp_counts[i] = len(st)
def test_fanofactor_spiketrains(self):
# Test with list of spiketrains
self.assertEqual(
np.var(self.sp_counts) / np.mean(self.sp_counts),
es.fanofactor(self.test_spiketrains))
# One spiketrain in list
st = self.test_spiketrains[0]
self.assertEqual(es.fanofactor([st]), 0.0)
def test_fanofactor_empty(self):
# Test with empty list
self.assertTrue(np.isnan(es.fanofactor([])))
self.assertTrue(np.isnan(es.fanofactor([[]])))
# Test with empty quantity
self.assertTrue(np.isnan(es.fanofactor([] * pq.ms)))
# Empty spiketrain
st = neo.core.SpikeTrain([] * pq.ms, t_start=0 * pq.ms,
t_stop=1.5 * pq.ms)
self.assertTrue(np.isnan(es.fanofactor(st)))
def test_fanofactor_spiketrains_same(self):
# Test with same spiketrains in list
sts = [self.test_spiketrains[0]] * 3
self.assertEqual(es.fanofactor(sts), 0.0)
def test_fanofactor_array(self):
self.assertEqual(es.fanofactor(self.test_array),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_array_same(self):
lst = [self.test_array[0]] * 3
self.assertEqual(es.fanofactor(lst), 0.0)
def test_fanofactor_quantity(self):
self.assertEqual(es.fanofactor(self.test_quantity),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_quantity_same(self):
lst = [self.test_quantity[0]] * 3
self.assertEqual(es.fanofactor(lst), 0.0)
def test_fanofactor_list(self):
self.assertEqual(es.fanofactor(self.test_list),
np.var(self.sp_counts) / np.mean(self.sp_counts))
def test_fanofactor_list_same(self):
lst = [self.test_list[0]] * 3
self.assertEqual(es.fanofactor(lst), 0.0)
class LVTestCase(unittest.TestCase):
def setUp(self):
self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
self.target = 0.971826029994
def test_lv_with_quantities(self):
seq = pq.Quantity(self.test_seq, units='ms')
assert_array_almost_equal(es.lv(seq), self.target, decimal=9)
def test_lv_with_plain_array(self):
seq = np.array(self.test_seq)
assert_array_almost_equal(es.lv(seq), self.target, decimal=9)
def test_lv_with_list(self):
seq = self.test_seq
assert_array_almost_equal(es.lv(seq), self.target, decimal=9)
def test_lv_raise_error(self):
seq = self.test_seq
self.assertRaises(AttributeError, es.lv, [])
self.assertRaises(AttributeError, es.lv, 1)
self.assertRaises(ValueError, es.lv, np.array([seq, seq]))
class RateEstimationTestCase(unittest.TestCase):
def setUp(self):
# create a poisson spike train:
self.st_tr = (0, 20.0) # seconds
self.st_dur = self.st_tr[1] - self.st_tr[0] # seconds
self.st_margin = 5.0 # seconds
self.st_rate = 10.0 # Hertz
st_num_spikes = np.random.poisson(self.st_rate*(self.st_dur-2*self.st_margin))
spike_train = np.random.rand(st_num_spikes) * (self.st_dur-2*self.st_margin) + self.st_margin
spike_train.sort()
# convert spike train into neo objects
self.spike_train = neo.SpikeTrain(spike_train*pq.s,
t_start=self.st_tr[0]*pq.s,
t_stop=self.st_tr[1]*pq.s)
def test_instantaneous_rate(self):
st = self.spike_train
sampling_period = 0.01*pq.s
inst_rate = es.instantaneous_rate(
st, sampling_period, 'TRI', 0.03*pq.s)
self.assertIsInstance(inst_rate, neo.core.AnalogSignalArray)
self.assertEquals(
inst_rate.sampling_period.simplified, sampling_period.simplified)
self.assertEquals(inst_rate.simplified.units, pq.Hz)
self.assertEquals(inst_rate.t_stop.simplified, st.t_stop.simplified)
self.assertEquals(inst_rate.t_start.simplified, st.t_start.simplified)
def test_error_instantaneous_rate(self):
self.assertRaises(
AttributeError, es.instantaneous_rate,
spiketrain=[1,2,3]*pq.s, sampling_period=0.01*pq.ms, form='TRI',
sigma=0.03*pq.s)
self.assertRaises(
AttributeError, es.instantaneous_rate, spiketrain=[1,2,3],
sampling_period=0.01*pq.ms, form='TRI', sigma=0.03*pq.s)
st = self.spike_train
self.assertRaises(
AttributeError, es.instantaneous_rate, spiketrain=st,
sampling_period=0.01, form='TRI', sigma=0.03*pq.s)
self.assertRaises(
ValueError, es.instantaneous_rate, spiketrain=st,
sampling_period=-0.01*pq.ms, form='TRI', sigma=0.03*pq.s)
self.assertRaises(
AssertionError, es.instantaneous_rate, spiketrain=st,
sampling_period=0.01*pq.ms, form='NONE', sigma=0.03*pq.s)
self.assertRaises(
AttributeError, es.instantaneous_rate, spiketrain=st,
sampling_period=0.01*pq.ms, form='TRI', sigma=0.03)
self.assertRaises(
ValueError, es.instantaneous_rate, spiketrain=st,
sampling_period=0.01*pq.ms, form='TRI', sigma=-0.03*pq.s)
def test_re_consistency(self):
"""
test, whether the integral of the rate estimation curve is (almost)
equal to the number of spikes of the spike train.
"""
shapes = ['GAU', 'gaussian', 'TRI', 'triangle', 'BOX', 'boxcar',
'EPA', 'epanechnikov', 'ALP', 'alpha', 'EXP', 'exponential']
kernel_resolution = 0.01*pq.s
for shape in shapes:
rate_estimate0 = es.instantaneous_rate(self.spike_train, form=shape,
sampling_period=kernel_resolution,
sigma=0.5*pq.s,
m_idx=None)
rate_estimate1 = es.instantaneous_rate(self.spike_train, form=shape,
sampling_period=kernel_resolution,
sigma=0.5*pq.s,
t_start=self.st_tr[0]*pq.s,
t_stop=self.st_tr[1]*pq.s,
trim=False,
acausal=False)
rate_estimate2 = es.instantaneous_rate(self.spike_train, form=shape,
sampling_period=kernel_resolution,
sigma=0.5*pq.s,
t_start=self.st_tr[0]*pq.s,
t_stop=self.st_tr[1]*pq.s,
trim=True,
acausal=True)
rate_estimate3 = es.instantaneous_rate(self.spike_train, form=shape,
sampling_period=kernel_resolution,
sigma=0.5*pq.s,
t_start=self.st_tr[0]*pq.s,
t_stop=self.st_tr[1]*pq.s,
trim=True,
acausal=False)
rate_estimate4 = es.instantaneous_rate(self.spike_train, form=shape,
sampling_period=kernel_resolution,
sigma=0.5*pq.s,
t_start=self.st_tr[0]*pq.s,
t_stop=self.st_tr[1]*pq.s,
trim=False,
acausal=True)
kernel = es.make_kernel(form=shape, sampling_period=kernel_resolution,
sigma=0.5*pq.s, direction=-1)
### test consistency
rate_estimate_list = [rate_estimate0, rate_estimate1,
rate_estimate2, rate_estimate3,
rate_estimate4]
for rate_estimate in rate_estimate_list:
num_spikes = len(self.spike_train)
# re_diff = np.diff(rate_estimate)
re_diff = rate_estimate.magnitude[1:]-rate_estimate.magnitude[:-1]
re_fixed = (rate_estimate.magnitude[:-1] + re_diff)[:,0]
re_times_diff = np.diff(rate_estimate.times.rescale('s'))
integral = 0
for i, rate in enumerate(re_fixed):
integral += rate*re_times_diff.magnitude[i]
integral = integral
self.assertAlmostEqual(num_spikes, integral)
class TimeHistogramTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [self.spiketrain_a, self.spiketrain_b]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_time_histogram(self):
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = es.time_histogram(self.spiketrains, binsize=pq.s)
assert_array_equal(targ, histogram[:, 0].magnitude)
def test_time_histogram_binary(self):
targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
binary=True)
assert_array_equal(targ, histogram[:, 0].magnitude)
def test_time_histogram_tstart_tstop(self):
# Start, stop short range
targ = np.array([2, 1])
histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
t_start=5 * pq.s, t_stop=7 * pq.s)
assert_array_equal(targ, histogram[:, 0].magnitude)
# Test without t_stop
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = es.time_histogram(self.spiketrains, binsize=1 * pq.s,
t_start=0 * pq.s)
assert_array_equal(targ, histogram[:, 0].magnitude)
# Test without t_start
histogram = es.time_histogram(self.spiketrains, binsize=1 * pq.s,
t_stop=10 * pq.s)
assert_array_equal(targ, histogram[:, 0].magnitude)
def test_time_histogram_output(self):
# Normalization mean
histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
output='mean')
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
# Normalization rate
histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
output='rate')
assert_array_equal(histogram.view(pq.Quantity),
targ.reshape(targ.size, 1) * 1 / pq.s)
# Normalization unspecified, raises error
self.assertRaises(ValueError, es.time_histogram, self.spiketrains,
binsize=pq.s, output=' ')
if __name__ == '__main__':
unittest.main()
| mczerwinski/elephant | elephant/test/test_statistics.py | Python | bsd-3-clause | 22,605 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'WeekNumber'
db.create_table(u'mothers_calendar_weeknumber', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('week_number', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'mothers_calendar', ['WeekNumber'])
# Adding model 'Message'
db.create_table(u'mothers_calendar_message', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('message_text', self.gf('django.db.models.fields.CharField')(max_length=120)),
('week', self.gf('django.db.models.fields.related.ForeignKey')(related_name='week_in_cycle', to=orm['mothers_calendar.WeekNumber'])),
))
db.send_create_signal(u'mothers_calendar', ['Message'])
# Adding model 'QuestionType'
db.create_table(u'mothers_calendar_questiontype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('q_type', self.gf('django.db.models.fields.CharField')(max_length=120)),
))
db.send_create_signal(u'mothers_calendar', ['QuestionType'])
# Adding model 'Question'
db.create_table(u'mothers_calendar_question', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mothers_calendar.QuestionType'])),
('week', self.gf('django.db.models.fields.related.ForeignKey')(related_name='weeks_question', to=orm['mothers_calendar.WeekNumber'])),
('question_text', self.gf('django.db.models.fields.CharField')(max_length=120)),
))
db.send_create_signal(u'mothers_calendar', ['Question'])
def backwards(self, orm):
# Deleting model 'WeekNumber'
db.delete_table(u'mothers_calendar_weeknumber')
# Deleting model 'Message'
db.delete_table(u'mothers_calendar_message')
# Deleting model 'QuestionType'
db.delete_table(u'mothers_calendar_questiontype')
# Deleting model 'Question'
db.delete_table(u'mothers_calendar_question')
models = {
u'mothers_calendar.message': {
'Meta': {'object_name': 'Message'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_text': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'week': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'week_in_cycle'", 'to': u"orm['mothers_calendar.WeekNumber']"})
},
u'mothers_calendar.question': {
'Meta': {'object_name': 'Question'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question_text': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'question_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mothers_calendar.QuestionType']"}),
'week': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weeks_question'", 'to': u"orm['mothers_calendar.WeekNumber']"})
},
u'mothers_calendar.questiontype': {
'Meta': {'object_name': 'QuestionType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'q_type': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'mothers_calendar.weeknumber': {
'Meta': {'object_name': 'WeekNumber'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'week_number': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['mothers_calendar'] | muranga/ataps | ataps/apps/mothers_calendar/migrations/0001_initial.py | Python | unlicense | 4,004 |
import shutil
import time
import os
import string
import random
import sys
sys.path.append('../')
from constants import PLAYER_X, PLAYER_Y, DANCE, UP, REVERSE
CENTER = 'center'
LEFT = 'left'
RIGHT = 'right'
class Box():
def __init__(self, width, height, border='*', empty=' '):
self.width = width
self.height = height
self.empty = empty
self.border = border
self.clean()
def clean(self):
self.matrix = [[self.empty for _ in range(self.width)] for _ in range(self.height)]
self.matrix[0] = [self.border for _ in range(self.width)]
self.matrix[-1] = [self.border for _ in range(self.width)]
for row in range(self.height):
self.matrix[row][0] = self.border
self.matrix[row][-1] = self.border
def random(self):
printables = string.printable[:-5]
for row in range(self.height):
for col in range(self.width):
self.matrix[row][col] = printables[random.randint(0, len(printables)-1)]
def get_center(self):
return int(self.width/2), int(self.height/2)
def put_text(self, col, row, text, align=CENTER):
text_len = len(text)
if align == CENTER:
col -= int(text_len/2)
elif align == RIGHT:
col -= text_len
if col < 0:
raise ValueError("Col out of range %r" % col)
for index in range(text_len):
self.matrix[row][col + index] = text[index]
def put_box(self, col, row, box, align=CENTER):
if align == CENTER:
col -= int(box.width/2)
row -= int(box.height/2)
elif align == RIGHT:
col -= box.width
row -= box.height
if col < 0:
raise ValueError("Col out of range %r" % col)
if row < 0:
raise ValueError("Row out of range %r" % row)
for col_index in range(box.width):
for row_index in range(box.height):
self.matrix[row + row_index][col + col_index] = box.matrix[row_index][col_index]
class DebugBox(Box):
def __init__(self, width, height):
super(DebugBox, self).__init__(width, height, ' ')
def update(self, debug_lines):
self.clean()
for i, line in enumerate(debug_lines):
self.put_text(0, i, line, align=LEFT)
class PlayerScoreBoard(Box):
def __init__(self, width, height, player_name):
super(PlayerScoreBoard, self).__init__(width, height, '%')
self.player_name = player_name
self.update(0, 0, 0)
def update(self, score, nerding, drunkness):
self.clean()
col, row = self.get_center()
self.put_text(col, 3, self.player_name, align=CENTER)
self.put_text(col, 5, 'Score: ' + str(score), align=CENTER)
        self.put_text(col, 6, 'Drunkness: ' + str(drunkness), align=CENTER)
        self.put_text(col, 7, 'Nerdness: ' + str(nerding), align=CENTER)
class Window():
def __init__(self):
self.max_cols, self.max_rows = shutil.get_terminal_size()
self.max_rows -= 2
self.max_cols -= 2
if self.max_cols < 50 or self.max_rows < 30:
raise ValueError("Console too small {}x{}".format(self.max_rows, self.max_cols))
self.frame = Box(self.max_cols, self.max_rows, border=' ')
def clean_screen(self):
self.frame.clean()
self.update()
def welcome_screen(self, title="Choppy by PyCamp 2017"):
self.frame.random()
text_box = Box(len(title) + 4, 5, border=' ')
text_box.clean()
col, row = text_box.get_center()
text_box.put_text(col, row, title, align=CENTER)
col, row = self.frame.get_center()
self.frame.put_box(col, row, text_box)
self.update()
    def game_over_screen(self, text, winner):
self.frame.clean()
text_box = Box(35, 7, border='$')
text_box.clean()
col, row = text_box.get_center()
text_box.put_text(col, row - 1, text, align=CENTER)
        text_box.put_text(col, row + 1, 'THE WINNER ' + winner, align=CENTER)
col, row = self.frame.get_center()
self.frame.put_box(col, row, text_box)
self.update()
def draw_frame(self):
for row in self.frame.matrix:
line = ''
for tile in row:
line += tile
print(line)
def update(self):
os.system('clear')
self.draw_frame()
class MapVisualizer():
def __init__(self, map_matrix, welcome_screen=True, fps=3, dance_frames=4):
self.fps = fps
self.dance_frames = dance_frames
self.window = Window()
if welcome_screen:
self.window.welcome_screen()
time.sleep(1.5)
self.window.clean_screen()
self.window.update()
self.map = Box(len(map_matrix[0]), len(map_matrix), ' ')
self.map.matrix = map_matrix
score_width = 20
score_height = 10
total_width = 2 + score_width + 2 + self.map.width
if (score_height*2 > self.map.height):
total_height = 4 + score_height*2
else:
total_height = 4 + self.map.height
self.debugbox = DebugBox(total_width, total_height)
self.playerX = PlayerScoreBoard(score_width, score_height, 'Player_X')
self.playerY = PlayerScoreBoard(score_width, score_height, 'Player_Y')
col, row = self.window.frame.get_center()
self.sub_boxes = [{'col': 0, 'row': 0, 'box': self.debugbox},
{'col': col - int(total_width/2), 'row': row - int(total_height/2), 'box': self.playerX},
{'col': col - int(total_width/2), 'row': row, 'box': self.playerY},
{'col': col, 'row': row - int(total_height/2), 'box': self.map}]
def update_game(self):
for sub_box in self.sub_boxes:
self.window.frame.put_box(sub_box['col'], sub_box['row'], sub_box['box'], align=LEFT)
self.window.update()
def game_over(self, scores):
text = 'PLAYER_X: ' + str(scores[PLAYER_X]) + ' / '
text += 'PLAYER_Y: ' + str(scores[PLAYER_Y])
        if scores[PLAYER_X] == scores[PLAYER_Y]:
            winner = 'Tie'
        elif scores[PLAYER_X] > scores[PLAYER_Y]:
            winner = 'PLAYER_X'
        else:
            winner = 'PLAYER_Y'
        self.window.game_over_screen(text, winner)
def draw(self, map_matrix, actions, scores, nerding, drunkness, debug_lines=None):
if debug_lines is not None:
self.debugbox.update(debug_lines)
if scores is not None:
self.playerX.update(scores[PLAYER_X], nerding[PLAYER_X], drunkness[PLAYER_X])
self.playerY.update(scores[PLAYER_Y], nerding[PLAYER_Y], drunkness[PLAYER_Y])
if actions is not None:
if actions[PLAYER_X] == DANCE or actions[PLAYER_Y] == DANCE:
for cycle in range(self.dance_frames):
for row_index, row in enumerate(map_matrix):
try:
xcol_index = row.index(PLAYER_X)
xrow_index = row_index
except ValueError:
pass
try:
ycol_index = row.index(PLAYER_Y)
yrow_index = row_index
except ValueError:
pass
if actions[PLAYER_X] == DANCE:
map_matrix[xrow_index][xcol_index] = PLAYER_X if cycle % 2 else '{0}{1}'.format(REVERSE, PLAYER_X)
if actions[PLAYER_Y] == DANCE:
map_matrix[yrow_index][ycol_index] = PLAYER_Y if cycle % 2 else '{0}{1}'.format(REVERSE, PLAYER_Y)
self.map.matrix = map_matrix
self.update_game()
time.sleep(1 / self.fps)
self.map.matrix = map_matrix
self.update_game()
time.sleep(1 / self.fps)
from game import map_generator
if __name__ == '__main__':
example_map = map_generator.generate(40, 40, 0.01, 0.01, 0.05)
map_visualizer = MapVisualizer(example_map, welcome_screen=True)
map_visualizer.draw(example_map, None, {PLAYER_X: 60, PLAYER_Y: 1})
for i in range(10):
map_visualizer.draw(example_map, None, {PLAYER_X: i, PLAYER_Y: i*2})
time.sleep(1)
| fisadev/choppycamp | game/visualizer.py | Python | mit | 8,460 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-24 20:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('node', '0003_node_is_paused_errors'),
]
operations = [
migrations.RenameField(
model_name='node',
old_name='archived',
new_name='deprecated',
),
]
| ngageoint/scale | scale/node/migrations/0004_auto_20170524_1639.py | Python | apache-2.0 | 430 |
import pytest
import cv2
from plantcv.plantcv import auto_crop
@pytest.mark.parametrize('padx,pady,expected', [[20, 20, (98, 56, 4)], [(400, 400), (400, 400), (58, 16, 4)]])
def test_auto_crop(padx, pady, expected, test_data):
"""Test for PlantCV."""
# Read in test data
img = cv2.imread(test_data.small_rgb_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
cropped = auto_crop(img=img, obj=contour, padding_x=padx, padding_y=pady, color='image')
assert cropped.shape == expected
@pytest.mark.parametrize("color", ["black", "white", "image"])
def test_auto_crop_grayscale(color, test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
cropped = auto_crop(img=gray_img, obj=contour, padding_x=20, padding_y=20, color=color)
assert cropped.shape == (98, 56)
def test_auto_crop_bad_color_input(test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
with pytest.raises(RuntimeError):
_ = auto_crop(img=gray_img, obj=contour, padding_x=20, padding_y=20, color='wite')
def test_auto_crop_bad_padding_input(test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
with pytest.raises(RuntimeError):
_ = auto_crop(img=gray_img, obj=contour, padding_x="one", padding_y=20, color='white')
| danforthcenter/plantcv | tests/plantcv/test_auto_crop.py | Python | mit | 1,737 |
'''Holds Settings'''
import logging
import json
from WeatherType import WeatherType
import os
FILELOC = 'json/whet_settings.json'
class Settings(object):
"""Class to hold settings"""
# pylint: disable=too-many-instance-attributes
logger = logging.getLogger('__main__')
last_modified_time = 0
def __init__(self):
try:
self.read_file()
self.weather = 'normal' #always start with normal weather
self.last_modified_time = os.stat(FILELOC).st_mtime
self.dump_file()
except IOError:
self.logger.info("No settings file found...using defaults and creating file ")
self.weather = "normal" # todo make this the enum
self.catchup_on = True
self.catchup_steps = 255
self.catchup_time = 5
self.clouds_random_on = False
self.clouds_random_start_time = 13
self.clouds_random_end_time = 15
self.clouds_random_freq = 20
self.clouds_dim_percent = .20
self.clouds_dim_resolution = 255
self.clouds_dim_speed = .05
self.storms_random_on = True
self.storms_random_start_time = 21
self.storms_random_end_time = 0
self.storms_random_freq = 1000
self.sound_on = True
self.preview_timeout = 600
self.pushbullet = ''
self.outlet_run = False
self.dump_file()
self.last_modified_time = os.stat(FILELOC).st_mtime
def dump_file(self):
'''dumps json representation of obj to disk'''
with open(FILELOC, 'w') as data_file:
string = '{"settings":'
string += json.dumps(
self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
string += '}'
data_file.write(string)
def read_file(self):
'''reads file from disk'''
if self.last_modified_time != os.stat(FILELOC).st_mtime:
with open(FILELOC) as data_file:
self.__dict__ = json.load(data_file)["settings"]
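# --- Usage sketch (not part of the original module) ---
# Assumes the json/ directory exists relative to the working directory so that
# FILELOC is writable: the constructor falls back to defaults and creates the
# file on first run, dump_file() persists changes, and read_file() only reloads
# when the file has been modified on disk since the last read.
if __name__ == '__main__':
    settings = Settings()
    settings.sound_on = False
    settings.dump_file()   # write the change back to json/whet_settings.json
    settings.read_file()   # no-op unless the file changed on disk in the meantime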
| mike-gracia/whet | Settings.py | Python | mit | 2,114 |
import os
import shutil
import pytest
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
@pytest.fixture
def testing_gallery(tmpdir):
"""Testing gallery with two albums:
- testing-album
    - incomplete-album (without album metadata in album.ini)
    Both albums contain four photos: Photo{1..4}.jpg
"""
shutil.copy(os.path.join(DATA_PATH, 'gallery.ini'), str(tmpdir))
_create_album(tmpdir, 'testing-album', 'album.ini')
_create_album(tmpdir, 'incomplete-album', 'album-incomplete.ini')
return tmpdir
def _create_album(gallery_dir, album_name, ini_name):
photos = ['Photo1.jpg', 'Photo2.jpg', 'Photo3.jpg', 'Photo4.jpg']
album_path = gallery_dir.join(album_name)
album_path.mkdir()
shutil.copy(os.path.join(DATA_PATH, ini_name),
str(album_path.join('album.ini')))
for photo in photos:
shutil.copy(os.path.join(DATA_PATH, 'photo.jpg'),
str(album_path.join(photo)))
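# --- Usage sketch (illustrative, not part of the original fixtures) ---
# A test module elsewhere in the suite consumes the fixture simply by naming it
# as an argument; pytest injects the populated tmpdir-backed gallery:
#
#   def test_gallery_contents(testing_gallery):
#       assert testing_gallery.join('gallery.ini').check(file=1)
#       assert testing_gallery.join('testing-album', 'Photo1.jpg').check(file=1)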
| sergejx/kaleidoscope | tests/conftest.py | Python | bsd-3-clause | 975 |
from django.contrib import admin
from .models import SimpleModel
admin.site.register(SimpleModel, admin.ModelAdmin)
| gavinwahl/django-optimistic-lock | tests/tests/admin.py | Python | bsd-2-clause | 118 |
from google.appengine.ext import db
class BudgetGroup():
INCOME = "Income"
COMMITED = "Committed"
IRREGULAR = "Irregular"
FUN = "Fun"
# RETIREMENT = "Retirement"
SAVINGS = "Savings"
UNGROUPED = "Ungrouped"
IGNORED = "Ignored"
@staticmethod
def getAllGroups():
return [BudgetGroup.INCOME, BudgetGroup.COMMITED, BudgetGroup.IRREGULAR, BudgetGroup.FUN,
BudgetGroup.SAVINGS, BudgetGroup.UNGROUPED, BudgetGroup.IGNORED]
class CategoryMapping(db.Model):
transactionCategory = db.StringProperty()
budgetGroup = db.StringProperty()
class UserBudgetPreferences(db.Model):
userid = db.StringProperty()
skipRetirement = db.BooleanProperty()
class CategoryMapManager():
def __init__(self, user, allCategories = None):
self.user = user
self.categories = allCategories
self.mapping = {}
def clearMappings(self):
q = CategoryMapping.all()
parentKey = db.Key.from_path('User', self.user.user_id())
q.ancestor(parentKey)
for m in q.run():
m.delete()
def getMapping(self):
if (len(self.mapping) == 0):
#q = db.GqlQuery("SELECT * FROM CategoryMapping " +
# "WHERE userid = :1 ",
# self.user.user_id())
q = CategoryMapping.all()
parentKey = db.Key.from_path('User', self.user.user_id())
q.ancestor(parentKey)
for m in q.run():
self.mapping[m.transactionCategory] = m.budgetGroup
if self.categories is not None:
for c in self.categories:
if (c not in self.mapping):
self.mapping[c] = BudgetGroup.UNGROUPED
return self.mapping
def getCategories(self, budgetGroup):
ret = []
mapping = self.getMapping()
for cat, grp in mapping.items():
if grp==budgetGroup:
ret.append(cat)
return ret
@staticmethod
def getCategoriesFromTransactions(transactions):
categories = set()
for t in transactions:
categories.add(t.transactionCategory)
return categories
def setMapping(self, transactionCategory, budgetGroup):
self.mapping[transactionCategory] = budgetGroup
def writeMappings(self):
for c, g in self.mapping.items():
key = db.Key.from_path('User', self.user.user_id(), 'CategoryMapping', c)
cm = CategoryMapping(key=key, transactionCategory=c, budgetGroup=g)
cm.put()
class BudgetResult():
def __init__(self, categoryMap, userBudgetPreferences):
self.cmap = categoryMap
self.prefs = userBudgetPreferences
self.groupCategoryMap = {}
self.categoryTransactions = {}
for c,g in self.cmap.items():
if c not in self.categoryTransactions:
self.categoryTransactions[c] = []
if (g not in self.groupCategoryMap):
self.groupCategoryMap[g] = set()
self.groupCategoryMap[g].add(c)
def addTransaction(self, t):
transactions = self.categoryTransactions[t.transactionCategory]
transactions.append(t)
def getCategoryTotal(self, category):
transactions = self.categoryTransactions[category]
ret = 0.0
for t in transactions:
ret += t.getAmount()
return ret
def getGroups(self):
# use this to effect a fixed display order
allGroups = BudgetGroup.getAllGroups()
actualGroups = self.groupCategoryMap.keys()
groups = filter(lambda x: x in actualGroups and x != BudgetGroup.IGNORED, allGroups)
return groups
def getCategoriesInGroup(self, group):
return self.groupCategoryMap[group]
def getGroupTotal(self, group):
ret = 0.0
for c in self.getCategoriesInGroup(group):
ret += self.getCategoryTotal(c)
return ret
def calculate(transactions, categoryMap, userBudgetPreferences):
result = BudgetResult(categoryMap, userBudgetPreferences)
for t in transactions:
result.addTransaction(t)
return result
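# --- Usage sketch (illustrative, not part of the original module) ---
# Transaction objects come from elsewhere in the app; BudgetResult only relies
# on their .transactionCategory attribute and .getAmount() method. A typical
# flow on top of the classes above would look roughly like:
#
#   manager = CategoryMapManager(user, allCategories=['Salary', 'Groceries'])
#   manager.setMapping('Salary', BudgetGroup.INCOME)
#   manager.setMapping('Groceries', BudgetGroup.COMMITED)
#   manager.writeMappings()
#   result = calculate(transactions, manager.getMapping(), preferences)
#   for group in result.getGroups():
#       print group, result.getGroupTotal(group)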
| gregmli/Spendalyzer | budget.py | Python | mit | 4,648 |
# Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
bitcoin-python3 - Easy-to-use Bitcoin API client
"""
def connect_to_local(filename=None):
"""
Connect to default bitcoin instance owned by this user, on this machine.
Returns a :class:`~bitcoinrpc.connection.BitcoinConnection` object.
Arguments:
- `filename`: Path to a configuration file in a non-standard location (optional)
"""
from bitcoinrpc.connection import BitcoinConnection
from bitcoinrpc.config import read_default_config
cfg = read_default_config(filename)
if cfg is None:
cfg = {}
port = int(cfg.get('rpcport', '18332' if cfg.get('testnet') else '8332'))
rpcuser = cfg.get('rpcuser', '')
rpcpassword = cfg.get('rpcpassword', '')
return BitcoinConnection(rpcuser, rpcpassword, 'localhost', port)
def connect_to_remote(user, password, host='localhost', port=8332,
use_https=False):
"""
Connect to remote or alternative local bitcoin client instance.
Returns a :class:`~bitcoinrpc.connection.BitcoinConnection` object.
"""
from bitcoinrpc.connection import BitcoinConnection
return BitcoinConnection(user, password, host, port, use_https)
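# --- Usage sketch (not part of the original module) ---
# Assumes a locally running bitcoind whose RPC credentials are readable from the
# default bitcoin.conf; the returned BitcoinConnection proxies JSON-RPC calls
# such as getinfo() and getbalance().
if __name__ == '__main__':
    conn = connect_to_local()
    print(conn.getinfo())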
| XertroV/bitcoin-python3 | src/bitcoinrpc/__init__.py | Python | mit | 2,292 |
# -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import ExchangeError
class btcturk (Exchange):
def describe(self):
return self.deep_extend(super(btcturk, self).describe(), {
'id': 'btcturk',
'name': 'BTCTurk',
'countries': 'TR', # Turkey
'rateLimit': 1000,
'hasCORS': True,
'hasFetchTickers': True,
'hasFetchOHLCV': True,
'timeframes': {
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27992709-18e15646-64a3-11e7-9fa2-b0950ec7712f.jpg',
'api': 'https://www.btcturk.com/api',
'www': 'https://www.btcturk.com',
'doc': 'https://github.com/BTCTrader/broker-api-docs',
},
'api': {
'public': {
'get': [
'ohlcdata', # ?last=COUNT
'orderbook',
'ticker',
'trades', # ?last=COUNT(max 50)
],
},
'private': {
'get': [
'balance',
'openOrders',
'userTransactions', # ?offset=0&limit=25&sort=asc
],
'post': [
'buy',
'cancelOrder',
'sell',
],
},
},
'markets': {
'BTC/TRY': {'id': 'BTCTRY', 'symbol': 'BTC/TRY', 'base': 'BTC', 'quote': 'TRY', 'maker': 0.002 * 1.18, 'taker': 0.0035 * 1.18},
'ETH/TRY': {'id': 'ETHTRY', 'symbol': 'ETH/TRY', 'base': 'ETH', 'quote': 'TRY', 'maker': 0.002 * 1.18, 'taker': 0.0035 * 1.18},
'ETH/BTC': {'id': 'ETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'maker': 0.002 * 1.18, 'taker': 0.0035 * 1.18},
},
})
def fetch_balance(self, params={}):
response = self.privateGetBalance()
result = {'info': response}
base = {
'free': response['bitcoin_available'],
'used': response['bitcoin_reserved'],
'total': response['bitcoin_balance'],
}
quote = {
'free': response['money_available'],
'used': response['money_reserved'],
'total': response['money_balance'],
}
symbol = self.symbols[0]
market = self.markets[symbol]
result[market['base']] = base
result[market['quote']] = quote
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
market = self.market(symbol)
orderbook = self.publicGetOrderbook(self.extend({
'pairSymbol': market['id'],
}, params))
timestamp = int(orderbook['timestamp'] * 1000)
return self.parse_order_book(orderbook, timestamp)
def parse_ticker(self, ticker, market=None):
symbol = None
if market:
symbol = market['symbol']
timestamp = int(ticker['timestamp']) * 1000
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': None,
'open': float(ticker['open']),
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': float(ticker['average']),
'baseVolume': float(ticker['volume']),
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(params)
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
symbol = ticker['pair']
market = None
if symbol in self.markets_by_id:
market = self.markets_by_id[symbol]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
tickers = self.fetch_tickers()
result = None
if symbol in tickers:
result = tickers[symbol]
return result
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': None,
'price': trade['price'],
'amount': trade['amount'],
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
# maxCount = 50
response = self.publicGetTrades(self.extend({
'pairSymbol': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
timestamp = self.parse8601(ohlcv['Time'])
return [
timestamp,
ohlcv['Open'],
ohlcv['High'],
ohlcv['Low'],
ohlcv['Close'],
ohlcv['Volume'],
]
def fetch_ohlcv(self, symbol, timeframe='1d', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {}
if limit:
request['last'] = limit
response = self.publicGetOhlcdata(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
method = 'privatePost' + self.capitalize(side)
order = {
'Type': 'BuyBtc' if (side == 'buy') else 'SelBtc',
'IsMarketOrder': 1 if (type == 'market') else 0,
}
if type == 'market':
if side == 'buy':
order['Total'] = amount
else:
order['Amount'] = amount
else:
order['Price'] = price
order['Amount'] = amount
response = getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
def cancel_order(self, id, symbol=None, params={}):
return self.privatePostCancelOrder({'id': id})
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.id == 'btctrader':
raise ExchangeError(self.id + ' is an abstract base API for BTCExchange, BTCTurk')
url = self.urls['api'] + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
nonce = str(self.nonce())
body = self.urlencode(params)
secret = base64.b64decode(self.secret)
auth = self.apiKey + nonce
headers = {
'X-PCK': self.apiKey,
'X-Stamp': nonce,
'X-Signature': self.stringToBase64(self.hmac(self.encode(auth), secret, hashlib.sha256, 'binary')),
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
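# --- Usage sketch (illustrative, not part of the exchange implementation) ---
# Instances are normally obtained through the ccxt registry; API credentials are
# only required for the private endpoints signed in sign() above.
#
#   import ccxt
#   exchange = ccxt.btcturk({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_BASE64_SECRET'})
#   print(exchange.fetch_ticker('BTC/TRY'))
#   print(exchange.fetch_order_book('BTC/TRY'))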
| tritoanst/ccxt | python/ccxt/btcturk.py | Python | mit | 7,964 |
# -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable=W0141
import sys
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import(StringIO, lzip, range, map, zip, reduce, u,
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
import numpy as np
import itertools
import csv
docstring_to_string = """
Parameters
----------
frame : DataFrame
object to render
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
whether to print column labels, default True
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box.
index_names : bool, optional
Prints the names of the indexes, default True
force_unicode : bool, default False
Always return a unicode result. Deprecated in v0.10.0 as string
formatting is now rendered to unicode by default.
Returns
-------
formatted : string (or unicode, depending on data and options)"""
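# Usage sketch (illustrative): the parameters documented above surface through
# methods such as DataFrame.to_string(), e.g.
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1.0, 2.5], 'b': ['x', 'y']})
#   print(df.to_string(index=False, float_format=lambda v: '%.1f' % v))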
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True,
na_rep='NaN', name=False, footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.name = name
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.name:
name = com.pprint_thing(self.categorical.name,
escape_chars=('\t', '\r', '\n'))
footer += ('Name: %s' % name if self.categorical.name is not None
else '')
if self.length:
if footer:
footer += ', '
footer += "Length: %d" % len(self.categorical)
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None,
na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = ['%s' % i for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[')+result+u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self._chk_truncate()
def _chk_truncate(self):
from pandas.tools.merge import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: %s' % self.series.index.freqstr
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = com.pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ("Name: %s" %
series_name) if name is not None else ""
if self.length:
if footer:
footer += ', '
footer += 'Length: %d' % len(self.series)
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += 'dtype: %s' % com.pprint_thing(name)
# level infos are added to the end and in a new line, like it is done for Categoricals
# Only added when we request a name
if name and com.is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series.values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
return format_array(self.tr_series.get_values(), None,
float_format=self.float_format,
na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
maxlen = max(len(x) for x in fmt_index) # max index len
pad_space = min(maxlen, 60)
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = len(fmt_values[row_num-1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
dot_str = dot_str.center(width)
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
result = adjoin(3, *[fmt_index[1:], fmt_values])
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
def _strlen_func():
if compat.PY3: # pragma: no cover
_strlen = len
else:
encoding = get_option("display.encoding")
def _strlen(x):
try:
return len(x.decode(encoding))
except UnicodeError:
return len(x)
return _strlen
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return self.show_dimensions is True or (self.show_dimensions == 'truncate' and
self.is_truncated)
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if com.is_integer(i):
return self.formatters[i]
else:
return None
else:
if com.is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += docstring_to_string
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, **kwds):
self.frame = frame
self.buf = buf if buf is not None else StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = _ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
def _chk_truncate(self):
'''
Checks whether the frame should be truncated. If so, slices
the frame up.
'''
from pandas.tools.merge import concat
# Column of which first element is used to determine width of a dot col
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
max_rows_adj = self.h - n_add_rows # rows available to fill with actual data
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
_strlen = _strlen_func()
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
str_columns = self._get_formatted_column_labels(frame)
if self.header:
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
max_colwidth = max(self.col_space or 0,
*(_strlen(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=max_colwidth)
max_len = max(np.max([_strlen(x) for x in fmt_values]),
max_colwidth)
if self.justify == 'left':
cheader = [x.ljust(max_len) for x in cheader]
else:
cheader = [x.rjust(max_len) for x in cheader]
stringified.append(cheader + fmt_values)
else:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0))
stringified.append(fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
col_width = len(strcols[self.tr_size_col][0]) # infer from column header
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * (len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
cwidth = len(strcols[ix][row_num]) # infer from above row
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_str = my_str.ljust(cwidth)
elif is_dot_col:
cwidth = len(strcols[self.tr_size_col][0])
dot_str = my_str.center(cwidth)
else:
dot_str = my_str.rjust(cwidth)
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
com.pprint_thing(frame.columns),
com.pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print the whole frame
text = adjoin(1, *strcols)
elif not isinstance(self.max_cols, int) or self.max_cols > 0: # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = adjoin(1, *strcols).split('\n')
row_lens = Series(text).apply(len)
max_len_col_ix = np.argmax(row_lens)
max_len = row_lens[max_len_col_ix]
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See `self._to_str_columns
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[%d rows x %d columns]"
% (len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([len(x) for x in idx]).max() + adjoin_width
col_widths = [np.array([len(x) for x in col]).max()
if len(col) > 0 else 0
for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
self.escape = self.kwds.get('escape', True)
# TODO: column_format is not settable in df.to_latex
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
frame.columns, frame.index))
strcols = [[info_line]]
else:
strcols = self._to_str_columns()
if self.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.columns.names)
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format(name=name)
blank = ' ' * len(lev2[0])
lev3 = [blank] * clevels
for level_idx, group in itertools.groupby(
self.frame.index.labels[i]):
count = len(list(group))
lev3.extend([lev2[level_idx]] + [blank] * (count - 1))
strcols.insert(i, lev3)
if column_format is None:
dtypes = self.frame.dtypes.values
column_format = ''.join(map(get_col_type, dtypes))
if self.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
% type(column_format))
def write(buf, frame, column_format, strcols, longtable=False):
if not longtable:
buf.write('\\begin{tabular}{%s}\n' % column_format)
buf.write('\\toprule\n')
else:
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
nlevels = frame.columns.nlevels
for i, row in enumerate(zip(*strcols)):
if i == nlevels:
buf.write('\\midrule\n') # End of header
if longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{3}{r}{{Continued on next '
'page}} \\\\\n')
                        buf.write('\\midrule\n')
                        buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.escape:
crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
.replace('_', '\\_')
.replace('%', '\\%')
.replace('$', '\\$')
.replace('#', '\\#')
.replace('{', '\\{')
.replace('}', '\\}')
.replace('~', '\\textasciitilde')
.replace('^', '\\textasciicircum')
.replace('&', '\\&') if x else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if not longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
if hasattr(self.buf, 'write'):
write(self.buf, frame, column_format, strcols, longtable)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
write(f, frame, column_format, strcols, longtable)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
(frame.iloc[:, i]).get_values(),
formatter, float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space
)
def to_html(self, classes=None):
"""
Render a DataFrame to a html table.
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
def is_numeric_dtype(dtype):
return issubclass(dtype.type, np.number)
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes.values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any([l.is_floating for l in columns.levels])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if y not in self.formatters and need_leadsp[x] and not restrict_formatting:
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x
if not self._get_formatter(i) and need_leadsp[x]
else x]
for i, (col, x) in
enumerate(zip(columns, fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, MultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names,
formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(
list(x), justify='left', minimum=(self.col_space or 0)))
for x in fmt_index]
adjoined = adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['%s' % x for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
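# A minimal usage sketch: DataFrame.to_string(), to_html() and to_latex()
# construct the formatter above and call the matching method, so the public
# API is the simplest way to exercise this code.  The frame below is
# hypothetical.
def _example_to_string():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [3.5, 4.0]})
    # Right-justified text table without the index column.
    return df.to_string(index=False)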
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
def write(self, s, indent=0):
rs = com.pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if (self.fmt.col_space is not None
and self.fmt.col_space > 0):
tags = (tags or "")
tags += 'style="min-width: %s;"' % self.fmt.col_space
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
esc = OrderedDict(
[('&', r'&'), ('<', r'<'), ('>', r'>')]
)
else:
esc = {}
rs = com.pprint_thing(s, escape_chars=esc).strip()
self.write(
'%s%s</%s>' % (start_tag, rs, kind), indent)
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
if tags is None:
tags = {}
if align is None:
self.write('<tr>', indent)
else:
self.write('<tr style="text-align: %s;">' % align, indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write('</tr>', indent)
def write_result(self, buf):
indent = 0
frame = self.frame
_classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise AssertionError(('classes must be list or tuple, '
'not %s') % type(self.classes))
_classes.extend(self.classes)
self.write('<table border="1" class="%s">' % ' '.join(_classes),
indent)
indent += self.indent_delta
indent = self._write_header(indent)
indent = self._write_body(indent)
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>%d rows %s %d columns</p>') %
(len(frame), by, len(frame.columns)))
_put_lines(buf, self.elements)
def _write_header(self, indent):
truncate_h = self.fmt.truncate_h
row_levels = self.frame.index.nlevels
if not self.fmt.header:
# write nothing
return indent
def _column_header():
if self.fmt.index:
row = [''] * (self.frame.index.nlevels - 1)
else:
row = []
if isinstance(self.columns, MultiIndex):
if self.fmt.has_column_names and self.fmt.index:
row.append(single_column_table(self.columns.names))
else:
row.append('')
style = "text-align: %s;" % self.fmt.justify
row.extend([single_column_table(c, self.fmt.justify, style) for
c in self.columns])
else:
if self.fmt.index:
row.append(self.columns.name or '')
row.extend(self.columns)
return row
self.write('<thead>', indent)
row = []
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
template = 'colspan="%d" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel,
adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths,
levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
else: # sparse col headers do not receive a ...
values = (values[:ins_col] + (values[ins_col - 1],) +
values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + [u('...')] + values[ins_col:]
name = self.columns.names[lnum]
row = [''] * (row_levels - 1) + ['' if name is None
else com.pprint_thing(name)]
if row == [""] and self.fmt.index is False:
row = []
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags,
header=True)
else:
col_row = _column_header()
align = self.fmt.justify
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
col_row.insert(ins_col, '...')
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
if self.fmt.has_index_names:
row = [
x if x is not None else '' for x in self.frame.index.names
] + [''] * min(len(self.columns), self.max_cols)
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
row.insert(ins_col, '')
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
self.write('</thead>', indent)
return indent
def _write_body(self, indent):
self.write('<tbody>', indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(len(self.frame)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write('</tbody>', indent)
indent -= self.indent_delta
return indent
def _write_regular_rows(self, fmt_values, indent):
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
ncols = len(self.fmt.tr_frame.columns)
nrows = len(self.fmt.tr_frame)
fmt = self.fmt._get_formatter('__index__')
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta, tags=None,
nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + 1
row.insert(dot_col_ix, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="%d" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
ncols = len(frame.columns)
nrows = len(frame)
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
idx_values.insert(ins_row, tuple(dot_row))
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(ins_row, tuple([u('...')]*len(level_lengths)))
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=tags,
nindex_levels=len(levels) - sparse_offset)
else:
for i in range(len(frame)):
idx_values = list(zip(*frame.index.format(sparsify=False,
adjoin=False,
names=False)))
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
def _get_level_lengths(levels, sentinel=''):
from itertools import groupby
def _make_grouper():
record = {'count': 0}
def grouper(x):
if x != sentinel:
record['count'] += 1
return record['count']
return grouper
result = []
for lev in levels:
i = 0
f = _make_grouper()
recs = {}
for key, gpr in groupby(lev, f):
values = list(gpr)
recs[i] = len(values)
i += len(values)
result.append(recs)
return result
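# A minimal sketch of what _get_level_lengths returns: one dict per level,
# mapping the starting position of each run of labels to the run's length.
# Sparsified (blank) entries extend the preceding run.  The sample levels
# are hypothetical.
def _example_get_level_lengths():
    levels = [['a', '', '', 'b'], ['x', 'y', 'x', 'y']]
    # -> [{0: 3, 3: 1}, {0: 1, 1: 1, 2: 1, 3: 1}]
    return _get_level_lengths(levels)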
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
line_terminator='\n', chunksize=None, engine=None,
tupleize_cols=False, quotechar='"', date_format=None,
doublequote=True, escapechar=None, decimal='.'):
self.engine = engine # remove for 0.13
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = path_or_buf
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
# GH3457
if not self.obj.columns.is_unique and engine == 'python':
raise NotImplementedError("columns.is_unique == False not "
"supported with engine='python'")
self.tupleize_cols = tupleize_cols
self.has_mi_columns = isinstance(obj.columns, MultiIndex
) and not self.tupleize_cols
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = np.asarray(list(cols))
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = np.asarray(list(cols))
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if isinstance(obj.index, PeriodIndex):
self.data_index = obj.index.to_timestamp()
if (isinstance(self.data_index, DatetimeIndex) and
date_format is not None):
self.data_index = Index([x.strftime(date_format)
if notnull(x) else ''
for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
    # original Python implementation of df.to_csv,
    # invoked by df.to_csv(engine='python')
def _helper_csv(self, writer, na_rep=None, cols=None,
header=True, index=True,
index_label=None, float_format=None, date_format=None):
if cols is None:
cols = self.columns
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if has_aliases or header:
if index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(self.obj.index, MultiIndex):
index_label = []
for i, name in enumerate(self.obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = self.obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
encoded_cols = list(write_cols)
writer.writerow(encoded_labels + encoded_cols)
else:
encoded_cols = list(cols)
writer.writerow(encoded_cols)
if date_format is None:
date_formatter = lambda x: Timestamp(x)._repr_base
else:
def strftime_with_nulls(x):
x = Timestamp(x)
if notnull(x):
return x.strftime(date_format)
date_formatter = lambda x: strftime_with_nulls(x)
data_index = self.obj.index
if isinstance(self.obj.index, PeriodIndex):
data_index = self.obj.index.to_timestamp()
if isinstance(data_index, DatetimeIndex) and date_format is not None:
data_index = Index([date_formatter(x) for x in data_index])
values = self.obj.copy()
values.index = data_index
values.columns = values.columns.to_native_types(
na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
values = values[cols]
series = {}
for k, v in compat.iteritems(values._series):
series[k] = v.values
nlevels = getattr(data_index, 'nlevels', 1)
for j, idx in enumerate(data_index):
row_fields = []
if index:
if nlevels == 1:
row_fields = [idx]
else: # handle MultiIndex
row_fields = list(idx)
for i, col in enumerate(cols):
val = series[col][j]
if lib.checknull(val):
val = na_rep
if float_format is not None and com.is_float(val):
val = float_format % val
elif isinstance(val, (np.datetime64, Timestamp)):
val = date_formatter(val)
row_fields.append(val)
writer.writerow(row_fields)
def save(self):
# create the writer & save
if hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f = com._get_handle(self.path_or_buf, self.mode,
encoding=self.encoding)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
self.writer = com.UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
if self.engine == 'python':
# to be removed in 0.13
self._helper_csv(self.writer, na_rep=self.na_rep,
float_format=self.float_format,
cols=self.cols, header=self.header,
index=self.index,
index_label=self.index_label,
date_format=self.date_format)
else:
self._save()
finally:
if close:
f.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, MultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label, (list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns:
encoded_labels += list(write_cols)
# write out the mi
if has_mi_columns:
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns.get_level_values(i))
writer.writerow(col_line)
# add blanks for the columns, so that we
# have consistent seps
encoded_labels.extend([''] * len(columns))
# write out the index label line
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
date_format=self.date_format,
quoting=self.quoting)
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
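# A minimal usage sketch: DataFrame.to_csv builds a CSVFormatter and calls
# save(); constructing one directly, as below, is illustrative only and the
# frame and file name are hypothetical.
def _example_csv_formatter():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [3.5, 4.25]})
    CSVFormatter(df, 'example.csv', index=False, float_format='%.2f').save()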
# from collections import namedtuple
# ExcelCell = namedtuple("ExcelCell",
# 'row, col, val, style, mergestart, mergeend')
class ExcelCell(object):
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
__slots__ = __fields__
def __init__(self, row, col, val,
style=None, mergestart=None, mergeend=None):
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
header_style = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
class ExcelFormatter(object):
"""
    Class for formatting a DataFrame to a list of ExcelCells.
Parameters
----------
df : dataframe
    na_rep : string, default ''
        Representation used for missing values
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
output row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
merge_cells : boolean, default False
Format MultiIndex and Hierarchical Rows as merged cells.
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf'):
self.df = df
self.rowcounter = 0
self.na_rep = na_rep
self.columns = cols
if cols is None:
self.columns = df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
def _format_value(self, val):
if lib.checknull(val):
val = self.na_rep
elif com.is_float(val):
if np.isposinf(val):
val = self.inf_rep
elif np.isneginf(val):
val = '-%s' % self.inf_rep
elif self.float_format is not None:
val = float(self.float_format % val)
return val
def _format_header_mi(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not(has_aliases or self.header):
return
columns = self.columns
level_strs = columns.format(sparsify=True, adjoin=False, names=False)
level_lengths = _get_level_lengths(level_strs)
coloffset = 0
lnum = 0
if self.index and isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
# Format multi-index as a merged cells.
for lnum in range(len(level_lengths)):
name = columns.names[lnum]
yield ExcelCell(lnum, coloffset, name, header_style)
for lnum, (spans, levels, labels) in enumerate(zip(level_lengths,
columns.levels,
columns.labels)
):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(lnum,
coloffset + i + 1,
values[i],
header_style,
lnum,
coloffset + i + spans[i])
else:
yield ExcelCell(lnum,
coloffset + i + 1,
values[i],
header_style)
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
v = ".".join(map(com.pprint_thing, values))
yield ExcelCell(lnum, coloffset + i + 1, v, header_style)
self.rowcounter = lnum
def _format_header_regular(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
coloffset = 0
if self.index:
coloffset = 1
if isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0])
colnames = self.columns
if has_aliases:
if len(self.header) != len(self.columns):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(self.columns), len(self.header))))
else:
colnames = self.header
for colindex, colname in enumerate(colnames):
yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
header_style)
def _format_header(self):
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2 = ()
if self.df.index.names:
row = [x if x is not None else ''
for x in self.df.index.names] + [''] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
for colindex, val in enumerate(row))
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self):
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
def _format_regular_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
coloffset = 0
# output index and index_label?
if self.index:
            # check aliases
# if list only take first as this is not a MultiIndex
if self.index_label and isinstance(self.index_label,
(list, tuple, np.ndarray, Index)):
index_label = self.index_label[0]
# if string good to go
elif self.index_label and isinstance(self.index_label, str):
index_label = self.index_label
else:
index_label = self.df.index.names[0]
if index_label and self.header is not False:
if self.merge_cells:
yield ExcelCell(self.rowcounter,
0,
index_label,
header_style)
self.rowcounter += 1
else:
yield ExcelCell(self.rowcounter - 1,
0,
index_label,
header_style)
# write index_values
index_values = self.df.index
if isinstance(self.df.index, PeriodIndex):
index_values = self.df.index.to_timestamp()
coloffset = 1
for idx, idxval in enumerate(index_values):
yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
# Get a frame that will account for any duplicates in the column names.
col_mapped_frame = self.df.loc[:, self.columns]
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = col_mapped_frame.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val)
def _format_hierarchical_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
if self.index:
index_labels = self.df.index.names
# check for aliases
if self.index_label and isinstance(self.index_label,
(list, tuple, np.ndarray, Index)):
index_labels = self.index_label
# if index labels are not empty go ahead and dump
if (any(x is not None for x in index_labels)
and self.header is not False):
if not self.merge_cells:
self.rowcounter -= 1
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter,
cidx,
name,
header_style)
self.rowcounter += 1
if self.merge_cells:
# Format hierarchical rows as merged cells.
level_strs = self.df.index.format(sparsify=True, adjoin=False,
names=False)
level_lengths = _get_level_lengths(level_strs)
for spans, levels, labels in zip(level_lengths,
self.df.index.levels,
self.df.index.labels):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(self.rowcounter + i,
gcolidx,
values[i],
header_style,
self.rowcounter + i + spans[i] - 1,
gcolidx)
else:
yield ExcelCell(self.rowcounter + i,
gcolidx,
values[i],
header_style)
gcolidx += 1
else:
# Format hierarchical rows with non-merged values.
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield ExcelCell(self.rowcounter + idx,
gcolidx,
indexcolval,
header_style)
gcolidx += 1
# Get a frame that will account for any duplicates in the column names.
col_mapped_frame = self.df.loc[:, self.columns]
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = col_mapped_frame.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val)
def get_formatted_cells(self):
for cell in itertools.chain(self._format_header(),
self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
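# A minimal usage sketch: the ExcelWriter machinery consumes the ExcelCell
# stream produced by get_formatted_cells().  The frame is hypothetical;
# inf_rep shows how infinities are rewritten before writing.
def _example_excel_formatter():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [3.5, float('inf')]})
    for cell in ExcelFormatter(df, inf_rep='inf').get_formatted_cells():
        print('%d %d %r' % (cell.row, cell.col, cell.val))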
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right'):
if com.is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif com.is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif com.is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif com.is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format,
formatter=formatter, space=space,
justify=justify)
return fmt_obj.get_result()
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right'):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = '%% .%dg' % get_option("display.precision")
float_format = lambda x: fmt_str % x
else:
float_format = self.float_format
formatter = self.formatter if self.formatter is not None else \
(lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n')))
def _format(x):
if self.na_rep is not None and lib.checknull(x):
if x is None:
return 'None'
return self.na_rep
elif isinstance(x, PandasObject):
return '%s' % x
else:
# object dtype
return '%s' % formatter(x)
vals = self.values
is_float = lib.map_infer(vals, com.is_float) & notnull(vals)
leading_space = is_float.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float[i] and leading_space:
fmt_values.append(' %s' % _format(v))
elif is_float[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(' %s' % _format(v))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
if self.float_format is not None and self.formatter is None:
self.formatter = self.float_format
def _format_with(self, fmt_str):
def _val(x, threshold):
if notnull(x):
if (threshold is None or
abs(x) > get_option("display.chop_threshold")):
return fmt_str % x
else:
if fmt_str.endswith("e"): # engineering format
return "0"
else:
return fmt_str % 0
else:
return self.na_rep
threshold = get_option("display.chop_threshold")
fmt_values = [_val(x, threshold) for x in self.values]
return _trim_zeros(fmt_values, self.na_rep)
def _format_strings(self):
if self.formatter is not None:
fmt_values = [self.formatter(x) for x in self.values]
else:
fmt_str = '%% .%df' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
if len(fmt_values) > 0:
maxlen = max(len(x) for x in fmt_values)
else:
maxlen = 0
too_long = maxlen > self.digits + 5
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
has_large_values = (abs_vals > 1e8).any()
has_small_values = ((abs_vals < 10 ** (-self.digits+1)) &
(abs_vals > 0)).any()
if too_long and has_large_values:
fmt_str = '%% .%de' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
elif has_small_values:
fmt_str = '%% .%de' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
return fmt_values
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '% d' % x)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
# we may have a tz, if so, then need to process element-by-element
# when DatetimeBlockWithTimezones is a reality this could be fixed
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if values.tz is None:
fmt_values = format_array_from_datetime(values.asi8.ravel(),
format=_get_format_datetime64_from_values(values, self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
fmt_values = fmt_values.tolist()
else:
values = values.asobject
is_dates_only = _is_dates_only(values)
            formatter = (self.formatter or _get_format_datetime64(is_dates_only, nat_rep=self.nat_rep, date_format=self.date_format))
fmt_values = [ formatter(x) for x in self.values ]
return fmt_values
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or lib.checknull(x):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(x,
nat_rep=nat_rep,
date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return None
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = 'even_day'
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{0}'".format(result)
return result
return _formatter
def _make_fixed_width(strings, justify='right', minimum=None):
if len(strings) == 0 or justify == 'all':
return strings
_strlen = _strlen_func()
max_len = np.max([_strlen(x) for x in strings])
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
if justify == 'left':
justfunc = lambda self, x: self.ljust(x)
else:
justfunc = lambda self, x: self.rjust(x)
def just(x):
eff_len = max_len
if conf_max is not None:
if (conf_max > 3) & (_strlen(x) > max_len):
x = x[:eff_len - 3] + '...'
return justfunc(x, eff_len)
result = [just(x) for x in strings]
return result
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros and decimal points.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and
not(any([('e' in x) or ('E' in x) for x in non_na])))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# trim decimal points
return [x[:-1] if x.endswith('.') and x != na_rep else x for x in trimmed]
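# A small worked example of _trim_zeros: trailing zeros are stripped while
# every non-missing value still ends in '0', then bare trailing decimal
# points are removed.  The inputs are hypothetical.
def _example_trim_zeros():
    assert _trim_zeros(['1.500', '2.000', 'NaN']) == ['1.5', '2.0', 'NaN']
    assert _trim_zeros(['1.000', '2.000']) == ['1', '2']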
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
table += (' align="%s"' % align)
if style is not None:
table += (' style="%s"' % style)
table += '><tbody>'
for i in column:
table += ('<tr><td>%s</td></tr>' % str(i))
table += '</tbody></table>'
return table
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
table += ('<td>%s</td>' % str(i))
table += '</tr></tbody></table>'
return table
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
else:
return index.name is not None
# ------------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
def detect_console_encoding():
"""
Try to find the most capable encoding supported by the console.
    Slightly modified from the way IPython handles the same issue.
"""
import locale
global _initial_defencoding
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
except AttributeError:
pass
# try again for something better
if not encoding or 'ascii' in encoding.lower():
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
# when all else fails. this will usually be "ascii"
if not encoding or 'ascii' in encoding.lower():
encoding = sys.getdefaultencoding()
# GH3360, save the reported defencoding at import time
# MPL backends may change it. Make available for debugging.
if not _initial_defencoding:
_initial_defencoding = sys.getdefaultencoding()
return encoding
def get_console_size():
"""Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
display_width = get_option('display.width')
# deprecated.
display_height = get_option('display.height', silent=True)
# Consider
# interactive shell terminal, can detect term size
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
# size non-interactive script, should disregard term size
# in addition
# width,height have default values, but setting to 'None' signals
# should use Auto-Detection, But only in interactive shell-terminal.
# Simple. yeah.
if com.in_interactive_session():
if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
terminal_width = get_default_val('display.width')
terminal_height = get_default_val('display.height')
else:
# pure terminal
terminal_width, terminal_height = get_terminal_size()
else:
terminal_width, terminal_height = None, None
# Note if the User sets width/Height to None (auto-detection)
# and we're in a script (non-inter), this will return (None,None)
# caller needs to deal.
return (display_width or terminal_width, display_height or terminal_height)
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-%02d' % (-int_pow10)
else:
prefix = 'E+%02d' % int_pow10
mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("% g%s")
else:
format_str = (u("%% .%if%%s") % self.accuracy)
formatted = format_str % (mant, prefix)
return formatted # .strip()
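# Two worked examples matching the docstring of __call__ above; the inputs
# are hypothetical.
def _example_eng_formatter():
    assert EngFormatter(accuracy=1, use_eng_prefix=True)(1000000) == ' 1.0M'
    assert EngFormatter(accuracy=2, use_eng_prefix=False)(-1e-6) == '-1.00E-06'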
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
if __name__ == '__main__':
arr = np.array([746.03, 0.00, 5620.00, 1592.36])
# arr = np.array([11111111.1, 1.55])
# arr = [314200.0034, 1.4125678]
arr = np.array([327763.3119, 345040.9076, 364460.9915, 398226.8688,
383800.5172, 433442.9262, 539415.0568, 568590.4108,
599502.4276, 620921.8593, 620898.5294, 552427.1093,
555221.2193, 519639.7059, 388175.7, 379199.5854,
614898.25, 504833.3333, 560600., 941214.2857,
1134250., 1219550., 855736.85, 1042615.4286,
722621.3043, 698167.1818, 803750.])
fmt = FloatArrayFormatter(arr, digits=7)
print(fmt.get_result())
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/format.py | Python | mit | 88,798 |
from Numberjack.ExternalSolver import ExternalCNFSolver
from Numberjack import NBJ_STD_Solver
import re
class GlucoseSolver(ExternalCNFSolver):
def __init__(self):
super(GlucoseSolver, self).__init__()
self.solverexec = "glucose"
self.info_regexps = { # See doc on ExternalSolver.info_regexps
'nodes': (re.compile(r'^decisions[ ]+:[ ]+(?P<nodes>\d+)[ ]'), int),
'failures': (re.compile(r'^conflicts[ ]+:[ ]+(?P<failures>\d+)[ ]'), int),
'propags': (re.compile(r'^propagations[ ]+:[ ]+(?P<propags>\d+)[ ]'), int),
'time': (re.compile(r'^CPU time[ ]+:[ ]+(?P<time>\d+\.\d+)[ ]'), float),
}
def build_solver_cmd(self):
return "%(solverexec)s -cpu-lim=%(timelimit)d %(filename)s" % vars(self)
class Solver(NBJ_STD_Solver):
def __init__(self, model=None, X=None, FD=False, clause_limit=-1, encoding=None):
NBJ_STD_Solver.__init__(self, "Glucose", "SatWrapper", model, None, FD, clause_limit, encoding)
self.solver_id = model.getSolverId()
self.solver.set_model(model, self.solver_id, self.Library, solver=self)
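# A minimal usage sketch, assuming the usual Numberjack front end in which a
# Model built from expressions is loaded into a named back end ('Glucose'
# resolving to the Solver class above).  The variables and constraint are
# hypothetical.
def _example_glucose_usage():
    from Numberjack import Model, Variable
    x, y = Variable(0, 1), Variable(0, 1)
    model = Model(x + y == 1)
    solver = model.load('Glucose')
    if solver.solve():
        return x.get_value(), y.get_value()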
| JElchison/Numberjack | Numberjack/solvers/Glucose.py | Python | lgpl-2.1 | 1,138 |
# -*- coding: utf-8 -*-
"""Filesystem path resource."""
from __future__ import division
import pathlib
from xal.resource import Resource
class Path(Resource):
POSIX_FLAVOUR = 'posix'
WINDOWS_FLAVOUR = 'windows'
def __init__(self, path, flavour=POSIX_FLAVOUR):
super(Path, self).__init__()
#: Initial path value, as passed to constructor.
#: This attribute makes it possible to initialize a :class:`Path`
#: instance without a `xal` session. Without `xal` session, property
#: :attr:`pure_path` cannot be resolved, because the filesystem's
#: flavour is unknown.
if flavour == Path.POSIX_FLAVOUR:
self.pure_path = pathlib.PurePosixPath(str(path))
else:
raise NotImplementedError()
#: Path instance to restore as working directory on exit.
#: Methods such as ``cd`` return a :class:`Path` instance having this
#: attribute. So that, in a ``with`` context, the previous working
#: directory can be restored on exit.
self._exit_cwd = None
#: Whether this instance is a temporary resource, i.e. whether it
#: should be destroyed on ``__exit__``. Methods like :meth:`mkdir`
#: return a :class:`Path` instance having this attribute.
self._exit_rm = False
def _cast(self, value):
"""Return value converted to :class:`Path`, with XAL session."""
path = Path(value)
path.xal_session = self.xal_session
return path
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Restore working directory.
if self._exit_cwd:
self.xal_session.path.cd(self._exit_cwd)
# Destroy temporary directory.
if self._exit_rm:
if self.is_absolute():
self.rmdir()
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
other_path = Path(self.pure_path / other.pure_path)
other_path.xal_session = self.xal_session
return other_path
def __bytes__(self):
return self.pure_path.__bytes__()
def __str__(self):
return str(self.pure_path)
def __unicode__(self):
return unicode(self.pure_path)
def __repr__(self):
return "{cls}('{path}')".format(cls=self.__class__.__name__,
path=str(self.pure_path))
def __copy__(self):
other = Path(self.pure_path)
other.xal_session = self.xal_session
return other
def __eq__(self, other):
# Compare sessions.
if self.xal_session and other.xal_session:
if self.xal_session != other.xal_session:
return False
# Compare paths.
return self.pure_path == other.pure_path
def __cmp__(self, other):
# Compare sessions.
if self.xal_session and other.xal_session:
if self.xal_session != other.xal_session:
if self.pure_path != other.pure_path:
return cmp(self.pure_path, other.pure_path)
else:
return cmp(self.xal_session, other.xal_session)
# Compare paths.
return cmp(self.pure_path, other.pure_path)
@property
def drive(self):
return self.pure_path.drive
@property
def root(self):
return self.pure_path.root
@property
def anchor(self):
return self.pure_path.anchor
@property
def parents(self):
parents = []
for pure_parent in self.pure_path.parents:
parent = Path(str(pure_parent))
parent.xal_session = self.xal_session
parents.append(parent)
return tuple(parents)
@property
def parent(self):
pure_parent = self.pure_path.parent
parent = Path(str(pure_parent))
parent.xal_session = self.xal_session
return parent
@property
def name(self):
return self.pure_path.name
@property
def suffix(self):
return self.pure_path.suffix
@property
def suffixes(self):
return self.pure_path.suffixes
@property
def stem(self):
return self.pure_path.stem
def cd(self):
"""Change working directory."""
return self.xal_session.path.cd(self)
def as_posix(self):
return self.pure_path.as_posix()
def as_uri(self):
return self.pure_path.as_uri()
def is_absolute(self):
return self.pure_path.is_absolute()
def is_reserved(self):
return self.pure_path.is_reserved()
def joinpath(self, *other):
other_path = self.__copy__()
for third in other:
other_path = other_path / Path(third)
return other_path
def match(self, pattern):
return self.pure_path.match(pattern)
def relative_to(self, *other):
other_path = self.__copy__()
other_pure_path = [self._cast(item).pure_path for item in other]
other_path.pure_path = self.pure_path.relative_to(*other_pure_path)
return other_path
def with_name(self, name):
other_path = self.__copy__()
other_path.pure_path = self.pure_path.with_name(name)
return other_path
def with_suffix(self, suffix):
other_path = self.__copy__()
other_path.pure_path = self.pure_path.with_suffix(suffix)
return other_path
def stat(self):
return self.xal_session.path.stat(self)
def chmod(self, mode):
return self.xal_session.path.chmod(self, mode)
def exists(self):
return self.xal_session.path.exists(self)
def glob(self, pattern):
return self.xal_session.path.glob(self, pattern)
def group(self):
return self.xal_session.path.group(self)
def is_dir(self):
return self.xal_session.path.is_dir(self)
def is_file(self):
return self.xal_session.path.is_file(self)
def is_symlink(self):
return self.xal_session.path.is_symlink(self)
def is_socket(self):
return self.xal_session.path.is_socket(self)
def is_fifo(self):
return self.xal_session.path.is_fifo(self)
def is_block_device(self):
return self.xal_session.path.is_block_device(self)
def is_char_device(self):
return self.xal_session.path.is_char_device(self)
def iterdir(self):
return self.xal_session.path.iterdir(self)
def lchmod(self, mode):
return self.xal_session.path.lchmod(self, mode)
def lstat(self):
return self.xal_session.path.lstat(self)
def mkdir(self, mode=0o777, parents=False):
return self.xal_session.path.mkdir(self, mode=mode, parents=parents)
def open(self, mode='r', buffering=-1, encoding=None, errors=None,
newline=None):
return self.xal_session.path.open(
self,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def owner(self):
return self.xal_session.path.owner(self)
def rename(self, target):
other_path = self._cast(target)
result = self.xal_session.path.rename(self, other_path)
self.pure_path = other_path.pure_path
return result
def replace(self, target):
other_path = self._cast(target)
result = self.xal_session.path.replace(self, other_path)
self.pure_path = other_path.pure_path
return result
def resolve(self):
return self.xal_session.path.resolve(self)
def rglob(self, pattern):
return self.xal_session.path.rglob(self, pattern)
def rmdir(self):
return self.xal_session.path.rmdir(self)
def symlink_to(self, target, target_is_directory=False):
return self.xal_session.path.symlink_to(
self,
target=target,
target_is_directory=target_is_directory)
def touch(self, mode=0o777, exist_ok=True):
return self.xal_session.path.touch(
self,
mode=mode,
exist_ok=exist_ok)
def unlink(self):
return self.xal_session.path.unlink(self)
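# A minimal sketch of the pure-path part of the API, which does not need a
# live xal session (assuming the Resource base class leaves xal_session set
# to None by default): joining and suffix handling only touch the wrapped
# pathlib.PurePosixPath.  The paths are hypothetical.
def _example_pure_path_usage():
    cfg = Path('/etc') / Path('app/config.yaml')
    assert str(cfg) == '/etc/app/config.yaml'
    assert cfg.name == 'config.yaml' and cfg.suffix == '.yaml'
    assert str(cfg.with_suffix('.json')) == '/etc/app/config.json'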
| benoitbryon/xal | xal/path/resource.py | Python | bsd-3-clause | 8,243 |
"""
Implementation of the standard :mod:`thread` module that spawns greenlets.
.. note::
This module is a helper for :mod:`gevent.monkey` and is not
intended to be used directly. For spawning greenlets in your
applications, prefer higher level constructs like
:class:`gevent.Greenlet` class or :func:`gevent.spawn`.
"""
from __future__ import absolute_import
import sys
__implements__ = ['allocate_lock',
'get_ident',
'exit',
'LockType',
'stack_size',
'start_new_thread',
'_local']
__imports__ = ['error']
if sys.version_info[0] <= 2:
import thread as __thread__ # pylint:disable=import-error
else:
import _thread as __thread__ # pylint:disable=import-error
__target__ = '_thread'
__imports__ += ['RLock',
'TIMEOUT_MAX',
'allocate',
'exit_thread',
'interrupt_main',
'start_new']
error = __thread__.error
from gevent._compat import PY3
from gevent._compat import PYPY
from gevent._util import copy_globals
from gevent.hub import getcurrent, GreenletExit
from gevent.greenlet import Greenlet
from gevent.lock import BoundedSemaphore
from gevent.local import local as _local
def get_ident(gr=None):
if gr is None:
gr = getcurrent()
return id(gr)
def start_new_thread(function, args=(), kwargs=None):
if kwargs is not None:
greenlet = Greenlet.spawn(function, *args, **kwargs)
else:
greenlet = Greenlet.spawn(function, *args)
return get_ident(greenlet)
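# Illustrative usage (not part of gevent itself). Under monkey-patching these
# helpers mimic the stdlib ``thread`` API, but each "thread" is a greenlet;
# ``worker`` below is a placeholder function. For application code, prefer
# gevent.spawn / gevent.Greenlet as noted in the module docstring.
#
#     ident = start_new_thread(worker, ('some-arg',))
#     current = get_ident()   # id of the currently running greenlet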
class LockType(BoundedSemaphore):
# Change the ValueError into the appropriate thread error
# and any other API changes we need to make to match behaviour
_OVER_RELEASE_ERROR = __thread__.error
if PYPY and PY3:
_OVER_RELEASE_ERROR = RuntimeError
if PY3:
_TIMEOUT_MAX = __thread__.TIMEOUT_MAX # python 2: pylint:disable=no-member
def acquire(self, blocking=True, timeout=-1):
# Transform the default -1 argument into the None that our
# semaphore implementation expects, and raise the same error
# the stdlib implementation does.
if timeout == -1:
timeout = None
if not blocking and timeout is not None:
raise ValueError("can't specify a timeout for a non-blocking call")
if timeout is not None:
if timeout < 0:
# in C: if(timeout < 0 && timeout != -1)
raise ValueError("timeout value must be strictly positive")
if timeout > self._TIMEOUT_MAX:
raise OverflowError('timeout value is too large')
return BoundedSemaphore.acquire(self, blocking, timeout)
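    # Behaviour sketch for the acquire() shim above (comments only, not
    # original code):
    #   lock.acquire()                    -> blocks until available
    #   lock.acquire(False)               -> returns immediately
    #   lock.acquire(True, timeout=2.5)   -> waits up to 2.5 seconds
    #   lock.acquire(False, timeout=1)    -> raises ValueError, as in CPython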
allocate_lock = LockType
def exit():
raise GreenletExit
if hasattr(__thread__, 'stack_size'):
_original_stack_size = __thread__.stack_size
def stack_size(size=None):
if size is None:
return _original_stack_size()
if size > _original_stack_size():
return _original_stack_size(size)
else:
pass
# not going to decrease stack_size, because otherwise other greenlets in this thread will suffer
else:
__implements__.remove('stack_size')
__imports__ = copy_globals(__thread__, globals(),
only_names=__imports__,
ignore_missing_names=True)
__all__ = __implements__ + __imports__
__all__.remove('_local')
# XXX interrupt_main
# XXX _count()
| burzillibus/RobHome | venv/lib/python2.7/site-packages/gevent/thread.py | Python | mit | 3,633 |
## Copyright 2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
This defines Lino's standard system signals.
"""
from django.dispatch import Signal, receiver
pre_analyze = Signal(['models_list'])
"""
Sent exactly once per process at site startup,
just before Lino analyzes the models.
sender:
the Site instance
models_list:
list of models
"""
post_analyze = Signal(['models_list'])
"""
Sent exactly once per process at site startup,
just after Site has finished to analyze the models.
"""
auto_create = Signal(["field","value"])
"""
The :attr:`auto_create` signal is sent when
:func:`lookup_or_create` silently created a model instance.
Arguments sent with this signal:
``sender``
The model instance that has been created.
``field``
The database field
``known_values``
The specified known values
"""
pre_merge = Signal(['request'])
"""
Sent when a model instance is being merged into another instance.
"""
pre_remove_child = Signal(['request','child'])
pre_add_child = Signal(['request'])
pre_ui_create = Signal(['request'])
pre_ui_update = Signal(['request'])
pre_ui_delete = Signal(['request'])
"""
Sent just before a model instance is being deleted using
the user interface.
``request``:
The HttpRequest object
"""
pre_ui_build = Signal()
post_ui_build = Signal()
database_connected = Signal()
#~ database_ready = Signal()
from django.db.models.fields import NOT_PROVIDED
class ChangeWatcher(object):
"""
Utility to watch changes and send pre_ui_update
"""
def __init__(self,watched):
self.original_state = dict(watched.__dict__)
self.watched = watched
#~ self.is_new = is_new
#~ self.request
def is_dirty(self):
#~ if self.is_new:
#~ return True
for k,v in self.original_state.iteritems():
if v != self.watched.__dict__.get(k, NOT_PROVIDED):
return True
return False
def send_update(self,request):
#~ print "ChangeWatcher.send_update()", self.watched
pre_ui_update.send(sender=self,request=request)
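# Illustrative usage of ChangeWatcher (not part of the original module):
#
#     watcher = ChangeWatcher(obj)      # snapshot of obj.__dict__
#     obj.name = 'new value'
#     if watcher.is_dirty():            # some attribute differs from snapshot
#         watcher.send_update(request)  # emits pre_ui_update(sender=watcher)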
| MaxTyutyunnikov/lino | lino/core/signals.py | Python | gpl-3.0 | 2,881 |
# mysql/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database.
Supported Versions and Features
-------------------------------
SQLAlchemy supports 6 major MySQL versions: 3.23, 4.0, 4.1, 5.0, 5.1 and 6.0,
with capabilities increasing with more modern servers.
Versions 4.1 and higher support the basic SQL functionality that SQLAlchemy
uses in the ORM and SQL expressions. These versions pass the applicable tests
in the suite 100%. No heroic measures are taken to work around major missing
SQL features- if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
Most available DBAPI drivers are supported; see below.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 4.1.1
Table Reflection 3.23.x
DDL Generation 4.1.1
utf8/Full Unicode Connections 4.1.1
Transactions 3.23.15
Two-Phase Transactions 5.0.3
Nested Transactions 5.0.3
===================================== ===============
See the official MySQL documentation for detailed information about features
supported in any given server release.
Connecting
----------
See the API documentation on individual drivers for details on connecting.
Connection Timeouts
-------------------
MySQL automatically closes connections that have been idle for eight hours or
more. To avoid this issue, use the ``pool_recycle`` option, which controls the
maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
Storage Engines
---------------
Most MySQL server installations have a default table type of ``MyISAM``, a
non-transactional table type. During a transaction, non-transactional storage
engines do not participate and continue to store table changes in autocommit
mode. For fully atomic transactions, all participating tables must use a
transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, ``PBXT``, etc.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
a ``mysql_engine='whatever'`` to the ``Table`` constructor. Any MySQL table
creation option can be specified in this syntax::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
Keys
----
Not all MySQL storage engines support foreign keys. For ``MyISAM`` and
similar engines, the information loaded by table reflection will not include
foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
        mytable_id INTEGER NOT NULL AUTO_INCREMENT,
        PRIMARY KEY (mytable_id)
)
You can disable this behavior by supplying ``autoincrement=False`` to the
:class:`~sqlalchemy.Column`. This flag can also be used to enable
auto-increment on a secondary column in a multi-column key for some storage
engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
SQL Mode
--------
MySQL SQL modes are supported. Modes that enable ``ANSI_QUOTES`` (such as
``ANSI``) require an engine option to modify SQLAlchemy's quoting style.
When using an ANSI-quoting mode, supply ``use_ansiquotes=True`` when
creating your ``Engine``::
create_engine('mysql://localhost/test', use_ansiquotes=True)
This is an engine-wide option and is not toggleable on a per-connection basis.
SQLAlchemy does not presume to ``SET sql_mode`` for you with this option. For
the best performance, set the quoting style server-wide in ``my.cnf`` or by
supplying ``--sql-mode`` to ``mysqld``. You can also use a
:class:`sqlalchemy.pool.Pool` listener hook to issue a ``SET SESSION
sql_mode='...'`` on connect to configure each connection.
If you do not specify ``use_ansiquotes``, the regular MySQL quoting style is
used by default.
If you do issue a ``SET sql_mode`` through SQLAlchemy, the dialect must be
updated if the quoting style is changed. Again, this change will affect all
connections::
connection.execute('SET sql_mode="ansi"')
connection.dialect.use_ansiquotes = True
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
"""
import datetime, inspect, re, sys
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, log, sql, util
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy.sql import functions as sql_functions
from sqlalchemy.sql import compiler
from array import array as _array
from sqlalchemy.engine import reflection
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import types as sqltypes
from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
BLOB, BINARY, VARBINARY
RESERVED_WORDS = set(
['accessible', 'add', 'all', 'alter', 'analyze','and', 'as', 'asc',
'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
'current_user', 'cursor', 'database', 'databases', 'day_hour',
'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
'mediumblob', 'mediumint', 'mediumtext', 'middleint',
'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1
'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
'read_only', 'read_write', # 5.1
])
AUTOCOMMIT_RE = re.compile(
r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
re.I | re.UNICODE)
SET_RE = re.compile(
r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
re.I | re.UNICODE)
class _NumericType(object):
"""Base for MySQL numeric types."""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
if isinstance(self, (REAL, DOUBLE)) and \
(
(precision is None and scale is not None) or
(precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw)
self.scale = scale
class _IntegerType(_NumericType, sqltypes.Integer):
def __init__(self, display_width=None, **kw):
self.display_width = display_width
super(_IntegerType, self).__init__(**kw)
class _StringType(sqltypes.String):
"""Base for MySQL string types."""
def __init__(self, charset=None, collation=None,
ascii=False, binary=False,
national=False, **kw):
self.charset = charset
# allow collate= or collation=
self.collation = kw.pop('collate', collation)
self.ascii = ascii
# We have to munge the 'unicode' param strictly as a dict
# otherwise 2to3 will turn it into str.
self.__dict__['unicode'] = kw.get('unicode', False)
# sqltypes.String does not accept the 'unicode' arg at all.
if 'unicode' in kw:
del kw['unicode']
self.binary = binary
self.national = national
super(_StringType, self).__init__(**kw)
def __repr__(self):
attributes = inspect.getargspec(self.__init__)[0][1:]
attributes.extend(inspect.getargspec(_StringType.__init__)[0][1:])
params = {}
for attr in attributes:
val = getattr(self, attr)
if val is not None and val is not False:
params[attr] = val
return "%s(%s)" % (self.__class__.__name__,
', '.join(['%s=%r' % (k, params[k]) for k in params]))
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""MySQL NUMERIC type."""
__visit_name__ = 'NUMERIC'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a NUMERIC.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
"""MySQL DECIMAL type."""
__visit_name__ = 'DECIMAL'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DECIMAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DECIMAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
"""MySQL DOUBLE type."""
__visit_name__ = 'DOUBLE'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DOUBLE.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DOUBLE, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
"""MySQL REAL type."""
__visit_name__ = 'REAL'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a REAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(REAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
"""MySQL FLOAT type."""
__visit_name__ = 'FLOAT'
def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
"""Construct a FLOAT.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(FLOAT, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
def bind_processor(self, dialect):
return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
"""MySQL INTEGER type."""
__visit_name__ = 'INTEGER'
def __init__(self, display_width=None, **kw):
"""Construct an INTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
"""MySQL BIGINTEGER type."""
__visit_name__ = 'BIGINT'
def __init__(self, display_width=None, **kw):
"""Construct a BIGINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
"""MySQL MEDIUMINTEGER type."""
__visit_name__ = 'MEDIUMINT'
def __init__(self, display_width=None, **kw):
"""Construct a MEDIUMINTEGER
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
"""MySQL TINYINT type."""
__visit_name__ = 'TINYINT'
def __init__(self, display_width=None, **kw):
"""Construct a TINYINT.
Note: following the usual MySQL conventions, TINYINT(1) columns
reflected during Table(..., autoload=True) are treated as
Boolean columns.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
"""MySQL SMALLINTEGER type."""
__visit_name__ = 'SMALLINT'
def __init__(self, display_width=None, **kw):
"""Construct a SMALLINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
"""MySQL BIT type.
This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for
MyISAM, MEMORY, InnoDB and BDB. For older versions, use a MSTinyInteger()
type.
"""
__visit_name__ = 'BIT'
def __init__(self, length=None):
"""Construct a BIT.
:param length: Optional, number of bits.
"""
self.length = length
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
already do this, so this logic should be moved to those dialects.
"""
def process(value):
if value is not None:
v = 0L
for i in map(ord, value):
v = v << 8 | i
return v
return value
return process
class _MSTime(sqltypes.Time):
"""MySQL TIME type."""
__visit_name__ = 'TIME'
def result_processor(self, dialect, coltype):
time = datetime.time
def process(value):
# convert from a timedelta value
if value is not None:
seconds = value.seconds
minutes = seconds / 60
return time(minutes / 60, minutes % 60, seconds - minutes * 60)
else:
return None
return process
class TIMESTAMP(sqltypes.TIMESTAMP):
"""MySQL TIMESTAMP type."""
__visit_name__ = 'TIMESTAMP'
class YEAR(sqltypes.TypeEngine):
"""MySQL YEAR type, for single byte storage of years 1901-2155."""
__visit_name__ = 'YEAR'
def __init__(self, display_width=None):
self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
"""MySQL TEXT type, for text up to 2^16 characters."""
__visit_name__ = 'TEXT'
def __init__(self, length=None, **kw):
"""Construct a TEXT.
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
"""MySQL TINYTEXT type, for text up to 2^8 characters."""
__visit_name__ = 'TINYTEXT'
def __init__(self, **kwargs):
"""Construct a TINYTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
"""MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
__visit_name__ = 'MEDIUMTEXT'
def __init__(self, **kwargs):
"""Construct a MEDIUMTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
"""MySQL LONGTEXT type, for text up to 2^32 characters."""
__visit_name__ = 'LONGTEXT'
def __init__(self, **kwargs):
"""Construct a LONGTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MySQL VARCHAR type, for variable-length character data."""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
"""Construct a VARCHAR.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""MySQL CHAR type, for fixed-length character data."""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
super(CHAR, self).__init__(length=length, **kwargs)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MySQL NVARCHAR type.
For variable-length character data in the server's configured national
character set.
"""
__visit_name__ = 'NVARCHAR'
def __init__(self, length=None, **kwargs):
"""Construct an NVARCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs['national'] = True
super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MySQL NCHAR type.
For fixed-length character data in the server's configured national
character set.
"""
__visit_name__ = 'NCHAR'
def __init__(self, length=None, **kwargs):
"""Construct an NCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs['national'] = True
super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
"""MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
__visit_name__ = 'TINYBLOB'
class MEDIUMBLOB(sqltypes._Binary):
"""MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
__visit_name__ = 'MEDIUMBLOB'
class LONGBLOB(sqltypes._Binary):
"""MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
__visit_name__ = 'LONGBLOB'
class ENUM(sqltypes.Enum, _StringType):
"""MySQL ENUM type."""
__visit_name__ = 'ENUM'
def __init__(self, *enums, **kw):
"""Construct an ENUM.
Example:
Column('myenum', MSEnum("foo", "bar", "baz"))
:param enums: The range of valid values for this ENUM. Values will be
quoted when generating the schema according to the quoting flag (see
below).
:param strict: Defaults to False: ensure that a given value is in this
ENUM's range of permissible values when inserting or updating rows.
Note that MySQL will not raise a fatal error if you attempt to store
          an out of range value; an alternate value will be stored instead.
(See MySQL ENUM documentation.)
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
"""
self.quoting = kw.pop('quoting', 'auto')
if self.quoting == 'auto' and len(enums):
# What quoting character are we using?
q = None
for e in enums:
if len(e) == 0:
self.quoting = 'unquoted'
break
elif q is None:
q = e[0]
if e[0] != q or e[-1] != q:
self.quoting = 'unquoted'
break
else:
self.quoting = 'quoted'
if self.quoting == 'quoted':
util.warn_deprecated(
'Manually quoting ENUM value literals is deprecated. Supply '
'unquoted values and use the quoting= option in cases of '
'ambiguity.')
enums = self._strip_enums(enums)
self.strict = kw.pop('strict', False)
length = max([len(v) for v in enums] + [0])
kw.pop('metadata', None)
kw.pop('schema', None)
kw.pop('name', None)
kw.pop('quote', None)
kw.pop('native_enum', None)
_StringType.__init__(self, length=length, **kw)
sqltypes.Enum.__init__(self, *enums)
@classmethod
def _strip_enums(cls, enums):
strip_enums = []
for a in enums:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
strip_enums.append(a)
return strip_enums
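    # Behaviour sketch for the quoting detection above (comments only):
    #   ENUM("'one'", "'two'") -> quoting='quoted'; values stripped to one, two
    #   ENUM("one", "two")     -> quoting='unquoted'; DDL will emit 'one','two'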
def bind_processor(self, dialect):
super_convert = super(ENUM, self).bind_processor(dialect)
def process(value):
if self.strict and value is not None and value not in self.enums:
raise exc.InvalidRequestError('"%s" not a valid value for '
'this enum' % value)
if super_convert:
return super_convert(value)
else:
return value
return process
def adapt(self, impltype, **kw):
kw['strict'] = self.strict
return sqltypes.Enum.adapt(self, impltype, **kw)
class SET(_StringType):
"""MySQL SET type."""
__visit_name__ = 'SET'
def __init__(self, *values, **kw):
"""Construct a SET.
Example::
Column('myset', MSSet("'foo'", "'bar'", "'baz'"))
:param values: The range of valid values for this SET. Values will be
used exactly as they appear when generating schemas. Strings must
be quoted, as in the example above. Single-quotes are suggested for
ANSI compatibility and are required for portability to servers with
ANSI_QUOTES enabled.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
self._ddl_values = values
strip_values = []
for a in values:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
strip_values.append(a)
self.values = strip_values
kw.setdefault('length', max([len(v) for v in strip_values] + [0]))
super(SET, self).__init__(**kw)
def result_processor(self, dialect, coltype):
def process(value):
# The good news:
# No ',' quoting issues- commas aren't allowed in SET values
# The bad news:
# Plenty of driver inconsistencies here.
if isinstance(value, util.set_types):
# ..some versions convert '' to an empty set
if not value:
value.add('')
# ..some return sets.Set, even for pythons that have __builtin__.set
if not isinstance(value, set):
value = set(value)
return value
# ...and some versions return strings
if value is not None:
return set(value.split(','))
else:
return value
return process
def bind_processor(self, dialect):
super_convert = super(SET, self).bind_processor(dialect)
def process(value):
if value is None or isinstance(value, (int, long, basestring)):
pass
else:
if None in value:
value = set(value)
value.remove(None)
value.add('')
value = ','.join(value)
if super_convert:
return super_convert(value)
else:
return value
return process
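    # Round-trip sketch for the processors above (comments only):
    #   bind:   set(['read', 'write']) -> "read,write" (member order may vary)
    #   result: "read,write"           -> set(['read', 'write'])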
# old names
MSTime = _MSTime
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
colspecs = {
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: _MSTime,
sqltypes.Enum: ENUM,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
'bigint': BIGINT,
'binary': BINARY,
'bit': BIT,
'blob': BLOB,
'boolean': BOOLEAN,
'char': CHAR,
'date': DATE,
'datetime': DATETIME,
'decimal': DECIMAL,
'double': DOUBLE,
'enum': ENUM,
'fixed': DECIMAL,
'float': FLOAT,
'int': INTEGER,
'integer': INTEGER,
'longblob': LONGBLOB,
'longtext': LONGTEXT,
'mediumblob': MEDIUMBLOB,
'mediumint': MEDIUMINT,
'mediumtext': MEDIUMTEXT,
'nchar': NCHAR,
'nvarchar': NVARCHAR,
'numeric': NUMERIC,
'set': SET,
'smallint': SMALLINT,
'text': TEXT,
'time': TIME,
'timestamp': TIMESTAMP,
'tinyblob': TINYBLOB,
'tinyint': TINYINT,
'tinytext': TINYTEXT,
'varbinary': VARBINARY,
'varchar': VARCHAR,
'year': YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
class MySQLCompiler(compiler.SQLCompiler):
extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
'milliseconds': 'millisecond',
})
def visit_random_func(self, fn, **kw):
return "rand%s" % self.function_argspec(fn)
def visit_utc_timestamp_func(self, fn, **kw):
return "UTC_TIMESTAMP"
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
def visit_concat_op(self, binary, **kw):
return "concat(%s, %s)" % (self.process(binary.left), self.process(binary.right))
def visit_match_op(self, binary, **kw):
return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (self.process(binary.left), self.process(binary.right))
def get_from_hint_text(self, table, text):
return text
def visit_typeclause(self, typeclause):
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.Integer):
if getattr(type_, 'unsigned', False):
return 'UNSIGNED INTEGER'
else:
return 'SIGNED INTEGER'
elif isinstance(type_, sqltypes.TIMESTAMP):
return 'DATETIME'
elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime, sqltypes.Date, sqltypes.Time)):
return self.dialect.type_compiler.process(type_)
elif isinstance(type_, sqltypes.Text):
return 'CHAR'
elif (isinstance(type_, sqltypes.String) and not
isinstance(type_, (ENUM, SET))):
if getattr(type_, 'length'):
return 'CHAR(%s)' % type_.length
else:
return 'CHAR'
elif isinstance(type_, sqltypes._Binary):
return 'BINARY'
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(type_).replace('NUMERIC', 'DECIMAL')
else:
return None
def visit_cast(self, cast, **kwargs):
# No cast until 4, no decimals until 5.
if not self.dialect._supports_cast:
return self.process(cast.clause)
type_ = self.process(cast.typeclause)
if type_ is None:
return self.process(cast.clause)
return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
def render_literal_value(self, value, type_):
value = super(MySQLCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace('\\', '\\\\')
return value
def get_select_precolumns(self, select):
"""Add special MySQL keywords in place of DISTINCT.
.. note:: this usage is deprecated. :meth:`.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
"""
if isinstance(select._distinct, basestring):
return select._distinct.upper() + " "
elif select._distinct:
return "DISTINCT "
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
# 'JOIN ... ON ...' for inner joins isn't available until 4.0.
# Apparently < 3.23.17 requires theta joins for inner joins
# (but not outer). Not generating these currently, but
# support can be added, preferably after dialects are
# refactored to be version-sensitive.
return ''.join(
(self.process(join.left, asfrom=True, **kwargs),
(join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
self.process(join.right, asfrom=True, **kwargs),
" ON ",
self.process(join.onclause, **kwargs)))
def for_update_clause(self, select):
if select.for_update == 'read':
return ' LOCK IN SHARE MODE'
else:
return super(MySQLCompiler, self).for_update_clause(select)
def limit_clause(self, select):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
limit, offset = select._limit, select._offset
if (limit, offset) == (None, None):
return ''
elif offset is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# http://dev.mysql.com/doc/refman/5.0/en/select.html
if limit is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
"18446744073709551615")
else:
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
self.process(sql.literal(limit)))
else:
# No offset provided, so just use the limit
return ' \n LIMIT %s' % (self.process(sql.literal(limit)),)
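    # Rendering sketch for limit_clause (comments only):
    #   limit=10, offset=None -> " \n LIMIT 10"
    #   limit=10, offset=5    -> " \n LIMIT 5, 10"
    #   limit=None, offset=5  -> " \n LIMIT 5, 18446744073709551615"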
def visit_update(self, update_stmt):
self.stack.append({'from': set([update_stmt.table])})
self.isupdate = True
colparams = self._get_colparams(update_stmt)
text = "UPDATE " + self.preparer.format_table(update_stmt.table) + \
" SET " + ', '.join(["%s=%s" % (self.preparer.format_column(c[0]), c[1]) for c in colparams])
if update_stmt._whereclause is not None:
text += " WHERE " + self.process(update_stmt._whereclause)
limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
if limit:
text += " LIMIT %s" % limit
self.stack.pop(-1)
return text
# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
# Starting with MySQL 4.1.2, these indexes are created automatically.
# In older versions, the indexes must be created explicitly or the
# creation of foreign key constraints fails."
class MySQLDDLCompiler(compiler.DDLCompiler):
def create_table_constraints(self, table):
"""Get table constraints."""
constraint_string = super(MySQLDDLCompiler, self).create_table_constraints(table)
engine_key = '%s_engine' % self.dialect.name
        is_innodb = engine_key in table.kwargs and \
                    table.kwargs[engine_key].lower() == 'innodb'
auto_inc_column = table._autoincrement_column
if is_innodb and \
auto_inc_column is not None and \
auto_inc_column is not list(table.primary_key)[0]:
if constraint_string:
constraint_string += ", \n\t"
constraint_string += "KEY `idx_autoinc_%s`(`%s`)" % (auto_inc_column.name, \
self.preparer.format_column(auto_inc_column))
return constraint_string
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
colspec = [self.preparer.format_column(column),
self.dialect.type_compiler.process(column.type)
]
default = self.get_column_default_string(column)
if default is not None:
colspec.append('DEFAULT ' + default)
is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP)
if not column.nullable and not is_timestamp:
colspec.append('NOT NULL')
elif column.nullable and is_timestamp and default is None:
colspec.append('NULL')
if column is column.table._autoincrement_column and column.server_default is None:
colspec.append('AUTO_INCREMENT')
return ' '.join(colspec)
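    # Example output (comments only): an Integer primary key that is the
    # table's autoincrement column and has no server default renders as
    #   id INTEGER NOT NULL AUTO_INCREMENT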
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
for k in table.kwargs:
if k.startswith('%s_' % self.dialect.name):
opt = k[len(self.dialect.name)+1:].upper()
arg = table.kwargs[k]
if opt in _options_of_type_string:
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
'DEFAULT_CHARSET',
'DEFAULT_COLLATE'):
opt = opt.replace('_', ' ')
joiner = '='
if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
'CHARACTER SET', 'COLLATE'):
joiner = ' '
table_opts.append(joiner.join((opt, arg)))
return ' '.join(table_opts)
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s ON %s" % \
(self.preparer.quote(self._index_identifier(index.name), index.quote),
self.preparer.format_table(index.table))
def visit_drop_constraint(self, drop):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % \
(self.preparer.format_table(constraint.table),
qual, const)
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if not self._mysql_type(type_):
return spec
if type_.unsigned:
spec += ' UNSIGNED'
if type_.zerofill:
spec += ' ZEROFILL'
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr('charset'):
charset = 'CHARACTER SET %s' % attr('charset')
elif attr('ascii'):
charset = 'ASCII'
elif attr('unicode'):
charset = 'UNICODE'
else:
charset = None
if attr('collation'):
collation = 'COLLATE %s' % type_.collation
elif attr('binary'):
collation = 'BINARY'
else:
collation = None
if attr('national'):
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return ' '.join([c for c in ('NATIONAL', spec, collation)
if c is not None])
return ' '.join([c for c in (spec, charset, collation)
if c is not None])
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
def visit_NUMERIC(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(type_,
"NUMERIC(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_,
"NUMERIC(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DECIMAL(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(type_,
"DECIMAL(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_,
"DECIMAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DOUBLE(self, type_):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_, "DOUBLE(%(precision)s, %(scale)s)" %
{'precision': type_.precision,
'scale' : type_.scale})
else:
return self._extend_numeric(type_, 'DOUBLE')
def visit_REAL(self, type_):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_, "REAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision,
'scale' : type_.scale})
else:
return self._extend_numeric(type_, 'REAL')
def visit_FLOAT(self, type_):
if self._mysql_type(type_) and \
type_.scale is not None and \
type_.precision is not None:
return self._extend_numeric(type_,
"FLOAT(%s, %s)" % (type_.precision, type_.scale))
elif type_.precision is not None:
return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,))
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"INTEGER(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"BIGINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"MEDIUMINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "MEDIUMINT")
def visit_TINYINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_, "TINYINT(%s)" % type_.display_width)
else:
return self._extend_numeric(type_, "TINYINT")
def visit_SMALLINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"SMALLINT(%(display_width)s)" %
{'display_width': type_.display_width}
)
else:
return self._extend_numeric(type_, "SMALLINT")
def visit_BIT(self, type_):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
def visit_DATETIME(self, type_):
return "DATETIME"
def visit_DATE(self, type_):
return "DATE"
def visit_TIME(self, type_):
return "TIME"
def visit_TIMESTAMP(self, type_):
return 'TIMESTAMP'
def visit_YEAR(self, type_):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_):
return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_):
return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_):
return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_):
if type_.length:
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.InvalidRequestError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_CHAR(self, type_):
if type_.length:
return self._extend_string(type_, {}, "CHAR(%(length)s)" % {'length' : type_.length})
else:
return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
return self._extend_string(type_, {'national':True}, "VARCHAR(%(length)s)" % {'length': type_.length})
else:
raise exc.InvalidRequestError(
"NVARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_NCHAR(self, type_):
# We'll actually generate the equiv. "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
return self._extend_string(type_, {'national':True}, "CHAR(%(length)s)" % {'length': type_.length})
else:
return self._extend_string(type_, {'national':True}, "CHAR")
def visit_VARBINARY(self, type_):
return "VARBINARY(%d)" % type_.length
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_enum(self, type_):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self.visit_ENUM(type_)
def visit_BLOB(self, type_):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
def visit_TINYBLOB(self, type_):
return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_):
return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_):
return "LONGBLOB"
def visit_ENUM(self, type_):
quoted_enums = []
for e in type_.enums:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(type_, {}, "ENUM(%s)" % ",".join(quoted_enums))
def visit_SET(self, type_):
return self._extend_string(type_, {}, "SET(%s)" % ",".join(type_._ddl_values))
    def visit_BOOLEAN(self, type_):
        return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect, server_ansiquotes=False, **kw):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect,
initial_quote=quote,
escape_quote=quote)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect. Not used directly in application code."""
name = 'mysql'
supports_alter = True
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
max_index_name_length = 64
supports_native_enum = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
default_paramstyle = 'format'
colspecs = colspecs
statement_compiler = MySQLCompiler
ddl_compiler = MySQLDDLCompiler
type_compiler = MySQLTypeCompiler
ischema_names = ischema_names
preparer = MySQLIdentifierPreparer
# default SQL compilation settings -
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
def __init__(self, use_ansiquotes=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
def do_commit(self, connection):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
# Yes, we have at least one user who has to talk to these old versions!
#
# Ignore commit/rollback if support isn't present, otherwise even basic
# operations via autocommit fail.
try:
connection.commit()
except:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_rollback(self, connection):
"""Execute a ROLLBACK."""
try:
connection.rollback()
except:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_begin_twophase(self, connection, xid):
connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
def do_recover_twophase(self, connection):
resultset = connection.execute("XA RECOVER")
return [row['data'][0:row['gtrid_length']] for row in resultset]
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
return self._extract_error_code(e) in \
(2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver inconsistencies."""
return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver inconsistencies."""
return _DecodingRowProxy(rp.fetchone(), charset)
def _compat_first(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver inconsistencies."""
return _DecodingRowProxy(rp.first(), charset)
def _extract_error_code(self, exception):
raise NotImplementedError()
def _get_default_schema_name(self, connection):
return connection.execute('SELECT DATABASE()').scalar()
def has_table(self, connection, table_name, schema=None):
# SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
# on macosx (and maybe win?) with multibyte table names.
#
# TODO: if this is not a problem on win, make the strategy swappable
# based on platform. DESCRIBE is slower.
# [ticket:726]
# full_name = self.identifier_preparer.format_table(table,
# use_schema=True)
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, table_name))
st = "DESCRIBE %s" % full_name
rs = None
try:
try:
rs = connection.execute(st)
have = rs.rowcount > 0
rs.close()
return have
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
return False
raise
finally:
if rs:
rs.close()
def initialize(self, connection):
default.DefaultDialect.initialize(self, connection)
self._connection_charset = self._detect_charset(connection)
self._server_casing = self._detect_casing(connection)
self._server_collations = self._detect_collations(connection)
self._detect_ansiquotes(connection)
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
self.identifier_preparer = self.preparer(self,
server_ansiquotes=self._server_ansiquotes)
@property
def _supports_cast(self):
return self.server_version_info is None or \
self.server_version_info >= (4, 0, 2)
@reflection.cache
def get_schema_names(self, connection, **kw):
rp = connection.execute("SHOW schemas")
return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = self._connection_charset
if self.server_version_info < (5, 0, 2):
rp = connection.execute("SHOW TABLES FROM %s" %
self.identifier_preparer.quote_identifier(current_schema))
return [row[0] for row in self._compat_fetchall(rp, charset=charset)]
else:
rp = connection.execute("SHOW FULL TABLES FROM %s" %
self.identifier_preparer.quote_identifier(current_schema))
return [row[0] for row in self._compat_fetchall(rp, charset=charset)\
if row[1] == 'BASE TABLE']
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
        if self.server_version_info < (5, 0, 2):
            raise NotImplementedError
        if schema is None:
            schema = self.default_schema_name
        charset = self._connection_charset
rp = connection.execute("SHOW FULL TABLES FROM %s" %
self.identifier_preparer.quote_identifier(schema))
return [row[0] for row in self._compat_fetchall(rp, charset=charset)\
if row[1] == 'VIEW']
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
return parsed_state.columns
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
for key in parsed_state.keys:
if key['type'] == 'PRIMARY':
# There can be only one.
##raise Exception, str(key)
return [s[0] for s in key['columns']]
return []
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
default_schema = None
fkeys = []
for spec in parsed_state.constraints:
# only FOREIGN KEYs
ref_name = spec['table'][-1]
ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema
if not ref_schema:
if default_schema is None:
default_schema = \
connection.dialect.default_schema_name
if schema == default_schema:
ref_schema = schema
loc_names = spec['local']
ref_names = spec['foreign']
con_kw = {}
for opt in ('name', 'onupdate', 'ondelete'):
if spec.get(opt, False):
con_kw[opt] = spec[opt]
fkey_d = {
'name' : spec['name'],
'constrained_columns' : loc_names,
'referred_schema' : ref_schema,
'referred_table' : ref_name,
'referred_columns' : ref_names,
'options' : con_kw
}
fkeys.append(fkey_d)
return fkeys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
indexes = []
for spec in parsed_state.keys:
unique = False
flavor = spec['type']
if flavor == 'PRIMARY':
continue
if flavor == 'UNIQUE':
unique = True
elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
pass
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY" % flavor)
pass
index_d = {}
index_d['name'] = spec['name']
index_d['column_names'] = [s[0] for s in spec['columns']]
index_d['unique'] = unique
index_d['type'] = flavor
indexes.append(index_d)
return indexes
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
charset = self._connection_charset
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, view_name))
sql = self._show_create_table(connection, None, charset,
full_name=full_name)
return sql
def _parsed_state_or_create(self, connection, table_name, schema=None, **kw):
return self._setup_parser(
connection,
table_name,
schema,
info_cache=kw.get('info_cache', None)
)
@util.memoized_property
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
retrieved server version information first.
"""
if (self.server_version_info < (4, 1) and self._server_ansiquotes):
# ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
preparer = self.preparer(self, server_ansiquotes=False)
else:
preparer = self.identifier_preparer
return MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
charset = self._connection_charset
parser = self._tabledef_parser
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, table_name))
sql = self._show_create_table(connection, None, charset,
full_name=full_name)
if sql.startswith('CREATE ALGORITHM'):
# Adapt views to something table-like.
columns = self._describe_table(connection, None, charset,
full_name=full_name)
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _detect_charset(self, connection):
raise NotImplementedError()
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
# http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
charset = self._connection_charset
row = self._compat_first(connection.execute(
"SHOW VARIABLES LIKE 'lower_case_table_names'"),
charset=charset)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == 'OFF':
cs = 0
elif row[1] == 'ON':
cs = 1
else:
cs = int(row[1])
return cs
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
if self.server_version_info < (4, 1, 0):
pass
else:
charset = self._connection_charset
rs = connection.execute('SHOW COLLATION')
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
charset=self._connection_charset)
if not row:
mode = ''
else:
mode = row[1] or ''
# 4.0
if mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
self._server_ansiquotes = 'ANSI_QUOTES' in mode
# as of MySQL 5.0.1
self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
def _show_create_table(self, connection, table, charset=None,
full_name=None):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execute(st)
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
def _describe_table(self, connection, table, charset=None,
full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execute(st)
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
rows = self._compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class ReflectedState(object):
"""Stores raw information about a SHOW CREATE TABLE statement."""
def __init__(self):
self.columns = []
self.table_options = {}
self.table_name = None
self.keys = []
self.constraints = []
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
def __init__(self, dialect, preparer):
self.dialect = dialect
self.preparer = preparer
self._prep_regexes()
def parse(self, show_create, charset):
state = ReflectedState()
state.charset = charset
for line in re.split(r'\r?\n', show_create):
if line.startswith(' ' + self.preparer.initial_quote):
self._parse_column(line, state)
# a regular table options line
elif line.startswith(') '):
self._parse_table_options(line, state)
# an ANSI-mode table options line
elif line == ')':
pass
elif line.startswith('CREATE '):
self._parse_table_name(line, state)
# Not present in real reflection, but may be if loading from a file.
elif not line:
pass
else:
type_, spec = self._parse_constraints(line)
if type_ is None:
util.warn("Unknown schema content: %r" % line)
elif type_ == 'key':
state.keys.append(spec)
elif type_ == 'constraint':
state.constraints.append(spec)
else:
pass
return state
def _parse_constraints(self, line):
"""Parse a KEY or CONSTRAINT line.
:param line: A line of SHOW CREATE TABLE output
"""
# KEY
m = self._re_key.match(line)
if m:
spec = m.groupdict()
# convert columns into name, length pairs
spec['columns'] = self._parse_keyexprs(spec['columns'])
return 'key', spec
# CONSTRAINT
m = self._re_constraint.match(line)
if m:
spec = m.groupdict()
spec['table'] = \
self.preparer.unformat_identifiers(spec['table'])
spec['local'] = [c[0]
for c in self._parse_keyexprs(spec['local'])]
spec['foreign'] = [c[0]
for c in self._parse_keyexprs(spec['foreign'])]
return 'constraint', spec
# PARTITION and SUBPARTITION
m = self._re_partition.match(line)
if m:
# Punt!
return 'partition', line
# No match.
return (None, line)
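    # Roughly (illustrative inputs, not exhaustive): a line such as
    #   "  KEY `ix_col` (`col1`,`col2`(10)),"       -> ('key', {...})
    #   "  CONSTRAINT `fk_x` FOREIGN KEY (`a`) ..." -> ('constraint', {...})
    # and anything unrecognized falls through as (None, line).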
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group('name'))
def _parse_table_options(self, line, state):
"""Build a dictionary of all reflected table-level options.
:param line: The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ')':
pass
else:
rest_of_line = line[:]
for regex, cleanup in self._pr_options:
m = regex.search(rest_of_line)
if not m:
continue
directive, value = m.group('directive'), m.group('val')
if cleanup:
value = cleanup(value)
options[directive.lower()] = value
rest_of_line = regex.sub('', rest_of_line)
for nope in ('auto_increment', 'data directory', 'index directory'):
options.pop(nope, None)
for opt, val in options.items():
state.table_options['%s_%s' % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
:param line: Any column-bearing line from SHOW CREATE TABLE
"""
spec = None
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec['full'] = True
else:
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec['full'] = False
if not spec:
util.warn("Unknown column definition %r" % line)
return
if not spec['full']:
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args, notnull = \
spec['name'], spec['coltype'], spec['arg'], spec['notnull']
try:
col_type = self.dialect.ischema_names[type_]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
col_type = sqltypes.NullType
# Column type positional arguments eg. varchar(32)
if args is None or args == '':
type_args = []
elif args[0] == "'" and args[-1] == "'":
type_args = self._re_csv_str.findall(args)
else:
type_args = [int(v) for v in self._re_csv_int.findall(args)]
# Column type keyword options
type_kw = {}
for kw in ('unsigned', 'zerofill'):
if spec.get(kw, False):
type_kw[kw] = True
for kw in ('charset', 'collate'):
if spec.get(kw, False):
type_kw[kw] = spec[kw]
if type_ == 'enum':
type_args = ENUM._strip_enums(type_args)
type_instance = col_type(*type_args, **type_kw)
col_args, col_kw = [], {}
# NOT NULL
col_kw['nullable'] = True
if spec.get('notnull', False):
col_kw['nullable'] = False
# AUTO_INCREMENT
if spec.get('autoincr', False):
col_kw['autoincrement'] = True
elif issubclass(col_type, sqltypes.Integer):
col_kw['autoincrement'] = False
# DEFAULT
default = spec.get('default', None)
if default == 'NULL':
# eliminates the need to deal with this later.
default = None
col_d = dict(name=name, type=type_instance, default=default)
col_d.update(col_kw)
state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only- keys are omitted.
:param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = \
[row[i] for i in (0, 1, 2, 4, 5)]
line = [' ']
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append('NOT NULL')
if default:
if 'auto_increment' in default:
pass
elif (col_type.startswith('timestamp') and
default.startswith('C')):
line.append('DEFAULT')
line.append(default)
elif default == 'NULL':
line.append('DEFAULT')
line.append(default)
else:
line.append('DEFAULT')
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(' '.join(line))
return ''.join([('CREATE TABLE %s (\n' %
self.preparer.quote_identifier(table_name)),
',\n'.join(buffer),
'\n) '])
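    # Rows are expected in DESCRIBE order (Field, Type, Null, Key, Default,
    # Extra); only name, type, nullability, default and extra are consumed
    # above -- the Key column is skipped on purpose since keys are not
    # reconstructed for views.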
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(zip(('iq', 'fq', 'esc_fq'),
[re.escape(s) for s in
(self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final))]))
self._pr_name = _pr_compile(
r'^CREATE (?:\w+ +)?TABLE +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
self.preparer._unescape_identifier)
# `col`,`col2`(32),`col3`(15) DESC
#
# Note: ASC and DESC aren't reflected, so we'll punt...
self._re_keyexprs = _re_compile(
r'(?:'
r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
# 123 or 123,456
self._re_csv_int = _re_compile(r'\d+')
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
r'(?: +(?P<unsigned>UNSIGNED))?'
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
r'(?: +COLLATE +(?P<collate>[\w_]+))?'
r'(?: +(?P<notnull>NOT NULL))?'
r'(?: +DEFAULT +(?P<default>'
r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
r'(?: +ON UPDATE \w+)?)'
r'))?'
r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
r'(?: +STORAGE +(?P<storage>\w+))?'
r'(?: +(?P<extra>.*))?'
r',?$'
% quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>NOT NULL)?'
% quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name
self._re_key = _re_compile(
r' '
r'(?:(?P<type>\S+) )?KEY'
r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
r'(?: +USING +(?P<using_pre>\S+))?'
r' +\((?P<columns>.+?)\)'
r'(?: +USING +(?P<using_post>\S+))?'
r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r',?$'
% quotes
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
        kw['on'] = 'RESTRICT|CASCADE|SET NULL|NO ACTION'
self._re_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(
r' '
r'(?:SUB)?PARTITION')
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted strings we need to get rid of.
for option in _options_of_type_string:
self._add_option_string(option)
for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
'AVG_ROW_LENGTH', 'CHARACTER SET',
'DEFAULT CHARSET', 'CHECKSUM',
'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
'KEY_BLOCK_SIZE'):
self._add_option_word(option)
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
self._add_option_regex('RAID_TYPE',
r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
_optional_equals = r'(?:\s*(?:=\s*)|\s+)'
def _add_option_string(self, directive):
regex = (r'(?P<directive>%s)%s'
r"'(?P<val>(?:[^']|'')*?)'(?!')" %
(re.escape(directive), self._optional_equals))
self._pr_options.append(
_pr_compile(regex, lambda v: v.replace("\\\\","\\").replace("''", "'")))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>\w+)' %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>%s)' %
(re.escape(directive), self._optional_equals, regex))
self._pr_options.append(_pr_compile(regex))
_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
'PASSWORD', 'CONNECTION')
log.class_logger(MySQLTableDefinitionParser)
log.class_logger(MySQLDialect)
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = charset
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
# Py2K
if self.charset and isinstance(item, str):
# end Py2K
# Py3K
#if self.charset and isinstance(item, bytes):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
# Py2K
if self.charset and isinstance(item, str):
# end Py2K
# Py3K
#if self.charset and isinstance(item, bytes):
return item.decode(self.charset)
else:
return item
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
| eunchong/build | third_party/sqlalchemy_0_7_1/sqlalchemy/dialects/mysql/base.py | Python | bsd-3-clause | 92,519 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp.osv import orm, fields
class oehealth_tag(orm.Model):
_inherit = 'oehealth.tag'
_columns = {
'medicament_group_ids': fields.many2many('oehealth.medicament.group',
'oehealth_medicament_group_tag_rel',
'tag_id',
'medicament_group_id',
'Medicament Groups'),
}
oehealth_tag()
| CLVsol/oehealth | oehealth_medicament_group/oehealth_tag.py | Python | agpl-3.0 | 1,912 |
import os
import tempfile
import numpy as np
from clustering_system.corpus.LineCorpus import LineCorpus
from clustering_system.corpus.LineNewsCorpus import LineNewsCorpus
class TestLineCorpus:
CORPUS = [[1, 3, 2], [0, -1]]
CORPUS_TEXTS = [["hello", "word", "world"], ["!"]]
CORPUS_WITH_META = [([1, 3, 2], (1, "d1")), ([0, -1], (1, "d1"))]
dictionary = {
0: "!",
1: "hello",
2: "world",
3: "word",
}
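    # The integer documents above map through ``dictionary`` to CORPUS_TEXTS,
    # e.g. [1, 3, 2] -> ["hello", "word", "world"]; ids missing from the
    # dictionary (such as -1) are expected to be dropped on serialization.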
def test_serialize(self):
file = tempfile.mktemp()
LineCorpus.serialize(file, self.CORPUS, self.dictionary)
corpus = LineCorpus(file)
assert np.array_equal(self.CORPUS_TEXTS, list(corpus))
def test_serialize_with_metadata(self):
file = tempfile.mktemp()
LineCorpus.serialize(file, self.CORPUS_WITH_META, self.dictionary, metadata=True)
corpus = LineCorpus(file)
assert np.array_equal(self.CORPUS_TEXTS, list(corpus))
def test_serialize_load(self):
# Current directory
dir_path = os.path.dirname(os.path.realpath(__file__))
corpus = LineNewsCorpus(input=os.path.join(dir_path, "..", "data", "genuine"), language="en")
temp_corpus_file = tempfile.NamedTemporaryFile(delete=False)
# Serialize pre-processed corpus to temp file
LineCorpus.serialize(temp_corpus_file, corpus, corpus.dictionary)
loaded_corpus = LineCorpus(temp_corpus_file.name)
docs = []
for d in loaded_corpus:
docs.append(d)
# Remove temp file
os.remove(temp_corpus_file.name)
np.testing.assert_array_equal([['human', 'human', 'steal', 'job'], ['human', 'human', 'steal', 'dog', 'cat']], docs)
| vanam/clustering | tests/clustering_system/corpus/test_LineCorpus.py | Python | mit | 1,711 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.metric import metric_service
from openstack import resource2 as resource
class Metric(resource.Resource):
base_path = '/metric'
service = metric_service.MetricService()
# Supported Operations
allow_create = True
allow_get = True
allow_delete = True
allow_list = True
# Properties
#: The name of the archive policy
archive_policy_name = resource.Body('archive_policy_name')
#: The archive policy
archive_policy = resource.Body('archive_policy')
#: The ID of the user who created this metric
created_by_user_id = resource.Body('created_by_user_id')
#: The ID of the project this metric was created under
created_by_project_id = resource.Body('created_by_project_id')
#: The identifier of this metric
resource_id = resource.Body('resource_id')
#: The name of this metric
name = resource.Body('name')
| briancurtin/python-openstacksdk | openstack/metric/v1/metric.py | Python | apache-2.0 | 1,442 |
#!/usr/bin/python
#
# Copyright 2014, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# native
'''
Created on 13 oct. 2014
@author: ronan.lemartret@open.eurogiciel.org
'''
import sys
import os
try:
    import cmdln
except ImportError:
    print >> sys.stderr, 'Error: spec2yocto requires "python-cmdln", please install it.'
    sys.exit(1)
#TODO need precision
#WARNING if patch can be a gz file
#WARNING if patch can be conpose by many commit
def isPatch(files) :
return (".diff" in files) or (".patch" in files)
#TODO need precision
def isFromIntel(patch_file) :
if patch_file.endswith('.diff') or patch_file.endswith('.patch'):
with open(patch_file,"r") as patch_fd:
for line in patch_fd:
if line.startswith("From:") and (("intel.com" in line) or ("eurogiciel.org" in line) or ("fridu.org" in line)):
return True
return False
def count_intel_patch(SRCDIR,package_files):
count_intel_patch=0
for p in package_files:
if isPatch( p) and isFromIntel(os.path.join(SRCDIR,p)):
count_intel_patch+=1
return count_intel_patch
def count_patch(package_files) :
count_patch=0
for p in package_files:
if isPatch( p):
count_patch+=1
return count_patch
#What if many spec file?
def get_license(SRCDIR,package_files) :
license=""
for p in package_files:
if (".spec" in p):
return find_license(os.path.join(SRCDIR,p))
return license
#What if many license file?
#TODO need precision
def find_license(spec_file) :
license=""
with open(spec_file,"r") as spec_fd:
for line in spec_fd:
if "License:" in line:
return line.split("License:")[1].replace("\n","").replace("\t","").replace(" ","")
return license
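# The report emitted by CheckRpmSrc.do_status() below is one tab-separated
# line per source package:
#   name<TAB>version<TAB>license<TAB>total patches<TAB>Intel-authored patches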
class CheckRpmSrc(cmdln.Cmdln):
name = "createVersionYoctoTizen"
version = "0.1"
@cmdln.option( "--rpmsSRCDIR",
action = "store",
default = "Tizen-rpm-source.html",
help = "the Tizen rpms source dir" )
def do_status(self, subcmd, opts):
"""generate status
${cmd_usage}--
${cmd_option_list}
"""
for package_rpm in os.listdir(opts.rpmsSRCDIR):
package_dir=package_rpm
release=package_rpm[package_rpm.rfind("-")+1:].replace(".src.rpm","")
package_rpm=package_rpm[:package_rpm.rfind("-")]
version=package_rpm[package_rpm.rfind("-")+1:]
name=package_rpm[:package_rpm.rfind("-")]
package_files = os.listdir(os.path.join(opts.rpmsSRCDIR, package_dir))
nb_patch=count_patch(package_files)
license=get_license(os.path.join(opts.rpmsSRCDIR, package_dir),package_files)
nb_intel_patch=count_intel_patch(os.path.join(opts.rpmsSRCDIR, package_dir),package_files)
print "%s\t%s\t%s\t%s\t%s" %(name, version, license, nb_patch, nb_intel_patch)
def main():
checkRpmSrc = CheckRpmSrc()
sys.exit( checkRpmSrc.main() )
if __name__ == '__main__':
main() | eurogiciel-oss/Tizen-development-report | bin/checkRpmSrc.py | Python | mit | 3,700 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import uuid
import ldap
import ldap.modlist
from keystone import config
from keystone import exception
from keystone.identity.backends import ldap as identity_ldap
from keystone import tests
from keystone.tests import test_backend_ldap
CONF = config.CONF
def create_object(dn, attrs):
conn = ldap.initialize(CONF.ldap.url)
conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
ldif = ldap.modlist.addModlist(attrs)
conn.add_s(dn, ldif)
conn.unbind_s()
class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
def setUp(self):
self._ldap_skip_live()
super(LiveLDAPIdentity, self).setUp()
def _ldap_skip_live(self):
self.skip_if_env_not_set('ENABLE_LDAP_LIVE_TEST')
def clear_database(self):
devnull = open('/dev/null', 'w')
subprocess.call(['ldapdelete',
'-x',
'-D', CONF.ldap.user,
'-H', CONF.ldap.url,
'-w', CONF.ldap.password,
'-r', CONF.ldap.suffix],
stderr=devnull)
if CONF.ldap.suffix.startswith('ou='):
tree_dn_attrs = {'objectclass': 'organizationalUnit',
'ou': 'openstack'}
else:
tree_dn_attrs = {'objectclass': ['dcObject', 'organizationalUnit'],
'dc': 'openstack',
'ou': 'openstack'}
create_object(CONF.ldap.suffix, tree_dn_attrs)
create_object(CONF.ldap.user_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Users'})
create_object(CONF.ldap.role_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Roles'})
create_object(CONF.ldap.project_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Projects'})
create_object(CONF.ldap.group_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'UserGroups'})
def config_files(self):
config_files = super(LiveLDAPIdentity, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_liveldap.conf'))
return config_files
def config_overrides(self):
super(LiveLDAPIdentity, self).config_overrides()
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
def test_build_tree(self):
"""Regression test for building the tree names
"""
# logic is different from the fake backend.
user_api = identity_ldap.UserApi(CONF)
self.assertTrue(user_api)
self.assertEqual(user_api.tree_dn, CONF.ldap.user_tree_dn)
def tearDown(self):
tests.TestCase.tearDown(self)
def test_ldap_dereferencing(self):
alt_users_ldif = {'objectclass': ['top', 'organizationalUnit'],
'ou': 'alt_users'}
alt_fake_user_ldif = {'objectclass': ['person', 'inetOrgPerson'],
'cn': 'alt_fake1',
'sn': 'alt_fake1'}
aliased_users_ldif = {'objectclass': ['alias', 'extensibleObject'],
'aliasedobjectname': "ou=alt_users,%s" %
CONF.ldap.suffix}
create_object("ou=alt_users,%s" % CONF.ldap.suffix, alt_users_ldif)
create_object("%s=alt_fake1,ou=alt_users,%s" %
(CONF.ldap.user_id_attribute, CONF.ldap.suffix),
alt_fake_user_ldif)
create_object("ou=alt_users,%s" % CONF.ldap.user_tree_dn,
aliased_users_ldif)
self.config_fixture.config(group='ldap',
query_scope='sub',
alias_dereferencing='never')
self.identity_api = identity_ldap.Identity()
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
'alt_fake1')
self.config_fixture.config(group='ldap',
alias_dereferencing='searching')
self.identity_api = identity_ldap.Identity()
user_ref = self.identity_api.get_user('alt_fake1')
self.assertEqual(user_ref['id'], 'alt_fake1')
self.config_fixture.config(group='ldap', alias_dereferencing='always')
self.identity_api = identity_ldap.Identity()
user_ref = self.identity_api.get_user('alt_fake1')
self.assertEqual(user_ref['id'], 'alt_fake1')
# FakeLDAP does not correctly process filters, so this test can only be
# run against a live LDAP server
def test_list_groups_for_user_filtered(self):
domain = self._get_domain_fixture()
test_groups = []
test_users = []
GROUP_COUNT = 3
USER_COUNT = 2
for x in range(0, USER_COUNT):
new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': domain['id']}
new_user = self.identity_api.create_user(new_user)
test_users.append(new_user)
positive_user = test_users[0]
negative_user = test_users[1]
for x in range(0, USER_COUNT):
group_refs = self.identity_api.list_groups_for_user(
test_users[x]['id'])
self.assertEqual(len(group_refs), 0)
for x in range(0, GROUP_COUNT):
new_group = {'domain_id': domain['id'],
'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
test_groups.append(new_group)
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(len(group_refs), x)
self.identity_api.add_user_to_group(
positive_user['id'],
new_group['id'])
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(len(group_refs), x + 1)
group_refs = self.identity_api.list_groups_for_user(
negative_user['id'])
self.assertEqual(len(group_refs), 0)
self.config_fixture.config(group='ldap', group_filter='(dn=xx)')
self.reload_backends(CONF.identity.default_domain_id)
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(len(group_refs), 0)
group_refs = self.identity_api.list_groups_for_user(
negative_user['id'])
self.assertEqual(len(group_refs), 0)
self.config_fixture.config(group='ldap',
group_filter='(objectclass=*)')
self.reload_backends(CONF.identity.default_domain_id)
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(len(group_refs), GROUP_COUNT)
group_refs = self.identity_api.list_groups_for_user(
negative_user['id'])
self.assertEqual(len(group_refs), 0)
def test_user_enable_attribute_mask(self):
self.config_fixture.config(
group='ldap',
user_enabled_emulation=False,
user_enabled_attribute='employeeType')
super(LiveLDAPIdentity, self).test_user_enable_attribute_mask()
def test_create_project_case_sensitivity(self):
# The attribute used for the live LDAP tests is case insensitive.
def call_super():
(super(LiveLDAPIdentity, self).
test_create_project_case_sensitivity())
self.assertRaises(exception.Conflict, call_super)
def test_create_user_case_sensitivity(self):
# The attribute used for the live LDAP tests is case insensitive.
def call_super():
super(LiveLDAPIdentity, self).test_create_user_case_sensitivity()
self.assertRaises(exception.Conflict, call_super)
def test_project_update_missing_attrs_with_a_falsey_value(self):
# The description attribute doesn't allow an empty value.
def call_super():
(super(LiveLDAPIdentity, self).
test_project_update_missing_attrs_with_a_falsey_value())
self.assertRaises(ldap.INVALID_SYNTAX, call_super)
| MaheshIBM/keystone | keystone/tests/test_ldap_livetest.py | Python | apache-2.0 | 9,051 |
import logging
import ssl
from urllib import request
from bs4 import BeautifulSoup, Tag
from HTMLParseError import HTMLParseError
from LyricParser import LyricParser
class UltimateGuitarInteractor:
export_location = ""
logger = ""
ultimate_guitar_link = ""
main_html = ""
main_header = ""
print_url = ""
print_html = ""
print_header = ""
title = ""
artist = ""
original_transcriber = ""
capo = 0
lyrics = ""
# testing
success = [0, 0, 0, 0]
def __init__(self, link, export_location, logger_name):
self.ultimate_guitar_link = link
self.export_location = export_location
self.logger = logging.getLogger(logger_name)
def run(self):
"""Run a full execution cycle."""
self.main_html, self.main_header = self._get_html_and_header(self.ultimate_guitar_link.get_link())
self.logger.debug("main HTML and main header.")
self.title, self.artist, self.original_transcriber, self.capo = self.get_metadata()
self.logger.debug("title: {0}".format(self.title))
self.logger.debug("artist: {0}".format(self.artist))
if self.original_transcriber != "":
self.logger.debug("original transcriber: {0}".format(self.original_transcriber))
else:
self.logger.warning("No original transcriber found.")
self.logger.debug("capo: {0}".format(str(self.capo)))
self.print_url = self.get_print_url()
self.logger.debug("print url: {0}".format(self.print_url))
self.print_html, self.print_header = self._get_html_and_header(self.print_url)
self.logger.debug("print HTML and print header.")
self.lyrics = self.get_lyrics()
self.logger.debug("lyrics.")
self._export_tex_file()
self.logger.debug("Exported.")
# testing
self._set_success()
def get_metadata(self) -> [str, str, str, int]:
"""Parse the main HTML for the song title and artist, original transcriber username, and capo #."""
original_transcriber = ""
capo = 0
# get the page title for the song title and artist
soup = BeautifulSoup(self.main_html, "html.parser")
page_title_result = soup.findAll('title')[0]
page_title_contents = page_title_result.contents
page_title = str(page_title_contents[0]).lower()
# assume [title] CHORDS (ver x) by [artist]
title = page_title[:page_title.find(" chords")]
artist = page_title[page_title.find("by ") + 3:page_title.find(" @ ultimate-guitar.com")]
t_dtdes = soup.findAll('div', attrs={'class': 't_dtde'})
for t_dtde in t_dtdes:
if len(t_dtde.contents) > 0 and ' fret ' in t_dtde.contents[0]:
capo_string = str(t_dtde.contents[0])
capo = int(capo_string[capo_string.find("fret") - 4:capo_string.find("fret") - 3])
elif len(t_dtde.contents) >= 6 and 'href' in t_dtde.contents[1].attrs and 'http://profile.ultimate-guitar.com/' in t_dtde.contents[1].attrs['href']:
username_tag = t_dtde.contents[1]
username = username_tag.next
original_transcriber = str(username)
return title, artist, original_transcriber, capo
def get_print_url(self):
"""Parse the main HTML for the print url."""
soup = BeautifulSoup(self.main_html, "html.parser")
url_code = ""
url_code_1 = ""
url_code_2 = ""
# method 1
parent_candidates = soup.findAll('div', attrs={"class": "adv-sms-fixed--footer"})
if len(parent_candidates) > 1:
raise IOError
parent = parent_candidates[0]
children = parent.contents
for child in children:
if type(child) == Tag:
attrs = child.attrs
if 'name' in attrs and 'value' in attrs and attrs['name'] == 'id':
url_code_1 = attrs['value']
# method 2
id_candidates = soup.findAll('input', attrs={"name": "id"})
if len(id_candidates) > 1:
raise IOError
id = id_candidates[0]
url_code_2 = id.attrs['value']
if url_code_1 == url_code_2 and url_code_1 != "":
url_code = url_code_1
else:
raise HTMLParseError("Error getting the print url")
return "https://tabs.ultimate-guitar.com/print/{0}&simplified=0&transpose={1}".format(url_code, self.ultimate_guitar_link.get_transposition())
def get_lyrics(self):
"""Parse the print html for lyrics."""
lp = LyricParser(self.print_html)
return lp.get_lyrics()
def get_title_and_artist_string(self):
"""Get a human-readable string containing the title and artist."""
return "{0} by {1}".format(self.title, self.artist)
@staticmethod
def _get_html_and_header(url: str) -> [str, str]:
logging.debug("Getting HTML and header from {0}".format(url))
context = ssl._create_unverified_context() # TODO: make this less sketchy
website = request.urlopen(url, context=context)
html = website.read().decode("utf-8")
header = website.info()
return [html, header]
def _export_tex_file(self):
"""Create a .tex file holding the lyrics."""
if not self.export_location.endswith('/'):
formatted_export_location = self.export_location + '/'
else:
formatted_export_location = self.export_location
formatted_artist = self.artist.lower().replace(' ', '_')
formatted_title = self.title.replace(' ', '_')
file_name = "{0}{1}__{2}.tex".format(formatted_export_location, formatted_artist, formatted_title)
with open(file_name, 'w') as file:
try:
file.write(self.lyrics)
except UnicodeEncodeError:
                self.logger.error("Failed to encode {0}.".format(self.get_title_and_artist_string()))
# testing
def _set_success(self):
self.success[0] = self.title != ""
self.success[1] = self.artist != ""
self.success[2] = self.original_transcriber != ""
self.success[3] = self.capo > 0
# testing
def get_success(self):
return self.success
| BrianMargolis/TranscriptionGenerator | UltimateGuitarInteractor.py | Python | mit | 6,275 |
def hows_the_parrot():
print("He's pining for the fjords!")
hows_the_parrot()
def lumberjack(name):
if name.lower() == 'casey':
print("Casey's a lumberjack and he's OK!")
else:
print("{} sleeps all night and {} works all day!".format(name, name))
lumberjack("Casey")
def lumberjack2(name, pronoun):
print("{}'s a lumberjack and {}'s OK!".format(name, pronoun))
print("{} sleeps all night and {} works all day!".format(pronoun, pronoun))
lumberjack2("Casey", "he")
lumberjack2("Kira", "she")
| CaseyNord/Treehouse | Python Basics/functions_lumberjack.py | Python | mit | 569 |
"""private_mkt will be populated from puppet and placed in this directory"""
from lib.settings_base import *
from mkt.settings import *
from settings_base import *
import private_mkt
SERVER_EMAIL = 'zmarketplacestage@addons.mozilla.org'
DOMAIN = "payments-alt.allizom.org"
SITE_URL = 'https://%s' % DOMAIN
SERVICES_URL = SITE_URL
STATIC_URL = os.getenv('CUSTOM_CDN', 'https://payments-alt-cdn.allizom.org/')
LOCAL_MIRROR_URL = '%s_files' % STATIC_URL
MIRROR_URL = LOCAL_MIRROR_URL
CSP_STATIC_URL = STATIC_URL[:-1]
CSP_IMG_SRC = CSP_IMG_SRC + (CSP_STATIC_URL,)
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (CSP_STATIC_URL,)
CSP_STYLE_SRC = CSP_STYLE_SRC + (CSP_STATIC_URL,)
ADDON_ICON_URL = "%s/%s/%s/images/addon_icon/%%d-%%d.png?modified=%%s" % (STATIC_URL, LANGUAGE_CODE, DEFAULT_APP)
ADDON_ICON_URL = STATIC_URL + 'img/uploads/addon_icons/%s/%s-%s.png?modified=%s'
PREVIEW_THUMBNAIL_URL = (STATIC_URL +
'img/uploads/previews/thumbs/%s/%d.png?modified=%d')
PREVIEW_FULL_URL = (STATIC_URL +
'img/uploads/previews/full/%s/%d.%s?modified=%d')
# paths for uploaded extensions
FILES_URL = STATIC_URL + "%s/%s/downloads/file/%d/%s?src=%s"
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
# paths for uploaded extensions
USERPICS_URL = STATIC_URL + 'img/uploads/userpics/%s/%s/%s.png?modified=%d'
COLLECTION_ICON_URL = STATIC_URL + '/img/uploads/collection_icons/%s/%s.png?m=%s'
MEDIA_URL = STATIC_URL + 'media/'
ADDON_ICONS_DEFAULT_URL = MEDIA_URL + 'img/hub'
ADDON_ICON_BASE_URL = MEDIA_URL + 'img/icons/'
PRODUCT_ICON_URL = STATIC_URL + 'product-icons'
CACHE_PREFIX = 'stage.mkt.%s' % CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
LOG_LEVEL = logging.DEBUG
# The django statsd client to use, see django-statsd for more.
#STATSD_CLIENT = 'django_statsd.clients.moz_heka'
SYSLOG_TAG = "http_app_addons_marketplace_altpay"
SYSLOG_TAG2 = "http_app_addons_marketplacestage_timer"
SYSLOG_CSP = "http_app_addons_marketplacestage_csp"
STATSD_PREFIX = 'marketplace-stage'
## Celery
BROKER_URL = private_mkt.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
WEBAPPS_RECEIPT_KEY = private_mkt.WEBAPPS_RECEIPT_KEY
WEBAPPS_RECEIPT_URL = private_mkt.WEBAPPS_RECEIPT_URL
APP_PREVIEW = True
WEBAPPS_UNIQUE_BY_DOMAIN = True
SENTRY_DSN = private_mkt.SENTRY_DSN
SOLITUDE_HOSTS = ('https://payments-alt-solitude.allizom.org',)
SOLITUDE_OAUTH = {'key': private_mkt.SOLITUDE_OAUTH_KEY,
'secret': private_mkt.SOLITUDE_OAUTH_SECRET}
WEBAPPS_PUBLIC_KEY_DIRECTORY = NETAPP_STORAGE + '/public_keys'
PRODUCT_ICON_PATH = NETAPP_STORAGE + '/product-icons'
DUMPED_APPS_PATH = NETAPP_STORAGE + '/dumped-apps'
DUMPED_USERS_PATH = NETAPP_STORAGE + '/dumped-users'
GOOGLE_ANALYTICS_DOMAIN = 'marketplace.firefox.com'
VALIDATOR_IAF_URLS = ['https://marketplace.firefox.com',
'https://marketplace.allizom.org',
'https://payments-alt.allizom.org',
'https://marketplace-dev.allizom.org',
'https://marketplace-altdev.allizom.org']
if getattr(private_mkt, 'LOAD_TESTING', False):
# mock the authentication and use django_fakeauth for this
AUTHENTICATION_BACKENDS = ('django_fakeauth.FakeAuthBackend',)\
+ AUTHENTICATION_BACKENDS
MIDDLEWARE_CLASSES.insert(
MIDDLEWARE_CLASSES.index('access.middleware.ACLMiddleware'),
'django_fakeauth.FakeAuthMiddleware')
FAKEAUTH_TOKEN = private_mkt.FAKEAUTH_TOKEN
# we are also creating access tokens for OAuth, here are the keys and
# secrets used for them
API_PASSWORD = getattr(private_mkt, 'API_PASSWORD', FAKEAUTH_TOKEN)
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg',)
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
BLUEVIA_SECRET = private_mkt.BLUEVIA_SECRET
#Bug 748403
SIGNING_SERVER = private_mkt.SIGNING_SERVER
SIGNING_SERVER_ACTIVE = True
SIGNING_VALID_ISSUERS = [DOMAIN]
#Bug 793876
SIGNED_APPS_KEY = private_mkt.SIGNED_APPS_KEY
SIGNED_APPS_SERVER_ACTIVE = True
SIGNED_APPS_SERVER = private_mkt.SIGNED_APPS_SERVER
SIGNED_APPS_REVIEWER_SERVER_ACTIVE = True
SIGNED_APPS_REVIEWER_SERVER = private_mkt.SIGNED_APPS_REVIEWER_SERVER
HEKA_CONF = {
'plugins': {'cef': ('heka_cef.cef_plugin:config_plugin', {
'syslog_facility': 'LOCAL4',
# CEF_PRODUCT is defined in settings_base
'syslog_ident': CEF_PRODUCT,
'syslog_priority': 'INFO'
}),
},
'stream': {
'class': 'heka.streams.UdpStream',
'host': splitstrip(private.HEKA_CONF_SENDER_HOST),
'port': private.HEKA_CONF_SENDER_PORT,
},
'logger': 'addons-marketplace-stage',
}
HEKA = client_from_dict_config(HEKA_CONF)
USE_HEKA_FOR_CEF = True
# See mkt/settings.py for more info.
APP_PURCHASE_KEY = DOMAIN
APP_PURCHASE_AUD = DOMAIN
APP_PURCHASE_TYP = 'mozilla-alt/payments/pay/v1'
APP_PURCHASE_SECRET = private_mkt.APP_PURCHASE_SECRET
MONOLITH_PASSWORD = private_mkt.MONOLITH_PASSWORD
# This is mainly for Marionette tests.
WEBAPP_MANIFEST_NAME = 'Marketplace Stage'
ENABLE_API_ERROR_SERVICE = True
NEWRELIC_INI = None
ES_USE_PLUGINS = True
BANGO_BASE_PORTAL_URL = 'https://mozilla.bango.com/login/al.aspx?'
MONOLITH_INDEX = 'mktstage-time_*'
# IARC content ratings.
IARC_ENV = 'test'
IARC_MOCK = False
IARC_PASSWORD = private_mkt.IARC_PASSWORD
IARC_PLATFORM = 'Firefox'
IARC_SERVICE_ENDPOINT = 'https://www.globalratings.com/IARCDEMOService/IARCServices.svc'
IARC_STOREFRONT_ID = 4
IARC_SUBMISSION_ENDPOINT = 'https://www.globalratings.com/IARCDEMORating/Submission.aspx'
IARC_ALLOW_CERT_REUSE = True
PRE_GENERATE_APKS = False
PRE_GENERATE_APK_URL = \
'https://apk-controller.stage.mozaws.net/application.apk'
# Bug 1002569.
PAYMENT_PROVIDERS = ['bango', 'boku']
DEFAULT_PAYMENT_PROVIDER = 'bango'
| jinankjain/zamboni | sites/paymentsalt/settings_mkt.py | Python | bsd-3-clause | 6,014 |
"""
OLI analytics service event tracker backend.
"""
from __future__ import absolute_import
import json
import logging
from urlparse import urljoin
from django.contrib.auth.models import User
from requests_oauthlib import OAuth1Session
from student.models import anonymous_id_for_user
from track.backends import BaseBackend
LOG = logging.getLogger(__name__)
class OLIAnalyticsBackend(BaseBackend):
"""
Transmit events to the OLI analytics service
"""
def __init__(
self,
url=None,
path=None,
key=None,
secret=None,
course_ids=None,
**kwargs
):
super(OLIAnalyticsBackend, self).__init__(**kwargs)
self.url = url
self.path = path
self.key = key
self.secret = secret
# only courses with id in this set will have their data sent
self.course_ids = set()
if course_ids is not None:
self.course_ids = set(course_ids)
self.oauth = OAuth1Session(self.key, client_secret=self.secret)
def send(self, event):
"""
Forward the event to the OLI analytics server
Exact API here: https://docs.google.com/document/d/1ZB-qwP0bV7ko_xJdJNX1PYKvTyYd4I8CBltfac4dlfw/edit?pli=1#
OAuth 1 with nonce and body signing
"""
if not (self.url and self.secret and self.key):
return None
# Only currently passing problem_check events, which are CAPA only
if event.get('event_type') != 'problem_check':
return None
if event.get('event_source') != 'server':
return None
context = event.get('context')
if not context:
return None
course_id = context.get('course_id')
if not course_id or course_id not in self.course_ids:
return None
user_id = context.get('user_id')
if not user_id:
LOG.info('user_id attribute missing from event for OLI service')
return None
event_data = event.get('event')
if not event_data:
LOG.info('event_data attribute missing from event for OLI service')
return None
# Look where it should be for a capa prob.
problem_id = event_data.get('problem_id')
if not problem_id:
# Look where it should be for an xblock.
problem_id = context.get('module').get('usage_key')
if not problem_id:
LOG.info('problem_id attribute missing from event for OLI service')
return None
grade = event_data.get('grade')
if grade is None:
LOG.info('grade attribute missing from event for OLI service')
return None
max_grade = event_data.get('max_grade')
if max_grade is None:
LOG.info('max_grade attribute missing from event for OLI service')
return None
# This is supplied by the StatTutor Xblock.
problem_question_name = event_data.get('problem_question_name')
# This is the student answer in terms of semantic choices.
submission = event_data.get('submission')
# This is the student answers in terms of choice indices.
answers = event_data.get('answers')
timestamp = event.get('time')
if not timestamp:
LOG.info('time attribute missing from event for OLI service')
return None
# put the most expensive operation (DB access) at the end, to not do it needlessly
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
LOG.info('Can not find a user with user_id: %s', user_id)
return None
request_payload_string = json.dumps({
'payload': {
'course_id': course_id,
'resource_id': problem_id,
'user_id': user_id,
'grade': grade,
'max_grade': max_grade,
'timestamp': timestamp.isoformat(),
'problem_question_name': problem_question_name,
'submission': submission,
'answers': answers,
},
})
endpoint = urljoin(self.url, self.path)
try:
response = None
response = self.oauth.put(endpoint, request_payload_string)
status_code = response.status_code
except Exception as error:
LOG.info(
"Unable to send event to OLI analytics service: %s: %s: %s: %s",
endpoint,
request_payload_string,
response,
error,
)
return None
if status_code == 200:
return 'OK'
else:
LOG.info('OLI analytics service returns error status code: %s.', response.status_code)
return 'Error'
| caesar2164/edx-platform | common/djangoapps/track/backends/oli.py | Python | agpl-3.0 | 4,922 |
#!/usr/bin/env python
from blocking import blocking
from sweep import sweep
if __name__ == "__main__":
nsites = 6
'''
do blocking first
'''
blocking(nsites)
print "done blocking"
'''
next do the weep iterations
'''
sweep(nsites)
print "done sweep"
| v1j4y/pyDMRG | src/main.py | Python | gpl-2.0 | 267 |
#!/usr/bin/env python
#coding: utf-8
# Download app icons from zuimeia.com ("Most Beautiful Apps")
import re
import requests
import shutil
for page in range(1,101):
url='http://zuimeia.com/?page='+str(page)+'&platform=1'
r=requests.get(url)
reg=r'alt="([^"]*?) 的 icon" data-original="(http://qstatic.zuimeia.com/[^"]*?\.([^"]*?))"'
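    # each match is a tuple of (app name, icon URL, file extension)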
img_src=re.findall(reg,r.content)
for i in img_src:
print i[0]
img=requests.get(i[1],stream=True)
if img.status_code == 200:
with open('icon/'+i[0]+'.'+i[2],'wb') as f:
shutil.copyfileobj(img.raw,f)
del img | Urinx/SomeCodes | Python/others/zuimei.py | Python | gpl-2.0 | 525 |
"""
This file provides simple functions to calculate the integrated stellar number counts
as a function of limiting magnitude and galactic coordinates.
:requires: NumPy
:requires: matplotlib
:version: 0.1
:author: Sami-Matias Niemi
:contact: s.niemi@ucl.ac.uk
"""
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import numpy as np
import matplotlib.pyplot as plt
def bahcallSoneira(magnitude, longitude, latitude, constants):
"""
Implemented Equation B1 from Bahcall and Soneira 1980 (1980ApJS...44...73B).
Note that the values return do not necessarily agree with the Table 6 of the paper values.
Mostly the integrated number of stars agree within 15%, which is the error quoted
in the paper.
:param magnitude: limiting magnitude
:type magnitude:float
:param longitude: galactic longitude in degrees
:type longitude: float
:param latitude: galactic latitude in degrees
:type latitude: float
:return: Number of stars per square degree
"""
#rename variables for easy reference and convert coordinates to radians
m = magnitude
l = np.deg2rad(longitude)
b = np.deg2rad(latitude)
C1 = constants['C1']
C2 = constants['C2']
beta = constants['beta']
alpha = constants['alpha']
delta = constants['delta']
lam = constants['lam']
eta = constants['eta']
kappa = constants['kappa']
#magnitude dependent values
if m <= 12:
mu = 0.03
gamma = 0.36
elif 12 < m < 20:
mu = 0.0075*(m - 12) + 0.03
gamma = 0.04*(12 - m) + 0.36
else:
mu = 0.09
gamma = 0.04
#position dependency
sigma = 1.45 - 0.2*np.cos(b) * np.cos(l)
#precompute the delta mags
dm = m - constants['mstar']
dm2 = m - constants['mdagger']
#split the equation to two parts
D1 = (C1*10**(beta*dm)) / ((1. + 10**(alpha*dm))**delta) / ((np.sin(b)*(1 - mu/np.tan(b)*np.cos(l)))**(3 - 5*gamma))
D2 = (C2*10**(eta*dm2)) / ((1. + 10**(kappa*dm2))**lam) / ((1 - np.cos(b)*np.cos(l))**sigma)
#final counts
D = D1 + D2
return D
def integratedCountsVband():
"""
Returns constant values for the integrated number counts in the V-band.
:return: constants to be used when calculating the integrated number counts.
:rtype: dict
"""
return dict(C1=925., alpha=-0.132, beta=0.035, delta=3., mstar=15.75,
C2=1050., kappa=-0.18, eta=0.087, lam=2.5, mdagger=17.5)
def _skyProjectionPlot(maglimit, b, l, z, blow, bhigh, llow, lhigh, bnum, lnum):
"""
Generate a sky projection plot.
:param maglimit:
:param b:
:param l:
:param z:
:return:
"""
from kapteyn import maputils
header = {'NAXIS': 2,
'NAXIS1': len(l),
'NAXIS2': len(b),
'CTYPE1': 'GLON',
'CRVAL1': llow,
'CRPIX1': 0,
'CUNIT1': 'deg',
'CDELT1': float(bhigh-blow)/bnum,
'CTYPE2': 'GLAT',
'CRVAL2': blow,
'CRPIX2': 0,
'CUNIT2': 'deg',
'CDELT2': float(lhigh-llow)/lnum}
fig = plt.figure(figsize=(12, 11))
frame1 = fig.add_axes([0.1,0.5,0.85, 0.44])
frame2 = fig.add_axes([0.1,0.07,0.85, 0.4])
#generate image
f = maputils.FITSimage(externalheader=header, externaldata=np.log10(z))
im1 = f.Annotatedimage(frame1)
h = header.copy()
h['CTYPE1'] = 'RA---CAR'
h['CTYPE2'] = 'DEC--CAR'
h['CRVAL1'] = 0
h['CRVAL2'] = 0
# Get an estimate of the new corners
x = [0]*5
y = [0]*5
x[0], y[0] = f.proj.toworld((1, 1))
x[1], y[1] = f.proj.toworld((len(l), 1))
x[2], y[2] = f.proj.toworld((len(l), len(b)))
x[3], y[3] = f.proj.toworld((1, len(b)))
x[4], y[4] = f.proj.toworld((len(l)/2., len(b)))
# Create a dummy object to calculate pixel coordinates
# in the new system. Then we can find the area in pixels
# that corresponds to the area in the sky.
f2 = maputils.FITSimage(externalheader=h)
px, py = f2.proj.topixel((x,y))
pxlim = [int(min(px))-10, int(max(px))+10]
pylim = [int(min(py))-10, int(max(py))+10]
reproj = f.reproject_to(h, pxlim_dst=pxlim, pylim_dst=pylim)
grat1 = im1.Graticule(skyout='Galactic', starty=blow, deltay=10, startx=llow, deltax=20)
colorbar = im1.Colorbar(orientation='horizontal')
colorbar.set_label(label='log10(Stars per sq deg)', fontsize=18)
im1.Image()
im1.plot()
im2 = reproj.Annotatedimage(frame2)
grat2 = im2.Graticule()
im2.Image()
im2.plot()
title = r'Integrated Number Density of Stars $V \leq %.1f$' % (maglimit)
frame1.set_title(title, y=1.02)
plt.savefig('stellarD%i.pdf' % maglimit)
plt.close()
def skyNumbers(maglimit=20, blow=20., bhigh=90., llow=0., lhigh=360., bnum=71, lnum=361, plot=True):
"""
Calculate the integrated stellar number counts in a grid of galactic coordinates.
Plot the results in two projections.
:param maglimit: magnitude limit
:type maglimit: int or float
:param blow: lower limit for the galactic latitude
:type blow: float
:param bhigh: upper limit for the galactic latitude
:type bhigh: float
:param llow: lower limit for the galactic longitude
:type llow: float
    :param lhigh: upper limit for the galactic longitude
:type lhigh: float
:param bnum: number of galactic latitude grid points
:type bnum: int
:param lnum: number of galactic longitude grid points
:type lnum: int
:param plot: whether or not to generate sky coverage plots
:type plot: bool
:return: grid of galactic coordinates and the number of stars in the grid
"""
Nvconst = integratedCountsVband()
b = np.linspace(blow, bhigh, num=bnum)
l = np.linspace(llow, lhigh, num=lnum)
counts = np.vectorize(bahcallSoneira)
ll, bb = np.meshgrid(l, b)
z = counts(maglimit, ll, bb, Nvconst)
#plot
if plot:
_skyProjectionPlot(maglimit, b, l, z, blow, bhigh, llow, lhigh, bnum, lnum)
return l, b, z
if __name__ == '__main__':
#constants for V-band
Nvconst = integratedCountsVband()
skyNumbers(maglimit=10)
skyNumbers(maglimit=15)
skyNumbers(maglimit=18)
skyNumbers(maglimit=20)
skyNumbers(maglimit=22)
skyNumbers(maglimit=24)
skyNumbers(maglimit=26)
skyNumbers(maglimit=29)
#testing
#print bahcallSoneira(22, 90, 20, Nvconst)
#print bahcallSoneira(22, 90, 30, Nvconst)
#print bahcallSoneira(22, 90, 50, Nvconst)
#print bahcallSoneira(22, 90, 90, Nvconst) | sniemi/EuclidVisibleInstrument | sources/stellarNumberCounts.py | Python | bsd-2-clause | 6,889 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2009-TODAY Tech-Receptives(<http://www.techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import op_allocat_division
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mohamedhagag/community-addons | openeducat_erp/op_allocat_division/__init__.py | Python | agpl-3.0 | 1,099 |
from django.contrib import admin
from .models import LastRun
# Last Run site display
class lastRunModelAdmin(admin.ModelAdmin):
"""
Override the default Django Admin website display for backup history table
"""
list_display = [
"component",
"last_run"
]
class Meta:
model = LastRun
admin.site.register(LastRun, lastRunModelAdmin) | faisaltheparttimecoder/EMEARoster | BackgroundTask/admin.py | Python | mit | 381 |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
def FindChrome(src_dir, options):
if options.browser_path:
return options.browser_path
# List of places that chrome could live.
# In theory we should be more careful about what platform we're actually
# building for.
# As currently constructed, this will also hork people who have debug and
# release builds sitting side by side who build locally.
mode = options.mode
chrome_locations = [
'build/%s/chrome.exe' % mode,
'chrome/%s/chrome.exe' % mode,
# Windows Chromium ninja builder
'out/%s/chrome.exe' % mode,
'out/%s/chrome' % mode,
# Mac Chromium make builder
'out/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
# Mac release make builder
'out/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
# Mac Chromium xcode builder
'xcodebuild/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
# Mac release xcode builder
'xcodebuild/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
]
# Pick the first one we find.
for chrome in chrome_locations:
chrome_filename = os.path.join(src_dir, chrome)
if os.path.exists(chrome_filename):
return chrome_filename
  raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
chrome_filename = FindChrome(src_dir, options)
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
if options.enable_pnacl:
pnacl_toolchain = []
else:
pnacl_toolchain = ['--no-pnacl']
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted'] + pnacl_toolchain + ['TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
if options.enable_pnacl:
# TODO(dschuff): remove this when streaming is the default
os.environ['NACL_STREAMING_TRANSLATION'] = 'true'
RunTests('pnacl', cmd + ['bitcode=1'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--enable_pnacl', dest='enable_pnacl', default=-1,
type='int', help='Run pnacl tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
# Set defaults for enabling pnacl.
if options.enable_pnacl == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_pnacl = 1
else:
options.enable_pnacl = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| leiferikb/bitpop-private | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | Python | bsd-3-clause | 11,702 |
import io
import locale
import mimetypes
import sys
import unittest
from test import support
# Tell it we don't know about external files:
mimetypes.knownfiles = []
mimetypes.inited = False
mimetypes._default_mime_types()
class MimeTypesTestCase(unittest.TestCase):
def setUp(self):
self.db = mimetypes.MimeTypes()
def test_default_data(self):
eq = self.assertEqual
eq(self.db.guess_type("foo.html"), ("text/html", None))
eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress"))
eq(self.db.guess_type("foo.tar.bz2"), ("application/x-tar", "bzip2"))
eq(self.db.guess_type("foo.tar.xz"), ("application/x-tar", "xz"))
def test_data_urls(self):
eq = self.assertEqual
guess_type = self.db.guess_type
eq(guess_type("data:,thisIsTextPlain"), ("text/plain", None))
eq(guess_type("data:;base64,thisIsTextPlain"), ("text/plain", None))
eq(guess_type("data:text/x-foo,thisIsTextXFoo"), ("text/x-foo", None))
def test_file_parsing(self):
eq = self.assertEqual
sio = io.StringIO("x-application/x-unittest pyunit\n")
self.db.readfp(sio)
eq(self.db.guess_type("foo.pyunit"),
("x-application/x-unittest", None))
eq(self.db.guess_extension("x-application/x-unittest"), ".pyunit")
def test_non_standard_types(self):
eq = self.assertEqual
# First try strict
eq(self.db.guess_type('foo.xul', strict=True), (None, None))
eq(self.db.guess_extension('image/jpg', strict=True), None)
# And then non-strict
eq(self.db.guess_type('foo.xul', strict=False), ('text/xul', None))
eq(self.db.guess_extension('image/jpg', strict=False), '.jpg')
def test_guess_all_types(self):
eq = self.assertEqual
unless = self.assertTrue
# First try strict. Use a set here for testing the results because if
# test_urllib2 is run before test_mimetypes, global state is modified
# such that the 'all' set will have more items in it.
all = set(self.db.guess_all_extensions('text/plain', strict=True))
unless(all >= set(['.bat', '.c', '.h', '.ksh', '.pl', '.txt']))
# And now non-strict
all = self.db.guess_all_extensions('image/jpg', strict=False)
all.sort()
eq(all, ['.jpg'])
# And now for no hits
all = self.db.guess_all_extensions('image/jpg', strict=True)
eq(all, [])
def test_encoding(self):
getpreferredencoding = locale.getpreferredencoding
self.addCleanup(setattr, locale, 'getpreferredencoding',
getpreferredencoding)
locale.getpreferredencoding = lambda: 'ascii'
filename = support.findfile("mime.types")
mimes = mimetypes.MimeTypes([filename])
exts = mimes.guess_all_extensions('application/vnd.geocube+xml',
strict=True)
self.assertEqual(exts, ['.g3', '.g\xb3'])
@unittest.skipUnless(sys.platform.startswith("win"), "Windows only")
class Win32MimeTypesTestCase(unittest.TestCase):
def setUp(self):
# ensure all entries actually come from the Windows registry
self.original_types_map = mimetypes.types_map.copy()
mimetypes.types_map.clear()
mimetypes.init()
self.db = mimetypes.MimeTypes()
def tearDown(self):
# restore default settings
mimetypes.types_map.clear()
mimetypes.types_map.update(self.original_types_map)
def test_registry_parsing(self):
# the original, minimum contents of the MIME database in the
# Windows registry is undocumented AFAIK.
# Use file types that should *always* exist:
eq = self.assertEqual
eq(self.db.guess_type("foo.txt"), ("text/plain", None))
eq(self.db.guess_type("image.jpg"), ("image/jpeg", None))
eq(self.db.guess_type("image.png"), ("image/png", None))
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, mimetypes)
if __name__ == "__main__":
unittest.main()
| yotchang4s/cafebabepy | src/main/python/test/test_mimetypes.py | Python | bsd-3-clause | 4,294 |
try:
import rasterio
has_rasterio = True
except:
has_rasterio = False
from functools import partial
import os
import dask
from dask.array import store
import numpy as np
threads = int(os.environ.get('GBDX_THREADS', 64))
threaded_get = partial(dask.threaded.get, num_workers=threads)
class rio_writer(object):
def __init__(self, dst):
self.dst = dst
def __setitem__(self, location, chunk):
window = ((location[1].start, location[1].stop),
(location[2].start, location[2].stop))
self.dst.write(chunk, window=window)
def to_geotiff(arr, path='./output.tif', proj=None, spec=None, bands=None, **kwargs):
''' Write out a geotiff file of the image
Args:
path (str): path to write the geotiff file to, default is ./output.tif
proj (str): EPSG string of projection to reproject to
spec (str): if set to 'rgb', write out color-balanced 8-bit RGB tif
bands (list): list of bands to export. If spec='rgb' will default to RGB bands
Returns:
str: path the geotiff was written to'''
assert has_rasterio, "To create geotiff images please install rasterio"
try:
img_md = arr.rda.metadata["image"]
x_size = img_md["tileXSize"]
y_size = img_md["tileYSize"]
except (AttributeError, KeyError):
x_size = kwargs.get("chunk_size", 256)
y_size = kwargs.get("chunk_size", 256)
try:
tfm = kwargs['transform'] if 'transform' in kwargs else arr.affine
except:
tfm = None
dtype = arr.dtype.name if arr.dtype.name != 'int8' else 'uint8'
if spec is not None and spec.lower() == 'rgb':
assert arr.options.get('dra'), 'To write RGB geotiffs, create your image option with `dra=True`'
if bands is None:
bands = arr._rgb_bands
arr = arr[bands,...].astype(np.uint8)
dtype = 'uint8'
else:
if bands is not None:
arr = arr[bands,...]
meta = {
'width': arr.shape[2],
'height': arr.shape[1],
'count': arr.shape[0],
'dtype': dtype,
'driver': 'GTiff',
'transform': tfm
}
if proj is not None:
meta["crs"] = {'init': proj}
if "tiled" in kwargs and kwargs["tiled"]:
meta.update(blockxsize=x_size, blockysize=y_size, tiled="yes")
with rasterio.open(path, "w", **meta) as dst:
writer = rio_writer(dst)
result = store(arr, writer, compute=False)
result.compute(scheduler=threaded_get)
return path
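# Illustrative usage sketch (added; never called here). The image object is
# assumed to come from the gbdxtools image classes and is not defined in this
# module; paths and the projection are example values. Note that spec='rgb'
# requires an image created with dra=True.
def _example_export(image):
    """Write an assumed gbdxtools image out twice: once reprojected to WGS84
    and once as a colour-balanced 8-bit RGB composite."""
    to_geotiff(image, path='./scene.tif', proj='EPSG:4326')
    return to_geotiff(image, path='./scene_rgb.tif', spec='rgb')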
| DigitalGlobe/gbdxtools | gbdxtools/rda/io.py | Python | mit | 2,579 |
'''
Auto Create Input Provider Config Entry for Available MT Hardware (linux only).
===============================================================================
Thanks to Marc Tardif for the probing code, taken from scan-for-mt-device.
The device discovery is done by this provider. However, the reading of
input can be performed by other providers like: hidinput, mtdev and
linuxwacom. mtdev is used prior to other providers. For more
information about mtdev, check :py:class:`~kivy.input.providers.mtdev`.
Here is an example of auto creation::
[input]
# using mtdev
device_%(name)s = probesysfs,provider=mtdev
# using hidinput
device_%(name)s = probesysfs,provider=hidinput
# using mtdev with a match on name
device_%(name)s = probesysfs,provider=mtdev,match=acer
# using hidinput with custom parameters to hidinput (all on one line)
%(name)s = probesysfs,
provider=hidinput,param=min_pressure=1,param=max_pressure=99
# you can also match your wacom touchscreen
touch = probesysfs,match=E3 Finger,provider=linuxwacom,
select_all=1,param=mode=touch
# and your wacom pen
pen = probesysfs,match=E3 Pen,provider=linuxwacom,
select_all=1,param=mode=pen
By default, the ProbeSysfs module enumerates hardware from the /sys/class/input
tree and configures devices that expose the ABS_MT_POSITION_X capability. Some
hardware, such as wacom screens, does not report this capability. You can
prevent this behavior by putting select_all=1 in your config line. Add
use_mouse=1 to also include touchscreen hardware that offers core pointer
functionality.
'''
__all__ = ('ProbeSysfsHardwareProbe', )
import os
from os.path import sep
if 'KIVY_DOC' in os.environ:
ProbeSysfsHardwareProbe = None
else:
import ctypes
from re import match, IGNORECASE
from glob import glob
from subprocess import Popen, PIPE
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.providers.mouse import MouseMotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.config import _is_rpi
EventLoop = None
# See linux/input.h
ABS_MT_POSITION_X = 0x35
_cache_input = None
_cache_xinput = None
class Input(object):
def __init__(self, path):
query_xinput()
self.path = path
@property
def device(self):
base = os.path.basename(self.path)
return os.path.join("/dev", "input", base)
@property
def name(self):
path = os.path.join(self.path, "device", "name")
return read_line(path)
def get_capabilities(self):
path = os.path.join(self.path, "device", "capabilities", "abs")
line = "0"
try:
line = read_line(path)
except (IOError, OSError):
return []
capabilities = []
long_bit = ctypes.sizeof(ctypes.c_long) * 8
for i, word in enumerate(line.split(" ")):
word = int(word, 16)
subcapabilities = [bool(word & 1 << i)
for i in range(long_bit)]
capabilities[:0] = subcapabilities
return capabilities
def has_capability(self, capability):
capabilities = self.get_capabilities()
return len(capabilities) > capability and capabilities[capability]
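        # Worked example (added comment): the kernel exposes the "abs" bitmask
        # as space separated hex words, most significant word first. A 64-bit
        # word with bit 0x35 set (e.g. 0x20000000000000) therefore makes
        # capabilities[ABS_MT_POSITION_X] True, so has_capability() reports
        # the device as a multitouch candidate.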
@property
def is_mouse(self):
return self.device in _cache_xinput
def getout(*args):
try:
return Popen(args, stdout=PIPE).communicate()[0]
except OSError:
return ''
def query_xinput():
global _cache_xinput
if _cache_xinput is None:
_cache_xinput = []
devids = getout('xinput', '--list', '--id-only')
for did in devids.splitlines():
devprops = getout('xinput', '--list-props', did)
evpath = None
for prop in devprops.splitlines():
prop = prop.strip()
if (prop.startswith(b'Device Enabled') and
prop.endswith(b'0')):
evpath = None
break
if prop.startswith(b'Device Node'):
try:
evpath = prop.split('"')[1]
except Exception:
evpath = None
if evpath:
_cache_xinput.append(evpath)
def get_inputs(path):
global _cache_input
if _cache_input is None:
event_glob = os.path.join(path, "event*")
_cache_input = [Input(x) for x in glob(event_glob)]
return _cache_input
def read_line(path):
f = open(path)
try:
return f.readline().strip()
finally:
f.close()
class ProbeSysfsHardwareProbe(MotionEventProvider):
def __new__(self, device, args):
# hack to not return an instance of this provider.
# :)
instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
instance.__init__(device, args)
def __init__(self, device, args):
super(ProbeSysfsHardwareProbe, self).__init__(device, args)
self.provider = 'mtdev'
self.match = None
self.input_path = '/sys/class/input'
self.select_all = True if _is_rpi else False
self.use_mouse = False
self.use_regex = False
self.args = []
args = args.split(',')
for arg in args:
if arg == '':
continue
arg = arg.split('=', 1)
# ensure it's a key = value
if len(arg) != 2:
Logger.error('ProbeSysfs: invalid parameters %s, not'
' key=value format' % arg)
continue
key, value = arg
if key == 'match':
self.match = value
elif key == 'provider':
self.provider = value
elif key == 'use_regex':
self.use_regex = bool(int(value))
elif key == 'select_all':
self.select_all = bool(int(value))
elif key == 'use_mouse':
self.use_mouse = bool(int(value))
elif key == 'param':
self.args.append(value)
else:
Logger.error('ProbeSysfs: unknown %s option' % key)
continue
self.probe()
def should_use_mouse(self):
return (self.use_mouse or
not any(p for p in EventLoop.input_providers
if isinstance(p, MouseMotionEventProvider)))
def probe(self):
global EventLoop
from kivy.base import EventLoop
inputs = get_inputs(self.input_path)
Logger.debug('ProbeSysfs: using probesysfs!')
use_mouse = self.should_use_mouse()
if not self.select_all:
inputs = [x for x in inputs if
x.has_capability(ABS_MT_POSITION_X) and
(use_mouse or not x.is_mouse)]
for device in inputs:
Logger.debug('ProbeSysfs: found device: %s at %s' % (
device.name, device.device))
# must ignore ?
if self.match:
if self.use_regex:
if not match(self.match, device.name, IGNORECASE):
Logger.debug('ProbeSysfs: device not match the'
' rule in config, ignoring.')
continue
else:
if self.match not in device.name:
continue
Logger.info('ProbeSysfs: device match: %s' % device.device)
d = device.device
devicename = self.device % dict(name=d.split(sep)[-1])
provider = MotionEventFactory.get(self.provider)
if provider is None:
                    Logger.info('ProbeSysfs: unable to find provider %s' %
self.provider)
Logger.info('ProbeSysfs: fallback on hidinput')
provider = MotionEventFactory.get('hidinput')
if provider is None:
Logger.critical('ProbeSysfs: no input provider found'
' to handle this device !')
continue
instance = provider(devicename, '%s,%s' % (
device.device, ','.join(self.args)))
if instance:
EventLoop.add_input_provider(instance)
MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)
| LogicalDash/kivy | kivy/input/providers/probesysfs.py | Python | mit | 9,117 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create the tfrecord files necessary for training onsets and frames.
The training files are split in ~20 second chunks by default, the test files
are not split.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import re
from magenta.models.onsets_frames_transcription import split_audio_and_label_data
from magenta.music import audio_io
from magenta.music import midi_io
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('input_dir', None,
'Directory where the un-zipped MAPS files are.')
tf.app.flags.DEFINE_string('output_dir', './',
'Directory where the two output TFRecord files '
'(train and test) will be placed.')
tf.app.flags.DEFINE_integer('min_length', 5, 'minimum segment length')
tf.app.flags.DEFINE_integer('max_length', 20, 'maximum segment length')
tf.app.flags.DEFINE_integer('sample_rate', 16000, 'desired sample rate')
test_dirs = ['ENSTDkCl/MUS', 'ENSTDkAm/MUS']
train_dirs = [
'AkPnBcht/MUS', 'AkPnBsdf/MUS', 'AkPnCGdD/MUS', 'AkPnStgb/MUS',
'SptkBGAm/MUS', 'SptkBGCl/MUS', 'StbgTGd2/MUS'
]
def filename_to_id(filename):
"""Translate a .wav or .mid path to a MAPS sequence id."""
return re.match(r'.*MUS-(.*)_[^_]+\.\w{3}',
os.path.basename(filename)).group(1)
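# Added example: a MAPS file named like "MAPS_MUS-chpn_op7_1_ENSTDkCl.wav"
# maps to the id "chpn_op7_1", so the same piece recorded on different pianos
# shares one id and can be excluded from the train split when it also appears
# in the test split.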
def generate_train_set(exclude_ids):
"""Generate the train TFRecord."""
train_file_pairs = []
for directory in train_dirs:
path = os.path.join(FLAGS.input_dir, directory)
path = os.path.join(path, '*.wav')
wav_files = glob.glob(path)
# find matching mid files
for wav_file in wav_files:
base_name_root, _ = os.path.splitext(wav_file)
mid_file = base_name_root + '.mid'
if filename_to_id(wav_file) not in exclude_ids:
train_file_pairs.append((wav_file, mid_file))
train_output_name = os.path.join(FLAGS.output_dir,
'maps_config2_train.tfrecord')
with tf.python_io.TFRecordWriter(train_output_name) as writer:
for idx, pair in enumerate(train_file_pairs):
print('{} of {}: {}'.format(idx, len(train_file_pairs), pair[0]))
# load the wav data
wav_data = tf.gfile.Open(pair[0], 'rb').read()
# load the midi data and convert to a notesequence
ns = midi_io.midi_file_to_note_sequence(pair[1])
for example in split_audio_and_label_data.process_record(
wav_data, ns, pair[0], FLAGS.min_length, FLAGS.max_length,
FLAGS.sample_rate):
writer.write(example.SerializeToString())
def generate_test_set():
"""Generate the test TFRecord."""
test_file_pairs = []
for directory in test_dirs:
path = os.path.join(FLAGS.input_dir, directory)
path = os.path.join(path, '*.wav')
wav_files = glob.glob(path)
# find matching mid files
for wav_file in wav_files:
base_name_root, _ = os.path.splitext(wav_file)
mid_file = base_name_root + '.mid'
test_file_pairs.append((wav_file, mid_file))
test_output_name = os.path.join(FLAGS.output_dir,
'maps_config2_test.tfrecord')
with tf.python_io.TFRecordWriter(test_output_name) as writer:
for idx, pair in enumerate(test_file_pairs):
print('{} of {}: {}'.format(idx, len(test_file_pairs), pair[0]))
# load the wav data and resample it.
samples = audio_io.load_audio(pair[0], FLAGS.sample_rate)
wav_data = audio_io.samples_to_wav_data(samples, FLAGS.sample_rate)
# load the midi data and convert to a notesequence
ns = midi_io.midi_file_to_note_sequence(pair[1])
example = split_audio_and_label_data.create_example(pair[0], ns, wav_data)
writer.write(example.SerializeToString())
return [filename_to_id(wav) for wav, _ in test_file_pairs]
def main(unused_argv):
test_ids = generate_test_set()
generate_train_set(test_ids)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| jesseengel/magenta | magenta/models/onsets_frames_transcription/onsets_frames_transcription_create_dataset_maps.py | Python | apache-2.0 | 5,250 |
"""Sprite and tile engine.
tilevid, isovid, hexvid are all subclasses of this interface.
Includes support for:
* Foreground Tiles
* Background Tiles
* Sprites
* Sprite-Sprite Collision handling
* Sprite-Tile Collision handling
* Scrolling
* Loading from PGU tile and sprite formats (optional)
* Set rate FPS (optional)
This code was previously known as the King James Version (named after the
Bible of the same name for historical reasons.)
"""
import pygame
from pygame.rect import Rect
from pygame.locals import *
import math
class Sprite:
"""The object used for Sprites.
Arguments:
ishape -- an image, or an image, rectstyle. The rectstyle will
describe the shape of the image, used for collision
detection.
pos -- initial (x,y) position of the Sprite.
Attributes:
rect -- the current position of the Sprite
_rect -- the previous position of the Sprite
groups -- the groups the Sprite is in
agroups -- the groups the Sprite can hit in a collision
hit -- the handler for hits -- hit(g,s,a)
loop -- the loop handler, called once a frame
"""
def __init__(self,ishape,pos):
if not isinstance(ishape, tuple):
ishape = ishape,None
image,shape = ishape
if shape == None:
shape = pygame.Rect(0,0,image.get_width(),image.get_height())
if isinstance(shape, tuple): shape = pygame.Rect(shape)
self.image = image
self._image = self.image
self.shape = shape
self.rect = pygame.Rect(pos[0],pos[1],shape.w,shape.h)
self._rect = pygame.Rect(self.rect)
self.irect = pygame.Rect(pos[0]-self.shape.x,pos[1]-self.shape.y,
image.get_width(),image.get_height())
self._irect = pygame.Rect(self.irect)
self.groups = 0
self.agroups = 0
self.updated = 1
def setimage(self,ishape):
"""Set the image of the Sprite.
Arguments:
ishape -- an image, or an image, rectstyle. The rectstyle will
describe the shape of the image, used for collision detection.
"""
if not isinstance(ishape, tuple):
ishape = ishape,None
image,shape = ishape
if shape == None:
shape = pygame.Rect(0,0,image.get_width(),image.get_height())
if isinstance(shape, tuple):
shape = pygame.Rect(shape)
self.image = image
self.shape = shape
self.rect.w,self.rect.h = shape.w,shape.h
self.irect.w,self.irect.h = image.get_width(),image.get_height()
self.updated = 1
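# Illustrative usage (added sketch; the image file, shape rect and group names
# below are hypothetical):
#
#   vid.images['hero'] = pygame.image.load('hero.png').convert_alpha(), (4, 4, 24, 24)
#   s = Sprite(vid.images['hero'], (96, 64))
#   s.groups = vid.string2groups('player')
#   s.agroups = vid.string2groups('enemy,powerup')
#   s.hit = player_hit          # hit(g, s, a) handler
#   s.loop = player_loop        # called once per frame
#   vid.sprites.append(s)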
class Tile:
"""Tile Object used by TileCollide.
Arguments:
image -- an image for the Tile.
Attributes:
agroups -- the groups the Tile can hit in a collision
hit -- the handler for hits -- hit(g,t,a)
"""
def __init__(self,image=None):
self.image = image
self.agroups = 0
def __setattr__(self,k,v):
if k == 'image' and v != None:
self.image_h = v.get_height()
self.image_w = v.get_width()
self.__dict__[k] = v
class _Sprites(list):
def __init__(self):
list.__init__(self)
self.removed = []
def append(self,v):
list.append(self,v)
v.updated = 1
def remove(self,v):
list.remove(self,v)
v.updated = 1
self.removed.append(v)
class Vid:
"""An engine for rendering Sprites and Tiles.
Attributes:
sprites -- a list of the Sprites to be displayed. You may append and
remove Sprites from it.
images -- a dict for images to be put in.
size -- the width, height in Tiles of the layers. Do not modify.
view -- a pygame.Rect of the viewed area. You may change .x, .y,
etc to move the viewed area around.
bounds -- a pygame.Rect (set to None by default) that sets the bounds
of the viewable area. Useful for setting certain borders
as not viewable.
tlayer -- the foreground tiles layer
clayer -- the code layer (optional)
blayer -- the background tiles layer (optional)
groups -- a hash of group names to group values (32 groups max, as a tile/sprites
membership in a group is determined by the bits in an integer)
"""
def __init__(self):
self.tiles = [None for x in range(0,256)]
self.sprites = _Sprites()
self.images = {} #just a store for images.
self.layers = None
self.size = None
self.view = pygame.Rect(0,0,0,0)
self._view = pygame.Rect(self.view)
self.bounds = None
self.updates = []
self.groups = {}
def resize(self,size,bg=0):
"""Resize the layers.
Arguments:
size -- w,h in Tiles of the layers
bg -- set to 1 if you wish to use both a foreground layer and a
background layer
"""
self.size = size
w,h = size
self.layers = [[[0 for x in range(0,w)] for y in range(0,h)]
for z in range(0,4)]
self.tlayer = self.layers[0]
self.blayer = self.layers[1]
if not bg: self.blayer = None
self.clayer = self.layers[2]
self.alayer = self.layers[3]
self.view.x, self.view.y = 0,0
self._view.x, self.view.y = 0,0
self.bounds = None
self.updates = []
def set(self,pos,v):
"""Set a tile in the foreground to a value.
Use this method to set tiles in the foreground, as it will make
sure the screen is updated with the change. Directly changing
the tlayer will not guarantee updates unless you are using .paint()
Arguments:
pos -- (x,y) of tile
v -- value
"""
if self.tlayer[pos[1]][pos[0]] == v: return
self.tlayer[pos[1]][pos[0]] = v
self.alayer[pos[1]][pos[0]] = 1
self.updates.append(pos)
def get(self,pos):
"""Get the tlayer at pos.
Arguments:
pos -- (x,y) of tile
"""
return self.tlayer[pos[1]][pos[0]]
def paint(self,s):
"""Paint the screen.
Arguments:
screen -- a pygame.Surface to paint to
Returns the updated portion of the screen (all of it)
"""
return []
def update(self,s):
"""Update the screen.
Arguments:
screen -- a pygame.Rect to update
Returns a list of updated rectangles.
"""
self.updates = []
return []
def tga_load_level(self,fname,bg=0):
"""Load a TGA level.
Arguments:
g -- a Tilevid instance
fname -- tga image to load
bg -- set to 1 if you wish to load the background layer
"""
if type(fname) == str: img = pygame.image.load(fname)
else: img = fname
w,h = img.get_width(),img.get_height()
self.resize((w,h),bg)
for y in range(0,h):
for x in range(0,w):
t,b,c,_a = img.get_at((x,y))
self.tlayer[y][x] = t
if bg: self.blayer[y][x] = b
self.clayer[y][x] = c
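        # Added note: each pixel of the level image encodes one map cell -- the
        # red channel picks the foreground tile, green the background tile
        # (kept only when bg=1), blue the code value consumed by run_codes();
        # alpha is unused. tga_save_level() below writes the same encoding out.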
def tga_save_level(self,fname):
"""Save a TGA level.
Arguments:
fname -- tga image to save to
"""
w,h = self.size
img = pygame.Surface((w,h),SWSURFACE,32)
img.fill((0,0,0,0))
for y in range(0,h):
for x in range(0,w):
t = self.tlayer[y][x]
b = 0
if self.blayer:
b = self.blayer[y][x]
c = self.clayer[y][x]
_a = 0
img.set_at((x,y),(t,b,c,_a))
pygame.image.save(img,fname)
def tga_load_tiles(self,fname,size,tdata={}):
"""Load a TGA tileset.
Arguments:
g -- a Tilevid instance
fname -- tga image to load
size -- (w,h) size of tiles in pixels
tdata -- tile data, a dict of tile:(agroups, hit handler, config)
"""
TW,TH = size
if type(fname) == str: img = pygame.image.load(fname).convert_alpha()
else: img = fname
w,h = img.get_width(),img.get_height()
n = 0
for y in range(0,h,TH):
for x in range(0,w,TW):
i = img.subsurface((x,y,TW,TH))
tile = Tile(i)
self.tiles[n] = tile
if n in tdata:
agroups,hit,config = tdata[n]
tile.agroups = self.string2groups(agroups)
tile.hit = hit
tile.config = config
n += 1
def load_images(self,idata):
"""Load images.
Arguments:
idata -- a list of (name, fname, shape)
"""
for name,fname,shape in idata:
self.images[name] = pygame.image.load(fname).convert_alpha(),shape
def run_codes(self,cdata,rect):
"""Run codes.
Arguments:
cdata -- a dict of code:(handler function, value)
rect -- a tile rect of the parts of the layer that should have
their codes run
"""
tw,th = self.tiles[0].image.get_width(),self.tiles[0].image.get_height()
x1,y1,w,h = rect
clayer = self.clayer
t = Tile()
for y in range(y1,y1+h):
for x in range(x1,x1+w):
n = clayer[y][x]
if n in cdata:
fnc,value = cdata[n]
t.tx,t.ty = x,y
t.rect = pygame.Rect(x*tw,y*th,tw,th)
fnc(self,t,value)
def string2groups(self,str):
"""Convert a string to groups."""
if str == None: return 0
return self.list2groups(str.split(","))
def list2groups(self,igroups):
"""Convert a list to groups."""
for s in igroups:
if not s in self.groups:
self.groups[s] = 2**len(self.groups)
v = 0
for s,n in list(self.groups.items()):
if s in igroups: v|=n
return v
def groups2list(self,groups):
"""Convert a groups to a list."""
v = []
for s,n in list(self.groups.items()):
if (n&groups)!=0: v.append(s)
return v
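    # Worked example (added comment): group names get bit values in the order
    # they are first seen, so list2groups(['player', 'enemy']) typically yields
    # groups == {'player': 1, 'enemy': 2} and returns 0b11; string2groups('enemy')
    # then returns 0b10 and groups2list(0b11) gives both names back. A sprite
    # and a tile interact only when (sprite.groups & tile.agroups) != 0.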
def hit(self,x,y,t,s):
tiles = self.tiles
tw,th = tiles[0].image.get_width(),tiles[0].image.get_height()
t.tx = x
t.ty = y
t.rect = Rect(x*tw,y*th,tw,th)
t._rect = t.rect
if hasattr(t,'hit'):
t.hit(self,t,s)
def loop(self):
"""Update and hit testing loop. Run this once per frame."""
self.loop_sprites() #sprites may move
self.loop_tilehits() #sprites move
self.loop_spritehits() #no sprites should move
for s in self.sprites:
s._rect = pygame.Rect(s.rect)
def loop_sprites(self):
as_ = self.sprites[:]
for s in as_:
if hasattr(s,'loop'):
s.loop(self,s)
def loop_tilehits(self):
tiles = self.tiles
tw,th = tiles[0].image.get_width(),tiles[0].image.get_height()
layer = self.layers[0]
as_ = self.sprites[:]
for s in as_:
self._tilehits(s)
def _tilehits(self,s):
tiles = self.tiles
tw,th = tiles[0].image.get_width(),tiles[0].image.get_height()
layer = self.layers[0]
for _z in (0,):
if s.groups != 0:
_rect = s._rect
rect = s.rect
_rectx = _rect.x
_recty = _rect.y
_rectw = _rect.w
_recth = _rect.h
rectx = rect.x
recty = rect.y
rectw = rect.w
recth = rect.h
rect.y = _rect.y
rect.h = _rect.h
hits = []
ct,cb,cl,cr = rect.top,rect.bottom,rect.left,rect.right
#nasty ol loops
                y = ct//th*th
                while y < cb:
                    x = cl//tw*tw
                    yy = y//th
                    while x < cr:
                        xx = x//tw
t = tiles[layer[yy][xx]]
if (s.groups & t.agroups)!=0:
#self.hit(xx,yy,t,s)
d = math.hypot(rect.centerx-(xx*tw+tw/2),
rect.centery-(yy*th+th/2))
hits.append((d,t,xx,yy))
x += tw
y += th
hits.sort()
#if len(hits) > 0: print self.frame,hits
for d,t,xx,yy in hits:
self.hit(xx,yy,t,s)
#switching directions...
_rect.x = rect.x
_rect.w = rect.w
rect.y = recty
rect.h = recth
hits = []
ct,cb,cl,cr = rect.top,rect.bottom,rect.left,rect.right
#nasty ol loops
                y = ct//th*th
                while y < cb:
                    x = cl//tw*tw
                    yy = y//th
                    while x < cr:
                        xx = x//tw
t = tiles[layer[yy][xx]]
if (s.groups & t.agroups)!=0:
d = math.hypot(rect.centerx-(xx*tw+tw/2),
rect.centery-(yy*th+th/2))
hits.append((d,t,xx,yy))
#self.hit(xx,yy,t,s)
x += tw
y += th
hits.sort()
#if len(hits) > 0: print self.frame,hits
for d,t,xx,yy in hits:
self.hit(xx,yy,t,s)
#done with loops
_rect.x = _rectx
_rect.y = _recty
def loop_spritehits(self):
as_ = self.sprites[:]
groups = {}
for n in range(0,31):
groups[1<<n] = []
for s in as_:
g = s.groups
n = 1
while g:
if (g&1)!=0: groups[n].append(s)
g >>= 1
n <<= 1
for s in as_:
if s.agroups!=0:
rect1,rect2 = s.rect,Rect(s.rect)
#if rect1.centerx < 320: rect2.x += 640
#else: rect2.x -= 640
g = s.agroups
n = 1
while g:
if (g&1)!=0:
for b in groups[n]:
if (s != b and (s.agroups & b.groups)!=0
and s.rect.colliderect(b.rect)):
s.hit(self,s,b)
g >>= 1
n <<= 1
def screen_to_tile(self,pos):
"""Convert a screen position to a tile position."""
return pos
def tile_to_screen(self,pos):
"""Convert a tile position to a screen position."""
return pos
| gentooza/Freedom-Fighters-of-Might-Magic | src/gamelib/pgu/vid.py | Python | gpl-3.0 | 15,711 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.logic.bcdToBin import BcdToBin
from hwtSimApi.constants import CLK_PERIOD
def bin_to_bcd(v: int, digits: int):
_v = v
bcd = 0
for i in range(digits):
bcd |= (v % 10) << (i * 4)
v //= 10
assert v == 0, ("Not enough digits", _v, digits)
return bcd
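# Worked example (added for clarity): with digits=3 the value 127 is packed one
# BCD digit per nibble, least significant digit first, so
# bin_to_bcd(127, 3) == 0x127; the DUT is then expected to turn that BCD word
# back into the binary value 127.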
class BcdToBinTC(SimTestCase):
@classmethod
def setUpClass(cls):
cls.u = BcdToBin()
cls.u.BCD_DIGITS = 3
cls.compileSim(cls.u)
def test_0to127(self):
u = self.u
N = 128
u.din._ag.data.extend([bin_to_bcd(i, u.BCD_DIGITS) for i in range(N)])
self.runSim(CLK_PERIOD * 13 * N)
res = u.dout._ag.data
self.assertEqual(len(res), N)
for i, d in enumerate(res):
self.assertValEqual(d, i)
def test_128to255(self):
u = self.u
u.din._ag.data.extend([bin_to_bcd(i, u.BCD_DIGITS) for i in range(128, 256)])
N = 256 - 128
self.runSim(CLK_PERIOD * 13 * N)
res = u.dout._ag.data
self.assertEqual(len(res), N)
for i, d in enumerate(res):
i += 128
self.assertValEqual(d, i)
def test_r_96to150(self):
u = self.u
u.din._ag.data.extend([bin_to_bcd(i, u.BCD_DIGITS) for i in range(96, 150)])
N = 150 - 96
self.randomize(u.din)
self.randomize(u.dout)
self.runSim(CLK_PERIOD * 13 * 2 * N)
res = u.dout._ag.data
self.assertEqual(len(res), N)
for i, d in enumerate(res):
i += 96
self.assertValEqual(d, i)
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(IndexingTC('test_split'))
suite.addTest(unittest.makeSuite(BcdToBinTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| Nic30/hwtLib | hwtLib/logic/bcdToBin_test.py | Python | mit | 1,922 |
"""Monkey patch lame-o vanilla unittest with test skip feature.
From the patch that was never applied (shameful!):
http://bugs.python.org/issue1034053
"""
import time
import unittest
class SkipException(Exception):
pass
def TestResult__init__(self):
self.failures = []
self.errors = []
self.skipped = []
self.testsRun = 0
self.shouldStop = 0
unittest.TestResult.__init__ = TestResult__init__
def TestResult_addSkipped(self, test, err):
"""Called when a test is skipped.
'err' is a tuple of values as returned by sys.exc_info().
"""
self.skipped.append((test, str(err[1])))
unittest.TestResult.addSkipped = TestResult_addSkipped
def TestResult__repr__(self):
return "<%s run=%i errors=%i failures=%i skipped=%i>" % (
unittest._strclass(self.__class__), self.testsRun,
len(self.errors), len(self.failures), len(self.skipped))
unittest.TestResult.__repr__ = TestResult__repr__
class TestCase(unittest.TestCase):
# Yuck, all of run has to be copied for this.
# I don't care about wrapping setUp atm.
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
# Support variable naming differences between 2.4 and 2.6
# Yay for silly variable hiding
try:
testMethodName = self.__testMethodName
exc_info = self.__exc_info
except AttributeError:
testMethodName = self._testMethodName
exc_info = self._exc_info
testMethod = getattr(self, testMethodName)
try:
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
result.addError(self, exc_info())
return
ok = False
try:
testMethod()
ok = True
except self.failureException:
result.addFailure(self, exc_info())
except SkipException:
result.addSkipped(self, exc_info())
except KeyboardInterrupt:
raise
except:
result.addError(self, exc_info())
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, exc_info())
ok = False
if ok:
result.addSuccess(self)
finally:
result.stopTest(self)
def skip(self, msg=None):
"""Skip the test, with the given message."""
raise SkipException(msg)
def skipIf(self, expr, msg=None):
"""Skip the test if the expression is true."""
if expr:
raise SkipException(msg)
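# Illustrative usage (added sketch, names hypothetical): test classes derive
# from this patched TestCase and call skip()/skipIf() inside a test method;
# the patched result/runner classes below then count the test as skipped
# rather than failed.
#
#   class ExampleTest(TestCase):
#       def test_optional_feature(self):
#           self.skipIf(not has_feature, 'feature not available')
#           ...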
def _TextTestResult_addSkipped(self, test, err):
unittest.TestResult.addSkipped(self, test, err)
if self.showAll:
msg = str(err[1])
if msg:
msg = " (" + msg + ")"
self.stream.writeln("SKIPPED" + msg)
elif self.dots:
self.stream.write('S')
unittest._TextTestResult.addSkipped = _TextTestResult_addSkipped
# Bah
def TextTestRunner_run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored, skipped = map(
len, (result.failures, result.errors, result.skipped))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed:
self.stream.write(", ")
self.stream.write("errors=%d" % errored)
if skipped:
self.stream.write(", skipped=%d" % skipped)
self.stream.writeln(")")
else:
if result.skipped:
self.stream.writeln(
"OK (skipped=%d)" % len(result.skipped))
else:
self.stream.writeln("OK")
return result
unittest.TextTestRunner.run = TextTestRunner_run
| joyxu/autotest | tko/parsers/test/unittest_hotfix.py | Python | gpl-2.0 | 4,373 |
from django.db.models.sql.compiler import SQLCompiler
# This monkey patch allows us to write subqueries in the `tables` argument to the
# QuerySet.extra method. For example Foo.objects.all().extra(tables=["(SELECT * FROM Bar) t"])
# See: http://djangosnippets.org/snippets/236/#c3754
_quote_name_unless_alias = SQLCompiler.quote_name_unless_alias
SQLCompiler.quote_name_unless_alias = lambda self, name: name if name.strip().startswith('(') else _quote_name_unless_alias(self, name)
| mdj2/django-arcutils | arcutils/models.py | Python | mit | 484 |
'''
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed, the only constraint stopping you from robbing each of them is that adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.
Example 1:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
Example 2:
Input: [2,7,9,3,1]
Output: 12
Explanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).
Total amount you can rob = 2 + 9 + 1 = 12.
'''
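# Added walk-through of the recurrence on Example 2, nums = [2, 7, 9, 3, 1]:
# best(i) = max(best(i - 1), best(i - 2) + nums[i]) gives 2, 7, 11, 11, 12,
# so Solution().rob([2, 7, 9, 3, 1]) returns 12.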
class Solution:
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # At each house choose the better of two options:
        #   rob it:   loot from houses before the previous one + current house value
        #   skip it:  loot accumulated up to the previous house
        # rob(i) = max(rob(i - 2) + nums[i], rob(i - 1))
curPrev = 0
prev2 = 0
for i in nums:
tmp = curPrev
curPrev = max(curPrev, prev2+i)
prev2 = tmp
return curPrev | Vaibhav/InterviewPrep | LeetCode/Easy/198-House-Robber.py | Python | mit | 1,460 |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: user
#
# Created: 05/08/2012
# Copyright: (c) user 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from igraph import *
def main():
#g = Graph.Read_GraphMLz(r"D:\User's documents\technion\project\workset\6.12.gcc\funcgraphs\.realloc.gml")
#
if 1==0:
g = Graph()
g.add_vertex()
g.add_vertex()
g.add_vertex()
g.add_vertex()
g.add_edge(0,1)
g.add_edge(0,2)
g.add_edge(2,3)
else:
g = read(r"D:\getftp.gml")
g.vs[0]['color'] = 'green'
# g.vs["label"] = g.vs["HstartEA"]
#print g["asd"]
#print g['name']
#g.is
#plot(g)
#g = Graph()
#g.add_vertex(2)
#g.add_edge(0,1)
#g['check'] = 'asd'
# is cool
# bad = "rt_circular","rt"
# good = "grid"
styleList3d = ["drl_3d","grid_3d","kk_3d","fr_3d"]
#"auto","circle","drl"
#styleList =["fr","grid","graphopt","gfr","kk","lgl","mds","sphere","star","sugiyama"]
"""
for plotStyle in ["grid"]:
print plotStyle
plot(g,layout=plotStyle ,bbox = (1600, 1600), margin = 20)
print "DONE"
"""
plot(g,layout="rt" ,root=0,bbox = (1600, 1600), margin = 20)
#plot(g1)
if __name__ == '__main__':
main()
| tech-srl/TRACY | graph_printer.py | Python | epl-1.0 | 1,511 |
# Copyright: (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import bisect
import json
import pkgutil
import re
from ansible import constants as C
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.distro import LinuxDistribution
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
from distutils.version import LooseVersion
from traceback import format_exc
display = Display()
foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND')
class InterpreterDiscoveryRequiredError(Exception):
def __init__(self, message, interpreter_name, discovery_mode):
super(InterpreterDiscoveryRequiredError, self).__init__(message)
self.interpreter_name = interpreter_name
self.discovery_mode = discovery_mode
def __str__(self):
return self.message
def __repr__(self):
# TODO: proper repr impl
return self.message
def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
# interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
# get the system type from uname, and find any random Python that can get us the info we need. For supported
    # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
# and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
# distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
# default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
# FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
if interpreter_name != 'python':
raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
host = task_vars.get('inventory_hostname', 'unknown')
res = None
platform_type = 'unknown'
found_interpreters = [u'/usr/bin/python'] # fallback value
is_auto_legacy = discovery_mode.startswith('auto_legacy')
is_silent = discovery_mode.endswith('_silent')
try:
platform_python_map = C.config.get_config_value('INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
# not all command -v impls accept a list of commands, so we have to call it once per python
command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
# FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
raw_stdout = res.get('stdout', u'')
match = foundre.match(raw_stdout)
if not match:
display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
raise ValueError('unexpected output from Python interpreter discovery')
platform_type = match.groups()[0].lower().strip()
found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
if not found_interpreters:
action._discovery_warnings.append(u'No python interpreters found for host {0} (tried {1})'.format(host, bootstrap_python_list))
# this is lame, but returning None or throwing an exception is uglier
return u'/usr/bin/python'
if platform_type != 'linux':
raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
# FUTURE: respect pipelining setting instead of just if the connection supports it?
if action._connection.has_pipelining:
res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
else:
# FUTURE: implement on-disk case (via script action or ?)
raise NotImplementedError('pipelining support required for extended interpreter discovery')
platform_info = json.loads(res.get('stdout'))
distro, version = _get_linux_distro(platform_info)
if not distro or not version:
raise NotImplementedError('unable to get Linux distribution/version info')
version_map = platform_python_map.get(distro.lower().strip())
if not version_map:
raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro))
platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict')
# provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
if is_auto_legacy:
if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters:
# FIXME: support comments in sivel's deprecation scanner so we can get reminded on this
if not is_silent:
action._discovery_deprecation_warnings.append(dict(
msg=u"Distribution {0} {1} on host {2} should use {3}, but is using "
u"/usr/bin/python for backward compatibility with prior Ansible releases. "
u"A future Ansible release will default to using the discovered platform "
u"python for this host. See {4} for more information"
.format(distro, version, host, platform_interpreter,
get_versioned_doclink('reference_appendices/interpreter_discovery.html')),
version='2.12'))
return u'/usr/bin/python'
if platform_interpreter not in found_interpreters:
if platform_interpreter not in bootstrap_python_list:
# sanity check to make sure we looked for it
if not is_silent:
action._discovery_warnings \
.append(u"Platform interpreter {0} on host {1} is missing from bootstrap list"
.format(platform_interpreter, host))
if not is_silent:
action._discovery_warnings \
.append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the "
u"discovered platform python interpreter was not present. See {5} "
u"for more information."
.format(distro, version, host, platform_interpreter, found_interpreters[0],
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return found_interpreters[0]
return platform_interpreter
except NotImplementedError as ex:
display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host)
except Exception as ex:
if not is_silent:
display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex)))
display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host)
if res and res.get('stderr'):
display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host)
if not is_silent:
action._discovery_warnings \
.append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of "
u"another Python interpreter could change the meaning of that path. See {3} "
u"for more information."
.format(platform_type, host, found_interpreters[0],
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return found_interpreters[0]
def _get_linux_distro(platform_info):
dist_result = platform_info.get('platform_dist_result', [])
if len(dist_result) == 3 and any(dist_result):
return dist_result[0], dist_result[1]
osrelease_content = platform_info.get('osrelease_content')
if not osrelease_content:
return u'', u''
osr = LinuxDistribution._parse_os_release_content(osrelease_content)
return osr.get('id', u''), osr.get('version_id', u'')
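# Fallback order in _get_linux_distro above: prefer the 3-element
# platform.dist()-style tuple when any element is populated, otherwise parse the
# raw /etc/os-release text returned by the discovery script and use its
# id / version_id keys (as lower-cased by LinuxDistribution._parse_os_release_content).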
def _version_fuzzy_match(version, version_map):
# try exact match first
res = version_map.get(version)
if res:
return res
sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()])
find_looseversion = LooseVersion(version)
# slot match; return nearest previous version we're newer than
kpos = bisect.bisect(sorted_looseversions, find_looseversion)
if kpos == 0:
# older than everything in the list, return the oldest version
# TODO: warning-worthy?
return version_map.get(sorted_looseversions[0].vstring)
# TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)?
# return the next-oldest entry that we're newer than...
return version_map.get(sorted_looseversions[kpos - 1].vstring)
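# Worked example for _version_fuzzy_match (values are illustrative, not taken from
# the real platform_python_map): with version_map = {'6': u'/usr/bin/python',
# '8': u'/usr/libexec/platform-python'} and version = '7.6', there is no exact key,
# the sorted LooseVersion keys are ['6', '8'], bisect places '7.6' at position 1,
# and the next-oldest slot is returned: u'/usr/bin/python'. A version older than
# every key (e.g. '5.11') falls back to the oldest entry in the map.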
| azaghal/ansible | lib/ansible/executor/interpreter_discovery.py | Python | gpl-3.0 | 9,872 |
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates sample_info table schema."""
from apache_beam.io.gcp.internal.clients import bigquery
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.libs import partitioning
SAMPLE_ID = 'sample_id'
SAMPLE_NAME = 'sample_name'
FILE_PATH = 'file_path'
INGESTION_DATETIME = 'ingestion_datetime'
SAMPLE_INFO_TABLE_SUFFIX = 'sample_info'
SAMPLE_INFO_TABLE_SCHEMA_FILE_PATH = (
'gcp_variant_transforms/data/schema/sample_info.json')
def generate_schema():
# type: () -> bigquery.TableSchema
schema = bigquery.TableSchema()
schema.fields.append(bigquery.TableFieldSchema(
name=SAMPLE_ID,
type=bigquery_util.TableFieldConstants.TYPE_INTEGER,
mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
description='An Integer that uniquely identifies a sample.'))
schema.fields.append(bigquery.TableFieldSchema(
name=SAMPLE_NAME,
type=bigquery_util.TableFieldConstants.TYPE_STRING,
mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
description=('Name of the sample as we read it from the VCF file.')))
schema.fields.append(bigquery.TableFieldSchema(
name=FILE_PATH,
type=bigquery_util.TableFieldConstants.TYPE_STRING,
mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
description=('Full file path on GCS of the sample.')))
schema.fields.append(bigquery.TableFieldSchema(
name=INGESTION_DATETIME,
type=bigquery_util.TableFieldConstants.TYPE_TIMESTAMP,
mode=bigquery_util.TableFieldConstants.MODE_NULLABLE,
description=('Ingestion datetime (up to current minute) of samples.')))
return schema
def create_sample_info_table(output_table):
full_table_id = bigquery_util.compose_table_name(output_table,
SAMPLE_INFO_TABLE_SUFFIX)
partitioning.create_bq_table(full_table_id,
SAMPLE_INFO_TABLE_SCHEMA_FILE_PATH)
return full_table_id
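# Hypothetical usage sketch (project/dataset names are made up): calling
# create_sample_info_table('my-project:my_dataset.genomes') composes the base table
# name with the 'sample_info' suffix, creates the table from the JSON schema file
# above, and returns the full table id; generate_schema() builds the same four
# NULLABLE columns programmatically for callers that need a TableSchema object.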
| googlegenomics/gcp-variant-transforms | gcp_variant_transforms/libs/sample_info_table_schema_generator.py | Python | apache-2.0 | 2,538 |
#!/usr/bin/python
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from math import fabs
import numpy as np
import cart_analysis_db.io.reader as db
from cart_analysis_db.utils.utilities import *
import pydot
simulation = "L500_NR_0"
mt = db.Simulation(simulation+"_mt", db_dir = "/Users/Kaylea/Data/L500_NR_0")
clusters = mt.get_halo_ids(is_main_halo=True)
halos = mt.get_halo_properties(clusters, ["M_hc", "r_hc"], "1.0005")
for cluster in clusters:
dot_object = pydot.Dot(graph_type='graph')
dot_object.set_node_defaults(shape='circle',fontsize="14")
dot_object.set_edge_defaults(arrowhead = "diamond")
    print(cluster)
z0_mass = halos[halos["id"] == cluster]["M_hc"]
z0_radius = halos[halos["id"] == cluster]["r_hc"]
nodes = {}
edges = []
node_name = "1.0005_%d" % cluster
nodes[node_name] = pydot.Node(name=node_name, style="bold", xlabel=str(cluster), label="", height="1")
tree = mt.get_full_tree(cluster, get_main_line=True)
edge_nodes = [node_name]
for node in tree :
ratio = float(node["r_hc"])/float(z0_radius)
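        # Node size in the rendered graph is scaled by the halo radius relative to
        # the z=0 (aexp 1.0005) radius of this cluster, so progenitors shrink as
        # the tree is traced back in time.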
# if ratio < 0.01 :
# continue
#print "%0.4f_%d" % (node["aexp"], node["id"]), ratio, float(node["num_shared_particles"])/float(node["num_particles"])
this_node_name = "%0.4f_%d" % (node["aexp"], node["id"])
this_node_label = "%0.4f, %d" % (node["aexp"], node["id"])
parent_node_name = "%0.4f_%d" % (node["parent_aexp"], node["parent_id"])
# print this_node_name
skip = False
#exclude very minor mergers from tree
parent_node = tree[np.logical_and(tree["id"] == node["parent_id"],
tree["aexp"] == node["parent_aexp"])]
#exclude minor mergers
# if len(parent_node) == 1 and parent_node["is_main_line"] == 1 and node["is_main_line"] == 0:
# if float(node["M_hc"])/float(parent_node["M_hc"]) < 0.1 :
# print this_node_name, "skipped due to merger ratio"
# continue
#exclude poor matches
# if float(node["num_shared_particles"])/float(node["num_particles"]) < 0.01 :
# print this_node_name, "skipped due to particle ratio"
# skip = True
ratio = str(ratio)
if parent_node_name in edge_nodes :
if node["is_main_line"] == 0 and this_node_name not in nodes.keys():
nodes[this_node_name] = pydot.Node(name=this_node_name, xlabel=str(node["id"]), label="", height=ratio, width=ratio)
elif node["is_main_line"] == 1 :
nodes[this_node_name] = pydot.Node(name=this_node_name, xlabel=this_node_label, label="", height=ratio, width=ratio, style="bold")
# if not skip :
if True:
# print "not skipped", this_node_name
edge_nodes.append(parent_node_name)
edge_nodes.append(this_node_name)
if node["is_main_line"] == 0 :
edges.append(pydot.Edge(nodes[parent_node_name], nodes[this_node_name], weight="1"))
else :
edges.append(pydot.Edge(nodes[parent_node_name], nodes[this_node_name], style="bold", weight="10"))
for node in nodes.values() :
# name = node.get_name().replace('"', '').strip()
# if name in edge_nodes :
dot_object.add_node(node)
for edge in edges :
dot_object.add_edge(edge)
dot_object.write_png('images/full_mergertree_'+str(cluster)+'.png')
# plt.show()
mt.close()
| cavestruz/L500analysis | caps/diagnostics/mergertree/plot_full_merger_trees.py | Python | mit | 3,597 |
'''
Created on Apr 19, 2017
@author: Leo Zhong
'''
import numpy as np
import random
# m denotes the number of examples here, not the number of features
def gradientDescent(x, y, theta, alpha, m, numIterations):
xTrans = x.transpose()
for i in range(0, numIterations):
hypothesis = np.dot(x, theta)
loss = hypothesis - y
# avg cost per example (the 2 in 2*m doesn't really matter here.
# But to be consistent with the gradient, I include it)
cost = np.sum(loss ** 2) / (2 * m)
print("Iteration %d | Cost: %f" % (i, cost))
# avg gradient per example
gradient = np.dot(xTrans, loss) / m
# update
theta = theta - alpha * gradient
return theta
def genData(numPoints, bias, variance):
x = np.zeros(shape=(numPoints, 2))
y = np.zeros(shape=numPoints)
# basically a straight line
for i in range(0, numPoints):
# bias feature
x[i][0] = 1
x[i][1] = i
# our target variable
y[i] = (i + bias) + random.uniform(0, 1) * variance
return x, y
# gen 100 points with a bias of 25 and 10 variance as a bit of noise
x, y = genData(100, 25, 10)
m, n = np.shape(x)
numIterations= 100000
alpha = 0.0005
theta = np.ones(n)
theta = gradientDescent(x, y, theta, alpha, m, numIterations)
print(theta)
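# Rough sanity check (no random seed is fixed, so exact numbers vary): with bias=25
# and variance=10 the targets are approximately y = x + 25 + U(0, 10), so theta
# should converge to roughly [30, 1] -- an intercept near bias plus the mean of the
# uniform noise (~5) and a slope near 1.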
| LeoZ123/Machine-Learning-Practice | Regression_Problem/Losgistic_Regression.py | Python | mit | 1,332 |
# -*- python -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gdb_test import AssertEquals
import gdb_test
def test(gdb):
gdb.Command('break leaf_call')
gdb.ResumeAndExpectStop('continue', 'breakpoint-hit')
result = gdb.Command('-stack-list-frames 0 2')
AssertEquals(result['stack'][0]['frame']['func'], 'leaf_call')
AssertEquals(result['stack'][1]['frame']['func'], 'nested_calls')
AssertEquals(result['stack'][2]['frame']['func'], 'main')
result = gdb.Command('-stack-list-arguments 1 0 1')
AssertEquals(result['stack-args'][0]['frame']['args'][0]['value'], '2')
AssertEquals(result['stack-args'][1]['frame']['args'][0]['value'], '1')
gdb.Command('return')
gdb.ResumeAndExpectStop('finish', 'function-finished')
AssertEquals(gdb.Eval('global_var'), '1')
if __name__ == '__main__':
gdb_test.RunTest(test, 'stack_trace')
| cvsuser-chromium/native_client | tests/gdb/stack_trace.py | Python | bsd-3-clause | 984 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from decimal import Decimal
from weboob.capabilities.bank import Account, Transaction
from weboob.exceptions import BrowserIncorrectPassword, BrowserHTTPError, BrowserUnavailable, ParseError
from weboob.browser import DomainBrowser
__all__ = ['BredBrowser']
class BredBrowser(DomainBrowser):
BASEURL = 'https://www.bred.fr'
def __init__(self, accnum, login, password, *args, **kwargs):
super(BredBrowser, self).__init__(*args, **kwargs)
self.login = login
self.password = password
self.accnum = accnum
def do_login(self, login, password):
r = self.open('/transactionnel/Authentication', data={'identifiant': login, 'password': password})
if 'gestion-des-erreurs/erreur-pwd' in r.url:
raise BrowserIncorrectPassword('Bad login/password.')
if 'gestion-des-erreurs/opposition' in r.url:
raise BrowserIncorrectPassword('Your account is disabled')
if '/pages-gestion-des-erreurs/erreur-technique' in r.url:
            raise BrowserUnavailable('A technical error occurred')
ACCOUNT_TYPES = {'000': Account.TYPE_CHECKING,
'999': Account.TYPE_MARKET,
'011': Account.TYPE_CARD,
'023': Account.TYPE_SAVINGS,
'078': Account.TYPE_SAVINGS,
'080': Account.TYPE_SAVINGS,
'027': Account.TYPE_SAVINGS,
'037': Account.TYPE_SAVINGS,
'730': Account.TYPE_DEPOSIT,
}
def api_open(self, *args, **kwargs):
try:
return super(BredBrowser, self).open(*args, **kwargs)
except BrowserHTTPError:
self.do_login(self.login, self.password)
return super(BredBrowser, self).open(*args, **kwargs)
def get_accounts_list(self):
r = self.api_open('/transactionnel/services/rest/Account/accounts')
for content in r.json()['content']:
if self.accnum != '00000000000' and content['numero'] != self.accnum:
continue
for poste in content['postes']:
a = Account()
a._number = content['numeroLong']
a._nature = poste['codeNature']
a._consultable = poste['consultable']
a.id = '%s.%s' % (a._number, a._nature)
a.type = self.ACCOUNT_TYPES.get(poste['codeNature'], Account.TYPE_UNKNOWN)
if 'numeroDossier' in poste and poste['numeroDossier']:
a._file_number = poste['numeroDossier']
a.id += '.%s' % a._file_number
if poste['postePortefeuille']:
a.label = u'Portefeuille Titres'
a.balance = Decimal(str(poste['montantTitres']['valeur']))
a.currency = poste['montantTitres']['monnaie']['code'].strip()
yield a
if 'libelle' not in poste:
continue
a.label = ' '.join([content['intitule'].strip(), poste['libelle'].strip()])
a.balance = Decimal(str(poste['solde']['valeur']))
a.currency = poste['solde']['monnaie']['code'].strip()
yield a
def get_history(self, account):
if not account._consultable:
raise NotImplementedError()
offset = 0
next_page = True
seen = set()
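        # Page through the operations endpoint 50 records at a time; the loop stops
        # when a page comes back empty (next_page stays False), and the `seen` set
        # detects the API returning the same operation id twice, which is treated
        # as an infinite-loop condition and raised as ParseError.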
while next_page:
r = self.api_open('/transactionnel/services/applications/operations/get/%(number)s/%(nature)s/00/%(currency)s/%(startDate)s/%(endDate)s/%(offset)s/%(limit)s' %
{'number': account._number,
'nature': account._nature,
'currency': account.currency,
'startDate': '2000-01-01',
'endDate': date.today().strftime('%Y-%m-%d'),
'offset': offset,
'limit': 50
})
next_page = False
offset += 50
transactions = []
for op in reversed(r.json()['content']['operations']):
next_page = True
t = Transaction()
if op['id'] in seen:
raise ParseError('There are several transactions with the same ID, probably an infinite loop')
t.id = op['id']
seen.add(t.id)
t.amount = Decimal(str(op['montant']))
t.date = date.fromtimestamp(op.get('dateDebit', op.get('dateOperation'))/1000)
t.rdate = date.fromtimestamp(op.get('dateOperation', op.get('dateDebit'))/1000)
t.vdate = date.fromtimestamp(op.get('dateValeur', op.get('dateDebit', op.get('dateOperation')))/1000)
if 'categorie' in op:
t.category = op['categorie']
t.label = op['libelle']
t.raw = ' '.join([op['libelle']] + op['details'])
transactions.append(t)
# Transactions are unsorted
for t in sorted(transactions, key=lambda t: t.rdate, reverse=True):
yield t
| sputnick-dev/weboob | modules/bred/bred/browser.py | Python | agpl-3.0 | 5,970 |
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
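# Illustrative behaviour of scaled_cost (the scenario is made up): if a batch's
# targets are mostly zero (appliance off) with only a few on-samples, a plain MSE
# would be dominated by the easy off-samples. Here the squared error is split by
# the on-mask (t > THRESHOLD) and off-mask (t <= THRESHOLD), each mask is averaged
# separately (ifelse guards the NaN an empty mask's mean would produce), and the
# two means are averaged so both regimes contribute equally to the loss.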
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[1, 0.5, 2, 10, 10],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1520,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
input_padding=1,
include_diff=False,
clip_appliance_power=False
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| JackKelly/neuralnilm_prototype | scripts/e189.py | Python | mit | 6,646 |
# Copyright 2016 Fortinet Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import br_tun
class FortinetOVSTunnelBridge(br_tun.OVSTunnelBridge):
"""Fortinetopenvswitch agent tunnel bridge specific logic."""
def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address):
self.add_flow(table=self.dvr_process_table_id,
priority=3,
dl_vlan=vlan_tag,
proto='arp',
dl_src=vif_mac,
actions="resubmit(,%s)" % self.dvr_process_next_table_id)
super(FortinetOVSTunnelBridge, self).install_dvr_process(
vlan_tag, vif_mac, dvr_mac_address)
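        # This ARP flow is installed at priority 3 before delegating to the parent
        # class, apparently so ARP frames from the router port on this VLAN are
        # resubmitted to the next DVR processing table rather than hitting the
        # default upstream handling; the base-class flow priorities live upstream
        # in neutron and are not shown here.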
| samsu/networking-fortinet | networking_fortinet/agent/l2/openvswitch/br_tun.py | Python | apache-2.0 | 1,287 |
#!/usr/bin/env python3
from modules.pastafari.libraries.task import Task
from settings import config
import unittest, os
class TestTask(unittest.TestCase):
def test_task(self):
        # You need to have defined the config.server_test variable
task=Task(config.server_test)
file_path='modules/pastafari/tests/scripts/alive.sh'
task.files=[[file_path, 0o750]]
task.commands_to_execute=[[file_path, '']]
task.delete_files=[file_path]
task.delete_directories=['modules/pastafari/tests']
self.assertTrue(task.exec())
| paramecio/pastafari | tests/tasktest.py | Python | gpl-2.0 | 652 |
from random import choice
import fauxfactory
import pytest
from manageiq_client.filters import Q
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE
from cfme.rest.gen_data import vm as _vm
from cfme.utils.log_validator import LogValidator
from cfme.utils.rest import assert_response
from cfme.utils.rest import delete_resources_from_collection
from cfme.utils.rest import delete_resources_from_detail
from cfme.utils.rest import query_resource_attributes
from cfme.utils.wait import wait_for
from cfme.utils.wait import wait_for_decorator
pytestmark = [
test_requirements.rest,
pytest.mark.provider(classes=[InfraProvider], selector=ONE),
pytest.mark.usefixtures('setup_provider')
]
@pytest.fixture(scope='function')
def vm(request, provider, appliance):
vm_name = _vm(request, provider, appliance)
return appliance.rest_api.collections.vms.get(name=vm_name)
@pytest.mark.tier(3)
def test_query_vm_attributes(vm, soft_assert):
"""Tests access to VM attributes using /api/vms.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Infra
caseimportance: high
initialEstimate: 1/4h
"""
outcome = query_resource_attributes(vm)
for failure in outcome.failed:
# BZ 1546995
soft_assert(False, '{} "{}": status: {}, error: `{}`'.format(
failure.type, failure.name, failure.response.status_code, failure.error))
@pytest.mark.tier(2)
@pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection'])
def test_vm_scan(appliance, vm, from_detail):
"""Tests running VM scan using REST API.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Infra
caseimportance: high
initialEstimate: 1/3h
"""
if from_detail:
response = vm.action.scan()
else:
response, = appliance.rest_api.collections.vms.action.scan(vm)
assert_response(appliance)
@wait_for_decorator(timeout='5m', delay=5, message='REST running VM scan finishes')
def _finished():
response.task.reload()
if 'error' in response.task.status.lower():
pytest.fail(f'Error when running scan vm method: `{response.task.message}`')
return response.task.state.lower() == 'finished'
@pytest.mark.tier(3)
@pytest.mark.meta(automates=[1833362, 1428250])
@pytest.mark.parametrize("from_detail", [True, False], ids=["from_detail", "from_collection"])
@pytest.mark.parametrize("attribute", ["name", "description"])
def test_edit_vm(request, vm, appliance, from_detail, attribute):
"""Tests edit VMs using REST API.
Testing BZ 1428250.
Metadata:
test_flag: rest
Bugzilla:
1428250
1833362
Polarion:
assignee: pvala
casecomponent: Infra
caseimportance: high
initialEstimate: 1/4h
"""
request.addfinalizer(vm.action.delete)
payload = {attribute: fauxfactory.gen_alphanumeric(15, start=f"Edited-{attribute}-")}
if from_detail:
edited = vm.action.edit(**payload)
assert_response(appliance)
else:
edited = appliance.rest_api.collections.vms.action.edit({**payload, **vm._ref_repr()})
assert_response(appliance)
edited = edited[0]
record, __ = wait_for(
lambda: appliance.rest_api.collections.vms.find_by(**payload) or False,
num_sec=100,
delay=5,
)
vm.reload()
assert getattr(vm, attribute) == getattr(edited, attribute) == getattr(record[0], attribute)
@pytest.mark.tier(3)
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_vm_from_detail(vm, method):
"""
Polarion:
assignee: pvala
initialEstimate: 1/4h
casecomponent: Infra
"""
delete_resources_from_detail([vm], method=method, num_sec=300, delay=10)
@pytest.mark.tier(3)
def test_delete_vm_from_collection(vm):
"""
Polarion:
assignee: pvala
initialEstimate: 1/4h
casecomponent: Infra
"""
delete_resources_from_collection([vm], not_found=True, num_sec=300, delay=10)
@pytest.mark.tier(1)
@pytest.mark.ignore_stream("5.10")
@pytest.mark.meta(automates=[1684681])
@pytest.mark.provider(
classes=[InfraProvider],
selector=ONE,
required_fields=[["cap_and_util", "capandu_vm"]],
)
def test_filtering_vm_with_multiple_ips(appliance, provider):
"""
Polarion:
assignee: pvala
caseimportance: high
casecomponent: Rest
initialEstimate: 1/4h
setup:
1. Add a provider.
testSteps:
1. Select a VM with multiple IP addresses and note one ipaddress.
2. Send a GET request with the noted ipaddress.
GET /api/vms?expand=resources&attributes=ipaddresses&filter[]=ipaddresses=':ipaddr'
expectedResults:
1.
2. Selected VM must be present in the resources sent by response.
Bugzilla:
1684681
"""
# 1
vm = appliance.collections.infra_vms.instantiate(
provider.data["cap_and_util"]["capandu_vm"], provider
)
# 2
result = appliance.rest_api.collections.vms.filter(
Q("ipaddresses", "=", choice(vm.all_ip_addresses))
)
assert_response(appliance)
assert vm.name in [resource.name for resource in result.resources]
@pytest.mark.meta(automates=[1581853])
@pytest.mark.tier(3)
@pytest.mark.provider(classes=[InfraProvider], selector=ONE)
def test_database_wildcard_should_work_and_be_included_in_the_query(appliance, request, provider):
""" Database wildcard should work and be included in the query
Bugzilla:
1581853
Polarion:
assignee: pvala
casecomponent: Rest
testtype: functional
initialEstimate: 1/4h
startsin: 5.10
testSteps:
1. Create a VM with some name, for e.g test-25-xyz.
2. Filter VM with wild character and substring of the name, for e.g. "%25%"
expectedResults:
1. VM is created successfully.
2. VM is obtained without any error.
"""
vm_name = _vm(
request, provider, appliance, name=fauxfactory.gen_alpha(start="test-25-", length=12)
)
with LogValidator(
"/var/www/miq/vmdb/log/production.log", failure_patterns=[".*FATAL.*"]
).waiting(timeout=20):
result = appliance.rest_api.collections.vms.filter(Q("name", "=", "%25%"))
assert result.subcount
assert vm_name in [vm.name for vm in result.resources]
def test_vm_disk_subcollection(appliance, vm, provider):
""" Test querying VM disks via API
Polarion:
assignee: pvala
casecomponent: Rest
testtype: functional
initialEstimate: 1/4h
setup:
1. Provision a VM.
testSteps:
1. Query disks of the VM via REST and compare it with UI and database data.
"""
ui_vm = appliance.collections.infra_vms.instantiate(vm.name, provider)
config = ui_vm.configuration
filenames = [disk.filename for disk in vm.disks.all]
view = ui_vm.load_details()
# config does not show CD-ROM disk, and thus num_disks returns 1 disk less
assert (
int(view.entities.summary("Datastore Allocation Summary").get_text_of("Number of Disks"))
== len(vm.disks)
== (config.num_disks + 1)
)
assert all([disk.filename in filenames for disk in config.disks])
| nachandr/cfme_tests | cfme/tests/infrastructure/test_vm_rest.py | Python | gpl-2.0 | 7,581 |
"find all paths from start to goal in graph"
def search(start, goal, graph):
solns = []
generate([start], goal, solns, graph) # collect paths
solns.sort(key=lambda x: len(x)) # sort by path length
return solns
def generate(path, goal, solns, graph):
state = path[-1]
if state == goal: # found goal here
solns.append(path) # change solns in-place
else: # check all arcs here
for arc in graph[state]: # skip cycles on path
if arc not in path:
generate(path + [arc], goal, solns, graph)
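# Small illustrative example (the graph is hypothetical, not from gtestfunc): with
# graph = {'A': ['B', 'D'], 'B': ['C'], 'C': ['D'], 'D': []}, calling
# search('A', 'D', graph) explores both branches and returns the paths sorted
# shortest-first: [['A', 'D'], ['A', 'B', 'C', 'D']].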
if __name__ == '__main__':
import gtestfunc
gtestfunc.tests(search)
| simontakite/sysadmin | pythonscripts/programmingpython/Dstruct/Classics/gsearch1.py | Python | gpl-2.0 | 814 |